/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"encoding/gob"
	"errors"
	"fmt"
	"io"
	"strings"
	"sync"
	"time"

	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/color"
	"github.com/minio/minio/pkg/console"
)

type listPathOptions struct {
	// ID of the listing.
	// This will be used to persist the list.
	ID string

	// Bucket of the listing.
	Bucket string

	// Directory inside the bucket.
	BaseDir string

	// Scan/return only content with prefix.
	Prefix string

	// FilterPrefix will return only results with this prefix when scanning.
	// Should never contain a slash.
	// Prefix should still be set.
	FilterPrefix string

	// Marker to resume listing.
	// The response will be the first entry AFTER this object name.
	Marker string

	// Limit the number of results.
	Limit int

	// The number of disks to ask. Special values:
	// 0 uses the default number of disks.
	// -1 uses at least 50% of disks, or at least the default number.
	AskDisks int

	// InclDeleted will keep all entries where the latest version is a delete marker.
	InclDeleted bool

	// Scan recursively.
	// If false, only the main directory will be scanned.
	// Should always be true if Separator is not SlashSeparator.
	Recursive bool

	// Separator to use.
	Separator string

	// Create indicates that the lister should not attempt to load an existing cache.
	Create bool

	// CurrentCycle indicates the current bloom cycle.
	// Will be used if a new scan is started.
	CurrentCycle uint64

	// OldestCycle indicates the oldest cycle acceptable.
	OldestCycle uint64

	// Include pure directories.
	IncludeDirectories bool

	// discardResult will not persist the cache to storage.
	// When the initial results are returned, the listing will be canceled.
	discardResult bool
}

func init() {
	gob.Register(listPathOptions{})
}
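
// A minimal illustrative sketch (not part of the original file) of how a
// caller might fill listPathOptions for a non-recursive, "/"-delimited
// listing. All literal values below are placeholders, not defaults.
//
//	o := listPathOptions{
//		ID:                 mustGetUUID(),
//		Bucket:             "mybucket",
//		BaseDir:            "photos/",
//		Prefix:             "photos/2020/",
//		Separator:          slashSeparator,
//		Limit:              maxObjectList,
//		IncludeDirectories: true,
//	}
//	o.SetFilter() // optionally narrow the scan; see SetFilter below.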

// newMetacache constructs a new metacache from the options.
func (o listPathOptions) newMetacache() metacache {
	return metacache{
		id:           o.ID,
		bucket:       o.Bucket,
		root:         o.BaseDir,
		recursive:    o.Recursive,
		status:       scanStateStarted,
		error:        "",
		started:      UTCNow(),
		lastHandout:  UTCNow(),
		lastUpdate:   UTCNow(),
		ended:        time.Time{},
		startedCycle: o.CurrentCycle,
		endedCycle:   0,
		dataVersion:  metacacheStreamVersion,
		filter:       o.FilterPrefix,
	}
}

func (o *listPathOptions) debugf(format string, data ...interface{}) {
	if serverDebugLog {
		console.Debugf(format+"\n", data...)
	}
}

func (o *listPathOptions) debugln(data ...interface{}) {
	if serverDebugLog {
		console.Debugln(data...)
	}
}

// gatherResults will collect all results on the input channel and filter results according to the options.
// Caller should close the channel when done.
// The returned function will return the results once there are enough or the input is closed.
func (o *listPathOptions) gatherResults(in <-chan metaCacheEntry) func() (metaCacheEntriesSorted, error) {
	var resultsDone = make(chan metaCacheEntriesSorted)
	// Copy so we can mutate
	resCh := resultsDone
	resErr := io.EOF

	go func() {
		var results metaCacheEntriesSorted
		for entry := range in {
			if resCh == nil {
				// past limit
				continue
			}
			if !o.IncludeDirectories && entry.isDir() {
				continue
			}
			o.debugln("gather got:", entry.name)
			if o.Marker != "" && entry.name <= o.Marker {
				o.debugln("pre marker")
				continue
			}
			if !strings.HasPrefix(entry.name, o.Prefix) {
				o.debugln("not in prefix")
				continue
			}
			if !o.Recursive && !entry.isInDir(o.Prefix, o.Separator) {
				o.debugln("not in dir", o.Prefix, o.Separator)
				continue
			}
			if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() {
				o.debugln("latest is delete marker")
				continue
			}
			if o.Limit > 0 && results.len() >= o.Limit {
				// We have enough and we have more.
				// Do not return io.EOF
				if resCh != nil {
					resErr = nil
					resCh <- results
					resCh = nil
				}
				continue
			}
			o.debugln("adding...")
			results.o = append(results.o, entry)
		}
		if resCh != nil {
			resErr = io.EOF
			resCh <- results
		}
	}()
	return func() (metaCacheEntriesSorted, error) {
		return <-resultsDone, resErr
	}
}
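
// Illustrative sketch (assumed wiring, not from the original file): the
// returned fetch function blocks until either o.Limit entries have been
// gathered or the input channel is closed, and reports io.EOF only when the
// input was fully drained, so a caller can distinguish a truncated page from
// a complete listing.
//
//	in := make(chan metaCacheEntry, 100)
//	fetch := o.gatherResults(in)
//	go func() {
//		defer close(in)
//		// ... feed entries from a scan into in ...
//	}()
//	entries, err := fetch() // err == io.EOF: listing is complete, not truncated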

// updateMetacacheListing will update the metacache listing.
func (o *listPathOptions) updateMetacacheListing(m metacache, rpc *peerRESTClient) (metacache, error) {
	if rpc == nil {
		return localMetacacheMgr.updateCacheEntry(m)
	}
	return rpc.UpdateMetacacheListing(context.Background(), m)
}

const metacachePrefix = ".metacache"

func metacachePrefixForID(bucket, id string) string {
	return pathJoin(bucketMetaPrefix, bucket, metacachePrefix, id)
}

// objectPath returns the object path of the cache.
func (o *listPathOptions) objectPath() string {
	return pathJoin(metacachePrefixForID(o.Bucket, o.ID), "block.s2")
}
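
// For illustration (path shape only; an assumed example, with bucketMetaPrefix
// being "buckets" at the time of writing): a listing with Bucket "mybucket"
// and ID "deadbeef" persists its cache stream at
//
//	buckets/mybucket/.metacache/deadbeef/block.s2
//
// inside minioMetaBucket.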

// SetFilter sets a filter prefix for the scan, derived from Prefix and
// BaseDir, when it is safe to narrow the listing.
func (o *listPathOptions) SetFilter() {
	switch {
	case metacacheSharePrefix:
		return
	case o.CurrentCycle != o.OldestCycle:
		// We have a clean bloom filter
		return
	case o.Prefix == o.BaseDir:
		// No additional prefix
		return
	}
	// Remove basedir.
	o.FilterPrefix = strings.TrimPrefix(o.Prefix, o.BaseDir)
	// Remove leading and trailing slashes.
	o.FilterPrefix = strings.Trim(o.FilterPrefix, slashSeparator)

	if strings.Contains(o.FilterPrefix, slashSeparator) {
		// Sanity check, should not happen.
		o.FilterPrefix = ""
	}
}
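
// Worked example (values assumed for illustration, and assuming
// metacacheSharePrefix is false and the bloom cycles match): with BaseDir
// "a/b/" and Prefix "a/b/c", the scan can be narrowed, so SetFilter leaves
// FilterPrefix == "c". With Prefix "a/b/c/d" the trimmed remainder "c/d"
// still contains a slash, so the sanity check resets FilterPrefix to "".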

// filter will apply the options and return the number of objects requested by the limit.
// Will return io.EOF if there are no more entries with the same filter.
// The last entry can be used as a marker to resume the listing.
func (r *metacacheReader) filter(o listPathOptions) (entries metaCacheEntriesSorted, err error) {
	// Forward to prefix, if any
	err = r.forwardTo(o.Prefix)
	if err != nil {
		return entries, err
	}
	if o.Marker != "" {
		err = r.forwardTo(o.Marker)
		if err != nil {
			return entries, err
		}
		next, err := r.peek()
		if err != nil {
			return entries, err
		}
		if next.name == o.Marker {
			err := r.skip(1)
			if err != nil {
				return entries, err
			}
		}
	}
	o.debugln("forwarded to ", o.Prefix, "marker:", o.Marker, "sep:", o.Separator)

	// Filter
	if !o.Recursive {
		entries.o = make(metaCacheEntries, 0, o.Limit)
		pastPrefix := false
		err := r.readFn(func(entry metaCacheEntry) bool {
			if o.Prefix != "" && !strings.HasPrefix(entry.name, o.Prefix) {
				// We are past the prefix, don't continue.
				pastPrefix = true
				return false
			}
			if !o.IncludeDirectories && entry.isDir() {
				return true
			}
			if !entry.isInDir(o.Prefix, o.Separator) {
				return true
			}
			if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() {
				return entries.len() < o.Limit
			}
			entries.o = append(entries.o, entry)
			return entries.len() < o.Limit
		})
		if (err != nil && err.Error() == io.EOF.Error()) || pastPrefix || r.nextEOF() {
			return entries, io.EOF
		}
		return entries, err
	}

	// We should not need to filter more.
	return r.readN(o.Limit, o.InclDeleted, o.IncludeDirectories, o.Prefix)
}

func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOptions) (entries metaCacheEntriesSorted, err error) {
	r, err := er.metaCache.Open(pathJoin(minioMetaBucket, o.objectPath()), time.Now().Add(-time.Hour))
	if err != nil {
		return entries, io.EOF
	}

	tmp, err := newMetacacheReader(r)
	if err != nil {
		return entries, err
	}

	e, err := tmp.filter(o)
	entries.o = append(entries.o, e.o...)
	if o.Limit > 0 && entries.len() > o.Limit {
		entries.truncate(o.Limit)
		return entries, nil
	}

	if err == nil {
		// We stopped within the listing, we are done for now...
		return entries, nil
	}

	if !errors.Is(err, io.EOF) {
		logger.LogIf(ctx, err)
	}

	return entries, err
}

// Will return io.EOF if continuing would not yield more results.
func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions) (entries metaCacheEntriesSorted, err error) {
	o.debugf(color.Green("listPath:")+" with options: %#v", o)

	// See if we have the listing stored.
	if !o.Create && !o.discardResult {
		entries, err := er.streamMetadataParts(ctx, o)
		if IsErr(err, []error{
			nil,
			context.Canceled,
			context.DeadlineExceeded,
		}...) {
			// Expected good errors we don't need to return error.
			return entries, nil
		}

		if !errors.Is(err, io.EOF) { // io.EOF is expected and should be returned but no need to log it.
			// Log and return errors on unexpected errors.
			logger.LogIf(ctx, err)
		}

		return entries, err
	}

	meta := o.newMetacache()
	rpc := globalNotificationSys.restClientFromHash(o.Bucket)
	var metaMu sync.Mutex

	o.debugln(color.Green("listPath:")+" scanning bucket:", o.Bucket, "basedir:", o.BaseDir, "prefix:", o.Prefix, "marker:", o.Marker)

	// Disconnect from call above, but cancel on exit.
	ctx, cancel := context.WithCancel(GlobalContext)
	// We need to ask disks.
	disks := er.getOnlineDisks()

	defer func() {
		o.debugln(color.Green("listPath:")+" returning:", entries.len(), "err:", err)
		if err != nil && !errors.Is(err, io.EOF) {
			go func(err string) {
				metaMu.Lock()
				if meta.status != scanStateError {
					meta.error = err
					meta.status = scanStateError
				}
				meta, _ = o.updateMetacacheListing(meta, rpc)
				metaMu.Unlock()
			}(err.Error())
			cancel()
		}
	}()

	askDisks := o.AskDisks
	listingQuorum := askDisks - 1
	// Special case: ask all disks if the drive count is 4
	if askDisks == -1 || er.setDriveCount == 4 {
		askDisks = len(disks) // with 'strict' quorum list on all online disks.
		listingQuorum = getReadQuorum(er.setDriveCount)
	}

	if len(disks) < askDisks {
		err = InsufficientReadQuorum{}
		logger.LogIf(ctx, fmt.Errorf("listPath: Insufficient disks, %d of %d needed are available", len(disks), askDisks))
		cancel()
		return
	}

	// Select askDisks random disks.
	if len(disks) > askDisks {
		disks = disks[:askDisks]
	}

	// Create output for our results.
	var cacheCh chan metaCacheEntry
	if !o.discardResult {
		cacheCh = make(chan metaCacheEntry, metacacheBlockSize)
	}

	// Create filter for results.
	filterCh := make(chan metaCacheEntry, 100)
	filteredResults := o.gatherResults(filterCh)
	closeChannels := func() {
		if !o.discardResult {
			close(cacheCh)
		}
		close(filterCh)
	}

	// Cancel listing on return if non-saved list.
	if o.discardResult {
		defer cancel()
	}

	go func() {
		defer cancel()
		// Save continuous updates
		go func() {
			var err error
			ticker := time.NewTicker(10 * time.Second)
			defer ticker.Stop()
			var exit bool
			for !exit {
				select {
				case <-ticker.C:
				case <-ctx.Done():
					exit = true
				}
				metaMu.Lock()
				meta.endedCycle = intDataUpdateTracker.current()
				meta, err = o.updateMetacacheListing(meta, rpc)
				if meta.status == scanStateError {
					logger.LogIf(ctx, err)
					cancel()
					exit = true
				}
				metaMu.Unlock()
			}
		}()

		wc := er.metaCache.Create(pathJoin(minioMetaBucket, o.objectPath()))

		var bw *metacacheBlockWriter
		// Don't save single object listings.
		if !o.discardResult {
			// Write results to disk.
			bw = newMetacacheBlockWriter(cacheCh, func(b *metacacheBlock) error {
				n, err := wc.Write(b.data)
				if err != nil {
					metaMu.Lock()
					if meta.error == "" {
						meta.status = scanStateError
						meta.error = err.Error()
					}
					metaMu.Unlock()
					cancel()
					return err
				}
				if n != len(b.data) {
					metaMu.Lock()
					if meta.error == "" {
						meta.status = scanStateError
						meta.error = io.ErrShortWrite.Error()
					}
					metaMu.Unlock()
					cancel()
					return io.ErrShortWrite
				}
				o.debugln(color.Green("listPath:")+" saving block to", o.objectPath())
				return nil
			})
		}

		// How to resolve results.
		resolver := metadataResolutionParams{
			dirQuorum: listingQuorum,
			objQuorum: listingQuorum,
			bucket:    o.Bucket,
		}

		err := listPathRaw(ctx, listPathRawOptions{
			disks:        disks,
			bucket:       o.Bucket,
			path:         o.BaseDir,
			recursive:    o.Recursive,
			filterPrefix: o.FilterPrefix,
			minDisks:     listingQuorum,
			agreed: func(entry metaCacheEntry) {
				if !o.discardResult {
					cacheCh <- entry
				}
				filterCh <- entry
			},
			partial: func(entries metaCacheEntries, nAgreed int, errs []error) {
				// Results Disagree :-(
				entry, ok := entries.resolve(&resolver)
				if ok {
					if !o.discardResult {
						cacheCh <- *entry
					}
					filterCh <- *entry
				}
			},
		})

		metaMu.Lock()
		if err != nil {
			meta.status = scanStateError
			meta.error = err.Error()
		}
		// Save success
		if meta.error == "" {
			meta.status = scanStateSuccess
			meta.endedCycle = intDataUpdateTracker.current()
		}

		meta, _ = o.updateMetacacheListing(meta, rpc)
		metaMu.Unlock()

		closeChannels()
		if !o.discardResult {
			if err := bw.Close(); err != nil {
				metaMu.Lock()
				meta.error = err.Error()
				meta.status = scanStateError
				meta, _ = o.updateMetacacheListing(meta, rpc)
				metaMu.Unlock()
			}
			if err := wc.Close(); err != nil {
				metaMu.Lock()
				meta.error = err.Error()
				meta.status = scanStateError
				meta, _ = o.updateMetacacheListing(meta, rpc)
				metaMu.Unlock()
			}
		}
	}()

	return filteredResults()
}
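
// Illustrative pagination sketch (assumed caller, not part of the original
// file): each call returns up to o.Limit entries; io.EOF signals that
// continuing would not yield more results, and the last returned name is the
// marker for the next page.
//
//	for {
//		page, err := er.listPath(ctx, o)
//		if err != nil && !errors.Is(err, io.EOF) {
//			return err
//		}
//		// ... consume page.entries() ...
//		if errors.Is(err, io.EOF) || page.len() == 0 {
//			break
//		}
//		o.Marker = page.entries()[page.len()-1].name
//		o.Create = false // subsequent pages read the persisted cache
//	}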

type listPathRawOptions struct {
	disks        []StorageAPI
	bucket, path string
	recursive    bool

	// Only return results with this prefix.
	filterPrefix string

	// Forward to this prefix before returning results.
	forwardTo string

	// Minimum number of good disks to continue.
	// An error will be returned if fewer than this many disks remain without errors.
	minDisks       int
	reportNotFound bool

	// Callbacks with results:
	// If set to nil, it will not be called.

	// agreed is called if all disks agreed.
	agreed func(entry metaCacheEntry)

	// partial will be called when there is disagreement between disks.
	// If a disk did not return any result, but also hasn't errored,
	// its entry will be empty and its error will be nil.
	partial func(entries metaCacheEntries, nAgreed int, errs []error)

	// finished will be called when all streams have finished and
	// more than one disk returned an error.
	// Will not be called if everything operates as expected.
	finished func(errs []error)
}
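
// Illustrative sketch (assumed values, not from the original file) of driving
// listPathRaw directly with callbacks; compare the wiring in listPath above.
//
//	err := listPathRaw(ctx, listPathRawOptions{
//		disks:     er.getOnlineDisks(),
//		bucket:    "mybucket",
//		path:      "prefix/",
//		recursive: true,
//		minDisks:  1,
//		agreed:    func(entry metaCacheEntry) { /* all disks returned the same entry */ },
//		partial: func(entries metaCacheEntries, nAgreed int, errs []error) {
//			// Disks disagreed; resolve entries before use.
//		},
//	})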

// listPathRaw will list a path on the provided drives.
// See listPathRawOptions on how results are delivered.
// Directories are always returned.
// Cache will be bypassed.
// Context cancellation will be respected but may take a while to take effect.
func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
	disks := opts.disks
	if len(disks) == 0 {
		return fmt.Errorf("listPathRaw: 0 drives provided")
	}

	// Disconnect from call above, but cancel on exit.
	ctx, cancel := context.WithCancel(GlobalContext)
	defer cancel()

	askDisks := len(disks)
	readers := make([]*metacacheReader, askDisks)
	for i := range disks {
		r, w := io.Pipe()
		d := disks[i]
		readers[i], err = newMetacacheReader(r)
		if err != nil {
			return err
		}
		// Send request to each disk.
		go func() {
			werr := d.WalkDir(ctx, WalkDirOptions{
				Bucket:         opts.bucket,
				BaseDir:        opts.path,
				Recursive:      opts.recursive,
				ReportNotFound: opts.reportNotFound,
				FilterPrefix:   opts.filterPrefix,
				ForwardTo:      opts.forwardTo,
			}, w)
			w.CloseWithError(werr)
			if werr != io.EOF && werr != nil && werr.Error() != errFileNotFound.Error() && werr.Error() != errVolumeNotFound.Error() {
				logger.LogIf(ctx, werr)
			}
		}()
	}

	topEntries := make(metaCacheEntries, len(readers))
	errs := make([]error, len(readers))
	for {
		// Get the top entry from each
		var current metaCacheEntry
		var atEOF, fnf, hasErr, agree int
		for i := range topEntries {
			topEntries[i] = metaCacheEntry{}
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		for i, r := range readers {
			if errs[i] != nil {
				hasErr++
				continue
			}
			entry, err := r.peek()
			switch err {
			case io.EOF:
				atEOF++
				continue
			case nil:
			default:
				if err.Error() == errFileNotFound.Error() {
					atEOF++
					fnf++
					continue
				}
				if err.Error() == errVolumeNotFound.Error() {
					atEOF++
					fnf++
					continue
				}
				hasErr++
				errs[i] = err
				continue
			}
			// If no current, add it.
			if current.name == "" {
				topEntries[i] = entry
				current = entry
				agree++
				continue
			}
			// If exact match, we agree.
			if current.matches(&entry, opts.bucket) {
				topEntries[i] = entry
				agree++
				continue
			}
			// If only the name matches we didn't agree, but add it for resolution.
			if entry.name == current.name {
				topEntries[i] = entry
				continue
			}
			// We got different entries
			if entry.name > current.name {
				continue
			}
			// We got a new, better current.
			// Clear existing entries.
			for i := range topEntries[:i] {
				topEntries[i] = metaCacheEntry{}
			}
			agree = 1
			current = entry
			topEntries[i] = entry
		}

		// Stop if we exceed number of bad disks
		if hasErr > len(disks)-opts.minDisks && hasErr > 0 {
			if opts.finished != nil {
				opts.finished(errs)
			}
			var combinedErr []string
			for i, err := range errs {
				if err != nil {
					combinedErr = append(combinedErr, fmt.Sprintf("disk %d returned: %s", i, err))
				}
			}
			return errors.New(strings.Join(combinedErr, ", "))
		}

		// Break if all at EOF or error.
		if atEOF+hasErr == len(readers) {
			if hasErr > 0 && opts.finished != nil {
				opts.finished(errs)
			}
			break
		}
		if fnf == len(readers) {
			return errFileNotFound
		}
		if agree == len(readers) {
			// Everybody agreed
			for _, r := range readers {
				r.skip(1)
			}
			if opts.agreed != nil {
				opts.agreed(current)
			}
			continue
		}
		if opts.partial != nil {
			opts.partial(topEntries, agree, errs)
		}
		// Skip the inputs we used.
		for i, r := range readers {
			if topEntries[i].name != "" {
				r.skip(1)
			}
		}
	}
	return nil
}