2021-04-18 15:41:13 -04:00
|
|
|
// Copyright (c) 2015-2021 MinIO, Inc.
|
|
|
|
//
|
|
|
|
// This file is part of MinIO Object Storage stack
|
|
|
|
//
|
|
|
|
// This program is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Affero General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// This program is distributed in the hope that it will be useful
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Affero General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Affero General Public License
|
|
|
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2020-10-28 12:18:35 -04:00
|
|
|
|
|
|
|
package cmd
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2021-07-05 18:34:41 -04:00
|
|
|
"context"
|
2022-07-04 09:40:46 -04:00
|
|
|
"errors"
|
2020-10-28 12:18:35 -04:00
|
|
|
"os"
|
2022-09-30 18:44:21 -04:00
|
|
|
"path"
|
2020-10-28 12:18:35 -04:00
|
|
|
"sort"
|
|
|
|
"strings"
|
2021-07-05 18:34:41 -04:00
|
|
|
|
2021-09-02 20:45:30 -04:00
|
|
|
"github.com/minio/minio/internal/logger"
|
2021-07-05 18:34:41 -04:00
|
|
|
"github.com/minio/pkg/console"
|
2020-10-28 12:18:35 -04:00
|
|
|
)
|
|
|
|
|
|
|
|
// metaCacheEntry is an object or a directory within an unknown bucket.
type metaCacheEntry struct {
	// name is the full name of the object including prefixes
	name string

	// Metadata. If none is present it is not an object but only a prefix.
	// Entries without metadata will only be present in non-recursive scans.
	metadata []byte

	// cached contains the metadata if decoded.
	// Populated lazily by xlmeta(); nil until then.
	cached *xlMetaV2

	// Indicates the entry can be reused and only one reference to metadata is expected.
	reusable bool
}
|
|
|
|
|
|
|
|
// isDir returns if the entry is representing a prefix directory.
|
|
|
|
func (e metaCacheEntry) isDir() bool {
|
2021-07-05 18:34:41 -04:00
|
|
|
return len(e.metadata) == 0 && strings.HasSuffix(e.name, slashSeparator)
|
2020-10-28 12:18:35 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// isObject returns if the entry is representing an object.
|
|
|
|
func (e metaCacheEntry) isObject() bool {
|
|
|
|
return len(e.metadata) > 0
|
|
|
|
}
|
|
|
|
|
2021-12-08 20:34:52 -05:00
|
|
|
// isObjectDir returns if the entry is representing an object__XL_DIR__
|
|
|
|
func (e metaCacheEntry) isObjectDir() bool {
|
|
|
|
return len(e.metadata) > 0 && strings.HasSuffix(e.name, slashSeparator)
|
|
|
|
}
|
|
|
|
|
2020-10-28 12:18:35 -04:00
|
|
|
// hasPrefix returns whether an entry has a specific prefix.
// Plain string prefix check against the full (bucket-relative) name.
func (e metaCacheEntry) hasPrefix(s string) bool {
	return strings.HasPrefix(e.name, s)
}
|
|
|
|
|
2021-12-02 14:29:16 -05:00
|
|
|
// matches returns if the entries have the same versions.
// If strict is false we allow signatures to mismatch.
// The returned 'prefer' is the entry considered most authoritative,
// even when the entries do not fully match.
func (e *metaCacheEntry) matches(other *metaCacheEntry, strict bool) (prefer *metaCacheEntry, matches bool) {
	// Two nil entries trivially match.
	if e == nil && other == nil {
		return nil, true
	}
	// A single nil side cannot match; prefer the non-nil entry.
	if e == nil {
		return other, false
	}
	if other == nil {
		return e, false
	}

	// Name should match...
	if e.name != other.name {
		// Prefer the lexically smaller name (sort order).
		if e.name < other.name {
			return e, false
		}
		return other, false
	}

	// Directories only match other directories; prefer a directory if present.
	if other.isDir() || e.isDir() {
		if e.isDir() {
			return e, other.isDir() == e.isDir()
		}
		return other, other.isDir() == e.isDir()
	}
	// Decode both sides (shallow); failure on either side means no match.
	eVers, eErr := e.xlmeta()
	oVers, oErr := other.xlmeta()
	if eErr != nil || oErr != nil {
		return nil, false
	}

	// check both fileInfo's have same number of versions, if not skip
	// the `other` entry.
	if len(eVers.versions) != len(oVers.versions) {
		// Prefer the side with the newest modtime.
		eTime := eVers.latestModtime()
		oTime := oVers.latestModtime()
		if !eTime.Equal(oTime) {
			if eTime.After(oTime) {
				return e, false
			}
			return other, false
		}
		// Tiebreak on version count.
		if len(eVers.versions) > len(oVers.versions) {
			return e, false
		}
		return other, false
	}

	// Check if each version matches...
	for i, eVer := range eVers.versions {
		oVer := oVers.versions[i]
		if eVer.header != oVer.header {
			// Non-strict mode tolerates signature-only differences;
			// remember which side sorts first as the preferred one.
			if !strict && eVer.header.matchesNotStrict(oVer.header) {
				if prefer == nil {
					if eVer.header.sortsBefore(oVer.header) {
						prefer = e
					} else {
						prefer = other
					}
				}
				continue
			}
			// Hard mismatch: keep an earlier preference if one was set.
			if prefer != nil {
				return prefer, false
			}

			if eVer.header.sortsBefore(oVer.header) {
				return e, false
			}
			return other, false
		}
	}
	// If we match, return e
	if prefer == nil {
		prefer = e
	}
	return prefer, true
}
|
|
|
|
|
2020-10-28 12:18:35 -04:00
|
|
|
// isInDir returns whether the entry is in the dir when considering the separator.
|
|
|
|
func (e metaCacheEntry) isInDir(dir, separator string) bool {
|
|
|
|
if len(dir) == 0 {
|
|
|
|
// Root
|
|
|
|
idx := strings.Index(e.name, separator)
|
|
|
|
return idx == -1 || idx == len(e.name)-len(separator)
|
|
|
|
}
|
|
|
|
ext := strings.TrimPrefix(e.name, dir)
|
|
|
|
if len(ext) != len(e.name) {
|
|
|
|
idx := strings.Index(ext, separator)
|
|
|
|
// If separator is not found or is last entry, ok.
|
|
|
|
return idx == -1 || idx == len(ext)-len(separator)
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// isLatestDeletemarker returns whether the latest version is a delete marker.
// If metadata is NOT versioned false will always be returned.
// If v2 and UNABLE to load metadata true will be returned.
func (e *metaCacheEntry) isLatestDeletemarker() bool {
	// Fast path: metadata already decoded.
	if e.cached != nil {
		if len(e.cached.versions) == 0 {
			// No versions at all is treated as deleted.
			return true
		}
		// versions[0] is the latest version.
		return e.cached.versions[0].header.Type == DeleteType
	}
	// Non-versioned (legacy) metadata cannot carry delete markers.
	if !isXL2V1Format(e.metadata) {
		return false
	}
	// Try the indexed (cheap, no full decode) representation first.
	if meta, _, err := isIndexedMetaV2(e.metadata); meta != nil {
		return meta.IsLatestDeleteMarker()
	} else if err != nil {
		// Undecodable v2 metadata: err on the side of "deleted".
		return true
	}
	// Fall back...
	xlMeta, err := e.xlmeta()
	if err != nil || len(xlMeta.versions) == 0 {
		return true
	}
	return xlMeta.versions[0].header.Type == DeleteType
}
|
|
|
|
|
2023-01-30 12:13:53 -05:00
|
|
|
// isAllFreeVersions returns if all objects are free versions.
|
|
|
|
// If metadata is NOT versioned false will always be returned.
|
|
|
|
// If v2 and UNABLE to load metadata true will be returned.
|
|
|
|
func (e *metaCacheEntry) isAllFreeVersions() bool {
|
|
|
|
if e.cached != nil {
|
|
|
|
if len(e.cached.versions) == 0 {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
for _, v := range e.cached.versions {
|
|
|
|
if !v.header.FreeVersion() {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if !isXL2V1Format(e.metadata) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if meta, _, err := isIndexedMetaV2(e.metadata); meta != nil {
|
|
|
|
return meta.AllHidden(false)
|
|
|
|
} else if err != nil {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
// Fall back...
|
|
|
|
xlMeta, err := e.xlmeta()
|
|
|
|
if err != nil || len(xlMeta.versions) == 0 {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
// Check versions..
|
|
|
|
for _, v := range e.cached.versions {
|
|
|
|
if !v.header.FreeVersion() {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2020-10-28 12:18:35 -04:00
|
|
|
// fileInfo returns the decoded metadata.
// If entry is a directory it is returned as that.
// If versioned the latest version will be returned.
func (e *metaCacheEntry) fileInfo(bucket string) (FileInfo, error) {
	// Directories need no metadata decode; synthesize a dir FileInfo.
	if e.isDir() {
		return FileInfo{
			Volume: bucket,
			Name:   e.name,
			Mode:   uint32(os.ModeDir),
		}, nil
	}
	// Prefer the already decoded representation when available.
	if e.cached != nil {
		if len(e.cached.versions) == 0 {
			// This special case is needed to handle xlMeta.versions == 0
			return FileInfo{
				Volume:   bucket,
				Name:     e.name,
				Deleted:  true,
				IsLatest: true,
				ModTime:  timeSentinel1970,
			}, nil
		}
		return e.cached.ToFileInfo(bucket, e.name, "", false)
	}
	// Decode straight from the raw metadata.
	return getFileInfo(e.metadata, bucket, e.name, "", false)
}
|
|
|
|
|
|
|
|
// xlmeta returns the decoded metadata.
|
|
|
|
// This should not be called on directories.
|
|
|
|
func (e *metaCacheEntry) xlmeta() (*xlMetaV2, error) {
|
|
|
|
if e.isDir() {
|
|
|
|
return nil, errFileNotFound
|
|
|
|
}
|
2020-10-28 12:18:35 -04:00
|
|
|
if e.cached == nil {
|
2021-08-26 17:06:04 -04:00
|
|
|
if len(e.metadata) == 0 {
|
|
|
|
// only happens if the entry is not found.
|
|
|
|
return nil, errFileNotFound
|
|
|
|
}
|
2021-12-02 14:29:16 -05:00
|
|
|
var xl xlMetaV2
|
|
|
|
err := xl.LoadOrConvert(e.metadata)
|
2020-10-28 12:18:35 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-12-02 14:29:16 -05:00
|
|
|
e.cached = &xl
|
2020-10-28 12:18:35 -04:00
|
|
|
}
|
|
|
|
return e.cached, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// fileInfoVersions returns the metadata as FileInfoVersions.
// If entry is a directory it is returned as that.
func (e *metaCacheEntry) fileInfoVersions(bucket string) (FileInfoVersions, error) {
	// Directories get a single synthetic directory "version".
	if e.isDir() {
		return FileInfoVersions{
			Volume: bucket,
			Name:   e.name,
			Versions: []FileInfo{
				{
					Volume: bucket,
					Name:   e.name,
					Mode:   uint32(os.ModeDir),
				},
			},
		}, nil
	}
	// Too small gains to reuse cache here.
	return getFileInfoVersions(e.metadata, bucket, e.name)
}
|
|
|
|
|
|
|
|
// metaCacheEntries is a slice of metacache entries.
// It is not guaranteed to be sorted; see sort()/isSorted().
type metaCacheEntries []metaCacheEntry
|
|
|
|
|
|
|
|
// less function for sorting.
|
|
|
|
func (m metaCacheEntries) less(i, j int) bool {
|
|
|
|
return m[i].name < m[j].name
|
|
|
|
}
|
|
|
|
|
|
|
|
// sort entries by name.
|
|
|
|
// m is sorted and a sorted metadata object is returned.
|
|
|
|
// Changes to m will also be reflected in the returned object.
|
|
|
|
func (m metaCacheEntries) sort() metaCacheEntriesSorted {
|
|
|
|
if m.isSorted() {
|
|
|
|
return metaCacheEntriesSorted{o: m}
|
|
|
|
}
|
|
|
|
sort.Slice(m, m.less)
|
|
|
|
return metaCacheEntriesSorted{o: m}
|
|
|
|
}
|
|
|
|
|
|
|
|
// isSorted returns whether the objects are sorted.
// This is usually orders of magnitude faster than actually sorting.
func (m metaCacheEntries) isSorted() bool {
	return sort.SliceIsSorted(m, m.less)
}
|
|
|
|
|
|
|
|
// shallowClone will create a shallow clone of the array objects,
// but object metadata will not be cloned (entries share metadata byte slices).
func (m metaCacheEntries) shallowClone() metaCacheEntries {
	dst := make(metaCacheEntries, len(m))
	copy(dst, m)
	return dst
}
|
|
|
|
|
|
|
|
// metadataResolutionParams holds the tunables used by
// metaCacheEntries.resolve when reconciling per-disk entries.
type metadataResolutionParams struct {
	dirQuorum int // Number if disks needed for a directory to 'exist'.
	objQuorum int // Number of disks needed for an object to 'exist'.

	// An optimization request only an 'n' amount of versions from xl.meta
	// to avoid resolving all versions to figure out the latest 'version'
	// for ListObjects, ListObjectsV2
	requestedVersions int

	bucket string // Name of the bucket. Used for generating cached fileinfo.
	strict bool   // Versions must match exactly, including all metadata.

	// Reusable slice for resolution
	candidates [][]xlMetaV2ShallowVersion
}
|
|
|
|
|
2021-07-02 12:54:00 -04:00
|
|
|
// resolve multiple entries.
// entries are resolved by majority, then if tied by mod-time and versions.
// Names must match on all entries in m.
func (m metaCacheEntries) resolve(r *metadataResolutionParams) (selected *metaCacheEntry, ok bool) {
	if len(m) == 0 {
		return nil, false
	}

	dirExists := 0
	// Ensure candidate slice has enough capacity; reuse across calls.
	if cap(r.candidates) < len(m) {
		r.candidates = make([][]xlMetaV2ShallowVersion, 0, len(m))
	}
	r.candidates = r.candidates[:0]
	objsAgree := 0
	objsValid := 0
	for i := range m {
		entry := &m[i]
		// Empty entry
		if entry.name == "" {
			continue
		}

		// Directories are counted separately toward dirQuorum.
		if entry.isDir() {
			dirExists++
			selected = entry
			continue
		}

		// Get new entry metadata,
		// shallow decode.
		xl, err := entry.xlmeta()
		if err != nil {
			// Not-found is expected on some disks; only log real errors.
			if !errors.Is(err, errFileNotFound) {
				logger.LogIf(GlobalContext, err)
			}
			continue
		}
		objsValid++

		// Add all valid to candidates.
		r.candidates = append(r.candidates, xl.versions)

		// We select the first object we find as a candidate and see if all match that.
		// This is to quickly identify if all agree.
		if selected == nil {
			selected = entry
			objsAgree = 1
			continue
		}
		// Names match, check meta...
		if prefer, ok := entry.matches(selected, r.strict); ok {
			selected = prefer
			objsAgree++
			continue
		}
	}

	// Return dir entries, if enough...
	if selected != nil && selected.isDir() && dirExists >= r.dirQuorum {
		return selected, true
	}

	// If we would never be able to reach read quorum.
	if objsValid < r.objQuorum {
		return nil, false
	}

	// If all objects agree.
	if selected != nil && objsAgree == objsValid {
		return selected, true
	}

	// If cached is nil we shall skip the entry.
	if selected.cached == nil {
		return nil, false
	}

	// Merge if we have disagreement.
	// Create a new merged result.
	selected = &metaCacheEntry{
		name:     selected.name,
		reusable: true,
		cached:   &xlMetaV2{metaV: selected.cached.metaV},
	}
	selected.cached.versions = mergeXLV2Versions(r.objQuorum, r.strict, r.requestedVersions, r.candidates...)
	if len(selected.cached.versions) == 0 {
		return nil, false
	}

	// Reserialize
	var err error
	selected.metadata, err = selected.cached.AppendTo(metaDataPoolGet())
	if err != nil {
		logger.LogIf(context.Background(), err)
		return nil, false
	}
	return selected, true
}
|
|
|
|
|
2020-12-01 15:07:39 -05:00
|
|
|
// firstFound returns the first found and the number of set entries.
|
|
|
|
func (m metaCacheEntries) firstFound() (first *metaCacheEntry, n int) {
|
2022-03-08 12:04:20 -05:00
|
|
|
for i, entry := range m {
|
2020-12-01 15:07:39 -05:00
|
|
|
if entry.name != "" {
|
|
|
|
n++
|
|
|
|
if first == nil {
|
2022-03-08 12:04:20 -05:00
|
|
|
first = &m[i]
|
2020-12-01 15:07:39 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return first, n
|
|
|
|
}
|
|
|
|
|
2020-10-28 12:18:35 -04:00
|
|
|
// names will return all names in order.
|
|
|
|
// Since this allocates it should not be used in critical functions.
|
|
|
|
func (m metaCacheEntries) names() []string {
|
|
|
|
res := make([]string, 0, len(m))
|
|
|
|
for _, obj := range m {
|
|
|
|
res = append(res, obj.name)
|
|
|
|
}
|
|
|
|
return res
|
|
|
|
}
|
|
|
|
|
|
|
|
// metaCacheEntriesSorted contains metacache entries that are sorted.
type metaCacheEntriesSorted struct {
	// o holds the sorted entries.
	o metaCacheEntries
	// list id is not serialized
	listID string
	// Reuse buffers: when set, discarded entry metadata is returned
	// to the shared pool by forwardTo/forwardPast/truncate.
	reuse bool
}
|
|
|
|
|
|
|
|
// shallowClone will create a shallow clone of the array objects,
|
|
|
|
// but object metadata will not be cloned.
|
|
|
|
func (m metaCacheEntriesSorted) shallowClone() metaCacheEntriesSorted {
|
|
|
|
// We have value receiver so we already have a copy.
|
|
|
|
m.o = m.o.shallowClone()
|
|
|
|
return m
|
|
|
|
}
|
|
|
|
|
|
|
|
// fileInfoVersions converts the metadata to FileInfoVersions where possible.
// Metadata that cannot be decoded is skipped.
// prefix/delimiter implement common-prefix collapsing; afterV skips
// versions up to and including the given version id on the first object.
func (m *metaCacheEntriesSorted) fileInfoVersions(bucket, prefix, delimiter, afterV string) (versions []ObjectInfo) {
	versions = make([]ObjectInfo, 0, m.len())
	prevPrefix := ""
	vcfg, _ := globalBucketVersioningSys.Get(bucket)

	for _, entry := range m.o {
		if entry.isObject() {
			if delimiter != "" {
				// Collapse objects below a common prefix into one dir entry.
				idx := strings.Index(strings.TrimPrefix(entry.name, prefix), delimiter)
				if idx >= 0 {
					idx = len(prefix) + idx + len(delimiter)
					currPrefix := entry.name[:idx]
					if currPrefix == prevPrefix {
						// Already emitted this common prefix.
						continue
					}
					prevPrefix = currPrefix
					versions = append(versions, ObjectInfo{
						IsDir:  true,
						Bucket: bucket,
						Name:   currPrefix,
					})
					continue
				}
			}

			fiv, err := entry.fileInfoVersions(bucket)
			if err != nil {
				// Skip undecodable metadata.
				continue
			}

			fiVersions := fiv.Versions
			if afterV != "" {
				// Continue listing after the supplied version id marker.
				vidMarkerIdx := fiv.findVersionIndex(afterV)
				if vidMarkerIdx >= 0 {
					fiVersions = fiVersions[vidMarkerIdx+1:]
				}
				// Only applies to the first object in the listing.
				afterV = ""
			}

			for _, version := range fiVersions {
				versioned := vcfg != nil && vcfg.Versioned(entry.name)
				versions = append(versions, version.ToObjectInfo(bucket, entry.name, versioned))
			}

			continue
		}

		if entry.isDir() {
			if delimiter == "" {
				// Recursive listings never emit dir entries.
				continue
			}
			idx := strings.Index(strings.TrimPrefix(entry.name, prefix), delimiter)
			if idx < 0 {
				continue
			}
			idx = len(prefix) + idx + len(delimiter)
			currPrefix := entry.name[:idx]
			if currPrefix == prevPrefix {
				continue
			}
			prevPrefix = currPrefix
			versions = append(versions, ObjectInfo{
				IsDir:  true,
				Bucket: bucket,
				Name:   currPrefix,
			})
		}
	}

	return versions
}
|
|
|
|
|
2022-03-22 15:39:45 -04:00
|
|
|
// fileInfos converts the metadata to ObjectInfo where possible.
// Metadata that cannot be decoded is skipped.
// prefix/delimiter implement common-prefix collapsing like fileInfoVersions,
// but only the latest version of each object is returned.
func (m *metaCacheEntriesSorted) fileInfos(bucket, prefix, delimiter string) (objects []ObjectInfo) {
	objects = make([]ObjectInfo, 0, m.len())
	prevPrefix := ""

	vcfg, _ := globalBucketVersioningSys.Get(bucket)

	for _, entry := range m.o {
		if entry.isObject() {
			if delimiter != "" {
				// Collapse objects below a common prefix into one dir entry.
				idx := strings.Index(strings.TrimPrefix(entry.name, prefix), delimiter)
				if idx >= 0 {
					idx = len(prefix) + idx + len(delimiter)
					currPrefix := entry.name[:idx]
					if currPrefix == prevPrefix {
						// Already emitted this common prefix.
						continue
					}
					prevPrefix = currPrefix
					objects = append(objects, ObjectInfo{
						IsDir:  true,
						Bucket: bucket,
						Name:   currPrefix,
					})
					continue
				}
			}

			fi, err := entry.fileInfo(bucket)
			if err == nil {
				versioned := vcfg != nil && vcfg.Versioned(entry.name)
				objects = append(objects, fi.ToObjectInfo(bucket, entry.name, versioned))
			}
			continue
		}
		if entry.isDir() {
			if delimiter == "" {
				// Recursive listings never emit dir entries.
				continue
			}
			idx := strings.Index(strings.TrimPrefix(entry.name, prefix), delimiter)
			if idx < 0 {
				continue
			}
			idx = len(prefix) + idx + len(delimiter)
			currPrefix := entry.name[:idx]
			if currPrefix == prevPrefix {
				continue
			}
			prevPrefix = currPrefix
			objects = append(objects, ObjectInfo{
				IsDir:  true,
				Bucket: bucket,
				Name:   currPrefix,
			})
		}
	}

	return objects
}
|
|
|
|
|
|
|
|
// forwardTo will truncate m so only entries that are s or after is in the list.
|
|
|
|
func (m *metaCacheEntriesSorted) forwardTo(s string) {
|
|
|
|
if s == "" {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
idx := sort.Search(len(m.o), func(i int) bool {
|
|
|
|
return m.o[i].name >= s
|
|
|
|
})
|
2021-08-13 14:39:27 -04:00
|
|
|
if m.reuse {
|
|
|
|
for i, entry := range m.o[:idx] {
|
2021-08-23 14:17:27 -04:00
|
|
|
metaDataPoolPut(entry.metadata)
|
2021-08-13 14:39:27 -04:00
|
|
|
m.o[i].metadata = nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-28 12:18:35 -04:00
|
|
|
m.o = m.o[idx:]
|
|
|
|
}
|
|
|
|
|
2021-03-01 11:12:02 -05:00
|
|
|
// forwardPast will truncate m so only entries that are after s is in the list.
|
|
|
|
func (m *metaCacheEntriesSorted) forwardPast(s string) {
|
|
|
|
if s == "" {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
idx := sort.Search(len(m.o), func(i int) bool {
|
|
|
|
return m.o[i].name > s
|
|
|
|
})
|
2021-08-13 14:39:27 -04:00
|
|
|
if m.reuse {
|
|
|
|
for i, entry := range m.o[:idx] {
|
2021-08-23 14:17:27 -04:00
|
|
|
metaDataPoolPut(entry.metadata)
|
2021-08-13 14:39:27 -04:00
|
|
|
m.o[i].metadata = nil
|
|
|
|
}
|
|
|
|
}
|
2021-03-01 11:12:02 -05:00
|
|
|
m.o = m.o[idx:]
|
|
|
|
}
|
|
|
|
|
2021-07-05 18:34:41 -04:00
|
|
|
// mergeEntryChannels will merge entries from in and return them sorted on out.
// To signify no more results are on an input channel, close it.
// The output channel will be closed when all inputs are emptied.
// If file names are equal, compareMeta is called to select which one to choose.
// The entry not chosen will be discarded.
// If the context is canceled the function will return the error,
// otherwise the function will return nil.
func mergeEntryChannels(ctx context.Context, in []chan metaCacheEntry, out chan<- metaCacheEntry, readQuorum int) error {
	defer close(out)
	// top holds the current head entry of each input channel (nil = drained).
	top := make([]*metaCacheEntry, len(in))
	nDone := 0
	ctxDone := ctx.Done()

	// Use simpler forwarder.
	if len(in) == 1 {
		for {
			select {
			case <-ctxDone:
				return ctx.Err()
			case v, ok := <-in[0]:
				if !ok {
					return nil
				}
				select {
				case <-ctxDone:
					return ctx.Err()
				case out <- v:
				}
			}
		}
	}

	// selectFrom refills top[idx] from its channel, marking it done on close.
	selectFrom := func(idx int) error {
		select {
		case <-ctxDone:
			return ctx.Err()
		case entry, ok := <-in[idx]:
			if !ok {
				top[idx] = nil
				nDone++
			} else {
				top[idx] = &entry
			}
		}
		return nil
	}
	// Populate all...
	for i := range in {
		if err := selectFrom(i); err != nil {
			return err
		}
	}
	// last is the name most recently emitted; used to drop duplicates.
	last := ""
	var toMerge []int

	// Choose the best to return.
	for {
		if nDone == len(in) {
			return nil
		}
		// Find the lexically smallest head; equal names get queued in toMerge.
		best := top[0]
		bestIdx := 0
		toMerge = toMerge[:0]
		for i, other := range top[1:] {
			otherIdx := i + 1
			if other == nil {
				continue
			}
			if best == nil {
				best = other
				bestIdx = otherIdx
				continue
			}
			// We should make sure to avoid objects and directories
			// of this fashion such as
			// - foo-1
			// - foo-1/
			// we should avoid this situation by making sure that
			// we compare the `foo-1/` after path.Clean() to
			// de-dup the entries.
			if path.Clean(best.name) == path.Clean(other.name) {
				toMerge = append(toMerge, otherIdx)
				continue
			}
			if best.name > other.name {
				// New smallest head; previous equal-name queue no longer applies.
				toMerge = toMerge[:0]
				best = other
				bestIdx = otherIdx
			}
		}

		// Merge any unmerged
		if len(toMerge) > 0 {
			versions := make([]xlMetaV2ShallowVersion, 0, len(toMerge)+1)
			xl, err := best.xlmeta()
			if err == nil {
				versions = append(versions, xl.versions...)
			}
			for _, idx := range toMerge {
				other := top[idx]
				if other == nil {
					continue
				}
				xl2, err := other.xlmeta()
				if err != nil {
					// Undecodable entry: consume it and move on.
					if err := selectFrom(idx); err != nil {
						return err
					}
					continue
				}
				if xl == nil {
					// Discard current "best"
					if err := selectFrom(bestIdx); err != nil {
						return err
					}
					bestIdx = idx
					best = other
					xl = xl2
				} else {
					// Mark read, unless we added it as new "best".
					if err := selectFrom(idx); err != nil {
						return err
					}
				}
				versions = append(versions, xl2.versions...)
			}
			if xl != nil && len(versions) > 0 {
				// Merge all versions. 'strict' doesn't matter since we only need one.
				xl.versions = mergeXLV2Versions(readQuorum, true, 0, versions)
				if meta, err := xl.AppendTo(metaDataPoolGet()); err == nil {
					if best.reusable {
						metaDataPoolPut(best.metadata)
					}
					best.metadata = meta
					best.cached = xl
				}
			}
			toMerge = toMerge[:0]
		}
		if best.name > last {
			select {
			case <-ctxDone:
				return ctx.Err()
			case out <- *best:
				last = best.name
			}
		} else if serverDebugLog {
			console.Debugln("mergeEntryChannels: discarding duplicate", best.name, "<=", last)
		}
		// Replace entry we just sent.
		if err := selectFrom(bestIdx); err != nil {
			return err
		}
	}
}
|
|
|
|
|
2020-10-28 12:18:35 -04:00
|
|
|
// merge will merge other into m.
|
|
|
|
// If the same entries exists in both and metadata matches only one is added,
|
|
|
|
// otherwise the entry from m will be placed first.
|
|
|
|
// Operation time is expected to be O(n+m).
|
|
|
|
func (m *metaCacheEntriesSorted) merge(other metaCacheEntriesSorted, limit int) {
|
|
|
|
merged := make(metaCacheEntries, 0, m.len()+other.len())
|
|
|
|
a := m.entries()
|
|
|
|
b := other.entries()
|
|
|
|
for len(a) > 0 && len(b) > 0 {
|
2023-03-06 11:56:10 -05:00
|
|
|
switch {
|
|
|
|
case a[0].name == b[0].name && bytes.Equal(a[0].metadata, b[0].metadata):
|
2020-10-28 12:18:35 -04:00
|
|
|
// Same, discard one.
|
|
|
|
merged = append(merged, a[0])
|
|
|
|
a = a[1:]
|
|
|
|
b = b[1:]
|
2023-03-06 11:56:10 -05:00
|
|
|
case a[0].name < b[0].name:
|
2020-10-28 12:18:35 -04:00
|
|
|
merged = append(merged, a[0])
|
|
|
|
a = a[1:]
|
2023-03-06 11:56:10 -05:00
|
|
|
default:
|
2020-10-28 12:18:35 -04:00
|
|
|
merged = append(merged, b[0])
|
|
|
|
b = b[1:]
|
|
|
|
}
|
|
|
|
if limit > 0 && len(merged) >= limit {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Append anything left.
|
|
|
|
if limit < 0 || len(merged) < limit {
|
|
|
|
merged = append(merged, a...)
|
|
|
|
merged = append(merged, b...)
|
|
|
|
}
|
|
|
|
m.o = merged
|
|
|
|
}
|
|
|
|
|
|
|
|
// filterPrefix will filter m to only contain entries with the specified prefix.
|
|
|
|
func (m *metaCacheEntriesSorted) filterPrefix(s string) {
|
|
|
|
if s == "" {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
m.forwardTo(s)
|
|
|
|
for i, o := range m.o {
|
|
|
|
if !o.hasPrefix(s) {
|
|
|
|
m.o = m.o[:i]
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// filterObjectsOnly will remove prefix directories.
|
|
|
|
// Order is preserved, but the underlying slice is modified.
|
|
|
|
func (m *metaCacheEntriesSorted) filterObjectsOnly() {
|
|
|
|
dst := m.o[:0]
|
|
|
|
for _, o := range m.o {
|
|
|
|
if !o.isDir() {
|
|
|
|
dst = append(dst, o)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
m.o = dst
|
|
|
|
}
|
|
|
|
|
|
|
|
// filterPrefixesOnly will remove objects.
|
|
|
|
// Order is preserved, but the underlying slice is modified.
|
|
|
|
func (m *metaCacheEntriesSorted) filterPrefixesOnly() {
|
|
|
|
dst := m.o[:0]
|
|
|
|
for _, o := range m.o {
|
|
|
|
if o.isDir() {
|
|
|
|
dst = append(dst, o)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
m.o = dst
|
|
|
|
}
|
|
|
|
|
|
|
|
// filterRecursiveEntries will keep entries only with the prefix that doesn't contain separator.
|
|
|
|
// This can be used to remove recursive listings.
|
|
|
|
// To return root elements only set prefix to an empty string.
|
|
|
|
// Order is preserved, but the underlying slice is modified.
|
|
|
|
func (m *metaCacheEntriesSorted) filterRecursiveEntries(prefix, separator string) {
|
|
|
|
dst := m.o[:0]
|
|
|
|
if prefix != "" {
|
|
|
|
m.forwardTo(prefix)
|
|
|
|
for _, o := range m.o {
|
|
|
|
ext := strings.TrimPrefix(o.name, prefix)
|
|
|
|
if len(ext) != len(o.name) {
|
|
|
|
if !strings.Contains(ext, separator) {
|
|
|
|
dst = append(dst, o)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// No prefix, simpler
|
|
|
|
for _, o := range m.o {
|
|
|
|
if !strings.Contains(o.name, separator) {
|
|
|
|
dst = append(dst, o)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
m.o = dst
|
|
|
|
}
|
|
|
|
|
|
|
|
// truncate the number of entries to maximum n.
|
|
|
|
func (m *metaCacheEntriesSorted) truncate(n int) {
|
|
|
|
if m == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if len(m.o) > n {
|
2021-08-13 14:39:27 -04:00
|
|
|
if m.reuse {
|
|
|
|
for i, entry := range m.o[n:] {
|
2021-08-23 14:17:27 -04:00
|
|
|
metaDataPoolPut(entry.metadata)
|
2021-08-13 14:39:27 -04:00
|
|
|
m.o[n+i].metadata = nil
|
|
|
|
}
|
|
|
|
}
|
2020-10-28 12:18:35 -04:00
|
|
|
m.o = m.o[:n]
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// len returns the number of objects and prefix dirs in m.
|
|
|
|
func (m *metaCacheEntriesSorted) len() int {
|
|
|
|
if m == nil {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
return len(m.o)
|
|
|
|
}
|
|
|
|
|
|
|
|
// entries returns the underlying objects as is currently represented.
|
|
|
|
func (m *metaCacheEntriesSorted) entries() metaCacheEntries {
|
|
|
|
if m == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return m.o
|
|
|
|
}
|