cached diskIDs are not needed for scanner healing (#14170)

This PR removes unnecessary state that was being
passed around for DiskIDs. It is not needed, since
on a running system each disk already knows exactly
which pool and which set it belongs to.

Currently, cached DiskIDs do not work properly:
the scanner always skips offline disks and never
runs healing while disks are offline, because it
expects all of the cached diskIDs to be present
at all times. Caching the IDs also made things
inflexible with respect to, for example, assigning
a new diskID in `format.json` (though that is a
minor issue).
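
For context, here is a rough sketch of the scanner logic this PR removes, condensed
from the removed lines in the diff below (all identifiers come from that diff); it
shows how a single offline or unresolvable cached diskID disabled healing:

    // Condensed from the code removed in this PR: if any cached diskID
    // cannot be resolved to an online disk, drop all disks, which skips
    // set healing entirely for this scan cycle.
    if len(cache.Disks) > 0 {
        objAPI, ok := newObjectLayerFn().(*erasureServerPools)
        if ok {
            s.disks = objAPI.GetDisksID(cache.Disks...)
            if len(s.disks) != len(cache.Disks) {
                // Not all cached diskIDs are present: heal nothing.
                s.disks = s.disks[:0]
            }
        }
    }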

Requiring all drives to be online for healing via
the scanner is unnecessary. Instead, healing should
trigger even when only a subset of nodes and drives
is available; this keeps the SLA on objects intact
when disks are offline for a prolonged period of time.
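
As a rough sketch of the new flow (condensed from this diff; all identifiers are
taken from it), the scanner now receives the pool/set location and derives both
the disk set and the read quorum from it, so healing proceeds as long as quorum
is met; the FS backend passes -1, -1 to skip set healing:

    // Condensed from the new scanDataFolder logic in this PR.
    if poolIdx >= 0 && setIdx >= 0 {
        if objAPI, ok := newObjectLayerFn().(*erasureServerPools); ok {
            // All disks of the set, whether online or offline.
            s.disks = objAPI.serverPools[poolIdx].sets[setIdx].getDisks()
            s.disksQuorum = objAPI.serverPools[poolIdx].sets[setIdx].defaultRQuorum()
        }
    }
    // Healing listing/resolution then only requires quorum, not every disk:
    //   dirQuorum: f.disksQuorum, objQuorum: f.disksQuorum, minDisks: f.disksQuorum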
Harshavardhana 2022-01-26 08:34:56 -08:00 committed by GitHub
parent 7db05a80dd
commit 57118919d2
8 changed files with 159 additions and 461 deletions

View File

@@ -22,6 +22,7 @@ import (
 	"context"
 	"encoding/binary"
 	"errors"
+	"fmt"
 	"io/fs"
 	"math"
 	"math/rand"
@@ -33,6 +34,7 @@ import (
 	"time"
 
 	"github.com/bits-and-blooms/bloom/v3"
+	"github.com/dustin/go-humanize"
 	"github.com/minio/madmin-go"
 	"github.com/minio/minio/internal/bucket/lifecycle"
 	"github.com/minio/minio/internal/bucket/replication"
@@ -181,7 +183,8 @@ type folderScanner struct {
 	healFolderInclude uint32 // Include a clean folder one in n cycles.
 	healObjectSelect  uint32 // Do a heal check on an object once every n cycles. Must divide into healFolderInclude
 
 	disks       []StorageAPI
+	disksQuorum int
 
 	// If set updates will be sent regularly to this channel.
 	// Will not be closed when returned.
@@ -247,7 +250,7 @@ var globalScannerStats scannerStats
 // The returned cache will always be valid, but may not be updated from the existing.
 // Before each operation sleepDuration is called which can be used to temporarily halt the scanner.
 // If the supplied context is canceled the function will return at the first chance.
-func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache, getSize getSizeFn) (dataUsageCache, error) {
+func scanDataFolder(ctx context.Context, poolIdx, setIdx int, basePath string, cache dataUsageCache, getSize getSizeFn) (dataUsageCache, error) {
 	t := UTCNow()
 
 	logPrefix := color.Green("data-usage: ")
@@ -280,13 +283,15 @@ func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
 	}
 
 	// Add disks for set healing.
-	if len(cache.Disks) > 0 {
+	if poolIdx >= 0 && setIdx >= 0 {
 		objAPI, ok := newObjectLayerFn().(*erasureServerPools)
 		if ok {
-			s.disks = objAPI.GetDisksID(cache.Disks...)
-			if len(s.disks) != len(cache.Disks) {
-				console.Debugf(logPrefix+"Missing disks, want %d, found %d. Cannot heal. %s\n", len(cache.Disks), len(s.disks), logSuffix)
-				s.disks = s.disks[:0]
+			if poolIdx < len(objAPI.serverPools) && setIdx < len(objAPI.serverPools[poolIdx].sets) {
+				// Pass the disks belonging to the set.
+				s.disks = objAPI.serverPools[poolIdx].sets[setIdx].getDisks()
+				s.disksQuorum = objAPI.serverPools[poolIdx].sets[setIdx].defaultRQuorum()
+			} else {
+				logger.LogIf(ctx, fmt.Errorf("Matching pool %s, set %s not found", humanize.Ordinal(poolIdx+1), humanize.Ordinal(setIdx+1)))
 			}
 		}
 	}
@@ -623,7 +628,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
 			}
 
 			objAPI, ok := newObjectLayerFn().(*erasureServerPools)
-			if !ok || len(f.disks) == 0 {
+			if !ok || len(f.disks) == 0 || f.disksQuorum == 0 {
 				break
 			}
 
@@ -645,8 +650,8 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
 			// This means that the next run will not look for it.
 			// How to resolve results.
 			resolver := metadataResolutionParams{
-				dirQuorum: getReadQuorum(len(f.disks)),
-				objQuorum: getReadQuorum(len(f.disks)),
+				dirQuorum: f.disksQuorum,
+				objQuorum: f.disksQuorum,
 				bucket:    "",
 				strict:    false,
 			}
@@ -676,8 +681,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
 				path:           prefix,
 				recursive:      true,
 				reportNotFound: true,
-				minDisks:       len(f.disks), // We want full consistency.
-				// Weird, maybe transient error.
+				minDisks:       f.disksQuorum,
 				agreed: func(entry metaCacheEntry) {
 					if f.dataUsageScannerDebug {
 						console.Debugf(healObjectsPrefix+" got agreement: %v\n", entry.name)

View File

@@ -225,7 +225,6 @@ type dataUsageEntryV6 struct {
 type dataUsageCache struct {
 	Info  dataUsageCacheInfo
 	Cache map[string]dataUsageEntry
-	Disks []string
 }
 
 //msgp:encode ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4 dataUsageCacheV5 dataUsageCacheV6
@@ -234,35 +233,30 @@ type dataUsageCache struct {
 // dataUsageCacheV2 contains a cache of data usage entries version 2.
 type dataUsageCacheV2 struct {
 	Info  dataUsageCacheInfo
-	Disks []string
 	Cache map[string]dataUsageEntryV2
 }
 
 // dataUsageCacheV3 contains a cache of data usage entries version 3.
 type dataUsageCacheV3 struct {
 	Info  dataUsageCacheInfo
-	Disks []string
 	Cache map[string]dataUsageEntryV3
 }
 
 // dataUsageCacheV4 contains a cache of data usage entries version 4.
 type dataUsageCacheV4 struct {
 	Info  dataUsageCacheInfo
-	Disks []string
 	Cache map[string]dataUsageEntryV4
 }
 
 // dataUsageCacheV5 contains a cache of data usage entries version 5.
 type dataUsageCacheV5 struct {
 	Info  dataUsageCacheInfo
-	Disks []string
 	Cache map[string]dataUsageEntryV5
 }
 
 // dataUsageCacheV6 contains a cache of data usage entries version 6.
 type dataUsageCacheV6 struct {
 	Info  dataUsageCacheInfo
-	Disks []string
 	Cache map[string]dataUsageEntryV6
 }
@@ -1037,7 +1031,6 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
 			return err
 		}
 		d.Info = dold.Info
-		d.Disks = dold.Disks
 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
 		for k, v := range dold.Cache {
 			d.Cache[k] = dataUsageEntry{
@@ -1061,7 +1054,6 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
 			return err
 		}
 		d.Info = dold.Info
-		d.Disks = dold.Disks
 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
 		for k, v := range dold.Cache {
 			due := dataUsageEntry{
@@ -1101,7 +1093,6 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
 			return err
 		}
 		d.Info = dold.Info
-		d.Disks = dold.Disks
 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
 		for k, v := range dold.Cache {
 			due := dataUsageEntry{
@@ -1153,7 +1144,6 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
 			return err
 		}
 		d.Info = dold.Info
-		d.Disks = dold.Disks
 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
 		for k, v := range dold.Cache {
 			due := dataUsageEntry{
@@ -1205,7 +1195,6 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
 			return err
 		}
 		d.Info = dold.Info
-		d.Disks = dold.Disks
 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
 		for k, v := range dold.Cache {
 			var replicationStats *replicationAllStats

View File

@ -338,25 +338,6 @@ func (z *dataUsageCache) DecodeMsg(dc *msgp.Reader) (err error) {
} }
z.Cache[za0001] = za0002 z.Cache[za0001] = za0002
} }
case "Disks":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0003) {
z.Disks = (z.Disks)[:zb0003]
} else {
z.Disks = make([]string, zb0003)
}
for za0003 := range z.Disks {
z.Disks[za0003], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Disks", za0003)
return
}
}
default: default:
err = dc.Skip() err = dc.Skip()
if err != nil { if err != nil {
@ -370,9 +351,9 @@ func (z *dataUsageCache) DecodeMsg(dc *msgp.Reader) (err error) {
// EncodeMsg implements msgp.Encodable // EncodeMsg implements msgp.Encodable
func (z *dataUsageCache) EncodeMsg(en *msgp.Writer) (err error) { func (z *dataUsageCache) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3 // map header, size 2
// write "Info" // write "Info"
err = en.Append(0x83, 0xa4, 0x49, 0x6e, 0x66, 0x6f) err = en.Append(0x82, 0xa4, 0x49, 0x6e, 0x66, 0x6f)
if err != nil { if err != nil {
return return
} }
@ -403,32 +384,15 @@ func (z *dataUsageCache) EncodeMsg(en *msgp.Writer) (err error) {
return return
} }
} }
// write "Disks"
err = en.Append(0xa5, 0x44, 0x69, 0x73, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Disks)))
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
for za0003 := range z.Disks {
err = en.WriteString(z.Disks[za0003])
if err != nil {
err = msgp.WrapError(err, "Disks", za0003)
return
}
}
return return
} }
// MarshalMsg implements msgp.Marshaler // MarshalMsg implements msgp.Marshaler
func (z *dataUsageCache) MarshalMsg(b []byte) (o []byte, err error) { func (z *dataUsageCache) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize()) o = msgp.Require(b, z.Msgsize())
// map header, size 3 // map header, size 2
// string "Info" // string "Info"
o = append(o, 0x83, 0xa4, 0x49, 0x6e, 0x66, 0x6f) o = append(o, 0x82, 0xa4, 0x49, 0x6e, 0x66, 0x6f)
o, err = z.Info.MarshalMsg(o) o, err = z.Info.MarshalMsg(o)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Info") err = msgp.WrapError(err, "Info")
@ -445,12 +409,6 @@ func (z *dataUsageCache) MarshalMsg(b []byte) (o []byte, err error) {
return return
} }
} }
// string "Disks"
o = append(o, 0xa5, 0x44, 0x69, 0x73, 0x6b, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.Disks)))
for za0003 := range z.Disks {
o = msgp.AppendString(o, z.Disks[za0003])
}
return return
} }
@ -508,25 +466,6 @@ func (z *dataUsageCache) UnmarshalMsg(bts []byte) (o []byte, err error) {
} }
z.Cache[za0001] = za0002 z.Cache[za0001] = za0002
} }
case "Disks":
var zb0003 uint32
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0003) {
z.Disks = (z.Disks)[:zb0003]
} else {
z.Disks = make([]string, zb0003)
}
for za0003 := range z.Disks {
z.Disks[za0003], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Disks", za0003)
return
}
}
default: default:
bts, err = msgp.Skip(bts) bts, err = msgp.Skip(bts)
if err != nil { if err != nil {
@ -548,10 +487,6 @@ func (z *dataUsageCache) Msgsize() (s int) {
s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize() s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize()
} }
} }
s += 6 + msgp.ArrayHeaderSize
for za0003 := range z.Disks {
s += msgp.StringPrefixSize + len(z.Disks[za0003])
}
return return
} }
@ -811,54 +746,35 @@ func (z *dataUsageCacheV2) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "Info") err = msgp.WrapError(err, "Info")
return return
} }
case "Disks":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0002) {
z.Disks = (z.Disks)[:zb0002]
} else {
z.Disks = make([]string, zb0002)
}
for za0001 := range z.Disks {
z.Disks[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Disks", za0001)
return
}
}
case "Cache": case "Cache":
var zb0003 uint32 var zb0002 uint32
zb0003, err = dc.ReadMapHeader() zb0002, err = dc.ReadMapHeader()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV2, zb0003) z.Cache = make(map[string]dataUsageEntryV2, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { for key := range z.Cache {
delete(z.Cache, key) delete(z.Cache, key)
} }
} }
for zb0003 > 0 { for zb0002 > 0 {
zb0003-- zb0002--
var za0002 string var za0001 string
var za0003 dataUsageEntryV2 var za0002 dataUsageEntryV2
za0002, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
err = za0003.DecodeMsg(dc) err = za0002.DecodeMsg(dc)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache", za0002) err = msgp.WrapError(err, "Cache", za0001)
return return
} }
z.Cache[za0002] = za0003 z.Cache[za0001] = za0002
} }
default: default:
err = dc.Skip() err = dc.Skip()
@ -895,54 +811,35 @@ func (z *dataUsageCacheV2) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "Info") err = msgp.WrapError(err, "Info")
return return
} }
case "Disks":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0002) {
z.Disks = (z.Disks)[:zb0002]
} else {
z.Disks = make([]string, zb0002)
}
for za0001 := range z.Disks {
z.Disks[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Disks", za0001)
return
}
}
case "Cache": case "Cache":
var zb0003 uint32 var zb0002 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV2, zb0003) z.Cache = make(map[string]dataUsageEntryV2, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { for key := range z.Cache {
delete(z.Cache, key) delete(z.Cache, key)
} }
} }
for zb0003 > 0 { for zb0002 > 0 {
var za0002 string var za0001 string
var za0003 dataUsageEntryV2 var za0002 dataUsageEntryV2
zb0003-- zb0002--
za0002, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
bts, err = za0003.UnmarshalMsg(bts) bts, err = za0002.UnmarshalMsg(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache", za0002) err = msgp.WrapError(err, "Cache", za0001)
return return
} }
z.Cache[za0002] = za0003 z.Cache[za0001] = za0002
} }
default: default:
bts, err = msgp.Skip(bts) bts, err = msgp.Skip(bts)
@ -958,15 +855,11 @@ func (z *dataUsageCacheV2) UnmarshalMsg(bts []byte) (o []byte, err error) {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *dataUsageCacheV2) Msgsize() (s int) { func (z *dataUsageCacheV2) Msgsize() (s int) {
s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.ArrayHeaderSize s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.MapHeaderSize
for za0001 := range z.Disks {
s += msgp.StringPrefixSize + len(z.Disks[za0001])
}
s += 6 + msgp.MapHeaderSize
if z.Cache != nil { if z.Cache != nil {
for za0002, za0003 := range z.Cache { for za0001, za0002 := range z.Cache {
_ = za0003 _ = za0002
s += msgp.StringPrefixSize + len(za0002) + za0003.Msgsize() s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize()
} }
} }
return return
@ -996,54 +889,35 @@ func (z *dataUsageCacheV3) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "Info") err = msgp.WrapError(err, "Info")
return return
} }
case "Disks":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0002) {
z.Disks = (z.Disks)[:zb0002]
} else {
z.Disks = make([]string, zb0002)
}
for za0001 := range z.Disks {
z.Disks[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Disks", za0001)
return
}
}
case "Cache": case "Cache":
var zb0003 uint32 var zb0002 uint32
zb0003, err = dc.ReadMapHeader() zb0002, err = dc.ReadMapHeader()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV3, zb0003) z.Cache = make(map[string]dataUsageEntryV3, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { for key := range z.Cache {
delete(z.Cache, key) delete(z.Cache, key)
} }
} }
for zb0003 > 0 { for zb0002 > 0 {
zb0003-- zb0002--
var za0002 string var za0001 string
var za0003 dataUsageEntryV3 var za0002 dataUsageEntryV3
za0002, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
err = za0003.DecodeMsg(dc) err = za0002.DecodeMsg(dc)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache", za0002) err = msgp.WrapError(err, "Cache", za0001)
return return
} }
z.Cache[za0002] = za0003 z.Cache[za0001] = za0002
} }
default: default:
err = dc.Skip() err = dc.Skip()
@ -1080,54 +954,35 @@ func (z *dataUsageCacheV3) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "Info") err = msgp.WrapError(err, "Info")
return return
} }
case "Disks":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0002) {
z.Disks = (z.Disks)[:zb0002]
} else {
z.Disks = make([]string, zb0002)
}
for za0001 := range z.Disks {
z.Disks[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Disks", za0001)
return
}
}
case "Cache": case "Cache":
var zb0003 uint32 var zb0002 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV3, zb0003) z.Cache = make(map[string]dataUsageEntryV3, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { for key := range z.Cache {
delete(z.Cache, key) delete(z.Cache, key)
} }
} }
for zb0003 > 0 { for zb0002 > 0 {
var za0002 string var za0001 string
var za0003 dataUsageEntryV3 var za0002 dataUsageEntryV3
zb0003-- zb0002--
za0002, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
bts, err = za0003.UnmarshalMsg(bts) bts, err = za0002.UnmarshalMsg(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache", za0002) err = msgp.WrapError(err, "Cache", za0001)
return return
} }
z.Cache[za0002] = za0003 z.Cache[za0001] = za0002
} }
default: default:
bts, err = msgp.Skip(bts) bts, err = msgp.Skip(bts)
@ -1143,15 +998,11 @@ func (z *dataUsageCacheV3) UnmarshalMsg(bts []byte) (o []byte, err error) {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *dataUsageCacheV3) Msgsize() (s int) { func (z *dataUsageCacheV3) Msgsize() (s int) {
s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.ArrayHeaderSize s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.MapHeaderSize
for za0001 := range z.Disks {
s += msgp.StringPrefixSize + len(z.Disks[za0001])
}
s += 6 + msgp.MapHeaderSize
if z.Cache != nil { if z.Cache != nil {
for za0002, za0003 := range z.Cache { for za0001, za0002 := range z.Cache {
_ = za0003 _ = za0002
s += msgp.StringPrefixSize + len(za0002) + za0003.Msgsize() s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize()
} }
} }
return return
@ -1181,54 +1032,35 @@ func (z *dataUsageCacheV4) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "Info") err = msgp.WrapError(err, "Info")
return return
} }
case "Disks":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0002) {
z.Disks = (z.Disks)[:zb0002]
} else {
z.Disks = make([]string, zb0002)
}
for za0001 := range z.Disks {
z.Disks[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Disks", za0001)
return
}
}
case "Cache": case "Cache":
var zb0003 uint32 var zb0002 uint32
zb0003, err = dc.ReadMapHeader() zb0002, err = dc.ReadMapHeader()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV4, zb0003) z.Cache = make(map[string]dataUsageEntryV4, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { for key := range z.Cache {
delete(z.Cache, key) delete(z.Cache, key)
} }
} }
for zb0003 > 0 { for zb0002 > 0 {
zb0003-- zb0002--
var za0002 string var za0001 string
var za0003 dataUsageEntryV4 var za0002 dataUsageEntryV4
za0002, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
err = za0003.DecodeMsg(dc) err = za0002.DecodeMsg(dc)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache", za0002) err = msgp.WrapError(err, "Cache", za0001)
return return
} }
z.Cache[za0002] = za0003 z.Cache[za0001] = za0002
} }
default: default:
err = dc.Skip() err = dc.Skip()
@ -1265,54 +1097,35 @@ func (z *dataUsageCacheV4) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "Info") err = msgp.WrapError(err, "Info")
return return
} }
case "Disks":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0002) {
z.Disks = (z.Disks)[:zb0002]
} else {
z.Disks = make([]string, zb0002)
}
for za0001 := range z.Disks {
z.Disks[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Disks", za0001)
return
}
}
case "Cache": case "Cache":
var zb0003 uint32 var zb0002 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV4, zb0003) z.Cache = make(map[string]dataUsageEntryV4, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { for key := range z.Cache {
delete(z.Cache, key) delete(z.Cache, key)
} }
} }
for zb0003 > 0 { for zb0002 > 0 {
var za0002 string var za0001 string
var za0003 dataUsageEntryV4 var za0002 dataUsageEntryV4
zb0003-- zb0002--
za0002, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
bts, err = za0003.UnmarshalMsg(bts) bts, err = za0002.UnmarshalMsg(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache", za0002) err = msgp.WrapError(err, "Cache", za0001)
return return
} }
z.Cache[za0002] = za0003 z.Cache[za0001] = za0002
} }
default: default:
bts, err = msgp.Skip(bts) bts, err = msgp.Skip(bts)
@ -1328,15 +1141,11 @@ func (z *dataUsageCacheV4) UnmarshalMsg(bts []byte) (o []byte, err error) {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *dataUsageCacheV4) Msgsize() (s int) { func (z *dataUsageCacheV4) Msgsize() (s int) {
s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.ArrayHeaderSize s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.MapHeaderSize
for za0001 := range z.Disks {
s += msgp.StringPrefixSize + len(z.Disks[za0001])
}
s += 6 + msgp.MapHeaderSize
if z.Cache != nil { if z.Cache != nil {
for za0002, za0003 := range z.Cache { for za0001, za0002 := range z.Cache {
_ = za0003 _ = za0002
s += msgp.StringPrefixSize + len(za0002) + za0003.Msgsize() s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize()
} }
} }
return return
@ -1366,54 +1175,35 @@ func (z *dataUsageCacheV5) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "Info") err = msgp.WrapError(err, "Info")
return return
} }
case "Disks":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0002) {
z.Disks = (z.Disks)[:zb0002]
} else {
z.Disks = make([]string, zb0002)
}
for za0001 := range z.Disks {
z.Disks[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Disks", za0001)
return
}
}
case "Cache": case "Cache":
var zb0003 uint32 var zb0002 uint32
zb0003, err = dc.ReadMapHeader() zb0002, err = dc.ReadMapHeader()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV5, zb0003) z.Cache = make(map[string]dataUsageEntryV5, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { for key := range z.Cache {
delete(z.Cache, key) delete(z.Cache, key)
} }
} }
for zb0003 > 0 { for zb0002 > 0 {
zb0003-- zb0002--
var za0002 string var za0001 string
var za0003 dataUsageEntryV5 var za0002 dataUsageEntryV5
za0002, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
err = za0003.DecodeMsg(dc) err = za0002.DecodeMsg(dc)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache", za0002) err = msgp.WrapError(err, "Cache", za0001)
return return
} }
z.Cache[za0002] = za0003 z.Cache[za0001] = za0002
} }
default: default:
err = dc.Skip() err = dc.Skip()
@ -1450,54 +1240,35 @@ func (z *dataUsageCacheV5) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "Info") err = msgp.WrapError(err, "Info")
return return
} }
case "Disks":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0002) {
z.Disks = (z.Disks)[:zb0002]
} else {
z.Disks = make([]string, zb0002)
}
for za0001 := range z.Disks {
z.Disks[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Disks", za0001)
return
}
}
case "Cache": case "Cache":
var zb0003 uint32 var zb0002 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV5, zb0003) z.Cache = make(map[string]dataUsageEntryV5, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { for key := range z.Cache {
delete(z.Cache, key) delete(z.Cache, key)
} }
} }
for zb0003 > 0 { for zb0002 > 0 {
var za0002 string var za0001 string
var za0003 dataUsageEntryV5 var za0002 dataUsageEntryV5
zb0003-- zb0002--
za0002, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
bts, err = za0003.UnmarshalMsg(bts) bts, err = za0002.UnmarshalMsg(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache", za0002) err = msgp.WrapError(err, "Cache", za0001)
return return
} }
z.Cache[za0002] = za0003 z.Cache[za0001] = za0002
} }
default: default:
bts, err = msgp.Skip(bts) bts, err = msgp.Skip(bts)
@ -1513,15 +1284,11 @@ func (z *dataUsageCacheV5) UnmarshalMsg(bts []byte) (o []byte, err error) {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *dataUsageCacheV5) Msgsize() (s int) { func (z *dataUsageCacheV5) Msgsize() (s int) {
s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.ArrayHeaderSize s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.MapHeaderSize
for za0001 := range z.Disks {
s += msgp.StringPrefixSize + len(z.Disks[za0001])
}
s += 6 + msgp.MapHeaderSize
if z.Cache != nil { if z.Cache != nil {
for za0002, za0003 := range z.Cache { for za0001, za0002 := range z.Cache {
_ = za0003 _ = za0002
s += msgp.StringPrefixSize + len(za0002) + za0003.Msgsize() s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize()
} }
} }
return return
@ -1551,54 +1318,35 @@ func (z *dataUsageCacheV6) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "Info") err = msgp.WrapError(err, "Info")
return return
} }
case "Disks":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0002) {
z.Disks = (z.Disks)[:zb0002]
} else {
z.Disks = make([]string, zb0002)
}
for za0001 := range z.Disks {
z.Disks[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Disks", za0001)
return
}
}
case "Cache": case "Cache":
var zb0003 uint32 var zb0002 uint32
zb0003, err = dc.ReadMapHeader() zb0002, err = dc.ReadMapHeader()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV6, zb0003) z.Cache = make(map[string]dataUsageEntryV6, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { for key := range z.Cache {
delete(z.Cache, key) delete(z.Cache, key)
} }
} }
for zb0003 > 0 { for zb0002 > 0 {
zb0003-- zb0002--
var za0002 string var za0001 string
var za0003 dataUsageEntryV6 var za0002 dataUsageEntryV6
za0002, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
err = za0003.DecodeMsg(dc) err = za0002.DecodeMsg(dc)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache", za0002) err = msgp.WrapError(err, "Cache", za0001)
return return
} }
z.Cache[za0002] = za0003 z.Cache[za0001] = za0002
} }
default: default:
err = dc.Skip() err = dc.Skip()
@ -1635,54 +1383,35 @@ func (z *dataUsageCacheV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "Info") err = msgp.WrapError(err, "Info")
return return
} }
case "Disks":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Disks")
return
}
if cap(z.Disks) >= int(zb0002) {
z.Disks = (z.Disks)[:zb0002]
} else {
z.Disks = make([]string, zb0002)
}
for za0001 := range z.Disks {
z.Disks[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Disks", za0001)
return
}
}
case "Cache": case "Cache":
var zb0003 uint32 var zb0002 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV6, zb0003) z.Cache = make(map[string]dataUsageEntryV6, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { for key := range z.Cache {
delete(z.Cache, key) delete(z.Cache, key)
} }
} }
for zb0003 > 0 { for zb0002 > 0 {
var za0002 string var za0001 string
var za0003 dataUsageEntryV6 var za0002 dataUsageEntryV6
zb0003-- zb0002--
za0002, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
bts, err = za0003.UnmarshalMsg(bts) bts, err = za0002.UnmarshalMsg(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache", za0002) err = msgp.WrapError(err, "Cache", za0001)
return return
} }
z.Cache[za0002] = za0003 z.Cache[za0001] = za0002
} }
default: default:
bts, err = msgp.Skip(bts) bts, err = msgp.Skip(bts)
@ -1698,15 +1427,11 @@ func (z *dataUsageCacheV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *dataUsageCacheV6) Msgsize() (s int) { func (z *dataUsageCacheV6) Msgsize() (s int) {
s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.ArrayHeaderSize s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.MapHeaderSize
for za0001 := range z.Disks {
s += msgp.StringPrefixSize + len(z.Disks[za0001])
}
s += 6 + msgp.MapHeaderSize
if z.Cache != nil { if z.Cache != nil {
for za0002, za0003 := range z.Cache { for za0001, za0002 := range z.Cache {
_ = za0003 _ = za0002
s += msgp.StringPrefixSize + len(za0002) + za0003.Msgsize() s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize()
} }
} }
return return

View File

@@ -67,7 +67,7 @@ func TestDataUsageUpdate(t *testing.T) {
 		return
 	}
 
-	got, err := scanDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
+	got, err := scanDataFolder(context.Background(), 0, 0, base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -178,7 +178,7 @@ func TestDataUsageUpdate(t *testing.T) {
 	}
 	// Changed dir must be picked up in this many cycles.
 	for i := 0; i < dataUsageUpdateDirCycles; i++ {
-		got, err = scanDataFolder(context.Background(), base, got, getSize)
+		got, err = scanDataFolder(context.Background(), 0, 0, base, got, getSize)
 		got.Info.NextCycle++
 		if err != nil {
 			t.Fatal(err)
@@ -289,7 +289,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
 		}
 		return
 	}
-	got, err := scanDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize)
+	got, err := scanDataFolder(context.Background(), 0, 0, base, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -423,7 +423,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
 	}
 	// Changed dir must be picked up in this many cycles.
 	for i := 0; i < dataUsageUpdateDirCycles; i++ {
-		got, err = scanDataFolder(context.Background(), base, got, getSize)
+		got, err = scanDataFolder(context.Background(), 0, 0, base, got, getSize)
 		got.Info.NextCycle++
 		if err != nil {
 			t.Fatal(err)
@@ -575,7 +575,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
 		}
 		return
 	}
-	want, err := scanDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
+	want, err := scanDataFolder(context.Background(), 0, 0, base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
 	if err != nil {
 		t.Fatal(err)
 	}

View File

@@ -357,23 +357,6 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, bf
 		return nil
 	}
 
-	// Collect disks for healing.
-	allDisks := er.getDisks()
-	allDiskIDs := make([]string, 0, len(allDisks))
-	for _, disk := range allDisks {
-		if disk == OfflineDisk {
-			// its possible that disk is OfflineDisk
-			continue
-		}
-		id, _ := disk.GetDiskID()
-		if id == "" {
-			// its possible that disk is unformatted
-			// or just went offline
-			continue
-		}
-		allDiskIDs = append(allDiskIDs, id)
-	}
-
 	// Load bucket totals
 	oldCache := dataUsageCache{}
 	if err := oldCache.load(ctx, er, dataUsageCacheName); err != nil {
@@ -479,7 +462,6 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, bf
 			}
 			cache.Info.BloomFilter = bloom
 			cache.Info.SkipHealing = healing
-			cache.Disks = allDiskIDs
 			cache.Info.NextCycle = wantCycle
 			if cache.Info.Name != bucket.Name {
 				logger.LogIf(ctx, fmt.Errorf("cache name mismatch: %s != %s", cache.Info.Name, bucket.Name))

View File

@@ -352,7 +352,7 @@ func (fs *FSObjects) scanBucket(ctx context.Context, bucket string, cache dataUs
 	}
 
 	// Load bucket info.
-	cache, err = scanDataFolder(ctx, fs.fsPath, cache, func(item scannerItem) (sizeSummary, error) {
+	cache, err = scanDataFolder(ctx, -1, -1, fs.fsPath, cache, func(item scannerItem) (sizeSummary, error) {
 		bucket, object := item.bucket, item.objectPath()
 		fsMetaBytes, err := xioutil.ReadFile(pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile))
 		if err != nil && !osIsNotExist(err) {

View File

@@ -113,10 +113,6 @@ func path2BucketObject(s string) (bucket, prefix string) {
 	return path2BucketObjectWithBasePath("", s)
 }
 
-func getReadQuorum(drive int) int {
-	return drive - getDefaultParityBlocks(drive)
-}
-
 func getWriteQuorum(drive int) int {
 	parity := getDefaultParityBlocks(drive)
 	quorum := drive - parity

View File

@@ -451,7 +451,9 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates
 	cache.Info.updates = updates
 
-	dataUsageInfo, err := scanDataFolder(ctx, s.diskPath, cache, func(item scannerItem) (sizeSummary, error) {
+	poolIdx, setIdx, _ := s.GetDiskLoc()
+
+	dataUsageInfo, err := scanDataFolder(ctx, poolIdx, setIdx, s.diskPath, cache, func(item scannerItem) (sizeSummary, error) {
 		// Look for `xl.meta/xl.json' at the leaf.
 		if !strings.HasSuffix(item.Path, SlashSeparator+xlStorageFormatFile) &&
 			!strings.HasSuffix(item.Path, SlashSeparator+xlStorageFormatFileV1) {