diff --git a/cmd/data-scanner.go b/cmd/data-scanner.go
index b177ded90..5cebf342f 100644
--- a/cmd/data-scanner.go
+++ b/cmd/data-scanner.go
@@ -22,6 +22,7 @@ import (
 	"context"
 	"encoding/binary"
 	"errors"
+	"fmt"
 	"io/fs"
 	"math"
 	"math/rand"
@@ -33,6 +34,7 @@ import (
 	"time"

 	"github.com/bits-and-blooms/bloom/v3"
+	"github.com/dustin/go-humanize"
 	"github.com/minio/madmin-go"
 	"github.com/minio/minio/internal/bucket/lifecycle"
 	"github.com/minio/minio/internal/bucket/replication"
@@ -181,7 +183,8 @@ type folderScanner struct {
 	healFolderInclude uint32 // Include a clean folder one in n cycles.
 	healObjectSelect  uint32 // Do a heal check on an object once every n cycles. Must divide into healFolderInclude

-	disks []StorageAPI
+	disks       []StorageAPI
+	disksQuorum int

 	// If set updates will be sent regularly to this channel.
 	// Will not be closed when returned.
@@ -247,7 +250,7 @@ var globalScannerStats scannerStats
 // The returned cache will always be valid, but may not be updated from the existing.
 // Before each operation sleepDuration is called which can be used to temporarily halt the scanner.
 // If the supplied context is canceled the function will return at the first chance.
-func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache, getSize getSizeFn) (dataUsageCache, error) {
+func scanDataFolder(ctx context.Context, poolIdx, setIdx int, basePath string, cache dataUsageCache, getSize getSizeFn) (dataUsageCache, error) {
 	t := UTCNow()

 	logPrefix := color.Green("data-usage: ")
@@ -280,13 +283,15 @@ func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
 	}

 	// Add disks for set healing.
-	if len(cache.Disks) > 0 {
+	if poolIdx >= 0 && setIdx >= 0 {
 		objAPI, ok := newObjectLayerFn().(*erasureServerPools)
 		if ok {
-			s.disks = objAPI.GetDisksID(cache.Disks...)
-			if len(s.disks) != len(cache.Disks) {
-				console.Debugf(logPrefix+"Missing disks, want %d, found %d. Cannot heal. %s\n", len(cache.Disks), len(s.disks), logSuffix)
-				s.disks = s.disks[:0]
+			if poolIdx < len(objAPI.serverPools) && setIdx < len(objAPI.serverPools[poolIdx].sets) {
+				// Pass the disks belonging to the set.
+				s.disks = objAPI.serverPools[poolIdx].sets[setIdx].getDisks()
+				s.disksQuorum = objAPI.serverPools[poolIdx].sets[setIdx].defaultRQuorum()
+			} else {
+				logger.LogIf(ctx, fmt.Errorf("Matching pool %s, set %s not found", humanize.Ordinal(poolIdx+1), humanize.Ordinal(setIdx+1)))
 			}
 		}
 	}
@@ -623,7 +628,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
 		}

 		objAPI, ok := newObjectLayerFn().(*erasureServerPools)
-		if !ok || len(f.disks) == 0 {
+		if !ok || len(f.disks) == 0 || f.disksQuorum == 0 {
 			break
 		}

@@ -645,8 +650,8 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
 		// This means that the next run will not look for it.
 		// How to resolve results.
 		resolver := metadataResolutionParams{
-			dirQuorum: getReadQuorum(len(f.disks)),
-			objQuorum: getReadQuorum(len(f.disks)),
+			dirQuorum: f.disksQuorum,
+			objQuorum: f.disksQuorum,
 			bucket:    "",
 			strict:    false,
 		}
@@ -676,8 +681,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
 			path:           prefix,
 			recursive:      true,
 			reportNotFound: true,
-			minDisks:       len(f.disks), // We want full consistency.
-			// Weird, maybe transient error.
+			minDisks:       f.disksQuorum,
 			agreed: func(entry metaCacheEntry) {
 				if f.dataUsageScannerDebug {
 					console.Debugf(healObjectsPrefix+" got agreement: %v\n", entry.name)
diff --git a/cmd/data-usage-cache.go b/cmd/data-usage-cache.go
index 84c5e6846..a89f9ca8a 100644
--- a/cmd/data-usage-cache.go
+++ b/cmd/data-usage-cache.go
@@ -225,7 +225,6 @@ type dataUsageEntryV6 struct {
 type dataUsageCache struct {
 	Info  dataUsageCacheInfo
 	Cache map[string]dataUsageEntry
-	Disks []string
 }

 //msgp:encode ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4 dataUsageCacheV5 dataUsageCacheV6
@@ -234,35 +233,30 @@ type dataUsageCache struct {
 // dataUsageCacheV2 contains a cache of data usage entries version 2.
 type dataUsageCacheV2 struct {
 	Info  dataUsageCacheInfo
-	Disks []string
 	Cache map[string]dataUsageEntryV2
 }

 // dataUsageCacheV3 contains a cache of data usage entries version 3.
 type dataUsageCacheV3 struct {
 	Info  dataUsageCacheInfo
-	Disks []string
 	Cache map[string]dataUsageEntryV3
 }

 // dataUsageCacheV4 contains a cache of data usage entries version 4.
 type dataUsageCacheV4 struct {
 	Info  dataUsageCacheInfo
-	Disks []string
 	Cache map[string]dataUsageEntryV4
 }

 // dataUsageCacheV5 contains a cache of data usage entries version 5.
 type dataUsageCacheV5 struct {
 	Info  dataUsageCacheInfo
-	Disks []string
 	Cache map[string]dataUsageEntryV5
 }

 // dataUsageCacheV6 contains a cache of data usage entries version 6.
 type dataUsageCacheV6 struct {
 	Info  dataUsageCacheInfo
-	Disks []string
 	Cache map[string]dataUsageEntryV6
 }

@@ -1037,7 +1031,6 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
 			return err
 		}
 		d.Info = dold.Info
-		d.Disks = dold.Disks
 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
 		for k, v := range dold.Cache {
 			d.Cache[k] = dataUsageEntry{
@@ -1061,7 +1054,6 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
 			return err
 		}
 		d.Info = dold.Info
-		d.Disks = dold.Disks
 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
 		for k, v := range dold.Cache {
 			due := dataUsageEntry{
@@ -1101,7 +1093,6 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
 			return err
 		}
 		d.Info = dold.Info
-		d.Disks = dold.Disks
 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
 		for k, v := range dold.Cache {
 			due := dataUsageEntry{
@@ -1153,7 +1144,6 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
 			return err
 		}
 		d.Info = dold.Info
-		d.Disks = dold.Disks
 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
 		for k, v := range dold.Cache {
 			due := dataUsageEntry{
@@ -1205,7 +1195,6 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
 			return err
 		}
 		d.Info = dold.Info
-		d.Disks = dold.Disks
 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
 		for k, v := range dold.Cache {
 			var replicationStats *replicationAllStats
diff --git a/cmd/data-usage-cache_gen.go b/cmd/data-usage-cache_gen.go
index deb79d3cb..3fc0c7cae 100644
--- a/cmd/data-usage-cache_gen.go
+++ b/cmd/data-usage-cache_gen.go
@@ -338,25 +338,6 @@ func (z *dataUsageCache) DecodeMsg(dc *msgp.Reader) (err error) {
 			}
 			z.Cache[za0001] = za0002
 		}
-	case "Disks":
-		var zb0003 uint32
-		zb0003, err = dc.ReadArrayHeader()
-		if err != nil {
-			err = msgp.WrapError(err, "Disks")
-			return
-		}
-		if cap(z.Disks) >= int(zb0003) {
-			z.Disks = (z.Disks)[:zb0003]
-		} else {
-			z.Disks = make([]string, zb0003)
-		}
-		for za0003 := range z.Disks {
-			z.Disks[za0003], err = dc.ReadString()
-			if err != nil {
-				err = msgp.WrapError(err, "Disks", za0003)
-				return
-			}
-		}
 	default:
 		err = dc.Skip()
 		if err != nil {
@@ -370,9 +351,9 @@ func (z *dataUsageCache) DecodeMsg(dc *msgp.Reader) (err error) {

 // EncodeMsg implements msgp.Encodable
 func (z *dataUsageCache) EncodeMsg(en *msgp.Writer) (err error) {
-	// map header, size 3
+	// map header, size 2
 	// write "Info"
-	err = en.Append(0x83, 0xa4, 0x49, 0x6e, 0x66, 0x6f)
+	err = en.Append(0x82, 0xa4, 0x49, 0x6e, 0x66, 0x6f)
 	if err != nil {
 		return
 	}
@@ -403,32 +384,15 @@ func (z *dataUsageCache) EncodeMsg(en *msgp.Writer) (err error) {
 			return
 		}
 	}
-	// write "Disks"
-	err = en.Append(0xa5, 0x44, 0x69, 0x73, 0x6b, 0x73)
-	if err != nil {
-		return
-	}
-	err = en.WriteArrayHeader(uint32(len(z.Disks)))
-	if err != nil {
-		err = msgp.WrapError(err, "Disks")
-		return
-	}
-	for za0003 := range z.Disks {
-		err = en.WriteString(z.Disks[za0003])
-		if err != nil {
-			err = msgp.WrapError(err, "Disks", za0003)
-			return
-		}
-	}
 	return
 }

 // MarshalMsg implements msgp.Marshaler
 func (z *dataUsageCache) MarshalMsg(b []byte) (o []byte, err error) {
 	o = msgp.Require(b, z.Msgsize())
-	// map header, size 3
+	// map header, size 2
 	// string "Info"
-	o = append(o, 0x83, 0xa4, 0x49, 0x6e, 0x66, 0x6f)
+	o = append(o, 0x82, 0xa4, 0x49, 0x6e, 0x66, 0x6f)
 	o, err = z.Info.MarshalMsg(o)
 	if err != nil {
 		err = msgp.WrapError(err, "Info")
@@ -445,12 +409,6 @@ func (z *dataUsageCache) MarshalMsg(b []byte) (o []byte, err error) {
 			return
 		}
 	}
-	// string "Disks"
-	o = append(o, 0xa5, 0x44, 0x69, 0x73, 0x6b, 0x73)
-	o = msgp.AppendArrayHeader(o, uint32(len(z.Disks)))
-	for za0003 := range z.Disks {
-		o = msgp.AppendString(o, z.Disks[za0003])
-	}
 	return
 }

@@ -508,25 +466,6 @@ func (z *dataUsageCache) UnmarshalMsg(bts []byte) (o []byte, err error) {
 			}
 			z.Cache[za0001] = za0002
 		}
-	case "Disks":
-		var zb0003 uint32
-		zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
-		if err != nil {
-			err = msgp.WrapError(err, "Disks")
-			return
-		}
-		if cap(z.Disks) >= int(zb0003) {
-			z.Disks = (z.Disks)[:zb0003]
-		} else {
-			z.Disks = make([]string, zb0003)
-		}
-		for za0003 := range z.Disks {
-			z.Disks[za0003], bts, err = msgp.ReadStringBytes(bts)
-			if err != nil {
-				err = msgp.WrapError(err, "Disks", za0003)
-				return
-			}
-		}
 	default:
 		bts, err = msgp.Skip(bts)
 		if err != nil {
@@ -548,10 +487,6 @@ func (z *dataUsageCache) Msgsize() (s int) {
 			s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize()
 		}
 	}
-	s += 6 + msgp.ArrayHeaderSize
-	for za0003 := range z.Disks {
-		s += msgp.StringPrefixSize + len(z.Disks[za0003])
-	}
 	return
 }

@@ -811,54 +746,35 @@ func (z *dataUsageCacheV2) DecodeMsg(dc *msgp.Reader) (err error) {
 				err = msgp.WrapError(err, "Info")
 				return
 			}
-		case "Disks":
-			var zb0002 uint32
-			zb0002, err = dc.ReadArrayHeader()
-			if err != nil {
-				err = msgp.WrapError(err, "Disks")
-				return
-			}
-			if cap(z.Disks) >= int(zb0002) {
-				z.Disks = (z.Disks)[:zb0002]
-			} else {
-				z.Disks = make([]string, zb0002)
-			}
-			for za0001 := range z.Disks {
-				z.Disks[za0001], err = dc.ReadString()
-				if err != nil {
-					err = msgp.WrapError(err, "Disks", za0001)
-					return
-				}
-			}
 		case "Cache":
-			var zb0003 uint32
-			zb0003, err = dc.ReadMapHeader()
+			var zb0002 uint32
+			zb0002, err = dc.ReadMapHeader()
 			if err != nil {
 				err = msgp.WrapError(err, "Cache")
 				return
 			}
 			if z.Cache == nil {
-				z.Cache = make(map[string]dataUsageEntryV2, zb0003)
+				z.Cache = make(map[string]dataUsageEntryV2, zb0002)
 			} else if len(z.Cache) > 0 {
 				for key := range z.Cache {
 					delete(z.Cache, key)
 				}
 			}
-			for zb0003 > 0 {
-				zb0003--
-				var za0002 string
-				var za0003 dataUsageEntryV2
-				za0002, err = dc.ReadString()
+			for zb0002 > 0 {
+				zb0002--
+				var za0001 string
+				var za0002 dataUsageEntryV2
+				za0001, err = dc.ReadString()
 				if err != nil {
 					err = msgp.WrapError(err, "Cache")
 					return
 				}
-				err = za0003.DecodeMsg(dc)
+				err = za0002.DecodeMsg(dc)
 				if err != nil {
-					err = msgp.WrapError(err, "Cache", za0002)
+					err = msgp.WrapError(err, "Cache", za0001)
 					return
 				}
-				z.Cache[za0002] = za0003
+				z.Cache[za0001] = za0002
 			}
 		default:
 			err = dc.Skip()
@@ -895,54 +811,35 @@ func (z *dataUsageCacheV2) UnmarshalMsg(bts []byte) (o []byte, err error) {
 				err = msgp.WrapError(err, "Info")
 				return
 			}
-		case "Disks":
-			var zb0002 uint32
-			zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
-			if err != nil {
-				err = msgp.WrapError(err, "Disks")
-				return
-			}
-			if cap(z.Disks) >= int(zb0002) {
-				z.Disks = (z.Disks)[:zb0002]
-			} else {
-				z.Disks = make([]string, zb0002)
-			}
-			for za0001 := range z.Disks {
-				z.Disks[za0001], bts, err = msgp.ReadStringBytes(bts)
-				if err != nil {
-					err = msgp.WrapError(err, "Disks", za0001)
-					return
-				}
-			}
 		case "Cache":
-			var zb0003 uint32
-			zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+			var zb0002 uint32
+			zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
 			if err != nil {
 				err = msgp.WrapError(err, "Cache")
 				return
 			}
 			if z.Cache == nil {
-				z.Cache = make(map[string]dataUsageEntryV2, zb0003)
+				z.Cache = make(map[string]dataUsageEntryV2, zb0002)
 			} else if len(z.Cache) > 0 {
 				for key := range z.Cache {
 					delete(z.Cache, key)
 				}
 			}
-			for zb0003 > 0 {
-				var za0002 string
-				var za0003 dataUsageEntryV2
-				zb0003--
-				za0002, bts, err = msgp.ReadStringBytes(bts)
+			for zb0002 > 0 {
+				var za0001 string
+				var za0002 dataUsageEntryV2
+				zb0002--
+				za0001, bts, err = msgp.ReadStringBytes(bts)
 				if err != nil {
 					err = msgp.WrapError(err, "Cache")
 					return
 				}
-				bts, err = za0003.UnmarshalMsg(bts)
+				bts, err = za0002.UnmarshalMsg(bts)
 				if err != nil {
-					err = msgp.WrapError(err, "Cache", za0002)
+					err = msgp.WrapError(err, "Cache", za0001)
 					return
 				}
-				z.Cache[za0002] = za0003
+				z.Cache[za0001] = za0002
 			}
 		default:
 			bts, err = msgp.Skip(bts)
@@ -958,15 +855,11 @@ func (z *dataUsageCacheV2) UnmarshalMsg(bts []byte) (o []byte, err error) {

 // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *dataUsageCacheV2) Msgsize() (s int) {
-	s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.ArrayHeaderSize
-	for za0001 := range z.Disks {
-		s += msgp.StringPrefixSize + len(z.Disks[za0001])
-	}
-	s += 6 + msgp.MapHeaderSize
+	s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.MapHeaderSize
 	if z.Cache != nil {
-		for za0002, za0003 := range z.Cache {
-			_ = za0003
-			s += msgp.StringPrefixSize + len(za0002) + za0003.Msgsize()
+		for za0001, za0002 := range z.Cache {
+			_ = za0002
+			s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize()
 		}
 	}
 	return
@@ -996,54 +889,35 @@ func (z *dataUsageCacheV3) DecodeMsg(dc *msgp.Reader) (err error) {
 				err = msgp.WrapError(err, "Info")
 				return
 			}
-		case "Disks":
-			var zb0002 uint32
-			zb0002, err = dc.ReadArrayHeader()
-			if err != nil {
-				err = msgp.WrapError(err, "Disks")
-				return
-			}
-			if cap(z.Disks) >= int(zb0002) {
-				z.Disks = (z.Disks)[:zb0002]
-			} else {
-				z.Disks = make([]string, zb0002)
-			}
-			for za0001 := range z.Disks {
-				z.Disks[za0001], err = dc.ReadString()
-				if err != nil {
-					err = msgp.WrapError(err, "Disks", za0001)
-					return
-				}
-			}
 		case "Cache":
-			var zb0003 uint32
-			zb0003, err = dc.ReadMapHeader()
+			var zb0002 uint32
+			zb0002, err = dc.ReadMapHeader()
 			if err != nil {
 				err = msgp.WrapError(err, "Cache")
 				return
 			}
 			if z.Cache == nil {
-				z.Cache = make(map[string]dataUsageEntryV3, zb0003)
+				z.Cache = make(map[string]dataUsageEntryV3, zb0002)
 			} else if len(z.Cache) > 0 {
 				for key := range z.Cache {
 					delete(z.Cache, key)
 				}
 			}
-			for zb0003 > 0 {
-				zb0003--
-				var za0002 string
-				var za0003 dataUsageEntryV3
-				za0002, err = dc.ReadString()
+			for zb0002 > 0 {
+				zb0002--
+				var za0001 string
+				var za0002 dataUsageEntryV3
+				za0001, err = dc.ReadString()
 				if err != nil {
 					err = msgp.WrapError(err, "Cache")
 					return
 				}
-				err = za0003.DecodeMsg(dc)
+				err = za0002.DecodeMsg(dc)
 				if err != nil {
-					err = msgp.WrapError(err, "Cache", za0002)
+					err = msgp.WrapError(err, "Cache", za0001)
 					return
 				}
-				z.Cache[za0002] = za0003
+				z.Cache[za0001] = za0002
 			}
 		default:
 			err = dc.Skip()
@@ -1080,54 +954,35 @@ func (z *dataUsageCacheV3) UnmarshalMsg(bts []byte) (o []byte, err error) {
 				err = msgp.WrapError(err, "Info")
 				return
 			}
-		case "Disks":
-			var zb0002 uint32
-			zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
-			if err != nil {
-				err = msgp.WrapError(err, "Disks")
-				return
-			}
-			if cap(z.Disks) >= int(zb0002) {
-				z.Disks = (z.Disks)[:zb0002]
-			} else {
-				z.Disks = make([]string, zb0002)
-			}
-			for za0001 := range z.Disks {
-				z.Disks[za0001], bts, err = msgp.ReadStringBytes(bts)
-				if err != nil {
-					err = msgp.WrapError(err, "Disks", za0001)
-					return
-				}
-			}
 		case "Cache":
-			var zb0003 uint32
-			zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+			var zb0002 uint32
+			zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
 			if err != nil {
 				err = msgp.WrapError(err, "Cache")
 				return
 			}
 			if z.Cache == nil {
-				z.Cache = make(map[string]dataUsageEntryV3, zb0003)
+				z.Cache = make(map[string]dataUsageEntryV3, zb0002)
 			} else if len(z.Cache) > 0 {
 				for key := range z.Cache {
 					delete(z.Cache, key)
 				}
 			}
-			for zb0003 > 0 {
-				var za0002 string
-				var za0003 dataUsageEntryV3
-				zb0003--
-				za0002, bts, err = msgp.ReadStringBytes(bts)
+			for zb0002 > 0 {
+				var za0001 string
+				var za0002 dataUsageEntryV3
+				zb0002--
+				za0001, bts, err = msgp.ReadStringBytes(bts)
 				if err != nil {
 					err = msgp.WrapError(err, "Cache")
 					return
 				}
-				bts, err = za0003.UnmarshalMsg(bts)
+				bts, err = za0002.UnmarshalMsg(bts)
 				if err != nil {
-					err = msgp.WrapError(err, "Cache", za0002)
+					err = msgp.WrapError(err, "Cache", za0001)
 					return
 				}
-				z.Cache[za0002] = za0003
+				z.Cache[za0001] = za0002
 			}
 		default:
 			bts, err = msgp.Skip(bts)
@@ -1143,15 +998,11 @@ func (z *dataUsageCacheV3) UnmarshalMsg(bts []byte) (o []byte, err error) {

 // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *dataUsageCacheV3) Msgsize() (s int) {
-	s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.ArrayHeaderSize
-	for za0001 := range z.Disks {
-		s += msgp.StringPrefixSize + len(z.Disks[za0001])
-	}
-	s += 6 + msgp.MapHeaderSize
+	s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.MapHeaderSize
 	if z.Cache != nil {
-		for za0002, za0003 := range z.Cache {
-			_ = za0003
-			s += msgp.StringPrefixSize + len(za0002) + za0003.Msgsize()
+		for za0001, za0002 := range z.Cache {
+			_ = za0002
+			s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize()
 		}
 	}
 	return
@@ -1181,54 +1032,35 @@ func (z *dataUsageCacheV4) DecodeMsg(dc *msgp.Reader) (err error) {
 				err = msgp.WrapError(err, "Info")
 				return
 			}
-		case "Disks":
-			var zb0002 uint32
-			zb0002, err = dc.ReadArrayHeader()
-			if err != nil {
-				err = msgp.WrapError(err, "Disks")
-				return
-			}
-			if cap(z.Disks) >= int(zb0002) {
-				z.Disks = (z.Disks)[:zb0002]
-			} else {
-				z.Disks = make([]string, zb0002)
-			}
-			for za0001 := range z.Disks {
-				z.Disks[za0001], err = dc.ReadString()
-				if err != nil {
-					err = msgp.WrapError(err, "Disks", za0001)
-					return
-				}
-			}
 		case "Cache":
-			var zb0003 uint32
-			zb0003, err = dc.ReadMapHeader()
+			var zb0002 uint32
+			zb0002, err = dc.ReadMapHeader()
 			if err != nil {
 				err = msgp.WrapError(err, "Cache")
 				return
 			}
 			if z.Cache == nil {
-				z.Cache = make(map[string]dataUsageEntryV4, zb0003)
+				z.Cache = make(map[string]dataUsageEntryV4, zb0002)
 			} else if len(z.Cache) > 0 {
 				for key := range z.Cache {
 					delete(z.Cache, key)
 				}
 			}
-			for zb0003 > 0 {
-				zb0003--
-				var za0002 string
-				var za0003 dataUsageEntryV4
-				za0002, err = dc.ReadString()
+			for zb0002 > 0 {
+				zb0002--
+				var za0001 string
+				var za0002 dataUsageEntryV4
+				za0001, err = dc.ReadString()
 				if err != nil {
 					err = msgp.WrapError(err, "Cache")
 					return
 				}
-				err = za0003.DecodeMsg(dc)
+				err = za0002.DecodeMsg(dc)
 				if err != nil {
-					err = msgp.WrapError(err, "Cache", za0002)
+					err = msgp.WrapError(err, "Cache", za0001)
 					return
 				}
-				z.Cache[za0002] = za0003
+				z.Cache[za0001] = za0002
 			}
 		default:
 			err = dc.Skip()
@@ -1265,54 +1097,35 @@ func (z *dataUsageCacheV4) UnmarshalMsg(bts []byte) (o []byte, err error) {
 				err = msgp.WrapError(err, "Info")
 				return
 			}
-		case "Disks":
-			var zb0002 uint32
-			zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
-			if err != nil {
-				err = msgp.WrapError(err, "Disks")
-				return
-			}
-			if cap(z.Disks) >= int(zb0002) {
-				z.Disks = (z.Disks)[:zb0002]
-			} else {
-				z.Disks = make([]string, zb0002)
-			}
-			for za0001 := range z.Disks {
-				z.Disks[za0001], bts, err = msgp.ReadStringBytes(bts)
-				if err != nil {
-					err = msgp.WrapError(err, "Disks", za0001)
-					return
-				}
-			}
 		case "Cache":
-			var zb0003 uint32
-			zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+			var zb0002 uint32
+			zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
 			if err != nil {
 				err = msgp.WrapError(err, "Cache")
 				return
 			}
 			if z.Cache == nil {
-				z.Cache = make(map[string]dataUsageEntryV4, zb0003)
+				z.Cache = make(map[string]dataUsageEntryV4, zb0002)
 			} else if len(z.Cache) > 0 {
 				for key := range z.Cache {
 					delete(z.Cache, key)
 				}
 			}
-			for zb0003 > 0 {
-				var za0002 string
-				var za0003 dataUsageEntryV4
-				zb0003--
-				za0002, bts, err = msgp.ReadStringBytes(bts)
+			for zb0002 > 0 {
+				var za0001 string
+				var za0002 dataUsageEntryV4
+				zb0002--
+				za0001, bts, err = msgp.ReadStringBytes(bts)
 				if err != nil {
 					err = msgp.WrapError(err, "Cache")
 					return
 				}
-				bts, err = za0003.UnmarshalMsg(bts)
+				bts, err = za0002.UnmarshalMsg(bts)
 				if err != nil {
-					err = msgp.WrapError(err, "Cache", za0002)
+					err = msgp.WrapError(err, "Cache", za0001)
 					return
 				}
-				z.Cache[za0002] = za0003
+				z.Cache[za0001] = za0002
 			}
 		default:
 			bts, err = msgp.Skip(bts)
@@ -1328,15 +1141,11 @@ func (z *dataUsageCacheV4) UnmarshalMsg(bts []byte) (o []byte, err error) {

 // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *dataUsageCacheV4) Msgsize() (s int) {
-	s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.ArrayHeaderSize
-	for za0001 := range z.Disks {
-		s += msgp.StringPrefixSize + len(z.Disks[za0001])
-	}
-	s += 6 + msgp.MapHeaderSize
+	s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.MapHeaderSize
 	if z.Cache != nil {
-		for za0002, za0003 := range z.Cache {
-			_ = za0003
-			s += msgp.StringPrefixSize + len(za0002) + za0003.Msgsize()
+		for za0001, za0002 := range z.Cache {
+			_ = za0002
+			s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize()
 		}
 	}
 	return
@@ -1366,54 +1175,35 @@ func (z *dataUsageCacheV5) DecodeMsg(dc *msgp.Reader) (err error) {
 				err = msgp.WrapError(err, "Info")
 				return
 			}
-		case "Disks":
-			var zb0002 uint32
-			zb0002, err = dc.ReadArrayHeader()
-			if err != nil {
-				err = msgp.WrapError(err, "Disks")
-				return
-			}
-			if cap(z.Disks) >= int(zb0002) {
-				z.Disks = (z.Disks)[:zb0002]
-			} else {
-				z.Disks = make([]string, zb0002)
-			}
-			for za0001 := range z.Disks {
-				z.Disks[za0001], err = dc.ReadString()
-				if err != nil {
-					err = msgp.WrapError(err, "Disks", za0001)
-					return
-				}
-			}
 		case "Cache":
-			var zb0003 uint32
-			zb0003, err = dc.ReadMapHeader()
+			var zb0002 uint32
+			zb0002, err = dc.ReadMapHeader()
 			if err != nil {
 				err = msgp.WrapError(err, "Cache")
 				return
 			}
 			if z.Cache == nil {
-				z.Cache = make(map[string]dataUsageEntryV5, zb0003)
+				z.Cache = make(map[string]dataUsageEntryV5, zb0002)
 			} else if len(z.Cache) > 0 {
 				for key := range z.Cache {
 					delete(z.Cache, key)
 				}
 			}
-			for zb0003 > 0 {
-				zb0003--
-				var za0002 string
-				var za0003 dataUsageEntryV5
-				za0002, err = dc.ReadString()
+			for zb0002 > 0 {
+				zb0002--
+				var za0001 string
+				var za0002 dataUsageEntryV5
+				za0001, err = dc.ReadString()
 				if err != nil {
 					err = msgp.WrapError(err, "Cache")
 					return
 				}
-				err = za0003.DecodeMsg(dc)
+				err = za0002.DecodeMsg(dc)
 				if err != nil {
-					err = msgp.WrapError(err, "Cache", za0002)
+					err = msgp.WrapError(err, "Cache", za0001)
 					return
 				}
-				z.Cache[za0002] = za0003
+				z.Cache[za0001] = za0002
 			}
 		default:
 			err = dc.Skip()
@@ -1450,54 +1240,35 @@ func (z *dataUsageCacheV5) UnmarshalMsg(bts []byte) (o []byte, err error) {
 				err = msgp.WrapError(err, "Info")
 				return
 			}
-		case "Disks":
-			var zb0002 uint32
-			zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
-			if err != nil {
-				err = msgp.WrapError(err, "Disks")
-				return
-			}
-			if cap(z.Disks) >= int(zb0002) {
-				z.Disks = (z.Disks)[:zb0002]
-			} else {
-				z.Disks = make([]string, zb0002)
-			}
-			for za0001 := range z.Disks {
-				z.Disks[za0001], bts, err = msgp.ReadStringBytes(bts)
-				if err != nil {
-					err = msgp.WrapError(err, "Disks", za0001)
-					return
-				}
-			}
 		case "Cache":
-			var zb0003 uint32
-			zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+			var zb0002 uint32
+			zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
 			if err != nil {
 				err = msgp.WrapError(err, "Cache")
 				return
 			}
 			if z.Cache == nil {
-				z.Cache = make(map[string]dataUsageEntryV5, zb0003)
+				z.Cache = make(map[string]dataUsageEntryV5, zb0002)
 			} else if len(z.Cache) > 0 {
 				for key := range z.Cache {
 					delete(z.Cache, key)
 				}
 			}
-			for zb0003 > 0 {
-				var za0002 string
-				var za0003 dataUsageEntryV5
-				zb0003--
-				za0002, bts, err = msgp.ReadStringBytes(bts)
+			for zb0002 > 0 {
+				var za0001 string
+				var za0002 dataUsageEntryV5
+				zb0002--
+				za0001, bts, err = msgp.ReadStringBytes(bts)
 				if err != nil {
 					err = msgp.WrapError(err, "Cache")
 					return
 				}
-				bts, err = za0003.UnmarshalMsg(bts)
+				bts, err = za0002.UnmarshalMsg(bts)
 				if err != nil {
-					err = msgp.WrapError(err, "Cache", za0002)
+					err = msgp.WrapError(err, "Cache", za0001)
 					return
 				}
-				z.Cache[za0002] = za0003
+				z.Cache[za0001] = za0002
 			}
 		default:
 			bts, err = msgp.Skip(bts)
@@ -1513,15 +1284,11 @@ func (z *dataUsageCacheV5) UnmarshalMsg(bts []byte) (o []byte, err error) {

 // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *dataUsageCacheV5) Msgsize() (s int) {
-	s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.ArrayHeaderSize
-	for za0001 := range z.Disks {
-		s += msgp.StringPrefixSize + len(z.Disks[za0001])
-	}
-	s += 6 + msgp.MapHeaderSize
+	s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.MapHeaderSize
 	if z.Cache != nil {
-		for za0002, za0003 := range z.Cache {
-			_ = za0003
-			s += msgp.StringPrefixSize + len(za0002) + za0003.Msgsize()
+		for za0001, za0002 := range z.Cache {
+			_ = za0002
+			s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize()
 		}
 	}
 	return
@@ -1551,54 +1318,35 @@ func (z *dataUsageCacheV6) DecodeMsg(dc *msgp.Reader) (err error) {
 				err = msgp.WrapError(err, "Info")
 				return
 			}
-		case "Disks":
-			var zb0002 uint32
-			zb0002, err = dc.ReadArrayHeader()
-			if err != nil {
-				err = msgp.WrapError(err, "Disks")
-				return
-			}
-			if cap(z.Disks) >= int(zb0002) {
-				z.Disks = (z.Disks)[:zb0002]
-			} else {
-				z.Disks = make([]string, zb0002)
-			}
-			for za0001 := range z.Disks {
-				z.Disks[za0001], err = dc.ReadString()
-				if err != nil {
-					err = msgp.WrapError(err, "Disks", za0001)
-					return
-				}
-			}
 		case "Cache":
-			var zb0003 uint32
-			zb0003, err = dc.ReadMapHeader()
+			var zb0002 uint32
+			zb0002, err = dc.ReadMapHeader()
 			if err != nil {
 				err = msgp.WrapError(err, "Cache")
 				return
 			}
 			if z.Cache == nil {
-				z.Cache = make(map[string]dataUsageEntryV6, zb0003)
+				z.Cache = make(map[string]dataUsageEntryV6, zb0002)
 			} else if len(z.Cache) > 0 {
 				for key := range z.Cache {
 					delete(z.Cache, key)
 				}
 			}
-			for zb0003 > 0 {
-				zb0003--
-				var za0002 string
-				var za0003 dataUsageEntryV6
-				za0002, err = dc.ReadString()
+			for zb0002 > 0 {
+				zb0002--
+				var za0001 string
+				var za0002 dataUsageEntryV6
+				za0001, err = dc.ReadString()
 				if err != nil {
 					err = msgp.WrapError(err, "Cache")
 					return
 				}
-				err = za0003.DecodeMsg(dc)
+				err = za0002.DecodeMsg(dc)
 				if err != nil {
-					err = msgp.WrapError(err, "Cache", za0002)
+					err = msgp.WrapError(err, "Cache", za0001)
 					return
 				}
-				z.Cache[za0002] = za0003
+				z.Cache[za0001] = za0002
 			}
 		default:
 			err = dc.Skip()
@@ -1635,54 +1383,35 @@ func (z *dataUsageCacheV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
 				err = msgp.WrapError(err, "Info")
 				return
 			}
-		case "Disks":
-			var zb0002 uint32
-			zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
-			if err != nil {
-				err = msgp.WrapError(err, "Disks")
-				return
-			}
-			if cap(z.Disks) >= int(zb0002) {
-				z.Disks = (z.Disks)[:zb0002]
-			} else {
-				z.Disks = make([]string, zb0002)
-			}
-			for za0001 := range z.Disks {
-				z.Disks[za0001], bts, err = msgp.ReadStringBytes(bts)
-				if err != nil {
-					err = msgp.WrapError(err, "Disks", za0001)
-					return
-				}
-			}
 		case "Cache":
-			var zb0003 uint32
-			zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+			var zb0002 uint32
+			zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
 			if err != nil {
 				err = msgp.WrapError(err, "Cache")
 				return
 			}
 			if z.Cache == nil {
-				z.Cache = make(map[string]dataUsageEntryV6, zb0003)
+				z.Cache = make(map[string]dataUsageEntryV6, zb0002)
 			} else if len(z.Cache) > 0 {
 				for key := range z.Cache {
 					delete(z.Cache, key)
 				}
 			}
-			for zb0003 > 0 {
-				var za0002 string
-				var za0003 dataUsageEntryV6
-				zb0003--
-				za0002, bts, err = msgp.ReadStringBytes(bts)
+			for zb0002 > 0 {
+				var za0001 string
+				var za0002 dataUsageEntryV6
+				zb0002--
+				za0001, bts, err = msgp.ReadStringBytes(bts)
 				if err != nil {
 					err = msgp.WrapError(err, "Cache")
 					return
 				}
-				bts, err = za0003.UnmarshalMsg(bts)
+				bts, err = za0002.UnmarshalMsg(bts)
 				if err != nil {
-					err = msgp.WrapError(err, "Cache", za0002)
+					err = msgp.WrapError(err, "Cache", za0001)
 					return
 				}
-				z.Cache[za0002] = za0003
+				z.Cache[za0001] = za0002
 			}
 		default:
 			bts, err = msgp.Skip(bts)
@@ -1698,15 +1427,11 @@ func (z *dataUsageCacheV6) UnmarshalMsg(bts []byte) (o []byte, err error) {

 // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *dataUsageCacheV6) Msgsize() (s int) {
-	s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.ArrayHeaderSize
-	for za0001 := range z.Disks {
-		s += msgp.StringPrefixSize + len(z.Disks[za0001])
-	}
-	s += 6 + msgp.MapHeaderSize
+	s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.MapHeaderSize
 	if z.Cache != nil {
-		for za0002, za0003 := range z.Cache {
-			_ = za0003
-			s += msgp.StringPrefixSize + len(za0002) + za0003.Msgsize()
+		for za0001, za0002 := range z.Cache {
+			_ = za0002
+			s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize()
 		}
 	}
 	return
diff --git a/cmd/data-usage_test.go b/cmd/data-usage_test.go
index 573eea69f..460bc9ae5 100644
--- a/cmd/data-usage_test.go
+++ b/cmd/data-usage_test.go
@@ -67,7 +67,7 @@ func TestDataUsageUpdate(t *testing.T) {
 		return
 	}

-	got, err := scanDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
+	got, err := scanDataFolder(context.Background(), 0, 0, base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -178,7 +178,7 @@ func TestDataUsageUpdate(t *testing.T) {
 	}
 	// Changed dir must be picked up in this many cycles.
 	for i := 0; i < dataUsageUpdateDirCycles; i++ {
-		got, err = scanDataFolder(context.Background(), base, got, getSize)
+		got, err = scanDataFolder(context.Background(), 0, 0, base, got, getSize)
 		got.Info.NextCycle++
 		if err != nil {
 			t.Fatal(err)
@@ -289,7 +289,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
 		}
 		return
 	}
-	got, err := scanDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize)
+	got, err := scanDataFolder(context.Background(), 0, 0, base, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -423,7 +423,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
 	}
 	// Changed dir must be picked up in this many cycles.
 	for i := 0; i < dataUsageUpdateDirCycles; i++ {
-		got, err = scanDataFolder(context.Background(), base, got, getSize)
+		got, err = scanDataFolder(context.Background(), 0, 0, base, got, getSize)
 		got.Info.NextCycle++
 		if err != nil {
 			t.Fatal(err)
@@ -575,7 +575,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
 		}
 		return
 	}
-	want, err := scanDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
+	want, err := scanDataFolder(context.Background(), 0, 0, base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/cmd/erasure.go b/cmd/erasure.go
index 67da63f10..3a7110fb9 100644
--- a/cmd/erasure.go
+++ b/cmd/erasure.go
@@ -357,23 +357,6 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, bf
 		return nil
 	}

-	// Collect disks for healing.
-	allDisks := er.getDisks()
-	allDiskIDs := make([]string, 0, len(allDisks))
-	for _, disk := range allDisks {
-		if disk == OfflineDisk {
-			// its possible that disk is OfflineDisk
-			continue
-		}
-		id, _ := disk.GetDiskID()
-		if id == "" {
-			// its possible that disk is unformatted
-			// or just went offline
-			continue
-		}
-		allDiskIDs = append(allDiskIDs, id)
-	}
-
 	// Load bucket totals
 	oldCache := dataUsageCache{}
 	if err := oldCache.load(ctx, er, dataUsageCacheName); err != nil {
@@ -479,7 +462,6 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, bf
 			}
 			cache.Info.BloomFilter = bloom
 			cache.Info.SkipHealing = healing
-			cache.Disks = allDiskIDs
 			cache.Info.NextCycle = wantCycle
 			if cache.Info.Name != bucket.Name {
 				logger.LogIf(ctx, fmt.Errorf("cache name mismatch: %s != %s", cache.Info.Name, bucket.Name))
diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go
index ae29e61ca..f68c3dccf 100644
--- a/cmd/fs-v1.go
+++ b/cmd/fs-v1.go
@@ -352,7 +352,7 @@ func (fs *FSObjects) scanBucket(ctx context.Context, bucket string, cache dataUs
 	}

 	// Load bucket info.
-	cache, err = scanDataFolder(ctx, fs.fsPath, cache, func(item scannerItem) (sizeSummary, error) {
+	cache, err = scanDataFolder(ctx, -1, -1, fs.fsPath, cache, func(item scannerItem) (sizeSummary, error) {
 		bucket, object := item.bucket, item.objectPath()
 		fsMetaBytes, err := xioutil.ReadFile(pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile))
 		if err != nil && !osIsNotExist(err) {
diff --git a/cmd/utils.go b/cmd/utils.go
index 1ba737b78..d8585d394 100644
--- a/cmd/utils.go
+++ b/cmd/utils.go
@@ -113,10 +113,6 @@ func path2BucketObject(s string) (bucket, prefix string) {
 	return path2BucketObjectWithBasePath("", s)
 }

-func getReadQuorum(drive int) int {
-	return drive - getDefaultParityBlocks(drive)
-}
-
 func getWriteQuorum(drive int) int {
 	parity := getDefaultParityBlocks(drive)
 	quorum := drive - parity
diff --git a/cmd/xl-storage.go b/cmd/xl-storage.go
index f06beef76..4f0bc28e1 100644
--- a/cmd/xl-storage.go
+++ b/cmd/xl-storage.go
@@ -451,7 +451,9 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates
 	cache.Info.updates = updates

-	dataUsageInfo, err := scanDataFolder(ctx, s.diskPath, cache, func(item scannerItem) (sizeSummary, error) {
+	poolIdx, setIdx, _ := s.GetDiskLoc()
+
+	dataUsageInfo, err := scanDataFolder(ctx, poolIdx, setIdx, s.diskPath, cache, func(item scannerItem) (sizeSummary, error) {
 		// Look for `xl.meta/xl.json' at the leaf.
 		if !strings.HasSuffix(item.Path, SlashSeparator+xlStorageFormatFile) &&
 			!strings.HasSuffix(item.Path, SlashSeparator+xlStorageFormatFileV1) {
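
For context (reviewer commentary, not part of the patch): the net effect of this change is that healing inputs are no longer persisted as disk IDs in the usage cache. The scanner re-derives the disk list and read quorum from the live `(poolIdx, setIdx)` topology on each scan, and `-1, -1` (the FS path above) disables set healing. Below is a minimal, self-contained Go sketch of that dispatch convention; `erasureSet`, `healQuorumFor`, and the drive/parity numbers are hypothetical stand-ins for illustration, and only the index convention and the negative-index sentinel come from the diff itself:

```go
package main

import "fmt"

// erasureSet is a stand-in for one erasure set's topology (hypothetical).
type erasureSet struct{ drives, parity int }

// defaultRQuorum mirrors the idea of deriving read quorum from the set's
// erasure config instead of counting cached disk IDs that happened to resolve.
func (s erasureSet) defaultRQuorum() int { return s.drives - s.parity }

// healQuorumFor resolves a set by (poolIdx, setIdx), the way the new
// scanDataFolder signature does. Negative indices mean "no erasure set"
// (FS mode), so set healing is skipped entirely.
func healQuorumFor(poolIdx, setIdx int, pools [][]erasureSet) {
	if poolIdx < 0 || setIdx < 0 {
		fmt.Println("set healing disabled (FS mode)")
		return
	}
	if poolIdx >= len(pools) || setIdx >= len(pools[poolIdx]) {
		// Mirrors the "Matching pool ..., set ... not found" log in the patch.
		fmt.Printf("matching pool %d, set %d not found\n", poolIdx+1, setIdx+1)
		return
	}
	fmt.Println("heal read quorum:", pools[poolIdx][setIdx].defaultRQuorum())
}

func main() {
	pools := [][]erasureSet{{{drives: 12, parity: 4}}}
	healQuorumFor(0, 0, pools)   // heal read quorum: 8
	healQuorumFor(-1, -1, pools) // set healing disabled (FS mode)
}
```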