fix: remove all unused code (#12360)

Harshavardhana 2021-05-24 09:28:19 -07:00 committed by GitHub
parent 41e9c6572f
commit ebf75ef10d
29 changed files with 29 additions and 497 deletions


@@ -19,6 +19,9 @@ linters:
  - structcheck
  - gomodguard
  - gofmt
+  - unused
+  - structcheck
+  - unconvert
issues:
  exclude-use-default: false
@@ -26,11 +29,5 @@
  - should have a package comment
  - error strings should not be capitalized or end with punctuation or a newline
- run:
-   skip-dirs:
-     - pkg/rpc
-     - pkg/argon2
-     - pkg/s3select/internal
service:
  golangci-lint-version: 1.20.0 # use the fixed version to not introduce new linters unexpectedly
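Enabling unused, structcheck, and unconvert is what drives the deletions in the rest of this commit: the first two report unexported Go identifiers and struct fields that are never referenced, and unconvert reports redundant type conversions. A small hypothetical snippet (not from this repository) showing the kind of code the first two flag:

package example

// helperNeverCalled is reported by the unused linter because nothing in the
// package references it.
func helperNeverCalled() int { return 42 }

type record struct {
	Name string
	// count is reported by structcheck because the field is never read or
	// written anywhere in the package.
	count int
}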


@@ -486,22 +486,6 @@ func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
}
}
// resetHealStatusCounters - reset the healSequence status counters between
// each monthly background heal scanning activity.
// This is used only in case of Background healing scenario, where
// we use a single long running healSequence which reactively heals
// objects passed to the SourceCh.
func (h *healSequence) resetHealStatusCounters() {
h.mutex.Lock()
defer h.mutex.Unlock()
h.currentStatus.Items = []madmin.HealResultItem{}
h.lastSentResultIndex = 0
h.scannedItemsMap = make(map[madmin.HealItemType]int64)
h.healedItemsMap = make(map[madmin.HealItemType]int64)
h.healFailedItemsMap = make(map[string]int64)
}
// getScannedItemsCount - returns a count of all scanned items
func (h *healSequence) getScannedItemsCount() int64 {
var count int64


@@ -31,7 +31,7 @@ func TestNewRequestID(t *testing.T) {
var e rune
for _, char := range id {
- e = rune(char)
+ e = char
// Ensure that it is alphanumeric, in this case, between 0-9 and A-Z.
if !(('0' <= e && e <= '9') || ('A' <= e && e <= 'Z')) {
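The conversion dropped here was a no-op: ranging over a string already yields rune values, which is exactly the kind of redundant conversion the newly enabled unconvert linter reports. A minimal standalone sketch (not MinIO code) illustrating the point:

package main

import "fmt"

func main() {
	for _, char := range "A1" {
		// char already has type rune here, so rune(char) would be a redundant
		// conversion that unconvert flags.
		var e rune = char
		fmt.Printf("%c -> %d\n", e, e)
	}
}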


@@ -111,7 +111,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
} else {
textPartData = textData[j*partSize:]
}
- md5hex := getMD5Hash([]byte(textPartData))
+ md5hex := getMD5Hash(textPartData)
var partInfo PartInfo
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
mustGetPutObjReader(b, bytes.NewReader(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
@@ -206,7 +206,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for PutObject.
- md5hex := getMD5Hash([]byte(textData))
+ md5hex := getMD5Hash(textData)
sha256hex := ""
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.


@@ -31,14 +31,6 @@ import (
"github.com/minio/minio/pkg/ioutil"
)
type errHashMismatch struct {
message string
}
func (err *errHashMismatch) Error() string {
return err.message
}
// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow io.WriteCloser


@@ -352,7 +352,7 @@ func (sys *BucketTargetSys) getRemoteARN(bucket string, target *madmin.BucketTar
return tgt.Arn
}
}
- if !madmin.ServiceType(target.Type).IsValid() {
+ if !target.Type.IsValid() {
return ""
}
return generateARN(target)


@@ -137,7 +137,7 @@ func (c *CoreDNS) list(key string, domain bool) ([]SrvRecord, error) {
var srvRecords []SrvRecord
for _, n := range r.Kvs {
var srvRecord SrvRecord
- if err = json.Unmarshal([]byte(n.Value), &srvRecord); err != nil {
+ if err = json.Unmarshal(n.Value, &srvRecord); err != nil {
return nil, err
}
srvRecord.Key = strings.TrimPrefix(string(n.Key), key)


@@ -218,12 +218,6 @@ func (h dataUsageHash) mod(cycle uint32, cycles uint32) bool {
return uint32(xxhash.Sum64String(string(h)))%cycles == cycle%cycles
}
// addChildString will add a child based on its name.
// If it already exists it will not be added again.
func (e *dataUsageEntry) addChildString(name string) {
e.addChild(hashPath(name))
}
// addChild will add a child based on its hash.
// If it already exists it will not be added again.
func (e *dataUsageEntry) addChild(hash dataUsageHash) {
@@ -291,17 +285,6 @@ func (d *dataUsageCache) searchParent(h dataUsageHash) *dataUsageHash {
return nil
}
// Returns nil if not found.
func (d *dataUsageCache) subCache(path string) dataUsageCache {
dst := dataUsageCache{Info: dataUsageCacheInfo{
Name: path,
LastUpdate: d.Info.LastUpdate,
BloomFilter: d.Info.BloomFilter,
}}
dst.copyWithChildren(d, dataUsageHash(hashPath(path).Key()), nil)
return dst
}
// deleteRecursive will delete an entry recursively, but not change its parent.
func (d *dataUsageCache) deleteRecursive(h dataUsageHash) {
if existing, ok := d.Cache[h.String()]; ok {
@@ -313,37 +296,6 @@ func (d *dataUsageCache) deleteRecursive(h dataUsageHash) {
}
}
// deleteChildren will delete any children, but not the entry itself.
func (d *dataUsageCache) deleteChildren(h dataUsageHash) {
if existing, ok := d.Cache[h.String()]; ok {
for child := range existing.Children {
d.deleteRecursive(dataUsageHash(child))
}
}
}
// replaceRootChild will replace the child of root in d with the root of 'other'.
func (d *dataUsageCache) replaceRootChild(other dataUsageCache) {
otherRoot := other.root()
if otherRoot == nil {
logger.LogIf(GlobalContext, errors.New("replaceRootChild: Source has no root"))
return
}
thisRoot := d.root()
if thisRoot == nil {
logger.LogIf(GlobalContext, errors.New("replaceRootChild: Root of current not found"))
return
}
thisRootHash := d.rootHash()
otherRootHash := other.rootHash()
if thisRootHash == otherRootHash {
logger.LogIf(GlobalContext, errors.New("replaceRootChild: Root of child matches root of destination"))
return
}
d.deleteRecursive(other.rootHash())
d.copyWithChildren(&other, other.rootHash(), &thisRootHash)
}
// keepBuckets will keep only the buckets specified and delete all others.
func (d *dataUsageCache) keepBuckets(b []BucketInfo) {
lu := make(map[dataUsageHash]struct{})
@@ -415,16 +367,6 @@ func (d *dataUsageCache) replace(path, parent string, e dataUsageEntry) {
}
}
// listCache will return all cache paths.
func (d *dataUsageCache) listCache() []string {
dst := make([]string, 0, len(d.Cache))
for k := range d.Cache {
dst = append(dst, k)
}
sort.Strings(dst)
return dst
}
// replaceHashed add or replaces an entry to the cache based on its hash.
// If a parent is specified it will be added to that if not already there.
// If the parent does not exist, it will be added.


@@ -80,24 +80,6 @@ func (er erasureObjects) getOnlineDisks() (newDisks []StorageAPI) {
return newDisks
}
// getLoadBalancedNDisks - fetches load balanced (sufficiently randomized) disk slice
// with N disks online. If ndisks is zero or negative, then it will return all disks,
// same if ndisks is greater than the number of all disks.
func (er erasureObjects) getLoadBalancedNDisks(ndisks int) (newDisks []StorageAPI) {
disks := er.getLoadBalancedDisks(ndisks != -1)
for _, disk := range disks {
if disk == nil {
continue
}
newDisks = append(newDisks, disk)
ndisks--
if ndisks == 0 {
break
}
}
return
}
// getLoadBalancedDisks - fetches load balanced (sufficiently randomized) disk slice.
// ensures to skip disks if they are not healing and online.
func (er erasureObjects) getLoadBalancedDisks(optimized bool) []StorageAPI {


@@ -801,82 +801,6 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
return objReaderFn(reader, h, opts.CheckPrecondFn, closeFn, rwPoolUnlocker, nsUnlocker)
}
// getObject - wrapper for GetObject
func (fs *FSObjects) getObject(ctx context.Context, bucket, object string, offset int64, length int64, writer io.Writer, etag string, lock bool) (err error) {
if _, err = fs.statBucketDir(ctx, bucket); err != nil {
return toObjectErr(err, bucket)
}
// Offset cannot be negative.
if offset < 0 {
logger.LogIf(ctx, errUnexpected, logger.Application)
return toObjectErr(errUnexpected, bucket, object)
}
// Writer cannot be nil.
if writer == nil {
logger.LogIf(ctx, errUnexpected, logger.Application)
return toObjectErr(errUnexpected, bucket, object)
}
// If it's a directory request, we return an empty body.
if HasSuffix(object, SlashSeparator) {
_, err = writer.Write([]byte(""))
logger.LogIf(ctx, err)
return toObjectErr(err, bucket, object)
}
if bucket != minioMetaBucket {
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile)
if lock {
_, err = fs.rwPool.Open(fsMetaPath)
if err != nil && err != errFileNotFound {
logger.LogIf(ctx, err)
return toObjectErr(err, bucket, object)
}
defer fs.rwPool.Close(fsMetaPath)
}
}
if etag != "" && etag != defaultEtag {
objEtag, perr := fs.getObjectETag(ctx, bucket, object, lock)
if perr != nil {
return toObjectErr(perr, bucket, object)
}
if objEtag != etag {
logger.LogIf(ctx, InvalidETag{}, logger.Application)
return toObjectErr(InvalidETag{}, bucket, object)
}
}
// Read the object, doesn't exist returns an s3 compatible error.
fsObjPath := pathJoin(fs.fsPath, bucket, object)
reader, size, err := fsOpenFile(ctx, fsObjPath, offset)
if err != nil {
return toObjectErr(err, bucket, object)
}
defer reader.Close()
// For negative length we read everything.
if length < 0 {
length = size - offset
}
// Reply back invalid range if the input offset and length fall out of range.
if offset > size || offset+length > size {
err = InvalidRange{offset, length, size}
logger.LogIf(ctx, err, logger.Application)
return err
}
_, err = io.Copy(writer, io.LimitReader(reader, length))
// The writer will be closed in case of range queries, which will emit ErrClosedPipe.
if err == io.ErrClosedPipe {
err = nil
}
return toObjectErr(err, bucket, object)
}
// Create a new fs.json file, if the existing one is corrupt. Should happen very rarely.
func (fs *FSObjects) createFsJSON(object, fsMetaPath string) error {
fsMeta := newFSMetaV1()
@@ -1377,77 +1301,6 @@ func (fs *FSObjects) isObjectDir(bucket, prefix string) bool {
return len(entries) == 0
}
// getObjectETag is a helper function, which returns only the md5sum
// of the file on the disk.
func (fs *FSObjects) getObjectETag(ctx context.Context, bucket, entry string, lock bool) (string, error) {
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, entry, fs.metaJSONFile)
var reader io.Reader
var fi os.FileInfo
var size int64
if lock {
// Read `fs.json` to perhaps contend with
// parallel Put() operations.
rlk, err := fs.rwPool.Open(fsMetaPath)
// Ignore if `fs.json` is not available, this is true for pre-existing data.
if err != nil && err != errFileNotFound {
logger.LogIf(ctx, err)
return "", toObjectErr(err, bucket, entry)
}
// If file is not found, we don't need to proceed forward.
if err == errFileNotFound {
return "", nil
}
// Read from fs metadata only if it exists.
defer fs.rwPool.Close(fsMetaPath)
// Fetch the size of the underlying file.
fi, err = rlk.LockedFile.Stat()
if err != nil {
logger.LogIf(ctx, err)
return "", toObjectErr(err, bucket, entry)
}
size = fi.Size()
reader = io.NewSectionReader(rlk.LockedFile, 0, fi.Size())
} else {
var err error
reader, size, err = fsOpenFile(ctx, fsMetaPath, 0)
if err != nil {
return "", toObjectErr(err, bucket, entry)
}
}
// `fs.json` can be empty due to previously failed
// PutObject() transaction, if we arrive at such
// a situation we just ignore and continue.
if size == 0 {
return "", nil
}
fsMetaBuf, err := ioutil.ReadAll(reader)
if err != nil {
logger.LogIf(ctx, err)
return "", toObjectErr(err, bucket, entry)
}
var fsMeta fsMetaV1
var json = jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(fsMetaBuf, &fsMeta); err != nil {
return "", err
}
// Check if FS metadata is valid, if not return error.
if !isFSMetaValid(fsMeta.Version) {
logger.LogIf(ctx, errCorruptedFormat)
return "", toObjectErr(errCorruptedFormat, bucket, entry)
}
return extractETag(fsMeta.Meta), nil
}
// ListObjectVersions not implemented for FS mode.
func (fs *FSObjects) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (loi ListObjectVersionsInfo, e error) {
return loi, NotImplemented{}


@@ -215,9 +215,6 @@ var (
// Hold the old server credentials passed by the environment
globalOldCred auth.Credentials
// Indicates if config is to be encrypted
globalConfigEncrypted bool
globalPublicCerts []*x509.Certificate
globalDomainNames []string // Root domains for virtual host style requests


@@ -218,7 +218,7 @@ func Trace(f http.HandlerFunc, logBody bool, w http.ResponseWriter, r *http.Requ
Time: now,
Proto: r.Proto,
Method: r.Method,
- Path: r.URL.Path,
+ Path: r.URL.RawPath,
RawQuery: redactLDAPPwd(r.URL.RawQuery),
Client: handlers.GetSourceIP(r),
Headers: reqHeaders,


@@ -57,7 +57,7 @@ func etcdKvsToSet(prefix string, kvs []*mvccpb.KeyValue) set.StringSet {
// suffix := "config.json"
// result is foo
func extractPathPrefixAndSuffix(s string, prefix string, suffix string) string {
- return pathClean(strings.TrimSuffix(strings.TrimPrefix(string(s), prefix), suffix))
+ return pathClean(strings.TrimSuffix(strings.TrimPrefix(s, prefix), suffix))
}
// IAMEtcdStore implements IAMStorageAPI
@@ -331,7 +331,7 @@ func (ies *IAMEtcdStore) addUser(ctx context.Context, user string, userType IAMU
return []byte(globalOldCred.SecretKey), nil
}
if _, err := jwtgo.ParseWithClaims(u.Credentials.SessionToken, m, stsTokenCallback); err == nil {
- jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.MapClaims(m))
+ jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, m)
if token, err := jwt.SignedString([]byte(globalActiveCred.SecretKey)); err == nil {
u.Credentials.SessionToken = token
err := ies.saveIAMConfig(ctx, &u, getUserIdentityPath(user, userType))


@@ -386,23 +386,6 @@ func interestingCaches(root string, cachesRoot map[string][]string) []string {
return interesting
}
// updateCache will update a cache by id.
// If the cache cannot be found nil is returned.
// The bucket cache will be locked until the returned done function is called.
func (b *bucketMetacache) updateCache(id string) (cache *metacache, done func()) {
b.mu.Lock()
c, ok := b.caches[id]
if !ok {
b.mu.Unlock()
return nil, func() {}
}
return &c, func() {
c.lastUpdate = UTCNow()
b.caches[id] = c
b.mu.Unlock()
}
}
// updateCacheEntry will update a cache.
// Returns the updated status.
func (b *bucketMetacache) updateCacheEntry(update metacache) (metacache, error) {
@@ -437,18 +420,6 @@ func (b *bucketMetacache) cloneCaches() (map[string]metacache, map[string][]stri
return dst, dst2
}
// getCache will return a clone of a specific metacache.
// Will return nil if the cache doesn't exist.
func (b *bucketMetacache) getCache(id string) *metacache {
b.mu.RLock()
c, ok := b.caches[id]
b.mu.RUnlock()
if !ok {
return nil
}
return &c
}
// deleteAll will delete all on disk data for ALL caches.
// Deletes are performed concurrently.
func (b *bucketMetacache) deleteAll() {


@@ -19,7 +19,6 @@ package cmd
import (
"bytes"
- "io"
"os"
"sort"
"strings"
@@ -275,16 +274,6 @@ type metaCacheEntriesSorted struct {
listID string
}
// writeTo will write all objects to the provided output.
func (m metaCacheEntriesSorted) writeTo(writer io.Writer) error {
w := newMetacacheWriter(writer, 1<<20)
if err := w.write(m.o...); err != nil {
w.Close()
return err
}
return w.Close()
}
// shallowClone will create a shallow clone of the array objects,
// but object metadata will not be cloned.
func (m metaCacheEntriesSorted) shallowClone() metaCacheEntriesSorted {
@@ -293,19 +282,6 @@ func (m metaCacheEntriesSorted) shallowClone() metaCacheEntriesSorted {
return m
}
// iterate the entries in order.
// If the iterator function returns false, iterating stops.
func (m *metaCacheEntriesSorted) iterate(fn func(entry metaCacheEntry) (cont bool)) {
if m == nil {
return
}
for _, o := range m.o {
if !fn(o) {
return
}
}
}
// fileInfoVersions converts the metadata to FileInfoVersions where possible.
// Metadata that cannot be decoded is skipped.
func (m *metaCacheEntriesSorted) fileInfoVersions(bucket, prefix, delimiter, afterV string) (versions []ObjectInfo) {
@@ -488,17 +464,6 @@ func (m *metaCacheEntriesSorted) merge(other metaCacheEntriesSorted, limit int)
m.o = merged
}
// filter allows selective filtering with the provided function.
func (m *metaCacheEntriesSorted) filter(fn func(entry *metaCacheEntry) bool) {
dst := m.o[:0]
for _, o := range m.o {
if fn(&o) {
dst = append(dst, o)
}
}
m.o = dst
}
// filterPrefix will filter m to only contain entries with the specified prefix.
func (m *metaCacheEntriesSorted) filterPrefix(s string) {
if s == "" {


@@ -952,7 +952,7 @@ func getMinioProcMetrics() MetricsGroup {
metrics = append(metrics,
Metric{
Description: getMinIOProcessCPUTime(),
- Value: float64(stat.CPUTime()),
+ Value: stat.CPUTime(),
})
return
},


@@ -181,7 +181,7 @@ func healingMetricsPrometheus(ch chan<- prometheus.Metric) {
"Objects for which healing failed in current self healing run",
[]string{"mount_path", "volume_status"}, nil),
prometheus.GaugeValue,
- float64(v), string(s[0]), string(s[1]),
+ float64(v), s[0], s[1],
)
}
}


@@ -498,75 +498,6 @@ func (sys *NotificationSys) updateBloomFilter(ctx context.Context, current uint6
return bf, nil
}
// collectBloomFilter will collect bloom filters from all servers from the specified cycle.
func (sys *NotificationSys) collectBloomFilter(ctx context.Context, from uint64) (*bloomFilter, error) {
var req = bloomFilterRequest{
Current: 0,
Oldest: from,
}
// Load initial state from local...
var bf *bloomFilter
bfr, err := intDataUpdateTracker.cycleFilter(ctx, req)
logger.LogIf(ctx, err)
if err == nil && bfr.Complete {
nbf := intDataUpdateTracker.newBloomFilter()
bf = &nbf
_, err = bf.ReadFrom(bytes.NewReader(bfr.Filter))
logger.LogIf(ctx, err)
}
if !bfr.Complete {
// If local isn't complete just return early
return nil, nil
}
var mu sync.Mutex
g := errgroup.WithNErrs(len(sys.peerClients))
for idx, client := range sys.peerClients {
if client == nil {
continue
}
client := client
g.Go(func() error {
serverBF, err := client.cycleServerBloomFilter(ctx, req)
if false && intDataUpdateTracker.debug {
b, _ := json.MarshalIndent(serverBF, "", " ")
logger.Info("Disk %v, Bloom filter: %v", client.host.Name, string(b))
}
// Keep lock while checking result.
mu.Lock()
defer mu.Unlock()
if err != nil || !serverBF.Complete || bf == nil {
logger.LogIf(ctx, err)
bf = nil
return nil
}
var tmp bloom.BloomFilter
_, err = tmp.ReadFrom(bytes.NewReader(serverBF.Filter))
if err != nil {
logger.LogIf(ctx, err)
bf = nil
return nil
}
if bf.BloomFilter == nil {
bf.BloomFilter = &tmp
} else {
err = bf.Merge(&tmp)
if err != nil {
logger.LogIf(ctx, err)
bf = nil
return nil
}
}
return nil
}, idx)
}
g.Wait()
return bf, nil
}
// findEarliestCleanBloomFilter will find the earliest bloom filter across the cluster
// where the directory is clean.
// Due to how objects are stored this can include object names.


@@ -33,7 +33,7 @@ import (
)
func md5Header(data []byte) map[string]string {
- return map[string]string{"etag": getMD5Hash([]byte(data))}
+ return map[string]string{"etag": getMD5Hash(data)}
}
// Wrapper for calling PutObject tests for both Erasure multiple disks and single node setup.


@@ -119,12 +119,12 @@ func checkAssumeRoleAuth(ctx context.Context, r *http.Request) (user auth.Creden
return user, true, ErrSTSAccessDenied
case authTypeSigned:
s3Err := isReqAuthenticated(ctx, r, globalServerRegion, serviceSTS)
- if APIErrorCode(s3Err) != ErrNone {
+ if s3Err != ErrNone {
return user, false, STSErrorCode(s3Err)
}
user, _, s3Err = getReqAccessKeyV4(r, globalServerRegion, serviceSTS)
- if APIErrorCode(s3Err) != ErrNone {
+ if s3Err != ErrNone {
return user, false, STSErrorCode(s3Err)
}


@@ -78,8 +78,6 @@ func TestMain(m *testing.M) {
SecretKey: auth.DefaultSecretKey,
}
- globalConfigEncrypted = true
// disable ENVs which interfere with tests.
for _, env := range []string{
crypto.EnvKMSAutoEncryption,
@@ -1278,35 +1276,6 @@ func getRandomBucketName() string {
}
// NewEOFWriter returns a Writer that writes to w,
// but returns EOF error after writing n bytes.
func NewEOFWriter(w io.Writer, n int64) io.Writer {
return &EOFWriter{w, n}
}
type EOFWriter struct {
w io.Writer
n int64
}
// io.Writer implementation designed to error out with io.EOF after writing `n` bytes.
func (t *EOFWriter) Write(p []byte) (n int, err error) {
if t.n <= 0 {
return -1, io.EOF
}
// real write
n = len(p)
if int64(n) > t.n {
n = int(t.n)
}
n, err = t.w.Write(p[0:n])
t.n -= int64(n)
if err == nil {
n = len(p)
}
return
}
// construct URL for http requests for bucket operations.
func makeTestTargetURL(endPoint, bucketName, objectName string, queryValues url.Values) string {
urlStr := endPoint + SlashSeparator


@@ -211,6 +211,9 @@ type xlMetaV2Version struct {
// Valid xl meta xlMetaV2Version is valid
func (j xlMetaV2Version) Valid() bool {
+ if !j.Type.valid() {
+ return false
+ }
switch j.Type {
case LegacyType:
return j.ObjectV1 != nil &&


@@ -758,25 +758,6 @@ func (s *xlStorage) DeleteVol(ctx context.Context, volume string, forceDelete bo
return nil
}
func (s *xlStorage) isLeaf(volume string, leafPath string) bool {
volumeDir, err := s.getVolDir(volume)
if err != nil {
return false
}
if err = Access(pathJoin(volumeDir, leafPath, xlStorageFormatFile)); err == nil {
return true
}
if osIsNotExist(err) {
// We need a fallback code where directory might contain
// legacy `xl.json`, in such situation we just rename
// and proceed if rename is successful we know that it
// is the leaf since `xl.json` was present.
return s.renameLegacyMetadata(volumeDir, leafPath) == nil
}
return false
}
// ListDir - return all the entries at the given directory path.
// If an entry is a directory it will be returned with a trailing SlashSeparator.
func (s *xlStorage) ListDir(ctx context.Context, volume, dirPath string, count int) (entries []string, err error) {


@@ -135,12 +135,12 @@ func TestMarshalLifecycleConfig(t *testing.T) {
{
Status: "Enabled",
Filter: Filter{Prefix: Prefix{string: "prefix-1", set: true}},
- Expiration: Expiration{Date: ExpirationDate(midnightTS)},
+ Expiration: Expiration{Date: midnightTS},
},
{
Status: "Enabled",
Filter: Filter{Prefix: Prefix{string: "prefix-1", set: true}},
- Expiration: Expiration{Date: ExpirationDate(midnightTS)},
+ Expiration: Expiration{Date: midnightTS},
NoncurrentVersionTransition: NoncurrentVersionTransition{NoncurrentDays: 2, StorageClass: "TEST"},
},
},


@@ -65,7 +65,7 @@ func getNewUUID() (string, error) {
// validateID - checks if ID is valid or not.
func (r Rule) validateID() error {
- IDLen := len(string(r.ID))
+ IDLen := len(r.ID)
// generate new ID when not provided
// cannot be longer than 255 characters
if IDLen == 0 {


@@ -33,10 +33,11 @@ func GetInfo(path string) (info Info, err error) {
}
reservedBlocks := s.Bfree - s.Bavail
info = Info{
Total: uint64(s.Frsize) * (s.Blocks - reservedBlocks),
Free: uint64(s.Frsize) * s.Bavail,
Files: s.Files,
Ffree: s.Ffree,
+ //nolint:unconvert
FSType: getFSType(int64(s.Type)),
}
// Check for overflows.
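The //nolint:unconvert marker is needed because the width of the statfs Type field differs across platforms: on linux/amd64 it is already int64, so unconvert flags the conversion as redundant, while on 32-bit builds the conversion is required. A Linux-only sketch of the pattern (illustrative names, not the repository's exact code):

package main

import (
	"fmt"
	"syscall"
)

// statFSType returns the filesystem type id for path. The int64 conversion is
// redundant on linux/amd64 (Statfs_t.Type is already int64 there) but required
// on 32-bit architectures, so the nolint comment keeps one portable code path.
func statFSType(path string) (int64, error) {
	var s syscall.Statfs_t
	if err := syscall.Statfs(path, &s); err != nil {
		return 0, err
	}
	//nolint:unconvert
	return int64(s.Type), nil
}

func main() {
	t, err := statFSType("/")
	if err != nil {
		panic(err)
	}
	fmt.Printf("filesystem type: 0x%x\n", t)
}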


@@ -65,7 +65,7 @@ func (e *FuncExpr) getFunctionName() FuncName {
case e.SFunc != nil:
return FuncName(strings.ToUpper(e.SFunc.FunctionName))
case e.Count != nil:
- return FuncName(aggFnCount)
+ return aggFnCount
case e.Cast != nil:
return sqlFnCast
case e.Substring != nil:


@@ -113,43 +113,6 @@ type nvmeIdentController struct {
Vs [1024]byte // Vendor Specific
} // 4096 bytes
type nvmeLBAF struct {
Ms uint16
Ds uint8
Rp uint8
}
//nolint:deadcode
type nvmeIdentNamespace struct {
Nsze uint64
Ncap uint64
Nuse uint64
Nsfeat uint8
Nlbaf uint8
Flbas uint8
Mc uint8
Dpc uint8
Dps uint8
Nmic uint8
Rescap uint8
Fpi uint8
Rsvd33 uint8
Nawun uint16
Nawupf uint16
Nacwu uint16
Nabsn uint16
Nabo uint16
Nabspf uint16
Rsvd46 [2]byte
Nvmcap [16]byte
Rsvd64 [40]byte
Nguid [16]byte
EUI64 [8]byte
Lbaf [16]nvmeLBAF
Rsvd192 [192]byte
Vs [3712]byte
} // 4096 bytes
//nolint:deadcode
type nvmeSMARTLog struct {
CritWarning uint8


@@ -70,6 +70,7 @@ func getSysinfoMemoryLimit() (limit uint64, err error) {
// Total RAM is always the multiplicative value
// of unit size and total ram.
+ //nolint:unconvert
return uint64(unit) * uint64(totalRAM), nil
} }