Mirror of https://github.com/minio/minio.git
fix: reduce crawler memory usage by orders of magnitude (#11556)
Currently the crawler waits for an entire readdir() call to return before it processes usage, lifecycle, replication, and healing. Instead, we pass the applicator all the way down (sketched below), so no special stack has to be built for the contents of a single directory. This allows for:

- no need to remember the entire list of entries per directory before applying the required functions
- no need to wait for the entire readdir() call to finish before applying the required functions
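To make the memory difference concrete, here is a minimal sketch of both patterns using only Go's standard library. The helper names readDirAll and readDirStream and the batch size of 64 are illustrative assumptions, not MinIO's actual code; MinIO's readDirFn implements the streaming variant with platform-specific directory reads, but the contrast in peak memory is the same.

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// Before (sketch): buffer the whole listing, then apply fn to each
// entry. Peak memory grows with the number of entries in the directory.
func readDirAll(dirPath string, fn func(name string, typ os.FileMode) error) error {
	entries, err := os.ReadDir(dirPath) // entire directory held in memory
	if err != nil {
		return err
	}
	for _, entry := range entries {
		if err := fn(entry.Name(), entry.Type()); err != nil {
			return err
		}
	}
	return nil
}

// After (sketch): pass the applicator fn down and invoke it while the
// directory is still being read, keeping only a small batch resident.
func readDirStream(dirPath string, fn func(name string, typ os.FileMode) error) error {
	d, err := os.Open(dirPath)
	if err != nil {
		return err
	}
	defer d.Close()
	for {
		entries, err := d.ReadDir(64) // read at most 64 entries at a time
		if err == io.EOF {
			return nil // directory exhausted
		}
		if err != nil {
			return err
		}
		for _, entry := range entries {
			if err := fn(entry.Name(), entry.Type()); err != nil {
				return err
			}
		}
	}
}

func main() {
	// Apply the function per entry as the scan proceeds, mirroring the
	// callback shape used in the diff below.
	err := readDirStream(".", func(name string, typ os.FileMode) error {
		if typ == os.ModeDir {
			fmt.Println("dir:", name)
		}
		return nil
	})
	if err != nil {
		fmt.Println("walk failed:", err)
	}
}
```

With the streaming variant, peak memory is bounded by the batch size rather than by the directory size, which is where the orders-of-magnitude reduction in the title comes from on very large directories.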
@@ -33,7 +33,7 @@ import (
 func renameAllBucketMetacache(epPath string) error {
 	// Rename all previous `.minio.sys/buckets/<bucketname>/.metacache`
 	// to `.minio.sys/tmp/` for deletion.
-	return readDirFilterFn(pathJoin(epPath, minioMetaBucket, bucketMetaPrefix), func(name string, typ os.FileMode) error {
+	return readDirFn(pathJoin(epPath, minioMetaBucket, bucketMetaPrefix), func(name string, typ os.FileMode) error {
 		if typ == os.ModeDir {
 			tmpMetacacheOld := pathJoin(epPath, minioMetaTmpBucket+"-old", mustGetUUID())
 			if err := renameAll(pathJoin(epPath, minioMetaBucket, metacachePrefixForID(name, slashSeparator)),
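In the hunk above, only the entry point changes: readDirFilterFn becomes readDirFn, while the applicator closure itself is untouched. It still renames each metacache directory under `.minio.sys/buckets/` into a uniquely named (mustGetUUID) path inside the "-old" tmp bucket for later deletion; the difference is that the closure is presumably now invoked per entry as the directory is read, rather than after a full listing has been buffered, and its error return gives the streaming walk a way to abort early.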