Mirror of https://github.com/minio/minio.git (synced 2025-11-07 21:02:58 -05:00)
[feat] use rename instead of recursive deletes (#11641)
Most of the delete calls today spend time in a blocking operation where multiple calls must be sent recursively to delete the objects. Instead, we can use a rename operation to atomically move the objects out of the namespace into `tmp/.trash`, schedule deletion of the objects at that location once every 15 or 30 minutes, and add wait times between each delete operation. This makes deletes faster as well as less chatty on the drives; each server locally runs a goroutine which cleans this up regularly.
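To make the rename-then-purge idea concrete, here is a minimal sketch, not MinIO's actual API: `moveToTrash` and `purgeTrash` are illustrative names, and it assumes the object path and the trash area live on the same filesystem so the rename stays atomic.

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "time"
    )

    // moveToTrash renames an object directory into the trash area instead of
    // deleting it recursively. os.Rename is a single atomic metadata operation
    // when both paths are on the same mount, so the namespace entry disappears
    // immediately regardless of how large the tree underneath is.
    func moveToTrash(objectPath, trashRoot string) error {
        // A unique target name avoids collisions between successive
        // deletes of the same object path.
        target := filepath.Join(trashRoot, fmt.Sprintf("%d", time.Now().UnixNano()))
        return os.Rename(objectPath, target)
    }

    // purgeTrash walks the trash area and removes entries one by one,
    // sleeping between removals so bulk deletes do not saturate the drives.
    func purgeTrash(trashRoot string, pause time.Duration) error {
        entries, err := os.ReadDir(trashRoot)
        if err != nil {
            return err
        }
        for _, entry := range entries {
            if err := os.RemoveAll(filepath.Join(trashRoot, entry.Name())); err != nil {
                return err
            }
            time.Sleep(pause) // wait time between each delete operation
        }
        return nil
    }

The caller's delete path only pays for the rename; all of the expensive recursive removal happens later, off the request path, in the background purge.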
@@ -403,21 +403,28 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
 		// Initialize erasure objects for a given set.
 		s.sets[i] = &erasureObjects{
-			setNumber:          i,
-			setDriveCount:      setDriveCount,
-			defaultParityCount: defaultParityCount,
-			getDisks:           s.GetDisks(i),
-			getLockers:         s.GetLockers(i),
-			getEndpoints:       s.GetEndpoints(i),
-			nsMutex:            mutex,
-			bp:                 bp,
-			mrfOpCh:            make(chan partialOperation, 10000),
+			setNumber:             i,
+			setDriveCount:         setDriveCount,
+			defaultParityCount:    defaultParityCount,
+			getDisks:              s.GetDisks(i),
+			getLockers:            s.GetLockers(i),
+			getEndpoints:          s.GetEndpoints(i),
+			deletedCleanupSleeper: newDynamicSleeper(10, 10*time.Second),
+			nsMutex:               mutex,
+			bp:                    bp,
+			mrfOpCh:               make(chan partialOperation, 10000),
 		}
 	}
 
+	// cleanup ".trash/" folder every 30 minutes with sufficient sleep cycles.
+	const deletedObjectsCleanupInterval = 30 * time.Minute
+
 	// start cleanup stale uploads go-routine.
 	go s.cleanupStaleUploads(ctx, GlobalStaleUploadsCleanupInterval, GlobalStaleUploadsExpiry)
 
+	// start cleanup of deleted objects.
+	go s.cleanupDeletedObjects(ctx, deletedObjectsCleanupInterval)
+
 	// Start the disk monitoring and connect routine.
 	go s.monitorAndConnectEndpoints(ctx, defaultMonitorConnectEndpointInterval)
 	go s.maintainMRFList()
@@ -426,6 +433,25 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
 	return s, nil
 }
 
+func (s *erasureSets) cleanupDeletedObjects(ctx context.Context, cleanupInterval time.Duration) {
+	timer := time.NewTimer(cleanupInterval)
+	defer timer.Stop()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-timer.C:
+			// Reset for the next interval
+			timer.Reset(cleanupInterval)
+
+			for _, set := range s.sets {
+				set.cleanupDeletedObjects(ctx)
+			}
+		}
+	}
+}
+
 func (s *erasureSets) cleanupStaleUploads(ctx context.Context, cleanupInterval, expiry time.Duration) {
 	timer := time.NewTimer(cleanupInterval)
 	defer timer.Stop()
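Two details of the diff are worth noting. The cleanup loop uses time.NewTimer with an explicit Reset after each pass rather than a Ticker, so a slow cleanup pass cannot cause ticks to pile up behind it. And each erasure set gets a deletedCleanupSleeper built by newDynamicSleeper(10, 10*time.Second); its implementation is elsewhere in the MinIO tree, so the sketch below only illustrates the throttling idea, with a guessed, simplified shape.

    package main

    import (
        "context"
        "time"
    )

    // dynamicSleeper is a simplified stand-in: it sleeps in proportion to how
    // long the last unit of work took, capped at maxSleep, so cleanup backs
    // off automatically on slow (busy) drives.
    type dynamicSleeper struct {
        factor   float64       // sleep this multiple of the observed work time
        maxSleep time.Duration // never sleep longer than this per operation
    }

    func newDynamicSleeper(factor float64, maxSleep time.Duration) *dynamicSleeper {
        return &dynamicSleeper{factor: factor, maxSleep: maxSleep}
    }

    // Sleep pauses after a unit of work that took `took`, unless ctx is done.
    func (d *dynamicSleeper) Sleep(ctx context.Context, took time.Duration) {
        sleep := time.Duration(float64(took) * d.factor)
        if sleep > d.maxSleep {
            sleep = d.maxSleep
        }
        select {
        case <-ctx.Done():
        case <-time.After(sleep):
        }
    }

With factor 10 and a 10s cap, a removal that took 100ms would be followed by a 1s pause, which matches the commit message's goal of keeping deletes "less chatty" on the drives.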