purge objects immediately with x-minio-force-delete in DeleteObject and DeleteBucket API (#15148)

Praveen raj Mani 2022-07-11 21:45:54 +05:30 committed by GitHub
parent 00e235a1ee
commit b49fc33cb3
20 changed files with 220 additions and 79 deletions
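
The user-visible effect of this change: a DeleteObject or DeleteBucket request that carries the x-minio-force-delete: true header is purged immediately instead of first being parked in the trash area and cleaned up later. The API-handler wiring is not part of the hunks below, so the following is only a rough sketch of how a client might attach the header to a raw request; the endpoint is a placeholder and a real request would also need AWS Signature V4 authentication.

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        // Placeholder endpoint; a real MinIO deployment also requires signed requests.
        req, err := http.NewRequest(http.MethodDelete, "http://localhost:9000/mybucket/myobject", nil)
        if err != nil {
            panic(err)
        }
        // Header introduced by this commit: ask the server to purge immediately
        // rather than moving the deleted data to the trash area first.
        req.Header.Set("x-minio-force-delete", "true")
        fmt.Println(req.Method, req.URL, req.Header)
    }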

@@ -163,7 +163,11 @@ func (h *healingTracker) save(ctx context.Context) error {
 func (h *healingTracker) delete(ctx context.Context) error {
     return h.disk.Delete(ctx, minioMetaBucket,
         pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename),
-        false)
+        DeleteOptions{
+            Recursive: false,
+            Force:     false,
+        },
+    )
 }

 func (h *healingTracker) isHealed(bucket string) bool {

@@ -196,7 +196,10 @@ func benchmarkErasureEncode(data, parity, dataDown, parityDown int, size int64,
         if disk == OfflineDisk {
             continue
         }
-        disk.Delete(context.Background(), "testbucket", "object", false)
+        disk.Delete(context.Background(), "testbucket", "object", DeleteOptions{
+            Recursive: false,
+            Force:     false,
+        })
         writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize())
     }
     _, err := erasure.Encode(context.Background(), bytes.NewReader(content), writers, buffer, erasure.dataBlocks+1)

@@ -218,7 +218,10 @@ func TestListOnlineDisks(t *testing.T) {
                 // and check if that disk
                 // appears in outDatedDisks.
                 tamperedIndex = index
-                dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), false)
+                dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
+                    Recursive: false,
+                    Force:     false,
+                })
                 if dErr != nil {
                     t.Fatalf("Failed to delete %s - %v", filepath.Join(object, "part.1"), dErr)
                 }
@@ -395,7 +398,10 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
                 // and check if that disk
                 // appears in outDatedDisks.
                 tamperedIndex = index
-                dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
+                dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
+                    Recursive: false,
+                    Force:     false,
+                })
                 if dErr != nil {
                     t.Fatalf("Failed to delete %s - %v", pathJoin(object, xlStorageFormatFile), dErr)
                 }

@@ -655,7 +655,10 @@ func (er erasureObjects) healObjectDir(ctx context.Context, bucket, object strin
         wg.Add(1)
         go func(index int, disk StorageAPI) {
             defer wg.Done()
-            _ = disk.Delete(ctx, bucket, object, false)
+            _ = disk.Delete(ctx, bucket, object, DeleteOptions{
+                Recursive: false,
+                Force:     false,
+            })
         }(index, disk)
     }
     wg.Wait()

@@ -610,7 +610,10 @@ func TestHealCorrectQuorum(t *testing.T) {
     }

     for i := 0; i < nfi.Erasure.ParityBlocks; i++ {
-        erasureDisks[i].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
+        erasureDisks[i].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
+            Recursive: false,
+            Force:     false,
+        })
     }

     // Try healing now, it should heal the content properly.
@@ -634,7 +637,10 @@ func TestHealCorrectQuorum(t *testing.T) {
     }

     for i := 0; i < nfi.Erasure.ParityBlocks; i++ {
-        erasureDisks[i].Delete(context.Background(), minioMetaBucket, pathJoin(cfgFile, xlStorageFormatFile), false)
+        erasureDisks[i].Delete(context.Background(), minioMetaBucket, pathJoin(cfgFile, xlStorageFormatFile), DeleteOptions{
+            Recursive: false,
+            Force:     false,
+        })
     }

     // Try healing now, it should heal the content properly.
@@ -714,7 +720,10 @@ func TestHealObjectCorruptedPools(t *testing.T) {
     er := set.sets[0]
     erasureDisks := er.getDisks()
     firstDisk := erasureDisks[0]
-    err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
+    err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
+        Recursive: false,
+        Force:     false,
+    })
     if err != nil {
         t.Fatalf("Failed to delete a file - %v", err)
     }
@@ -734,7 +743,10 @@ func TestHealObjectCorruptedPools(t *testing.T) {
         t.Errorf("Expected xl.meta file to be present but stat failed - %v", err)
     }

-    err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), false)
+    err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
+        Recursive: false,
+        Force:     false,
+    })
     if err != nil {
         t.Errorf("Failure during deleting part.1 - %v", err)
     }
@@ -761,7 +773,10 @@ func TestHealObjectCorruptedPools(t *testing.T) {
         t.Fatalf("FileInfo not equal after healing: %v != %v", fi, nfi)
     }

-    err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), false)
+    err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
+        Recursive: false,
+        Force:     false,
+    })
     if err != nil {
         t.Errorf("Failure during deleting part.1 - %v", err)
     }
@@ -792,7 +807,10 @@ func TestHealObjectCorruptedPools(t *testing.T) {
     // Test 4: checks if HealObject returns an error when xl.meta is not found
     // in more than read quorum number of disks, to create a corrupted situation.
     for i := 0; i <= nfi.Erasure.DataBlocks; i++ {
-        erasureDisks[i].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
+        erasureDisks[i].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
+            Recursive: false,
+            Force:     false,
+        })
     }

     // Try healing now, expect to receive errFileNotFound.
@@ -884,7 +902,10 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
         t.Fatalf("Failed to getLatestFileInfo - %v", err)
     }

-    err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
+    err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
+        Recursive: false,
+        Force:     false,
+    })
     if err != nil {
         t.Fatalf("Failed to delete a file - %v", err)
     }
@@ -936,7 +957,10 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
     // Test 3: checks if HealObject returns an error when xl.meta is not found
     // in more than read quorum number of disks, to create a corrupted situation.
     for i := 0; i <= nfi2.Erasure.DataBlocks; i++ {
-        erasureDisks[i].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
+        erasureDisks[i].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
+            Recursive: false,
+            Force:     false,
+        })
     }

     // Try healing now, expect to receive errFileNotFound.
@@ -1033,7 +1057,10 @@ func TestHealObjectCorruptedParts(t *testing.T) {
     }

     // Test 1, remove part.1
-    err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), false)
+    err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
+        Recursive: false,
+        Force:     false,
+    })
     if err != nil {
         t.Fatalf("Failed to delete a file - %v", err)
     }
@@ -1078,7 +1105,10 @@ func TestHealObjectCorruptedParts(t *testing.T) {
         t.Fatalf("Failed to write a file - %v", err)
     }

-    err = secondDisk.Delete(context.Background(), bucket, object, true)
+    err = secondDisk.Delete(context.Background(), bucket, object, DeleteOptions{
+        Recursive: true,
+        Force:     false,
+    })
     if err != nil {
         t.Fatalf("Failed to delete a file - %v", err)
     }
@@ -1166,7 +1196,10 @@ func TestHealObjectErasure(t *testing.T) {
     }

     // Delete the whole object folder
-    err = firstDisk.Delete(context.Background(), bucket, object, true)
+    err = firstDisk.Delete(context.Background(), bucket, object, DeleteOptions{
+        Recursive: true,
+        Force:     false,
+    })
     if err != nil {
         t.Fatalf("Failed to delete a file - %v", err)
     }

@@ -91,7 +91,10 @@ func (er erasureObjects) removeObjectPart(bucket, object, uploadID, dataDir stri
             // Ignoring failure to remove parts that weren't present in CompleteMultipartUpload
             // requests. xl.meta is the authoritative source of truth on which parts constitute
             // the object. The presence of parts that don't belong in the object doesn't affect correctness.
-            _ = storageDisks[index].Delete(context.TODO(), minioMetaMultipartBucket, curpartPath, false)
+            _ = storageDisks[index].Delete(context.TODO(), minioMetaMultipartBucket, curpartPath, DeleteOptions{
+                Recursive: false,
+                Force:     false,
+            })
             return nil
         }, index)
     }
@@ -138,7 +141,10 @@ func (er erasureObjects) deleteAll(ctx context.Context, bucket, prefix string) {
         wg.Add(1)
         go func(disk StorageAPI) {
             defer wg.Done()
-            disk.Delete(ctx, bucket, prefix, true)
+            disk.Delete(ctx, bucket, prefix, DeleteOptions{
+                Recursive: true,
+                Force:     false,
+            })
         }(disk)
     }
     wg.Wait()

@@ -1367,8 +1367,14 @@ func (er erasureObjects) deletePrefix(ctx context.Context, bucket, prefix string
             // Deletes
             // - The prefix and its children
             // - The prefix__XLDIR__
-            defer disks[index].Delete(ctx, bucket, dirPrefix, true)
-            return disks[index].Delete(ctx, bucket, prefix, true)
+            defer disks[index].Delete(ctx, bucket, dirPrefix, DeleteOptions{
+                Recursive: true,
+                Force:     true,
+            })
+            return disks[index].Delete(ctx, bucket, prefix, DeleteOptions{
+                Recursive: true,
+                Force:     true,
+            })
         }, index)
     }
     for _, err := range g.Wait() {

@@ -528,7 +528,10 @@ func TestGetObjectNoQuorum(t *testing.T) {
             files, _ := disk.ListDir(ctx, bucket, object, -1)
             for _, file := range files {
                 if file != "xl.meta" {
-                    disk.Delete(ctx, bucket, pathJoin(object, file), true)
+                    disk.Delete(ctx, bucket, pathJoin(object, file), DeleteOptions{
+                        Recursive: true,
+                        Force:     false,
+                    })
                 }
             }
         }
@@ -629,7 +632,10 @@ func TestHeadObjectNoQuorum(t *testing.T) {
             files, _ := disk.ListDir(ctx, bucket, object, -1)
             for _, file := range files {
                 if file != "xl.meta" {
-                    disk.Delete(ctx, bucket, pathJoin(object, file), true)
+                    disk.Delete(ctx, bucket, pathJoin(object, file), DeleteOptions{
+                        Recursive: true,
+                        Force:     false,
+                    })
                 }
             }
         }

@@ -1321,8 +1321,14 @@ func (es *erasureSingle) DeleteObjects(ctx context.Context, bucket string, objec

 func (es *erasureSingle) deletePrefix(ctx context.Context, bucket, prefix string) error {
     dirPrefix := encodeDirObject(prefix)
-    defer es.disk.Delete(ctx, bucket, dirPrefix, true)
-    return es.disk.Delete(ctx, bucket, prefix, true)
+    defer es.disk.Delete(ctx, bucket, dirPrefix, DeleteOptions{
+        Recursive: true,
+        Force:     true,
+    })
+    return es.disk.Delete(ctx, bucket, prefix, DeleteOptions{
+        Recursive: true,
+        Force:     true,
+    })
 }

 // DeleteObject - deletes an object, this call doesn't necessary reply
@@ -1916,7 +1922,10 @@ func (es *erasureSingle) removeObjectPart(bucket, object, uploadID, dataDir stri
             // Ignoring failure to remove parts that weren't present in CompleteMultipartUpload
             // requests. xl.meta is the authoritative source of truth on which parts constitute
             // the object. The presence of parts that don't belong in the object doesn't affect correctness.
-            _ = storageDisks[index].Delete(context.TODO(), minioMetaMultipartBucket, curpartPath, false)
+            _ = storageDisks[index].Delete(context.TODO(), minioMetaMultipartBucket, curpartPath, DeleteOptions{
+                Recursive: false,
+                Force:     false,
+            })
             return nil
         }, index)
     }
@@ -1954,7 +1963,10 @@ func (es *erasureSingle) cleanupStaleUploadsOnDisk(ctx context.Context, disk Sto
         }
         wait := es.deletedCleanupSleeper.Timer(ctx)
         if now.Sub(vi.Created) > expiry {
-            disk.Delete(ctx, minioMetaTmpBucket, tmpDir, true)
+            disk.Delete(ctx, minioMetaTmpBucket, tmpDir, DeleteOptions{
+                Recursive: true,
+                Force:     false,
+            })
         }
         wait()
         return nil

@@ -367,7 +367,10 @@ func saveFormatErasure(disk StorageAPI, format *formatErasureV3, heal bool) erro
     tmpFormat := mustGetUUID()

     // Purge any existing temporary file, okay to ignore errors here.
-    defer disk.Delete(context.TODO(), minioMetaBucket, tmpFormat, false)
+    defer disk.Delete(context.TODO(), minioMetaBucket, tmpFormat, DeleteOptions{
+        Recursive: false,
+        Force:     false,
+    })

     // write to unique file.
     if err = disk.WriteAll(context.TODO(), minioMetaBucket, tmpFormat, formatBytes); err != nil {

@@ -221,11 +221,11 @@ func (d *naughtyDisk) CheckParts(ctx context.Context, volume string, path string
     return d.disk.CheckParts(ctx, volume, path, fi)
 }

-func (d *naughtyDisk) Delete(ctx context.Context, volume string, path string, recursive bool) (err error) {
+func (d *naughtyDisk) Delete(ctx context.Context, volume string, path string, deleteOpts DeleteOptions) (err error) {
     if err := d.calcError(); err != nil {
         return err
     }
-    return d.disk.Delete(ctx, volume, path, recursive)
+    return d.disk.Delete(ctx, volume, path, deleteOpts)
 }

 func (d *naughtyDisk) DeleteVersions(ctx context.Context, volume string, versions []FileInfoVersions) []error {

@@ -21,6 +21,13 @@ import (
     "time"
 )

+// DeleteOptions represents the disk level delete options available for the APIs
+//msgp:ignore DeleteOptions
+type DeleteOptions struct {
+    Recursive bool
+    Force     bool
+}
+
 //go:generate msgp -file=$GOFILE

 // DiskInfo is an extended type which returns current
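
A standalone illustration of the new options struct above: Recursive asks the storage layer to remove a prefix and everything under it, while Force asks it to purge the trashed copy immediately. The DeleteOptions type and the deleter interface below are re-declared locally so the snippet compiles on its own; they only mimic the internal StorageAPI signature shown later in this diff and are not imports from the MinIO code base.

    package main

    import (
        "context"
        "fmt"
    )

    // Local copy of the options shape introduced in this commit.
    type DeleteOptions struct {
        Recursive bool
        Force     bool
    }

    // A stand-in for the storage layer's Delete method signature.
    type deleter interface {
        Delete(ctx context.Context, volume, path string, opts DeleteOptions) error
    }

    type fakeDisk struct{}

    func (fakeDisk) Delete(ctx context.Context, volume, path string, opts DeleteOptions) error {
        fmt.Printf("delete %s/%s recursive=%v force=%v\n", volume, path, opts.Recursive, opts.Force)
        return nil
    }

    func main() {
        var d deleter = fakeDisk{}
        // Plain single-file delete: moved to trash, cleaned up later.
        _ = d.Delete(context.Background(), "testbucket", "object/xl.meta", DeleteOptions{})
        // Force delete of a prefix: the trashed copy is purged right away.
        _ = d.Delete(context.Background(), "testbucket", "object", DeleteOptions{Recursive: true, Force: true})
    }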

@@ -95,7 +95,7 @@ type StorageAPI interface {
     ReadFileStream(ctx context.Context, volume, path string, offset, length int64) (io.ReadCloser, error)
     RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) error
     CheckParts(ctx context.Context, volume string, path string, fi FileInfo) error
-    Delete(ctx context.Context, volume string, path string, recursive bool) (err error)
+    Delete(ctx context.Context, volume string, path string, deleteOpts DeleteOptions) (err error)

     VerifyFile(ctx context.Context, volume, path string, fi FileInfo) error
     StatInfoFile(ctx context.Context, volume, path string, glob bool) (stat []StatInfo, err error)
@@ -223,7 +223,7 @@ func (p *unrecognizedDisk) CheckParts(ctx context.Context, volume string, path s
     return errDiskNotFound
 }

-func (p *unrecognizedDisk) Delete(ctx context.Context, volume string, path string, recursive bool) (err error) {
+func (p *unrecognizedDisk) Delete(ctx context.Context, volume string, path string, deleteOpts DeleteOptions) (err error) {
     return errDiskNotFound
 }

@@ -581,11 +581,12 @@ func (client *storageRESTClient) ListDir(ctx context.Context, volume, dirPath st
 }

 // DeleteFile - deletes a file.
-func (client *storageRESTClient) Delete(ctx context.Context, volume string, path string, recursive bool) error {
+func (client *storageRESTClient) Delete(ctx context.Context, volume string, path string, deleteOpts DeleteOptions) error {
     values := make(url.Values)
     values.Set(storageRESTVolume, volume)
     values.Set(storageRESTFilePath, path)
-    values.Set(storageRESTRecursive, strconv.FormatBool(recursive))
+    values.Set(storageRESTRecursive, strconv.FormatBool(deleteOpts.Recursive))
+    values.Set(storageRESTForceDelete, strconv.FormatBool(deleteOpts.Force))

     respBody, err := client.call(ctx, storageRESTMethodDeleteFile, values, nil, -1)
     defer xhttp.DrainBody(respBody)

@@ -652,8 +652,15 @@ func (s *storageRESTServer) DeleteFileHandler(w http.ResponseWriter, r *http.Req
         s.writeErrorResponse(w, err)
         return
     }
+    force, err := strconv.ParseBool(r.Form.Get(storageRESTForceDelete))
+    if err != nil {
+        s.writeErrorResponse(w, err)
+        return
+    }

-    err = s.storage.Delete(r.Context(), volume, filePath, recursive)
+    err = s.storage.Delete(r.Context(), volume, filePath, DeleteOptions{
+        Recursive: recursive,
+        Force:     force,
+    })
     if err != nil {
         s.writeErrorResponse(w, err)
     }
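
Both flags cross the internal storage REST hop as ordinary boolean form values, encoded with strconv.FormatBool on the client and decoded with strconv.ParseBool in the handler above. Below is a minimal standard-library sketch of that round trip; the query keys are placeholders standing in for the storageRESTRecursive and storageRESTForceDelete constants used in the real code.

    package main

    import (
        "fmt"
        "net/url"
        "strconv"
    )

    func main() {
        // Client side: encode the delete options as query parameters.
        values := make(url.Values)
        values.Set("volume", "testbucket")
        values.Set("file-path", "object")
        values.Set("recursive", strconv.FormatBool(true))
        values.Set("force-delete", strconv.FormatBool(true))
        encoded := values.Encode()

        // Server side: decode them back into booleans.
        parsed, _ := url.ParseQuery(encoded)
        recursive, err := strconv.ParseBool(parsed.Get("recursive"))
        if err != nil {
            panic(err)
        }
        force, err := strconv.ParseBool(parsed.Get("force-delete"))
        if err != nil {
            panic(err)
        }
        fmt.Println("recursive:", recursive, "force:", force)
    }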

@@ -383,7 +383,10 @@ func testStorageAPIDeleteFile(t *testing.T, storage StorageAPI) {
     }

     for i, testCase := range testCases {
-        err := storage.Delete(context.Background(), testCase.volumeName, testCase.objectName, false)
+        err := storage.Delete(context.Background(), testCase.volumeName, testCase.objectName, DeleteOptions{
+            Recursive: false,
+            Force:     false,
+        })
         expectErr := (err != nil)

         if expectErr != testCase.expectErr {

@@ -382,14 +382,14 @@ func (p *xlStorageDiskIDCheck) CheckParts(ctx context.Context, volume string, pa
     return p.storage.CheckParts(ctx, volume, path, fi)
 }

-func (p *xlStorageDiskIDCheck) Delete(ctx context.Context, volume string, path string, recursive bool) (err error) {
+func (p *xlStorageDiskIDCheck) Delete(ctx context.Context, volume string, path string, deleteOpts DeleteOptions) (err error) {
     ctx, done, err := p.TrackDiskHealth(ctx, storageMetricDelete, volume, path)
     if err != nil {
         return err
     }
     defer done(&err)

-    return p.storage.Delete(ctx, volume, path, recursive)
+    return p.storage.Delete(ctx, volume, path, deleteOpts)
 }

 // DeleteVersions deletes slice of versions, it can be same object
@@ -768,7 +768,10 @@ func (p *xlStorageDiskIDCheck) monitorDiskStatus() {
         if err != nil || len(b) != 10001 {
             continue
         }
-        err = p.storage.Delete(context.Background(), minioMetaTmpBucket, fn, false)
+        err = p.storage.Delete(context.Background(), minioMetaTmpBucket, fn, DeleteOptions{
+            Recursive: false,
+            Force:     false,
+        })
         if err == nil {
             logger.Info("Able to read+write, bringing disk %s online.", p.storage.String())
             atomic.StoreInt32(&p.health.status, diskHealthOK)

@@ -818,7 +818,7 @@ func (s *xlStorage) DeleteVol(ctx context.Context, volume string, forceDelete bo
     }

     if forceDelete {
-        err = s.moveToTrash(volumeDir, true)
+        err = s.moveToTrash(volumeDir, true, true)
     } else {
         err = Remove(volumeDir)
     }
@@ -903,7 +903,7 @@ func (s *xlStorage) deleteVersions(ctx context.Context, volume, path string, fis
     if !isXL2V1Format(buf) {
         // Delete the meta file, if there are no more versions the
         // top level parent is automatically removed.
-        return s.deleteFile(volumeDir, pathJoin(volumeDir, path), true)
+        return s.deleteFile(volumeDir, pathJoin(volumeDir, path), true, false)
     }

     var xlMeta xlMetaV2
@@ -939,7 +939,7 @@ func (s *xlStorage) deleteVersions(ctx context.Context, volume, path string, fis
         if err = checkPathLength(filePath); err != nil {
             return err
         }
-        if err = s.moveToTrash(filePath, true); err != nil {
+        if err = s.moveToTrash(filePath, true, false); err != nil {
             if err != errFileNotFound {
                 return err
             }
@@ -959,9 +959,9 @@ func (s *xlStorage) deleteVersions(ctx context.Context, volume, path string, fis
     }

     // Move xl.meta to trash
-    err = s.moveToTrash(pathJoin(volumeDir, path, xlStorageFormatFile), false)
+    err = s.moveToTrash(pathJoin(volumeDir, path, xlStorageFormatFile), false, false)
     if err == nil || err == errFileNotFound {
-        s.deleteFile(volumeDir, pathJoin(volumeDir, path), false)
+        s.deleteFile(volumeDir, pathJoin(volumeDir, path), false, false)
     }
     return err
 }
@@ -985,19 +985,36 @@ func (s *xlStorage) DeleteVersions(ctx context.Context, volume string, versions
     return errs
 }

-func (s *xlStorage) moveToTrash(filePath string, recursive bool) error {
+func (s *xlStorage) moveToTrash(filePath string, recursive, force bool) error {
     pathUUID := mustGetUUID()
+    targetPath := pathutil.Join(s.diskPath, minioMetaTmpDeletedBucket, pathUUID)
+
+    var renameFn func(source, target string) error
+
     if recursive {
-        return renameAll(filePath, pathutil.Join(s.diskPath, minioMetaTmpDeletedBucket, pathUUID))
+        renameFn = renameAll
+    } else {
+        renameFn = Rename
     }
-    return Rename(filePath, pathutil.Join(s.diskPath, minioMetaTmpDeletedBucket, pathUUID))
+
+    if err := renameFn(filePath, targetPath); err != nil {
+        return err
+    }
+    // immediately purge the target
+    if force {
+        removeAll(targetPath)
+    }
+
+    return nil
 }

 // DeleteVersion - deletes FileInfo metadata for path at `xl.meta`. forceDelMarker
 // will force creating a new `xl.meta` to create a new delete marker
 func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi FileInfo, forceDelMarker bool) error {
     if HasSuffix(path, SlashSeparator) {
-        return s.Delete(ctx, volume, path, false)
+        return s.Delete(ctx, volume, path, DeleteOptions{
+            Recursive: false,
+            Force:     false,
+        })
     }

     buf, err := s.ReadAll(ctx, volume, pathJoin(path, xlStorageFormatFile))
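
The reworked moveToTrash above renames the path into the tmp-deleted area and, when force is set, removes the trashed copy right away instead of leaving it for the background cleaner. A rough standalone sketch of that rename-then-purge pattern using only the os package follows; the paths, trash layout, and force flag here are illustrative, not MinIO code.

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // moveToTrash mimics the pattern: rename into a trash directory, then
    // optionally purge the trashed copy immediately when force is set.
    func moveToTrash(path, trashDir string, force bool) error {
        target := filepath.Join(trashDir, filepath.Base(path)+".deleted")
        if err := os.Rename(path, target); err != nil {
            return err
        }
        if force {
            // Immediate purge instead of waiting for a background cleanup pass.
            return os.RemoveAll(target)
        }
        return nil
    }

    func main() {
        dir, _ := os.MkdirTemp("", "demo")
        defer os.RemoveAll(dir)

        obj := filepath.Join(dir, "object")
        trash := filepath.Join(dir, ".trash")
        os.MkdirAll(trash, 0o755)
        os.WriteFile(obj, []byte("data"), 0o644)

        fmt.Println(moveToTrash(obj, trash, true)) // nil: object renamed and purged
    }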
@@ -1035,7 +1052,7 @@ func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi F
     if !isXL2V1Format(buf) {
         // Delete the meta file, if there are no more versions the
         // top level parent is automatically removed.
-        return s.deleteFile(volumeDir, pathJoin(volumeDir, path), true)
+        return s.deleteFile(volumeDir, pathJoin(volumeDir, path), true, false)
     }

     var xlMeta xlMetaV2
@@ -1065,7 +1082,7 @@ func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi F
         if err = checkPathLength(filePath); err != nil {
             return err
         }
-        if err = s.moveToTrash(filePath, true); err != nil {
+        if err = s.moveToTrash(filePath, true, false); err != nil {
             if err != errFileNotFound {
                 return err
             }
@@ -1088,9 +1105,9 @@ func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi F
             return err
         }

-        err = s.moveToTrash(filePath, false)
+        err = s.moveToTrash(filePath, false, false)
         if err == nil || err == errFileNotFound {
-            s.deleteFile(volumeDir, pathJoin(volumeDir, path), false)
+            s.deleteFile(volumeDir, pathJoin(volumeDir, path), false, false)
         }
         return err
     }
@@ -2014,7 +2031,7 @@ func (s *xlStorage) CheckParts(ctx context.Context, volume string, path string,
 // move up the tree, deleting empty parent directories until it finds one
 // with files in it. Returns nil for a non-empty directory even when
 // recursive is set to false.
-func (s *xlStorage) deleteFile(basePath, deletePath string, recursive bool) error {
+func (s *xlStorage) deleteFile(basePath, deletePath string, recursive, force bool) error {
     if basePath == "" || deletePath == "" {
         return nil
     }
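
The deleteFile comment above describes walking up the directory tree and removing parents that became empty, stopping at the first non-empty one. A small standalone sketch of that walk is shown below (os.Remove fails on a non-empty directory, which is the natural stopping condition); basePath handling and error reporting are simplified relative to the real function.

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // pruneEmptyParents removes deletePath and then its parent directories,
    // one level at a time, until it reaches basePath or a non-empty directory.
    func pruneEmptyParents(basePath, deletePath string) {
        for {
            if deletePath == basePath || len(deletePath) <= len(basePath) {
                return
            }
            // os.Remove only deletes files or *empty* directories, so it fails
            // (and we stop) as soon as a parent still has other children.
            if err := os.Remove(deletePath); err != nil {
                return
            }
            deletePath = filepath.Dir(deletePath)
        }
    }

    func main() {
        base, _ := os.MkdirTemp("", "demo")
        defer os.RemoveAll(base)

        nested := filepath.Join(base, "a", "b", "c")
        os.MkdirAll(nested, 0o755)

        pruneEmptyParents(base, nested) // removes c, then b, then a
        entries, _ := os.ReadDir(base)
        fmt.Println("entries left under base:", len(entries)) // 0
    }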
@@ -2027,7 +2044,7 @@ func (s *xlStorage) deleteFile(basePath, deletePath string, recursive bool) erro

     var err error
     if recursive {
-        err = s.moveToTrash(deletePath, true)
+        err = s.moveToTrash(deletePath, true, force)
     } else {
         err = Remove(deletePath)
     }
@@ -2060,13 +2077,13 @@ func (s *xlStorage) deleteFile(basePath, deletePath string, recursive bool) erro

     // Delete parent directory obviously not recursively. Errors for
     // parent directories shouldn't trickle down.
-    s.deleteFile(basePath, deletePath, false)
+    s.deleteFile(basePath, deletePath, false, false)

     return nil
 }

 // DeleteFile - delete a file at path.
-func (s *xlStorage) Delete(ctx context.Context, volume string, path string, recursive bool) (err error) {
+func (s *xlStorage) Delete(ctx context.Context, volume string, path string, deleteOpts DeleteOptions) (err error) {
     volumeDir, err := s.getVolDir(volume)
     if err != nil {
         return err
@@ -2092,7 +2109,7 @@ func (s *xlStorage) Delete(ctx context.Context, volume string, path string, recu
     }

     // Delete file and delete parent directory as well if it's empty.
-    return s.deleteFile(volumeDir, filePath, recursive)
+    return s.deleteFile(volumeDir, filePath, deleteOpts.Recursive, deleteOpts.Force)
 }

 func skipAccessChecks(volume string) (ok bool) {
@@ -2270,7 +2287,7 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
         // legacy data dir means its old content, honor system umask.
         if err = mkdirAll(legacyDataPath, 0o777); err != nil {
             // any failed mkdir-calls delete them.
-            s.deleteFile(dstVolumeDir, legacyDataPath, true)
+            s.deleteFile(dstVolumeDir, legacyDataPath, true, false)
             return osErrToFileErr(err)
         }
@@ -2282,7 +2299,7 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
         if err = Rename(pathJoin(currentDataPath, entry), pathJoin(legacyDataPath, entry)); err != nil {
             // Any failed rename calls un-roll previous transaction.
-            s.deleteFile(dstVolumeDir, legacyDataPath, true)
+            s.deleteFile(dstVolumeDir, legacyDataPath, true, false)
             return osErrToFileErr(err)
         }
@@ -2327,7 +2344,7 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
     if err = xlMeta.AddVersion(fi); err != nil {
         if legacyPreserved {
             // Any failed rename calls un-roll previous transaction.
-            s.deleteFile(dstVolumeDir, legacyDataPath, true)
+            s.deleteFile(dstVolumeDir, legacyDataPath, true, false)
         }
         return err
     }
@@ -2338,7 +2355,7 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
         logger.LogIf(ctx, err)
         if legacyPreserved {
             // Any failed rename calls un-roll previous transaction.
-            s.deleteFile(dstVolumeDir, legacyDataPath, true)
+            s.deleteFile(dstVolumeDir, legacyDataPath, true, false)
         }
         return errFileCorrupt
     }
@@ -2347,7 +2364,7 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
     if err = s.WriteAll(ctx, srcVolume, pathJoin(srcPath, xlStorageFormatFile), dstBuf); err != nil {
         if legacyPreserved {
             // Any failed rename calls un-roll previous transaction.
-            s.deleteFile(dstVolumeDir, legacyDataPath, true)
+            s.deleteFile(dstVolumeDir, legacyDataPath, true, false)
         }
         return osErrToFileErr(err)
     }
@@ -2355,19 +2372,19 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f

     // renameAll only for objects that have xl.meta not saved inline.
     if len(fi.Data) == 0 && fi.Size > 0 {
-        s.moveToTrash(dstDataPath, true)
+        s.moveToTrash(dstDataPath, true, false)
         if healing {
             // If we are healing we should purge any legacyDataPath content,
             // that was previously preserved during PutObject() call
             // on a versioned bucket.
-            s.moveToTrash(legacyDataPath, true)
+            s.moveToTrash(legacyDataPath, true, false)
         }
         if err = renameAll(srcDataPath, dstDataPath); err != nil {
             if legacyPreserved {
                 // Any failed rename calls un-roll previous transaction.
-                s.deleteFile(dstVolumeDir, legacyDataPath, true)
+                s.deleteFile(dstVolumeDir, legacyDataPath, true, false)
             }
-            s.deleteFile(dstVolumeDir, dstDataPath, false)
+            s.deleteFile(dstVolumeDir, dstDataPath, false, false)
             return osErrToFileErr(err)
         }
     }
@@ -2376,9 +2393,9 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
         if err = renameAll(srcFilePath, dstFilePath); err != nil {
             if legacyPreserved {
                 // Any failed rename calls un-roll previous transaction.
-                s.deleteFile(dstVolumeDir, legacyDataPath, true)
+                s.deleteFile(dstVolumeDir, legacyDataPath, true, false)
             }
-            s.deleteFile(dstVolumeDir, dstFilePath, false)
+            s.deleteFile(dstVolumeDir, dstFilePath, false, false)
             return osErrToFileErr(err)
         }
@@ -2386,16 +2403,16 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
         // movement, this is to ensure that previous data references can co-exist for
         // any recoverability.
         if oldDstDataPath != "" {
-            s.moveToTrash(oldDstDataPath, true)
+            s.moveToTrash(oldDstDataPath, true, false)
         }
     } else {
         // Write meta-file directly, no data
         if err = s.WriteAll(ctx, dstVolume, pathJoin(dstPath, xlStorageFormatFile), dstBuf); err != nil {
             if legacyPreserved {
                 // Any failed rename calls un-roll previous transaction.
-                s.deleteFile(dstVolumeDir, legacyDataPath, true)
+                s.deleteFile(dstVolumeDir, legacyDataPath, true, false)
             }
-            s.deleteFile(dstVolumeDir, dstFilePath, false)
+            s.deleteFile(dstVolumeDir, dstFilePath, false, false)
             return err
         }
     }
@@ -2484,7 +2501,7 @@ func (s *xlStorage) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolum

     // Remove parent dir of the source file if empty
     parentDir := pathutil.Dir(srcFilePath)
-    s.deleteFile(srcVolumeDir, parentDir, false)
+    s.deleteFile(srcVolumeDir, parentDir, false, false)

     return nil
 }

@@ -933,14 +933,20 @@ func TestXLStorageListDir(t *testing.T) {
         t.Fatalf("Unable to initialize xlStorage, %s", err)
     }

-    if err = xlStorageNew.Delete(context.Background(), "mybucket", "myobject", false); err != errVolumeAccessDenied {
+    if err = xlStorageNew.Delete(context.Background(), "mybucket", "myobject", DeleteOptions{
+        Recursive: false,
+        Force:     false,
+    }); err != errVolumeAccessDenied {
         t.Errorf("expected: %s, got: %s", errVolumeAccessDenied, err)
     }
 }

 // TestXLStorage for delete on an removed disk.
 // should fail with disk not found.
-    err = xlStorageDeletedStorage.Delete(context.Background(), "del-vol", "my-file", false)
+    err = xlStorageDeletedStorage.Delete(context.Background(), "del-vol", "my-file", DeleteOptions{
+        Recursive: false,
+        Force:     false,
+    })
     if err != errDiskNotFound {
         t.Errorf("Expected: \"Disk not found\", got \"%s\"", err)
     }
@@ -1029,7 +1035,10 @@ func TestXLStorageDeleteFile(t *testing.T) {
     }

     for i, testCase := range testCases {
-        if err = xlStorage.Delete(context.Background(), testCase.srcVol, testCase.srcPath, false); err != testCase.expectedErr {
+        if err = xlStorage.Delete(context.Background(), testCase.srcVol, testCase.srcPath, DeleteOptions{
+            Recursive: false,
+            Force:     false,
+        }); err != testCase.expectedErr {
             t.Errorf("TestXLStorage case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
         }
     }
@@ -1054,7 +1063,10 @@ func TestXLStorageDeleteFile(t *testing.T) {
         t.Fatalf("Unable to initialize xlStorage, %s", err)
     }

-    if err = xlStorageNew.Delete(context.Background(), "mybucket", "myobject", false); err != errVolumeAccessDenied {
+    if err = xlStorageNew.Delete(context.Background(), "mybucket", "myobject", DeleteOptions{
+        Recursive: false,
+        Force:     false,
+    }); err != errVolumeAccessDenied {
         t.Errorf("expected: %s, got: %s", errVolumeAccessDenied, err)
     }
 }
@@ -1072,7 +1084,10 @@ func TestXLStorageDeleteFile(t *testing.T) {

     // TestXLStorage for delete on an removed disk.
     // should fail with disk not found.
-    err = xlStorageDeletedStorage.Delete(context.Background(), "del-vol", "my-file", false)
+    err = xlStorageDeletedStorage.Delete(context.Background(), "del-vol", "my-file", DeleteOptions{
+        Recursive: false,
+        Force:     false,
+    })
     if err != errDiskNotFound {
         t.Errorf("Expected: \"Disk not found\", got \"%s\"", err)
     }
@@ -1907,7 +1922,10 @@ func TestXLStorageVerifyFile(t *testing.T) {
         t.Fatal("expected to fail bitrot check")
     }

-    if err := storage.Delete(context.Background(), volName, fileName, false); err != nil {
+    if err := storage.Delete(context.Background(), volName, fileName, DeleteOptions{
+        Recursive: false,
+        Force:     false,
+    }); err != nil {
         t.Fatal(err)
     }

@@ -70,7 +70,10 @@ func TestUNCPaths(t *testing.T) {
             } else if err == nil && !test.pass {
                 t.Error(err)
             }
-            fs.Delete(context.Background(), "voldir", test.objName, false)
+            fs.Delete(context.Background(), "voldir", test.objName, DeleteOptions{
+                Recursive: false,
+                Force:     false,
+            })
         })
     }
 }