fix: issues with handling delete markers in metacache (#11150)

Additional cases handled

- fix/address situations where healing is not
  triggered on failed writes and deletes
  (a sketch of the new offline-disk check follows below).

- consider an object to exist during listing
  only when its metadata can be successfully decoded.
Harshavardhana 2020-12-22 09:16:43 -08:00 committed by GitHub
parent 274bbad5cb
commit 35fafb837b
6 changed files with 41 additions and 48 deletions
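
As context for the first bullet: the healing part of the fix comes down to how an offline disk is detected in the hunks below. Previously a slot was flagged only when its disk handle was nil, so a disk that was present but had gone offline never queued the object into the MRF list; the diff switches every such check to disk != nil && disk.IsOnline(). A minimal standalone sketch of the new predicate, assuming an illustrative disk interface with fakeDisk and needsHeal as stand-ins (not MinIO's actual StorageAPI types):

package main

import "fmt"

// disk is a stand-in for the small part of the storage interface used here.
type disk interface {
    IsOnline() bool
}

type fakeDisk struct{ online bool }

func (d fakeDisk) IsOnline() bool { return d.online }

// needsHeal mirrors the new check in the diff: a slot needs healing when the
// disk handle is missing (nil) OR the disk is present but reports offline.
// The old code only caught the nil case, so failed-but-present disks were skipped.
func needsHeal(disks []disk) bool {
    for _, d := range disks {
        if d != nil && d.IsOnline() {
            continue // healthy disk, nothing to do
        }
        return true // nil or offline disk found -> queue the object for heal
    }
    return false
}

func main() {
    fmt.Println(needsHeal([]disk{fakeDisk{online: true}, nil}))                     // true: missing disk
    fmt.Println(needsHeal([]disk{fakeDisk{online: true}, fakeDisk{online: false}})) // true: offline disk (newly caught)
    fmt.Println(needsHeal([]disk{fakeDisk{online: true}, fakeDisk{online: true}}))  // false: all healthy
}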

@@ -846,22 +846,22 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
     }

     // Check if there is any offline disk and add it to the MRF list
-    for i, disk := range onlineDisks {
-        if disk == nil || storageDisks[i] == nil {
-            er.addPartial(bucket, object, fi.VersionID)
-            break
-        }
+    for _, disk := range onlineDisks {
+        if disk != nil && disk.IsOnline() {
+            continue
+        }
+        er.addPartial(bucket, object, fi.VersionID)
+        break
     }

     for i := 0; i < len(onlineDisks); i++ {
-        if onlineDisks[i] == nil {
-            continue
-        }
-        // Object info is the same in all disks, so we can pick
-        // the first meta from online disk
-        fi = partsMetadata[i]
-        break
+        if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
+            // Object info is the same in all disks, so we can pick
+            // the first meta from online disk
+            fi = partsMetadata[i]
+            break
+        }
     }

     // Success, return object info.
     return fi.ToObjectInfo(bucket, object), nil

@@ -727,21 +727,21 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
     // Whether a disk was initially or becomes offline
     // during this upload, send it to the MRF list.
     for i := 0; i < len(onlineDisks); i++ {
-        if onlineDisks[i] == nil || storageDisks[i] == nil {
-            er.addPartial(bucket, object, fi.VersionID)
-            break
-        }
+        if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
+            continue
+        }
+        er.addPartial(bucket, object, fi.VersionID)
+        break
     }

     for i := 0; i < len(onlineDisks); i++ {
-        if onlineDisks[i] == nil {
-            continue
-        }
-        // Object info is the same in all disks, so we can pick
-        // the first meta from online disk
-        fi = partsMetadata[i]
-        break
+        if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
+            // Object info is the same in all disks, so we can pick
+            // the first meta from online disk
+            fi = partsMetadata[i]
+            break
+        }
     }

     return fi.ToObjectInfo(bucket, object), nil
 }
@@ -922,18 +922,17 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objec
     for _, version := range versions {
         // Check if there is any offline disk and add it to the MRF list
         for _, disk := range storageDisks {
-            if disk == nil {
-                // ignore delete markers for quorum
-                if version.Deleted {
-                    continue
-                }
-                // all other direct versionId references we should
-                // ensure no dangling file is left over.
-                er.addPartial(bucket, version.Name, version.VersionID)
-                break
-            }
+            if disk != nil && disk.IsOnline() {
+                // Skip attempted heal on online disks.
+                continue
+            }
+
+            // all other direct versionId references we should
+            // ensure no dangling file is left over.
+            er.addPartial(bucket, version.Name, version.VersionID)
+            break
         }
     }

     return dobjects, errs
 }
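
The DeleteObjects hunk above also drops the old special case that skipped delete markers ("ignore delete markers for quorum") when deciding whether to queue a heal, so a delete marker written while a disk was down or offline is now added to the MRF list like any other version. A hypothetical sketch of that decision, with deletedVersion and queueHeal standing in for the real per-version loop that calls er.addPartial:

package main

import "fmt"

// deletedVersion is an illustrative stand-in for the per-version info used by
// DeleteObjects (assumed shape, not the real type).
type deletedVersion struct {
    Name      string
    VersionID string
    Deleted   bool // true when this version is a delete marker
}

// queueHeal reports whether a version should be queued for healing given the
// per-disk online states. Note that v.Deleted is intentionally no longer
// consulted: before the fix, delete markers were excluded from this decision.
func queueHeal(v deletedVersion, diskOnline []bool) bool {
    for _, online := range diskOnline {
        if online {
            continue // skip attempted heal on online disks
        }
        return true // at least one disk missed the write, heal this version
    }
    return false
}

func main() {
    marker := deletedVersion{Name: "obj", Deleted: true}
    fmt.Println(queueHeal(marker, []bool{true, false, true, true})) // true (was false before the fix)
}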
@@ -1042,11 +1041,12 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string
     }

     for _, disk := range storageDisks {
-        if disk == nil {
-            er.addPartial(bucket, object, opts.VersionID)
-            break
-        }
+        if disk != nil && disk.IsOnline() {
+            continue
+        }
+        er.addPartial(bucket, object, opts.VersionID)
+        break
     }

     return ObjectInfo{
         Bucket: bucket,

@@ -73,11 +73,13 @@ func (e *metaCacheEntry) matches(other *metaCacheEntry, bucket string) bool {
     if len(e.metadata) != len(other.metadata) || e.name != other.name {
         return false
     }
     eFi, eErr := e.fileInfo(bucket)
-    oFi, oErr := e.fileInfo(bucket)
+    oFi, oErr := other.fileInfo(bucket)
     if eErr != nil || oErr != nil {
         return eErr == oErr
     }
     return eFi.ModTime.Equal(oFi.ModTime) && eFi.Size == oFi.Size && eFi.VersionID == oFi.VersionID
 }
@@ -213,11 +215,12 @@ func (m metaCacheEntries) resolve(r *metadataResolutionParams) (selected *metaCa
         }

         // Get new entry metadata
-        objExists++
         fiv, err := entry.fileInfo(r.bucket)
         if err != nil {
             continue
         }
+
+        objExists++
         if selFIV == nil {
             selected = entry
             selFIV = fiv
@@ -227,14 +230,8 @@ func (m metaCacheEntries) resolve(r *metadataResolutionParams) (selected *metaCa
         if selected.matches(entry, r.bucket) {
             continue
         }
-        }
-        // Select latest modtime.
-        if fiv.ModTime.After(selFIV.ModTime) {
-            selected = entry
-            selFIV = fiv
-            continue
-        }
-    }
+    }
+
     // If directory, we need quorum.
     if dirExists > 0 && dirExists < r.dirQuorum {
         return nil, false
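
The resolve() change above is what the second bullet of the commit message refers to: objExists is now incremented only after entry.fileInfo() decodes successfully, so copies whose metadata cannot be decoded no longer count toward the object quorum, and the modtime-based tie-break is dropped. A rough standalone sketch of the counting logic, with entry, decodable and existsWithQuorum as simplified stand-ins for the real metaCacheEntry/fileInfo machinery:

package main

import (
    "errors"
    "fmt"
)

// entry is a simplified stand-in for a metacache entry returned by one disk.
type entry struct {
    name     string
    metadata []byte
}

// decodable pretends to parse the entry metadata, failing on empty input.
func (e entry) decodable() error {
    if len(e.metadata) == 0 {
        return errors.New("unable to decode metadata")
    }
    return nil
}

// existsWithQuorum counts only entries whose metadata actually decodes and
// compares that count against the object quorum. Before the fix the counter
// was bumped even when decoding failed.
func existsWithQuorum(entries []entry, objQuorum int) bool {
    objExists := 0
    for _, e := range entries {
        if err := e.decodable(); err != nil {
            continue // corrupt or missing metadata does not count
        }
        objExists++
    }
    return objExists >= objQuorum
}

func main() {
    good := entry{name: "obj", metadata: []byte(`{"versions":[]}`)}
    bad := entry{name: "obj"} // metadata missing or failed to decode

    fmt.Println(existsWithQuorum([]entry{good, good, bad, bad}, 2)) // true
    fmt.Println(existsWithQuorum([]entry{good, bad, bad, bad}, 2))  // false: only one decodable copy
}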

@@ -147,13 +147,11 @@ func (z *erasureServerPools) listPath(ctx context.Context, o listPathOptions) (e
     var wg sync.WaitGroup
     var errs []error
     allAtEOF := true
-    asked := 0
     mu.Lock()
     // Ask all sets and merge entries.
     for _, zone := range z.serverPools {
         for _, set := range zone.sets {
             wg.Add(1)
-            asked++
             go func(i int, set *erasureObjects) {
                 defer wg.Done()
                 e, err := set.listPath(ctx, o)

@@ -168,12 +168,10 @@ func (o *listPathOptions) gatherResults(in <-chan metaCacheEntry) func() (metaCa
             o.debugln("not in dir", o.Prefix, o.Separator)
             continue
         }
-        if !o.InclDeleted && entry.isObject() {
-            if entry.isLatestDeletemarker() {
-                o.debugln("latest delete")
-                continue
-            }
-        }
+        if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() {
+            o.debugln("latest is delete marker")
+            continue
+        }
         if o.Limit > 0 && results.len() >= o.Limit {
             // We have enough and we have more.
             // Do not return io.EOF

@@ -519,7 +519,7 @@ func (r *metacacheReader) readN(n int, inclDeleted, inclDirs bool, prefix string
         if !inclDirs && meta.isDir() {
             continue
         }
-        if meta.isDir() && !inclDeleted && meta.isLatestDeletemarker() {
+        if !inclDeleted && meta.isLatestDeletemarker() {
            continue
         }
         res = append(res, meta)
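
The readN change above is the heart of the delete-marker fix from the title: the old condition only dropped an entry whose latest version is a delete marker when the entry was also a directory, so delete markers leaked into listings that asked to exclude deleted objects; gatherResults applies the same, now flattened, filter. A small illustrative sketch of the corrected predicate, with listEntry and skip as hypothetical stand-ins for metaCacheEntry and its isDir()/isLatestDeletemarker() accessors:

package main

import "fmt"

// listEntry is an illustrative stand-in for a metacache entry.
type listEntry struct {
    name            string
    dir             bool
    latestIsDelMark bool
}

// skip reports whether a listing should drop the entry. It mirrors the corrected
// condition in readN/gatherResults: the delete-marker check is no longer gated
// on the entry being a directory.
func skip(e listEntry, inclDeleted, inclDirs bool) bool {
    if !inclDirs && e.dir {
        return true
    }
    if !inclDeleted && e.latestIsDelMark {
        return true
    }
    return false
}

func main() {
    obj := listEntry{name: "prefix/obj", latestIsDelMark: true}
    // The old predicate (isDir && !inclDeleted && isLatestDeletemarker) would have
    // kept this object in the listing; the fixed predicate drops it.
    fmt.Println(skip(obj, false, true)) // true
}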