do not update bloomFilters for temporary objects

commit 2ab9dc7609
parent 4d876d03e8
Author: Harshavardhana
Date: 2021-05-15 19:54:07 -07:00
2 changed files with 7 additions and 15 deletions
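ObjectPathUpdated feeds MinIO's data update tracker, which records changed object paths in a bloom filter so the usage crawler can limit rescans to recently dirtied prefixes. Rename sources and temporary staging objects are deleted moments after they are created, so recording them only inflates the filter and provokes pointless rescans; this commit stops tracking them. Below is a minimal standalone sketch of the idea, assuming a hypothetical pathTracker type and an illustrative ".minio.sys/" prefix check -- MinIO's real tracker differs in detail:

package main

import (
	"fmt"
	"strings"

	"github.com/bits-and-blooms/bloom/v3"
)

// pathTracker is an illustrative stand-in for MinIO's data update
// tracker; the type and its parameters are assumptions for this sketch.
type pathTracker struct {
	filter *bloom.BloomFilter
}

// objectPathUpdated records a changed object path so a usage crawler can
// restrict rescans to dirty prefixes. Paths under the internal namespace
// are short-lived temporaries; recording them would only pollute the
// filter, so they are skipped.
func (t *pathTracker) objectPathUpdated(p string) {
	if strings.HasPrefix(p, ".minio.sys/") {
		return // temporary/internal object: not tracked
	}
	t.filter.AddString(p)
}

func main() {
	t := &pathTracker{filter: bloom.NewWithEstimates(100000, 0.01)}
	t.objectPathUpdated("mybucket/photos/cat.png")  // tracked
	t.objectPathUpdated(".minio.sys/tmp/uuid-1234") // ignored
	fmt.Println(t.filter.TestString("mybucket/photos/cat.png"))  // true
	fmt.Println(t.filter.TestString(".minio.sys/tmp/uuid-1234")) // false
}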


@@ -526,7 +526,6 @@ func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry str
 // Similar to rename but renames data from srcEntry to dstEntry at dataDir
 func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry string, metadata []FileInfo, dstBucket, dstEntry string, writeQuorum int) ([]StorageAPI, error) {
-    defer ObjectPathUpdated(pathJoin(srcBucket, srcEntry))
     defer ObjectPathUpdated(pathJoin(dstBucket, dstEntry))
     g := errgroup.WithNErrs(len(disks))
@@ -567,7 +566,6 @@ func rename(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBuc
         dstEntry = retainSlash(dstEntry)
         srcEntry = retainSlash(srcEntry)
     }
-    defer ObjectPathUpdated(pathJoin(srcBucket, srcEntry))
     defer ObjectPathUpdated(pathJoin(dstBucket, dstEntry))
     g := errgroup.WithNErrs(len(disks))
@@ -611,14 +609,8 @@ func (er erasureObjects) PutObject(ctx context.Context, bucket string, object st
 // putObject wrapper for erasureObjects PutObject
 func (er erasureObjects) putObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
-    defer func() {
-        ObjectPathUpdated(pathJoin(bucket, object))
-    }()
     data := r.Reader
-    uniqueID := mustGetUUID()
-    tempObj := uniqueID
     // No metadata is set, allocate a new one.
     if opts.UserDefined == nil {
         opts.UserDefined = make(map[string]string)
@@ -664,7 +656,10 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
     if opts.Versioned && fi.VersionID == "" {
         fi.VersionID = mustGetUUID()
     }
     fi.DataDir = mustGetUUID()
+    uniqueID := mustGetUUID()
+    tempObj := uniqueID
     // Initialize erasure metadata.
     for index := range partsMetadata {
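Note how this hunk pairs with the removal above: the temporary-object ID is no longer generated unconditionally at the top of putObject, but only here on the write path, after the version ID and data directory are settled.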
@@ -1444,9 +1439,6 @@ func (er erasureObjects) updateRestoreMetadata(ctx context.Context, bucket, obje
 // restoreTransitionedObject for multipart object chunks the file stream from remote tier into the same number of parts
 // as in the xl.meta for this version and rehydrates the part.n into the fi.DataDir for this version as in the xl.meta
 func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket string, object string, opts ObjectOptions) error {
-    defer func() {
-        ObjectPathUpdated(pathJoin(bucket, object))
-    }()
     setRestoreHeaderFn := func(oi ObjectInfo, rerr error) error {
         er.updateRestoreMetadata(ctx, bucket, object, oi, opts, rerr)
         return rerr
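That covers the object layer: ObjectPathUpdated now fires only for final object destinations, never for rename sources, temporary staging objects, or transition-restore scratch work. The remaining hunks are in the xlStorage backend.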


@@ -292,7 +292,7 @@ func newXLStorage(ep Endpoint) (*xlStorage, error) {
     _, _ = rand.Read(rnd[:])
     tmpFile := ".writable-check-" + hex.EncodeToString(rnd[:]) + ".tmp"
     filePath := pathJoin(p.diskPath, minioMetaTmpBucket, tmpFile)
-    w, err := disk.OpenFileDirectIO(filePath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0666)
+    w, err := OpenFileDirectIO(filePath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0666)
     if err != nil {
         return p, err
     }
@@ -1105,7 +1105,7 @@ func (s *xlStorage) readAllData(volumeDir string, filePath string, requireDirect
     var r io.ReadCloser
     if requireDirectIO {
         var f *os.File
-        f, err = disk.OpenFileDirectIO(filePath, readMode, 0666)
+        f, err = OpenFileDirectIO(filePath, readMode, 0666)
         r = &odirectReader{f, nil, nil, true, true, s, nil}
     } else {
         r, err = OpenFile(filePath, readMode, 0)
@@ -1394,7 +1394,7 @@ func (s *xlStorage) ReadFileStream(ctx context.Context, volume, path string, off
     var file *os.File
     // O_DIRECT only supported if offset is zero
     if offset == 0 && globalStorageClass.GetDMA() == storageclass.DMAReadWrite {
-        file, err = disk.OpenFileDirectIO(filePath, readMode, 0666)
+        file, err = OpenFileDirectIO(filePath, readMode, 0666)
     } else {
         // Open the file for reading.
         file, err = OpenFile(filePath, readMode, 0666)
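Three hunks in this file swap disk.OpenFileDirectIO for a package-local OpenFileDirectIO helper. On Linux the core of such a helper is simply an open with O_DIRECT set, which bypasses the kernel page cache; the sketch below shows that idea under stated assumptions and is not MinIO's actual implementation, which also deals with buffer alignment and non-Linux platforms:

//go:build linux

package main

import (
	"os"
	"syscall"
)

// openFileDirectIO opens a file with O_DIRECT so reads and writes bypass
// the page cache. Callers must use buffers aligned to the filesystem's
// block size (typically 4 KiB), or the I/O fails with EINVAL.
// Illustrative sketch only.
func openFileDirectIO(path string, flag int, perm os.FileMode) (*os.File, error) {
	return os.OpenFile(path, flag|syscall.O_DIRECT, perm)
}

func main() {
	f, err := openFileDirectIO("direct-io-demo.tmp", os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		panic(err) // note: some filesystems (e.g. tmpfs) reject O_DIRECT
	}
	f.Close()
	os.Remove("direct-io-demo.tmp")
}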
@@ -1515,7 +1515,7 @@ func (s *xlStorage) CreateFile(ctx context.Context, volume, path string, fileSiz
     if fileSize >= 0 && fileSize <= smallFileThreshold {
         // For streams smaller than 128KiB we simply write them as O_DSYNC (fdatasync)
         // and not O_DIRECT to avoid the complexities of aligned I/O.
-        w, err := s.openFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC)
+        w, err := s.openFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_EXCL)
         if err != nil {
             return err
         }
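The CreateFile hunk changes the small-file write path from O_TRUNC to O_EXCL. Since every temporary object now gets a freshly generated UUID name, the target path should never pre-exist; O_EXCL turns an unexpected collision or stale leftover into an explicit error rather than a silent truncation. A standalone sketch of the difference (the file name is illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	name := filepath.Join(os.TempDir(), "writable-check-demo.tmp")
	defer os.Remove(name)

	// O_EXCL: creation fails if the path already exists, surfacing
	// name collisions or leftovers instead of hiding them.
	w, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0666)
	if err != nil {
		panic(err)
	}
	w.Close()

	// A second O_EXCL open on the same path reports the collision.
	_, err = os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0666)
	fmt.Println(os.IsExist(err)) // true

	// O_TRUNC, by contrast, silently empties the existing file.
	w2, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
	if err != nil {
		panic(err)
	}
	w2.Close()
}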