Mirror of https://github.com/minio/minio.git
fix: inline data upon overwrites should be readable (#12369)

This PR fixes two bugs:

- Remove fi.Data upon overwrite of objects from inlined-data to non-inlined-data.
- Work around an existing on-disk bug in the latest releases by ignoring fi.Data and reading from the disk instead for non-inlined-data.

Additionally, add a reserved metadata header to indicate that data is inlined for a given version.
commit 4840974d7a
parent 4fd1378242
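To make the failure mode concrete, here is a minimal, self-contained Go sketch of the invariant this commit restores. The types, the 128 KiB threshold, and the metadata key are simplified stand-ins (the real check in getObjectWithFileInfo compares the per-disk erasure shard size, not the whole object size):

```go
package main

import "fmt"

// Simplified stand-ins for MinIO's FileInfo and inline threshold; the real
// types live in cmd/ and the reserved key uses ReservedMetadataPrefixLower.
const smallFileThreshold = 128 * 1024 // assumed inline threshold

type FileInfo struct {
	Size     int64
	Data     []byte            // inline object data, if any
	Metadata map[string]string // reserved keys include "...inline-data"
}

// trustInlineData mirrors the idea behind the fix: inline data cached in
// metadata is only usable when the version is marked inlined AND the object
// is still small enough to have been inlined. An overwrite from an inlined
// (small) object to a non-inlined (large) one can leave stale fi.Data
// behind, so the size must be cross-checked before trusting it.
func trustInlineData(fi FileInfo) bool {
	if _, ok := fi.Metadata["x-minio-internal-inline-data"]; !ok {
		return false // never marked inlined; read parts from disk
	}
	if fi.Size >= smallFileThreshold {
		return false // too large to be inlined; fi.Data must be stale
	}
	return len(fi.Data) > 0
}

func main() {
	stale := FileInfo{
		Size:     5 * 1024 * 1024, // overwritten with a 5 MiB object
		Data:     []byte("left over from the old inlined version"),
		Metadata: map[string]string{"x-minio-internal-inline-data": "true"},
	}
	fmt.Println(trustInlineData(stale)) // false: read from disk instead
}
```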
--- a/cmd/erasure-healing.go
+++ b/cmd/erasure-healing.go
@@ -520,6 +520,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
             if partsMetadata[i].IsRemote() {
                 partsMetadata[i].DataDir = ""
             }
 
             // Attempt a rename now from healed data to final location.
             if err = disk.RenameData(ctx, minioMetaTmpBucket, tmpID, partsMetadata[i], bucket, object); err != nil {
                 logger.LogIf(ctx, err)
@@ -895,7 +896,6 @@ func (er erasureObjects) HealObject(ctx context.Context, bucket, object, version
 
     // Read metadata files from all the disks
     partsMetadata, errs := readAllFileInfo(healCtx, storageDisks, bucket, object, versionID, false)
 
     if isAllNotFound(errs) {
         err = toObjectErr(errFileNotFound, bucket, object)
         if versionID != "" {
--- a/cmd/erasure-object.go
+++ b/cmd/erasure-object.go
@@ -214,37 +214,6 @@ func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object stri
     return fn(pr, h, opts.CheckPrecondFn, pipeCloser, nsUnlocker)
 }
 
-// GetObject - reads an object erasured coded across multiple
-// disks. Supports additional parameters like offset and length
-// which are synonymous with HTTP Range requests.
-//
-// startOffset indicates the starting read location of the object.
-// length indicates the total length of the object.
-func (er erasureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
-    // Lock the object before reading.
-    lk := er.NewNSLock(bucket, object)
-    lkctx, err := lk.GetRLock(ctx, globalOperationTimeout)
-    if err != nil {
-        return err
-    }
-    ctx = lkctx.Context()
-    defer lk.RUnlock(lkctx.Cancel)
-
-    // Start offset cannot be negative.
-    if startOffset < 0 {
-        logger.LogIf(ctx, errUnexpected, logger.Application)
-        return errUnexpected
-    }
-
-    // Writer cannot be nil.
-    if writer == nil {
-        logger.LogIf(ctx, errUnexpected)
-        return errUnexpected
-    }
-
-    return er.getObject(ctx, bucket, object, startOffset, length, writer, opts)
-}
-
 func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI) error {
     // Reorder online disks based on erasure distribution order.
     // Reorder parts metadata based on erasure distribution order.
@@ -284,6 +253,27 @@ func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, obje
     if err != nil {
         return toObjectErr(err, bucket, object)
     }
 
+    // This hack is needed to avoid a bug found when overwriting
+    // the inline data object with a un-inlined version, for the
+    // time being we need this as we have released inline-data
+    // version already and this bug is already present in newer
+    // releases.
+    //
+    // This mainly happens with objects < smallFileThreshold when
+    // they are overwritten with un-inlined objects >= smallFileThreshold,
+    // due to a bug in RenameData() the fi.Data is not niled leading to
+    // GetObject thinking that fi.Data is valid while fi.Size has
+    // changed already.
+    if _, ok := fi.Metadata[ReservedMetadataPrefixLower+"inline-data"]; ok {
+        shardFileSize := erasure.ShardFileSize(fi.Size)
+        if shardFileSize >= 0 && shardFileSize >= smallFileThreshold {
+            for i := range metaArr {
+                metaArr[i].Data = nil
+            }
+        }
+    }
+
     var healOnce sync.Once
 
     // once we have obtained a common FileInfo i.e latest, we should stick
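The guard above only discards fi.Data when the per-disk footprint says the object can no longer be inlined. A rough, self-contained sketch of that shard-size arithmetic follows; the 128 KiB threshold and the block/shard parameters are illustrative assumptions, not MinIO's exact API:

```go
package main

import "fmt"

// ceilFrac returns ceil(num/den) for positive inputs.
func ceilFrac(num, den int64) int64 {
	if den == 0 {
		return 0
	}
	return (num + den - 1) / den
}

// shardFileSize approximates erasure.ShardFileSize: the number of bytes a
// single disk stores for an object of totalLength bytes, striped over
// dataBlocks data shards in blockSize chunks.
func shardFileSize(blockSize, dataBlocks, totalLength int64) int64 {
	if totalLength <= 0 {
		return totalLength // 0 for empty objects, -1 for unknown length
	}
	numBlocks := totalLength / blockSize
	lastBlock := totalLength % blockSize
	return numBlocks*ceilFrac(blockSize, dataBlocks) + ceilFrac(lastBlock, dataBlocks)
}

func main() {
	const smallFileThreshold = 128 * 1024 // assumed inline threshold
	// A 1 MiB object across 4 data shards with 1 MiB blocks: each disk
	// holds 256 KiB, which is >= the threshold, so any fi.Data cached
	// from an earlier small overwrite must be discarded.
	ss := shardFileSize(1<<20, 4, 1<<20)
	fmt.Println(ss, ss >= smallFileThreshold) // 262144 true
}
```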
@@ -373,23 +363,6 @@ func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, obje
     return nil
 }
 
-// getObject wrapper for erasure GetObject
-func (er erasureObjects) getObject(ctx context.Context, bucket, object string, startOffset, length int64, writer io.Writer, opts ObjectOptions) error {
-    fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, bucket, object, opts, true)
-    if err != nil {
-        return toObjectErr(err, bucket, object)
-    }
-    if fi.Deleted {
-        if opts.VersionID == "" {
-            return toObjectErr(errFileNotFound, bucket, object)
-        }
-        // Make sure to return object info to provide extra information.
-        return toObjectErr(errMethodNotAllowed, bucket, object)
-    }
-
-    return er.getObjectWithFileInfo(ctx, bucket, object, startOffset, length, writer, fi, metaArr, onlineDisks)
-}
-
 // GetObjectInfo - reads object metadata and replies back ObjectInfo.
 func (er erasureObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (info ObjectInfo, err error) {
     if !opts.NoLock {
@@ -797,6 +770,13 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
         partsMetadata[index].ModTime = modTime
     }
 
+    if len(inlineBuffers) > 0 {
+        // Set an additional header when data is inlined.
+        for index := range partsMetadata {
+            partsMetadata[index].Metadata[ReservedMetadataPrefixLower+"inline-data"] = "true"
+        }
+    }
+
     // Rename the successfully written temporary object to final location.
     if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, bucket, object, writeQuorum); err != nil {
         logger.LogIf(ctx, err)
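This is the write-side half of the fix: newly written versions whose shards were buffered in memory get tagged with the reserved marker, so later reads can distinguish a genuinely inlined version from stale leftover bytes. A minimal sketch of that tagging step, assuming pared-down types and the lowercase reserved key:

```go
package main

import "fmt"

const smallFileThreshold = 128 * 1024 // assumed inline threshold

// partMeta is a pared-down stand-in for MinIO's per-disk FileInfo.
type partMeta struct {
	Metadata map[string]string
	Data     []byte
}

// markInlined mirrors the putObject change above: when the write path kept
// the erasure shards in memory (data was inlined), every disk's metadata
// gets the reserved inline-data marker.
func markInlined(parts []partMeta, inlined bool) {
	if !inlined {
		return
	}
	for i := range parts {
		parts[i].Metadata["x-minio-internal-inline-data"] = "true"
	}
}

func main() {
	parts := []partMeta{
		{Metadata: map[string]string{}},
		{Metadata: map[string]string{}},
	}
	objSize := int64(4 * 1024) // 4 KiB object: below the threshold
	markInlined(parts, objSize < smallFileThreshold)
	fmt.Println(parts[0].Metadata) // map[x-minio-internal-inline-data:true]
}
```

In the real putObject the decision hinges on the per-disk shard size, not the raw object size as in this sketch.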
--- a/cmd/erasure-object_test.go
+++ b/cmd/erasure-object_test.go
@@ -323,9 +323,18 @@ func TestGetObjectNoQuorum(t *testing.T) {
         }
     }
 
-    err = xl.GetObject(ctx, bucket, object, 0, int64(len(buf)), ioutil.Discard, "", opts)
-    if err != toObjectErr(errErasureReadQuorum, bucket, object) {
-        t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errErasureReadQuorum, bucket, object), err)
+    gr, err := xl.GetObjectNInfo(ctx, bucket, object, nil, nil, readLock, opts)
+    if err != nil {
+        if err != toObjectErr(errErasureReadQuorum, bucket, object) {
+            t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errErasureReadQuorum, bucket, object), err)
+        }
+    }
+    if gr != nil {
+        _, err = io.Copy(ioutil.Discard, gr)
+        if err != toObjectErr(errErasureReadQuorum, bucket, object) {
+            t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errErasureReadQuorum, bucket, object), err)
+        }
+        gr.Close()
     }
 
     // Test use case 2: Make 9 disks offline, which leaves less than quorum number of disks
@@ -359,9 +368,18 @@ func TestGetObjectNoQuorum(t *testing.T) {
     }
     z.serverPools[0].erasureDisksMu.Unlock()
     // Fetch object from store.
-    err = xl.GetObject(ctx, bucket, object, 0, int64(len("abcd")), ioutil.Discard, "", opts)
-    if err != toObjectErr(errErasureReadQuorum, bucket, object) {
-        t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err)
+    gr, err := xl.GetObjectNInfo(ctx, bucket, object, nil, nil, readLock, opts)
+    if err != nil {
+        if err != toObjectErr(errErasureReadQuorum, bucket, object) {
+            t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errErasureReadQuorum, bucket, object), err)
+        }
+    }
+    if gr != nil {
+        _, err = io.Copy(ioutil.Discard, gr)
+        if err != toObjectErr(errErasureReadQuorum, bucket, object) {
+            t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errErasureReadQuorum, bucket, object), err)
+        }
+        gr.Close()
     }
 }
 
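The tests switch from the deleted GetObject helper to GetObjectNInfo, whose reader can surface a quorum error either at open time or only mid-stream; hence the two-stage error check. A toy illustration of that calling pattern (openObject and errReadQuorum are hypothetical stand-ins, not MinIO APIs):

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

var errReadQuorum = errors.New("read quorum not met") // stand-in error

// openObject models the GetObjectNInfo contract the tests now exercise:
// quorum errors can appear when opening the stream or later while reading
// it, so callers (and tests) must check both places.
func openObject(healthyDisks int) (io.ReadCloser, error) {
	if healthyDisks < 2 { // pretend read quorum is 2
		return nil, errReadQuorum
	}
	return ioutil.NopCloser(strings.NewReader("abcd")), nil
}

func main() {
	gr, err := openObject(1)
	if err != nil {
		fmt.Println("open failed:", err) // quorum error at open time
	}
	if gr != nil {
		if _, err = io.Copy(ioutil.Discard, gr); err != nil {
			fmt.Println("read failed:", err) // or at read time
		}
		gr.Close()
	}
}
```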
--- a/cmd/storage-rest-common.go
+++ b/cmd/storage-rest-common.go
@@ -18,7 +18,7 @@
 package cmd
 
 const (
-    storageRESTVersion       = "v34" // Streaming Usage Updates
+    storageRESTVersion       = "v35" // Inline bugfix needs all servers to be updated
     storageRESTVersionPrefix = SlashSeparator + storageRESTVersion
     storageRESTPrefix        = minioReservedBucketPath + "/storage"
 )
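Bumping storageRESTVersion acts as a compatibility fence: internode storage RPCs are routed under the version prefix, so peers still speaking v34 cannot reach the v35 handlers with the changed inline semantics. A toy sketch of the idea (the path layout and routeAccepted helper are hypothetical simplifications of the real mux registration):

```go
package main

import (
	"fmt"
	"strings"
)

const (
	storageRESTVersion       = "v35"
	storageRESTVersionPrefix = "/" + storageRESTVersion
	storageRESTPrefix        = "/minio/storage"
)

// routeAccepted reports whether a request path targets the current
// internode protocol version; older peers fall off this prefix entirely.
func routeAccepted(requestPath string) bool {
	return strings.HasPrefix(requestPath, storageRESTPrefix+storageRESTVersionPrefix)
}

func main() {
	fmt.Println(routeAccepted("/minio/storage/v35/readversion")) // true
	fmt.Println(routeAccepted("/minio/storage/v34/readversion")) // false: old peer
}
```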
--- a/cmd/xl-storage.go
+++ b/cmd/xl-storage.go
@@ -1076,6 +1076,11 @@ func (s *xlStorage) ReadVersion(ctx context.Context, volume, path, versionID str
 
     if readData {
         if len(fi.Data) > 0 || fi.Size == 0 {
+            if len(fi.Data) > 0 {
+                if _, ok := fi.Metadata[ReservedMetadataPrefixLower+"inline-data"]; !ok {
+                    fi.Metadata[ReservedMetadataPrefixLower+"inline-data"] = "true"
+                }
+            }
             return fi, nil
         }
 
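On the read side, versions written by releases that predate the marker carry inline bytes but no header; ReadVersion back-fills it so the new sanity check does not wrongly drop genuinely inlined data. A minimal sketch under that assumption (the key constant mirrors ReservedMetadataPrefixLower+"inline-data"):

```go
package main

import "fmt"

const inlineKey = "x-minio-internal-inline-data" // assumed reserved key

// backfillInlineMarker captures the ReadVersion change: if a version has
// inline bytes but no marker, stamp the marker on the way out so newer
// code paths treat it as a legitimately inlined version.
func backfillInlineMarker(meta map[string]string, inlineData []byte) {
	if len(inlineData) == 0 {
		return
	}
	if _, ok := meta[inlineKey]; !ok {
		meta[inlineKey] = "true"
	}
}

func main() {
	legacy := map[string]string{} // written by an older release
	backfillInlineMarker(legacy, []byte("small object bytes"))
	fmt.Println(legacy[inlineKey]) // "true"
}
```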
@@ -2023,6 +2028,7 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
                 // Purge the destination path as we are not preserving anything
                 // versioned object was not requested.
                 oldDstDataPath = pathJoin(dstVolumeDir, dstPath, ofi.DataDir)
+                xlMeta.data.remove(nullVersionID, ofi.DataDir)
             }
         }
     }
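Finally, RenameData now purges the overwritten null version's inline payload from the xl.meta blob, which is the first bug named in the commit message. A toy model of that bookkeeping (inlineStore is a hypothetical stand-in for xlMeta.data):

```go
package main

import "fmt"

// inlineStore is a toy stand-in for xlMeta.data: inline payloads keyed by
// version or data-dir identifier inside the xl.meta blob.
type inlineStore map[string][]byte

// remove drops the inline payloads for a version being overwritten, so
// replacing an inlined "null" version with a non-inlined one no longer
// leaves the old bytes behind.
func (s inlineStore) remove(keys ...string) {
	for _, k := range keys {
		delete(s, k)
	}
}

func main() {
	data := inlineStore{
		"null-version-data-dir": []byte("old inlined content"),
	}
	// Overwrite with a large (non-inlined) object: purge the stale entry.
	data.remove("null-version-data-dir")
	fmt.Println(len(data)) // 0
}
```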
|
Loading…
x
Reference in New Issue
Block a user