Mirror of https://github.com/minio/minio.git
api: Introduce metadata update APIs to update only metadata (#11962)
The current implementation relies heavily on readAllFileInfo, but with the advent of xl.meta inlined with data we can no longer easily avoid reading the data when we are only interested in updating metadata. This invariably leads to write amplification during metadata updates, with data being read repeatedly just to change metadata. This PR implements a metadata-only update API at the storage layer that handles updates to metadata alone for any given version, provided the version is valid and present. This helps reduce the chattiness of the following calls:

- PutObjectTags
- DeleteObjectTags
- PutObjectLegalHold
- PutObjectRetention
- ReplicateObject (updates metadata on replication status)
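To make the storage-layer contract concrete before the diff: the change introduces a call that rewrites only the metadata of an existing version (its xl.meta record) and never touches object data. The sketch below is a minimal, self-contained illustration of that idea under simplified assumptions; FileInfo is trimmed to a version ID plus a metadata map, and memDisk is a hypothetical in-memory stand-in, not MinIO's StorageAPI implementation.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// FileInfo is a trimmed-down stand-in for the real xl.meta version record.
type FileInfo struct {
	VersionID string
	Metadata  map[string]string
}

// metadataUpdater mirrors the shape of the new storage-layer call: rewrite
// only the metadata of an existing version, never its data.
type metadataUpdater interface {
	UpdateMetadata(ctx context.Context, bucket, object string, fi FileInfo) error
}

// memDisk is a toy in-memory "disk" used only to exercise the contract.
type memDisk struct {
	versions map[string]FileInfo // key: bucket/object/versionID
}

var _ metadataUpdater = (*memDisk)(nil)

func (d *memDisk) UpdateMetadata(ctx context.Context, bucket, object string, fi FileInfo) error {
	key := bucket + "/" + object + "/" + fi.VersionID
	cur, ok := d.versions[key]
	if !ok {
		return errors.New("file version not found")
	}
	// Only the metadata map changes; object data (not modeled here) is untouched.
	cur.Metadata = fi.Metadata
	d.versions[key] = cur
	return nil
}

func main() {
	d := &memDisk{versions: map[string]FileInfo{
		"bucket/obj/v1": {VersionID: "v1", Metadata: map[string]string{"X-Amz-Object-Tagging": "team=eng"}},
	}}
	fi := FileInfo{VersionID: "v1", Metadata: map[string]string{"X-Amz-Object-Tagging": "team=ops"}}
	if err := d.UpdateMetadata(context.Background(), "bucket", "obj", fi); err != nil {
		fmt.Println("update failed:", err)
		return
	}
	fmt.Println("updated metadata:", d.versions["bucket/obj/v1"].Metadata)
}
```

The benefit shown in the diff is that callers such as PutObjectTags can merge their changes into the picked FileInfo once and hand it to each disk, instead of rewriting whole xl.meta files through a temporary object and a rename.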
@@ -96,7 +96,6 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
 	}
 
-	onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi)
 
 	versionID := srcInfo.VersionID
 	if srcInfo.versionOnly {
 		versionID = dstOpts.VersionID
@@ -1201,6 +1200,56 @@ func (er erasureObjects) addPartial(bucket, object, versionID string) {
 	}
 }
 
+func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
+	var err error
+	// Lock the object before updating tags.
+	lk := er.NewNSLock(bucket, object)
+	ctx, err = lk.GetLock(ctx, globalOperationTimeout)
+	if err != nil {
+		return ObjectInfo{}, err
+	}
+	defer lk.Unlock()
+
+	disks := er.getDisks()
+
+	// Read metadata associated with the object from all disks.
+	metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, false)
+
+	readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
+	if err != nil {
+		return ObjectInfo{}, toObjectErr(err, bucket, object)
+	}
+
+	// List all online disks.
+	_, modTime := listOnlineDisks(disks, metaArr, errs)
+
+	// Pick latest valid metadata.
+	fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
+	if err != nil {
+		return ObjectInfo{}, toObjectErr(err, bucket, object)
+	}
+	if fi.Deleted {
+		if opts.VersionID == "" {
+			return ObjectInfo{}, toObjectErr(errFileNotFound, bucket, object)
+		}
+		return ObjectInfo{}, toObjectErr(errMethodNotAllowed, bucket, object)
+	}
+
+	for k, v := range opts.UserDefined {
+		fi.Metadata[k] = v
+	}
+	fi.ModTime = opts.MTime
+	fi.VersionID = opts.VersionID
+
+	if err = er.updateObjectMeta(ctx, bucket, object, fi); err != nil {
+		return ObjectInfo{}, toObjectErr(err, bucket, object)
+	}
+
+	objInfo := fi.ToObjectInfo(bucket, object)
+
+	return objInfo, nil
+}
+
 // PutObjectTags - replace or add tags to an existing object
 func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) (ObjectInfo, error) {
 	var err error
@@ -1215,15 +1264,15 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
 	disks := er.getDisks()
 
 	// Read metadata associated with the object from all disks.
-	metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, true)
+	metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, false)
 
-	readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
+	readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
 	if err != nil {
 		return ObjectInfo{}, toObjectErr(err, bucket, object)
 	}
 
 	// List all online disks.
-	onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs)
+	_, modTime := listOnlineDisks(disks, metaArr, errs)
 
 	// Pick latest valid metadata.
 	fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
@@ -1237,118 +1286,43 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
 		return ObjectInfo{}, toObjectErr(errMethodNotAllowed, bucket, object)
 	}
 
-	onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi)
-	for i, metaFi := range metaArr {
-		if metaFi.IsValid() {
-			// clean fi.Meta of tag key, before updating the new tags
-			delete(metaFi.Metadata, xhttp.AmzObjectTagging)
-			// Don't update for empty tags
-			if tags != "" {
-				metaFi.Metadata[xhttp.AmzObjectTagging] = tags
-			}
-			for k, v := range opts.UserDefined {
-				metaFi.Metadata[k] = v
-			}
-			metaArr[i].Metadata = metaFi.Metadata
-		}
-	}
-
-	tempObj := mustGetUUID()
-
-	var online int
-	// Cleanup in case of xl.meta writing failure
-	defer func() {
-		if online != len(onlineDisks) {
-			er.deleteObject(context.Background(), minioMetaTmpBucket, tempObj, writeQuorum)
-		}
-	}()
-
-	// Write unique `xl.meta` for each disk.
-	if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
-		return ObjectInfo{}, toObjectErr(err, bucket, object)
-	}
-
-	// Atomically rename metadata from tmp location to destination for each disk.
-	if onlineDisks, err = renameFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, bucket, object, writeQuorum); err != nil {
-		return ObjectInfo{}, toObjectErr(err, bucket, object)
-	}
-
-	online = countOnlineDisks(onlineDisks)
-
-	objInfo := fi.ToObjectInfo(bucket, object)
-	objInfo.UserTags = tags
-
-	return objInfo, nil
-}
-
-// updateObjectMeta will update the metadata of a file.
-func (er erasureObjects) updateObjectMeta(ctx context.Context, bucket, object string, meta map[string]string, opts ObjectOptions) error {
-	if len(meta) == 0 {
-		return nil
-	}
-	disks := er.getDisks()
-
-	// Read metadata associated with the object from all disks.
-	metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, true)
-
-	readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
-	if err != nil {
-		return toObjectErr(err, bucket, object)
-	}
-
-	// List all online disks.
-	_, modTime := listOnlineDisks(disks, metaArr, errs)
-
-	// Pick latest valid metadata.
-	fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
-	if err != nil {
-		return toObjectErr(err, bucket, object)
-	}
-
-	// Update metadata
-	for k, v := range meta {
+	fi.Metadata[xhttp.AmzObjectTagging] = tags
+	for k, v := range opts.UserDefined {
 		fi.Metadata[k] = v
 	}
 
-	if fi.Deleted {
-		if opts.VersionID == "" {
-			return toObjectErr(errFileNotFound, bucket, object)
-		}
-		return toObjectErr(errMethodNotAllowed, bucket, object)
+	if err = er.updateObjectMeta(ctx, bucket, object, fi); err != nil {
+		return ObjectInfo{}, toObjectErr(err, bucket, object)
 	}
 
-	for i := range metaArr {
-		if errs[i] != nil {
-			// Avoid disks where loading metadata fail
-			continue
-		}
+	return fi.ToObjectInfo(bucket, object), nil
+}
 
-		metaArr[i].Metadata = fi.Metadata
+// updateObjectMeta will update the metadata of a file.
+func (er erasureObjects) updateObjectMeta(ctx context.Context, bucket, object string, fi FileInfo) error {
+	if len(fi.Metadata) == 0 {
+		return nil
 	}
 
-	tempObj := mustGetUUID()
+	disks := er.getDisks()
 
-	var online int
-	// Cleanup in case of xl.meta writing failure
-	defer func() {
-		if online != len(disks) {
-			er.deleteObject(context.Background(), minioMetaTmpBucket, tempObj, writeQuorum)
-		}
-	}()
+	g := errgroup.WithNErrs(len(disks))
 
-	// Write unique `xl.meta` for each disk.
-	if disks, err = writeUniqueFileInfo(ctx, disks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
-		return toObjectErr(err, bucket, object)
+	// Start writing `xl.meta` to all disks in parallel.
+	for index := range disks {
+		index := index
+		g.Go(func() error {
+			if disks[index] == nil {
+				return errDiskNotFound
+			}
+			return disks[index].UpdateMetadata(ctx, bucket, object, fi)
+		}, index)
 	}
 
-	// Atomically rename metadata from tmp location to destination for each disk.
-	if disks, err = renameFileInfo(ctx, disks, minioMetaTmpBucket, tempObj, bucket, object, writeQuorum); err != nil {
-		logger.LogIf(ctx, err)
-		return toObjectErr(err, bucket, object)
-	}
+	// Wait for all the routines.
+	mErrs := g.Wait()
 
-	online = countOnlineDisks(disks)
-	return nil
+	return reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, getWriteQuorum(len(disks)))
 }
 
 // DeleteObjectTags - delete object tags from an existing object
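As a usage-level follow-up to the new updateObjectMeta above, which fans the metadata write out to every disk in parallel and reduces the per-disk errors against a write quorum: the sketch below reproduces that pattern with plain goroutines and a simple success count. The disk interface, the okDisk and badDisk types, and the quorum arithmetic are simplified assumptions for illustration, not MinIO's internal errgroup, reduceWriteQuorumErrs, or getWriteQuorum helpers.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// disk is a minimal stand-in for a storage endpoint that can rewrite xl.meta.
type disk interface {
	UpdateMetadata(ctx context.Context, bucket, object string, metadata map[string]string) error
}

// okDisk always succeeds and badDisk always fails; both are toy types for the demo.
type okDisk struct{}
type badDisk struct{}

func (okDisk) UpdateMetadata(context.Context, string, string, map[string]string) error {
	return nil
}

func (badDisk) UpdateMetadata(context.Context, string, string, map[string]string) error {
	return errors.New("disk offline")
}

// updateMetadataQuorum fans the metadata write out to every disk in parallel
// and succeeds only if at least `quorum` disks acknowledge the update.
func updateMetadataQuorum(ctx context.Context, disks []disk, bucket, object string, metadata map[string]string, quorum int) error {
	errs := make([]error, len(disks))
	var wg sync.WaitGroup
	for i := range disks {
		if disks[i] == nil {
			errs[i] = errors.New("disk not found")
			continue
		}
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			errs[i] = disks[i].UpdateMetadata(ctx, bucket, object, metadata)
		}(i)
	}
	wg.Wait()

	// Reduce the per-disk results against the write quorum.
	ok := 0
	for _, err := range errs {
		if err == nil {
			ok++
		}
	}
	if ok < quorum {
		return fmt.Errorf("metadata update failed: %d/%d disks succeeded, quorum is %d", ok, len(disks), quorum)
	}
	return nil
}

func main() {
	disks := []disk{okDisk{}, okDisk{}, badDisk{}, okDisk{}}
	tags := map[string]string{"X-Amz-Object-Tagging": "team=eng"}
	err := updateMetadataQuorum(context.Background(), disks, "bucket", "obj", tags, len(disks)/2+1)
	fmt.Println("result:", err) // 3 of 4 disks succeeded, quorum is 3, so err is nil
}
```

Failing the whole call when fewer than a quorum of disks acknowledge the write is what keeps a metadata-only update consistent with the erasure-coded object it describes.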