fix: validate incoming uploadID to be base64 encoded (#17865)
Bonus fixes include:

- Skip writing the final xl.meta at the uploadID location; renameData() already does this, saving some IOPs.
- Purge the multipart directory properly with a recursive delete; otherwise stale entries can easily pile up, leaving the work to the stale-uploads cleanup.

Fixes #17863
parent 065fd094d1
commit dde1a12819
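Before the diff hunks, here is the headline change in miniature: every multipart entry point now funnels through one check that the client-supplied uploadID decodes as unpadded, URL-safe base64. A minimal standalone sketch of that check, assuming illustrative sample IDs (the real code returns a typed MalformedUploadID{} error rather than a plain one):

package main

import (
	"encoding/base64"
	"fmt"
)

// validateUploadID mirrors the shape of the new check: reject anything
// that is not valid unpadded, URL-safe base64 before touching disk.
func validateUploadID(uploadID string) error {
	if _, err := base64.RawURLEncoding.DecodeString(uploadID); err != nil {
		return fmt.Errorf("malformed upload id %q: %w", uploadID, err)
	}
	return nil
}

func main() {
	fmt.Println(validateUploadID("ZGVwbG95bWVudC1pZC51dWlk")) // <nil>: valid unpadded URL-safe base64
	fmt.Println(validateUploadID("not/base64=="))             // error: '/' and '=' are not in the alphabet
}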
@@ -1,4 +1,4 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
+// Copyright (c) 2015-2023 MinIO, Inc.
 //
 // This file is part of MinIO Object Storage stack
 //
@@ -101,7 +101,6 @@ func (er erasureObjects) checkUploadIDExists(ctx context.Context, bucket, object

 	// Pick one from the first valid metadata.
 	fi, err = pickValidFileInfo(ctx, partsMetadata, modTime, etag, quorum)
-
 	return fi, partsMetadata, err
 }

@@ -310,7 +309,7 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec
 		populatedUploadIds.Add(uploadID)
 		uploads = append(uploads, MultipartInfo{
 			Object:    object,
-			UploadID:  uploadID,
+			UploadID:  base64.RawURLEncoding.EncodeToString([]byte(fmt.Sprintf("%s.%s", globalDeploymentID, uploadID))),
 			Initiated: fi.ModTime,
 		})
 	}
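For context on the hunk above: the uploadID handed back to clients is now an opaque token, base64 over "<deploymentID>.<on-disk uploadID>". A standalone sketch of the round trip, with placeholder UUIDs (in MinIO the prefix comes from the internal globalDeploymentID):

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// Placeholders: in MinIO these come from globalDeploymentID and the
	// on-disk upload directory name.
	deploymentID := "6faeded5-5cf3-4133-8a37-07c5d500207c"
	diskUploadID := "0d7cdd84-b3f0-4e42-9e95-0b7400c3c4a1"

	// Client-facing ID: unpadded URL-safe base64 of "<deploymentID>.<uploadID>".
	clientID := base64.RawURLEncoding.EncodeToString(
		[]byte(fmt.Sprintf("%s.%s", deploymentID, diskUploadID)))
	fmt.Println(clientID)

	// On the way back in, decoding recovers both halves; a decode failure
	// here is what the new validation reports as a malformed upload ID.
	raw, err := base64.RawURLEncoding.DecodeString(clientID)
	if err != nil {
		panic(err)
	}
	depID, upID, _ := strings.Cut(string(raw), ".")
	fmt.Println(depID, upID)
}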
@@ -954,10 +953,10 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 	if err != nil {
 		return oi, err
 	}
-	wctx := wlkctx.Context()
+	ctx = wlkctx.Context()
 	defer uploadIDLock.Unlock(wlkctx)

-	fi, partsMetadata, err := er.checkUploadIDExists(wctx, bucket, object, uploadID, true)
+	fi, partsMetadata, err := er.checkUploadIDExists(ctx, bucket, object, uploadID, true)
 	if err != nil {
 		return oi, toObjectErr(err, bucket, object, uploadID)
 	}
@@ -1227,12 +1226,6 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 	ctx = lkctx.Context()
 	defer lk.Unlock(lkctx)

-	// Write final `xl.meta` at uploadID location
-	onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum)
-	if err != nil {
-		return oi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
-	}
-
 	// Remove parts that weren't present in CompleteMultipartUpload request.
 	for _, curpart := range currentFI.Parts {
 		if objectPartIndex(fi.Parts, curpart.Number) == -1 {
@@ -1242,13 +1235,13 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 			// Request 3: PutObjectPart 2
 			// Request 4: CompleteMultipartUpload --part 2
 			// N.B. 1st part is not present. This part should be removed from the storage.
-			er.removeObjectPart(bucket, object, uploadID, fi.DataDir, curpart.Number)
+			er.removeObjectPart(bucket, object, uploadID, currentFI.DataDir, curpart.Number)
 		}
 	}

 	// Remove part.meta which is not needed anymore.
-	for _, part := range fi.Parts {
-		er.removePartMeta(bucket, object, uploadID, fi.DataDir, part.Number)
+	for _, part := range currentFI.Parts {
+		er.removePartMeta(bucket, object, uploadID, currentFI.DataDir, part.Number)
 	}

 	// Rename the multipart object to final location.
@@ -1488,7 +1488,7 @@ func (z *erasureServerPools) CopyObjectPart(ctx context.Context, srcBucket, srcO

 // PutObjectPart - writes part of an object to hashedSet based on the object name.
 func (z *erasureServerPools) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (PartInfo, error) {
-	if err := checkPutObjectPartArgs(ctx, bucket, object, z); err != nil {
+	if err := checkPutObjectPartArgs(ctx, bucket, object, uploadID, z); err != nil {
 		return PartInfo{}, err
 	}

@@ -1520,7 +1520,7 @@ func (z *erasureServerPools) PutObjectPart(ctx context.Context, bucket, object,
 }

 func (z *erasureServerPools) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) {
-	if err := checkListPartsArgs(ctx, bucket, object, z); err != nil {
+	if err := checkListPartsArgs(ctx, bucket, object, uploadID, z); err != nil {
 		return MultipartInfo{}, err
 	}

@@ -1551,7 +1551,7 @@ func (z *erasureServerPools) GetMultipartInfo(ctx context.Context, bucket, objec

 // ListObjectParts - lists all uploaded parts to an object in hashedSet.
 func (z *erasureServerPools) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (ListPartsInfo, error) {
-	if err := checkListPartsArgs(ctx, bucket, object, z); err != nil {
+	if err := checkListPartsArgs(ctx, bucket, object, uploadID, z); err != nil {
 		return ListPartsInfo{}, err
 	}

@@ -1580,7 +1580,7 @@ func (z *erasureServerPools) ListObjectParts(ctx context.Context, bucket, object

 // Aborts an in-progress multipart operation on hashedSet based on the object name.
 func (z *erasureServerPools) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error {
-	if err := checkAbortMultipartArgs(ctx, bucket, object, z); err != nil {
+	if err := checkAbortMultipartArgs(ctx, bucket, object, uploadID, z); err != nil {
 		return err
 	}

@@ -1611,7 +1611,7 @@ func (z *erasureServerPools) AbortMultipartUpload(ctx context.Context, bucket, o

 // CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name.
 func (z *erasureServerPools) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
-	if err = checkCompleteMultipartArgs(ctx, bucket, object, z); err != nil {
+	if err = checkCompleteMultipartArgs(ctx, bucket, object, uploadID, z); err != nil {
 		return objInfo, err
 	}

@@ -99,7 +99,6 @@ func checkListMultipartArgs(ctx context.Context, bucket, prefix, keyMarker, uplo
 	}
 	if uploadIDMarker != "" {
 		if HasSuffix(keyMarker, SlashSeparator) {
-
 			logger.LogIf(ctx, InvalidUploadIDKeyCombination{
 				UploadIDMarker: uploadIDMarker,
 				KeyMarker:      keyMarker,
@@ -125,24 +124,34 @@ func checkNewMultipartArgs(ctx context.Context, bucket, object string, obj Objec
 	return checkObjectArgs(ctx, bucket, object, obj)
 }

-// Checks for PutObjectPart arguments validity, also validates if bucket exists.
-func checkPutObjectPartArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error {
+func checkMultipartObjectArgs(ctx context.Context, bucket, object, uploadID string, obj ObjectLayer) error {
+	_, err := base64.RawURLEncoding.DecodeString(uploadID)
+	if err != nil {
+		return MalformedUploadID{
+			UploadID: uploadID,
+		}
+	}
 	return checkObjectArgs(ctx, bucket, object, obj)
 }

+// Checks for PutObjectPart arguments validity, also validates if bucket exists.
+func checkPutObjectPartArgs(ctx context.Context, bucket, object, uploadID string, obj ObjectLayer) error {
+	return checkMultipartObjectArgs(ctx, bucket, object, uploadID, obj)
+}
+
 // Checks for ListParts arguments validity, also validates if bucket exists.
-func checkListPartsArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error {
-	return checkObjectArgs(ctx, bucket, object, obj)
+func checkListPartsArgs(ctx context.Context, bucket, object, uploadID string, obj ObjectLayer) error {
+	return checkMultipartObjectArgs(ctx, bucket, object, uploadID, obj)
 }

 // Checks for CompleteMultipartUpload arguments validity, also validates if bucket exists.
-func checkCompleteMultipartArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error {
-	return checkObjectArgs(ctx, bucket, object, obj)
+func checkCompleteMultipartArgs(ctx context.Context, bucket, object, uploadID string, obj ObjectLayer) error {
+	return checkMultipartObjectArgs(ctx, bucket, object, uploadID, obj)
 }

 // Checks for AbortMultipartUpload arguments validity, also validates if bucket exists.
-func checkAbortMultipartArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error {
-	return checkObjectArgs(ctx, bucket, object, obj)
+func checkAbortMultipartArgs(ctx context.Context, bucket, object, uploadID string, obj ObjectLayer) error {
+	return checkMultipartObjectArgs(ctx, bucket, object, uploadID, obj)
 }

 // Checks Object arguments validity, also validates if bucket exists.
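One subtlety in the new checkMultipartObjectArgs above: base64.RawURLEncoding is the unpadded URL-safe alphabet ('-' and '_', no '=' padding), so IDs encoded in other base64 dialects fail the check. A quick demonstration with illustrative inputs:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	for _, id := range []string{
		"ZGVwbG95bWVudC1pZC51dWlk",  // unpadded URL-safe: accepted
		"ZGVwbG95bWVudC1pZC51dWlk=", // '=' padding: rejected by the Raw variant
		"AP+/",                      // standard alphabet '+' and '/': rejected
		"not an id",                 // arbitrary text: rejected
	} {
		_, err := base64.RawURLEncoding.DecodeString(id)
		fmt.Printf("%-30q ok=%v\n", id, err == nil)
	}
}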
@@ -951,8 +951,6 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
 		}
 	}

-	setEventStreamHeaders(w)
-
 	opts, err := completeMultipartOpts(ctx, r, bucket, object)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -2528,11 +2528,14 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
 		}
 	}

-	// srcFilePath is always in minioMetaTmpBucket, an attempt to
-	// remove the temporary folder is enough since at this point
-	// ideally all transaction should be complete.
-	Remove(pathutil.Dir(srcFilePath))
+	if srcVolume != minioMetaMultipartBucket {
+		// srcFilePath is some-times minioMetaTmpBucket, an attempt to
+		// remove the temporary folder is enough since at this point
+		// ideally all transaction should be complete.
+		Remove(pathutil.Dir(srcFilePath))
+	} else {
+		s.deleteFile(srcVolumeDir, pathutil.Dir(srcFilePath), true, false)
+	}
 	return sign, nil
 }

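This last hunk is the second bonus fix from the commit message: under minioMetaMultipartBucket the upload directory still holds part files at this point, so a plain remove (which fails on non-empty directories) would leave the entry behind for the stale-uploads cleanup. A sketch of that distinction using only the standard library, with os.RemoveAll standing in for the recursive s.deleteFile call in the diff:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, _ := os.MkdirTemp("", "uploadid-*")

	// Simulate leftover part files inside an upload directory.
	os.WriteFile(filepath.Join(dir, "part.1"), []byte("x"), 0o644)
	os.WriteFile(filepath.Join(dir, "part.1.meta"), []byte("x"), 0o644)

	// A plain remove fails on a non-empty directory, so the entry piles up.
	fmt.Println("os.Remove:   ", os.Remove(dir))

	// A recursive delete purges the whole tree, like the new else branch.
	fmt.Println("os.RemoveAll:", os.RemoveAll(dir))
}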