fix: validate incoming uploadID to be base64 encoded (#17865)

Bonus fixes include

- no longer write the final xl.meta — renameData() already does this,
  saving some IOPS.

- make sure to purge the multipart directory properly using
  a recursive delete; otherwise these directories can easily pile
  up, forcing reliance on the stale-uploads cleanup.

fixes #17863
This commit is contained in:
Harshavardhana 2023-08-17 09:37:55 -07:00 committed by GitHub
parent 065fd094d1
commit dde1a12819
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 38 additions and 35 deletions

View File

@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc. // Copyright (c) 2015-2023 MinIO, Inc.
// //
// This file is part of MinIO Object Storage stack // This file is part of MinIO Object Storage stack
// //
@ -101,7 +101,6 @@ func (er erasureObjects) checkUploadIDExists(ctx context.Context, bucket, object
// Pick one from the first valid metadata. // Pick one from the first valid metadata.
fi, err = pickValidFileInfo(ctx, partsMetadata, modTime, etag, quorum) fi, err = pickValidFileInfo(ctx, partsMetadata, modTime, etag, quorum)
return fi, partsMetadata, err return fi, partsMetadata, err
} }
@ -310,7 +309,7 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec
populatedUploadIds.Add(uploadID) populatedUploadIds.Add(uploadID)
uploads = append(uploads, MultipartInfo{ uploads = append(uploads, MultipartInfo{
Object: object, Object: object,
UploadID: uploadID, UploadID: base64.RawURLEncoding.EncodeToString([]byte(fmt.Sprintf("%s.%s", globalDeploymentID, uploadID))),
Initiated: fi.ModTime, Initiated: fi.ModTime,
}) })
} }
@ -954,10 +953,10 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
if err != nil { if err != nil {
return oi, err return oi, err
} }
wctx := wlkctx.Context() ctx = wlkctx.Context()
defer uploadIDLock.Unlock(wlkctx) defer uploadIDLock.Unlock(wlkctx)
fi, partsMetadata, err := er.checkUploadIDExists(wctx, bucket, object, uploadID, true) fi, partsMetadata, err := er.checkUploadIDExists(ctx, bucket, object, uploadID, true)
if err != nil { if err != nil {
return oi, toObjectErr(err, bucket, object, uploadID) return oi, toObjectErr(err, bucket, object, uploadID)
} }
@ -1227,12 +1226,6 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
ctx = lkctx.Context() ctx = lkctx.Context()
defer lk.Unlock(lkctx) defer lk.Unlock(lkctx)
// Write final `xl.meta` at uploadID location
onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum)
if err != nil {
return oi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
}
// Remove parts that weren't present in CompleteMultipartUpload request. // Remove parts that weren't present in CompleteMultipartUpload request.
for _, curpart := range currentFI.Parts { for _, curpart := range currentFI.Parts {
if objectPartIndex(fi.Parts, curpart.Number) == -1 { if objectPartIndex(fi.Parts, curpart.Number) == -1 {
@ -1242,13 +1235,13 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
// Request 3: PutObjectPart 2 // Request 3: PutObjectPart 2
// Request 4: CompleteMultipartUpload --part 2 // Request 4: CompleteMultipartUpload --part 2
// N.B. 1st part is not present. This part should be removed from the storage. // N.B. 1st part is not present. This part should be removed from the storage.
er.removeObjectPart(bucket, object, uploadID, fi.DataDir, curpart.Number) er.removeObjectPart(bucket, object, uploadID, currentFI.DataDir, curpart.Number)
} }
} }
// Remove part.meta which is not needed anymore. // Remove part.meta which is not needed anymore.
for _, part := range fi.Parts { for _, part := range currentFI.Parts {
er.removePartMeta(bucket, object, uploadID, fi.DataDir, part.Number) er.removePartMeta(bucket, object, uploadID, currentFI.DataDir, part.Number)
} }
// Rename the multipart object to final location. // Rename the multipart object to final location.

View File

@ -1488,7 +1488,7 @@ func (z *erasureServerPools) CopyObjectPart(ctx context.Context, srcBucket, srcO
// PutObjectPart - writes part of an object to hashedSet based on the object name. // PutObjectPart - writes part of an object to hashedSet based on the object name.
func (z *erasureServerPools) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (PartInfo, error) { func (z *erasureServerPools) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (PartInfo, error) {
if err := checkPutObjectPartArgs(ctx, bucket, object, z); err != nil { if err := checkPutObjectPartArgs(ctx, bucket, object, uploadID, z); err != nil {
return PartInfo{}, err return PartInfo{}, err
} }
@ -1520,7 +1520,7 @@ func (z *erasureServerPools) PutObjectPart(ctx context.Context, bucket, object,
} }
func (z *erasureServerPools) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) { func (z *erasureServerPools) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) {
if err := checkListPartsArgs(ctx, bucket, object, z); err != nil { if err := checkListPartsArgs(ctx, bucket, object, uploadID, z); err != nil {
return MultipartInfo{}, err return MultipartInfo{}, err
} }
@ -1551,7 +1551,7 @@ func (z *erasureServerPools) GetMultipartInfo(ctx context.Context, bucket, objec
// ListObjectParts - lists all uploaded parts to an object in hashedSet. // ListObjectParts - lists all uploaded parts to an object in hashedSet.
func (z *erasureServerPools) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (ListPartsInfo, error) { func (z *erasureServerPools) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (ListPartsInfo, error) {
if err := checkListPartsArgs(ctx, bucket, object, z); err != nil { if err := checkListPartsArgs(ctx, bucket, object, uploadID, z); err != nil {
return ListPartsInfo{}, err return ListPartsInfo{}, err
} }
@ -1580,7 +1580,7 @@ func (z *erasureServerPools) ListObjectParts(ctx context.Context, bucket, object
// Aborts an in-progress multipart operation on hashedSet based on the object name. // Aborts an in-progress multipart operation on hashedSet based on the object name.
func (z *erasureServerPools) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error { func (z *erasureServerPools) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error {
if err := checkAbortMultipartArgs(ctx, bucket, object, z); err != nil { if err := checkAbortMultipartArgs(ctx, bucket, object, uploadID, z); err != nil {
return err return err
} }
@ -1611,7 +1611,7 @@ func (z *erasureServerPools) AbortMultipartUpload(ctx context.Context, bucket, o
// CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name. // CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name.
func (z *erasureServerPools) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) { func (z *erasureServerPools) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
if err = checkCompleteMultipartArgs(ctx, bucket, object, z); err != nil { if err = checkCompleteMultipartArgs(ctx, bucket, object, uploadID, z); err != nil {
return objInfo, err return objInfo, err
} }

View File

@ -99,7 +99,6 @@ func checkListMultipartArgs(ctx context.Context, bucket, prefix, keyMarker, uplo
} }
if uploadIDMarker != "" { if uploadIDMarker != "" {
if HasSuffix(keyMarker, SlashSeparator) { if HasSuffix(keyMarker, SlashSeparator) {
logger.LogIf(ctx, InvalidUploadIDKeyCombination{ logger.LogIf(ctx, InvalidUploadIDKeyCombination{
UploadIDMarker: uploadIDMarker, UploadIDMarker: uploadIDMarker,
KeyMarker: keyMarker, KeyMarker: keyMarker,
@ -125,24 +124,34 @@ func checkNewMultipartArgs(ctx context.Context, bucket, object string, obj Objec
return checkObjectArgs(ctx, bucket, object, obj) return checkObjectArgs(ctx, bucket, object, obj)
} }
// Checks for PutObjectPart arguments validity, also validates if bucket exists. func checkMultipartObjectArgs(ctx context.Context, bucket, object, uploadID string, obj ObjectLayer) error {
func checkPutObjectPartArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error { _, err := base64.RawURLEncoding.DecodeString(uploadID)
if err != nil {
return MalformedUploadID{
UploadID: uploadID,
}
}
return checkObjectArgs(ctx, bucket, object, obj) return checkObjectArgs(ctx, bucket, object, obj)
} }
// Checks for PutObjectPart arguments validity, also validates if bucket exists.
func checkPutObjectPartArgs(ctx context.Context, bucket, object, uploadID string, obj ObjectLayer) error {
return checkMultipartObjectArgs(ctx, bucket, object, uploadID, obj)
}
// Checks for ListParts arguments validity, also validates if bucket exists. // Checks for ListParts arguments validity, also validates if bucket exists.
func checkListPartsArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error { func checkListPartsArgs(ctx context.Context, bucket, object, uploadID string, obj ObjectLayer) error {
return checkObjectArgs(ctx, bucket, object, obj) return checkMultipartObjectArgs(ctx, bucket, object, uploadID, obj)
} }
// Checks for CompleteMultipartUpload arguments validity, also validates if bucket exists. // Checks for CompleteMultipartUpload arguments validity, also validates if bucket exists.
func checkCompleteMultipartArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error { func checkCompleteMultipartArgs(ctx context.Context, bucket, object, uploadID string, obj ObjectLayer) error {
return checkObjectArgs(ctx, bucket, object, obj) return checkMultipartObjectArgs(ctx, bucket, object, uploadID, obj)
} }
// Checks for AbortMultipartUpload arguments validity, also validates if bucket exists. // Checks for AbortMultipartUpload arguments validity, also validates if bucket exists.
func checkAbortMultipartArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error { func checkAbortMultipartArgs(ctx context.Context, bucket, object, uploadID string, obj ObjectLayer) error {
return checkObjectArgs(ctx, bucket, object, obj) return checkMultipartObjectArgs(ctx, bucket, object, uploadID, obj)
} }
// Checks Object arguments validity, also validates if bucket exists. // Checks Object arguments validity, also validates if bucket exists.

View File

@ -951,8 +951,6 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
} }
} }
setEventStreamHeaders(w)
opts, err := completeMultipartOpts(ctx, r, bucket, object) opts, err := completeMultipartOpts(ctx, r, bucket, object)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)

View File

@ -2528,11 +2528,14 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
} }
} }
// srcFilePath is always in minioMetaTmpBucket, an attempt to if srcVolume != minioMetaMultipartBucket {
// remove the temporary folder is enough since at this point // srcFilePath is some-times minioMetaTmpBucket, an attempt to
// ideally all transaction should be complete. // remove the temporary folder is enough since at this point
// ideally all transaction should be complete.
Remove(pathutil.Dir(srcFilePath)) Remove(pathutil.Dir(srcFilePath))
} else {
s.deleteFile(srcVolumeDir, pathutil.Dir(srcFilePath), true, false)
}
return sign, nil return sign, nil
} }