api: Increase the maximum object size limit from 5GiB to 16GiB. (#3834)

The globalMaxObjectSize limit comes from the S3 spec, presumably
because of limitations in S3's own infrastructure. Minio has no
such limitation and can stream a larger file instead.

So we are going to bump this limit to 16GiB.
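
For context, a minimal sketch (in Go) of what the bump amounts to, assuming the limit is a package-level constant named globalMaxObjectSize guarded by the isMaxObjectSize helper seen in the hunks below; the actual declaration is in one of the changed files not excerpted here:

    // Maximum object size accepted in a single operation; raised from the
    // S3-aligned 5GiB to 16GiB (sketch only, not the literal declaration).
    const globalMaxObjectSize = 16 * 1024 * 1024 * 1024 // 16GiB

    // isMaxObjectSize reports whether size exceeds the object-size cap.
    func isMaxObjectSize(size int64) bool {
        return size > globalMaxObjectSize
    }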

Fixes #3825
Author: Harshavardhana
Date:   2017-03-03 10:14:17 -08:00 (committed by GitHub)
parent 28c53a3555
commit bc52d911ef
7 changed files with 44 additions and 29 deletions

@@ -628,7 +628,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 	}
 	/// maximum copy size for multipart objects in a single operation
-	if isMaxObjectSize(length) {
+	if isMaxAllowedPartSize(length) {
 		writeErrorResponse(w, ErrEntityTooLarge, r.URL)
 		return
 	}
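
This hunk, and the matching one in PutObjectPartHandler further down, stop rejecting a copied or uploaded part against the (now larger) overall object cap and instead check it against a per-part cap, since the S3 multipart API still limits a single part to 5GiB. A hedged sketch of the distinction, with globalMaxPartSize as an assumed name for that per-part constant:

    // isMaxAllowedPartSize reports whether a single uploaded/copied part
    // exceeds the per-part cap. globalMaxPartSize is an assumed name; the
    // real constant lives in one of the other changed files.
    func isMaxAllowedPartSize(size int64) bool {
        return size > globalMaxPartSize
    }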
@@ -637,6 +637,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 	// object is same then only metadata is updated.
 	partInfo, err := objectAPI.CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID, partID, startOffset, length)
 	if err != nil {
+		errorIf(err, "Unable to perform CopyObjectPart %s/%s", srcBucket, srcObject)
 		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
 		return
 	}
@@ -687,7 +688,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 	}
 	/// maximum Upload size for multipart objects in a single operation
-	if isMaxObjectSize(size) {
+	if isMaxAllowedPartSize(size) {
 		writeErrorResponse(w, ErrEntityTooLarge, r.URL)
 		return
 	}
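
Taken together, the two limits compose from a client's point of view: each multipart part must stay under the per-part cap, while the assembled object may now be as large as 16GiB. A small, self-contained arithmetic sketch (plain Go, not Minio API code) of the minimum part count that implies, assuming a 5GiB per-part cap:

    package main

    import "fmt"

    func main() {
        const (
            gib           = int64(1) << 30
            maxObjectSize = 16 * gib // new server-side object size cap
            maxPartSize   = 5 * gib  // assumed per-part cap (S3 multipart limit)
        )
        // A maximally sized 16GiB object needs at least ceil(16/5) = 4 parts.
        minParts := (maxObjectSize + maxPartSize - 1) / maxPartSize
        fmt.Println("minimum parts for a 16GiB object:", minParts)
    }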