mirror of https://github.com/minio/minio.git
fix: update allowed max chunk payload size to 16MiB (#13169)
fixes #13163
This commit is contained in:
parent 7f49c38e2d
commit 5c448b1b97
@@ -20,9 +20,11 @@ package cmd
 import (
 	"context"
 	"encoding/xml"
+	"errors"
 	"fmt"
 	"net/http"
 	"net/url"
+	"strconv"
 	"strings"
 
 	"github.com/Azure/azure-storage-blob-go/azblob"
@@ -2177,10 +2179,30 @@ func toAPIError(ctx context.Context, err error) APIError {
 		}
 	// Add more Gateway SDKs here if any in future.
 	default:
-		apiErr = APIError{
-			Code:           apiErr.Code,
-			Description:    fmt.Sprintf("%s: cause(%v)", apiErr.Description, err),
-			HTTPStatusCode: apiErr.HTTPStatusCode,
+		if errors.Is(err, errMalformedEncoding) {
+			apiErr = APIError{
+				Code:           "BadRequest",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusBadRequest,
+			}
+		} else if errors.Is(err, errChunkTooBig) {
+			apiErr = APIError{
+				Code:           "BadRequest",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusBadRequest,
+			}
+		} else if errors.Is(err, strconv.ErrRange) {
+			apiErr = APIError{
+				Code:           "BadRequest",
+				Description:    err.Error(),
+				HTTPStatusCode: http.StatusBadRequest,
+			}
+		} else {
+			apiErr = APIError{
+				Code:           apiErr.Code,
+				Description:    fmt.Sprintf("%s: cause(%v)", apiErr.Description, err),
+				HTTPStatusCode: apiErr.HTTPStatusCode,
+			}
 		}
 	}
 }
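The default branch above now distinguishes client mistakes from server faults: malformed chunk encodings, oversized chunks, and out-of-range size values all map to 400 Bad Request instead of falling through to 500. A minimal, self-contained sketch of that mapping (apiError and classify are simplified stand-ins, not MinIO's actual types):

package main

import (
	"errors"
	"fmt"
	"net/http"
	"strconv"
)

// Sentinel mirroring the errChunkTooBig added in the streaming reader below.
var errChunkTooBig = errors.New("chunk too big: choose chunk size <= 16MiB")

// apiError is a simplified stand-in for MinIO's APIError.
type apiError struct {
	Code           string
	Description    string
	HTTPStatusCode int
}

// classify maps reader errors the way the new default branch does:
// recognized client errors become 400, everything else stays 500.
func classify(err error) apiError {
	switch {
	case errors.Is(err, errChunkTooBig), errors.Is(err, strconv.ErrRange):
		return apiError{Code: "BadRequest", Description: err.Error(), HTTPStatusCode: http.StatusBadRequest}
	default:
		return apiError{Code: "InternalError", Description: err.Error(), HTTPStatusCode: http.StatusInternalServerError}
	}
}

func main() {
	fmt.Println(classify(errChunkTooBig).HTTPStatusCode) // 400

	// strconv.ParseInt wraps strconv.ErrRange on overflow, so errors.Is matches.
	_, err := strconv.ParseInt("ffffffffffffffffff", 16, 64)
	fmt.Println(classify(err).HTTPStatusCode) // 400
}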
@@ -1113,7 +1113,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
 			dataLen:            1024,
 			chunkSize:          1024,
 			expectedContent:    []byte{},
-			expectedRespStatus: http.StatusInternalServerError,
+			expectedRespStatus: http.StatusBadRequest,
 			accessKey:          credentials.AccessKey,
 			secretKey:          credentials.SecretKey,
 			shouldPass:         false,
@@ -1174,7 +1174,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
 			dataLen:            1024,
 			chunkSize:          1024,
 			expectedContent:    []byte{},
-			expectedRespStatus: http.StatusInternalServerError,
+			expectedRespStatus: http.StatusBadRequest,
 			accessKey:          credentials.AccessKey,
 			secretKey:          credentials.SecretKey,
 			shouldPass:         false,
@@ -3381,7 +3381,7 @@ func testAPIPutObjectPartHandlerStreaming(obj ObjectLayer, instanceType, bucketN
 
 	noAPIErr := APIError{}
 	missingDateHeaderErr := getAPIError(ErrMissingDateHeader)
-	internalErr := getAPIError(ErrInternalError)
+	internalErr := getAPIError(ErrBadRequest)
 	testCases := []struct {
 		fault       Fault
 		expectedErr APIError
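The test expectations above change from 500 to 400 because faulty streaming uploads are now treated as client errors. A stand-alone illustration of the asserted behavior (the handler here is a stub standing in for MinIO's object API, not its real code):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Stub handler: reject the request up front, as the real handler
	// now does when a streamed chunk fails validation.
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "chunk too big: choose chunk size <= 16MiB", http.StatusBadRequest)
	})

	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, httptest.NewRequest(http.MethodPut, "/bucket/object", nil))
	fmt.Println(rec.Code) // 400, where the old code surfaced 500
}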
@@ -145,9 +145,12 @@ const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4Ki
 // lineTooLong is generated as chunk header is bigger than 4KiB.
 var errLineTooLong = errors.New("header line too long")
 
-// Malformed encoding is generated when chunk header is wrongly formed.
+// malformed encoding is generated when chunk header is wrongly formed.
 var errMalformedEncoding = errors.New("malformed chunked encoding")
 
+// chunk is considered too big if its bigger than > 16MiB.
+var errChunkTooBig = errors.New("chunk too big: choose chunk size <= 16MiB")
+
 // newSignV4ChunkedReader returns a new s3ChunkedReader that translates the data read from r
 // out of HTTP "chunked" format before returning it.
 // The s3ChunkedReader returns io.EOF when the final 0-length chunk is read.
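Because errChunkTooBig is a package-level sentinel, toAPIError can recognize it with errors.Is even after intermediate wrapping. A small sketch of that property (the wrapping message is hypothetical):

package main

import (
	"errors"
	"fmt"
)

var errChunkTooBig = errors.New("chunk too big: choose chunk size <= 16MiB")

func main() {
	// Wrapping with %w keeps the sentinel in the error chain...
	err := fmt.Errorf("reading chunk: %w", errChunkTooBig)
	fmt.Println(errors.Is(err, errChunkTooBig)) // true

	// ...whereas %v flattens it to text and the match is lost.
	err = fmt.Errorf("reading chunk: %v", errChunkTooBig)
	fmt.Println(errors.Is(err, errChunkTooBig)) // false
}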
@@ -190,6 +193,22 @@ func (cr *s3ChunkedReader) Close() (err error) {
 	return nil
 }
 
+// Now, we read one chunk from the underlying reader.
+// A chunk has the following format:
+// <chunk-size-as-hex> + ";chunk-signature=" + <signature-as-hex> + "\r\n" + <payload> + "\r\n"
+//
+// First, we read the chunk size but fail if it is larger
+// than 16 MiB. We must not accept arbitrary large chunks.
+// One 16 MiB is a reasonable max limit.
+//
+// Then we read the signature and payload data. We compute the SHA256 checksum
+// of the payload and verify that it matches the expected signature value.
+//
+// The last chunk is *always* 0-sized. So, we must only return io.EOF if we have encountered
+// a chunk with a chunk size = 0. However, this chunk still has a signature and we must
+// verify it.
+const maxChunkSize = 16 << 20 // 16 MiB
+
 // Read - implements `io.Reader`, which transparently decodes
 // the incoming AWS Signature V4 streaming signature.
 func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
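The relocated comment documents the aws-chunked framing the reader enforces; maxChunkSize caps the hex size field before any payload is read. A compact sketch of such a header parse under the 16 MiB cap (parseChunkHeader is a hypothetical helper, not the s3ChunkedReader implementation):

package main

import (
	"bufio"
	"errors"
	"fmt"
	"strings"
)

const maxChunkSize = 16 << 20 // 16 MiB

var (
	errMalformedEncoding = errors.New("malformed chunked encoding")
	errChunkTooBig       = errors.New("chunk too big: choose chunk size <= 16MiB")
)

// parseChunkHeader reads "<size-hex>;chunk-signature=<sig>\r\n" and returns
// the payload size and signature, enforcing the cap digit by digit so the
// accumulated size can never grow far past 16 MiB (and thus cannot overflow).
func parseChunkHeader(r *bufio.Reader) (size int, signature string, err error) {
	line, err := r.ReadString('\n')
	if err != nil {
		return 0, "", err
	}
	sizeHex, sig, ok := strings.Cut(strings.TrimSuffix(line, "\r\n"), ";chunk-signature=")
	if !ok {
		return 0, "", errMalformedEncoding
	}
	for _, c := range []byte(sizeHex) {
		var d int
		switch {
		case '0' <= c && c <= '9':
			d = int(c - '0')
		case 'a' <= c && c <= 'f':
			d = int(c-'a') + 10
		case 'A' <= c && c <= 'F':
			d = int(c-'A') + 10
		default:
			return 0, "", errMalformedEncoding
		}
		size = size<<4 | d
		if size > maxChunkSize {
			return 0, "", errChunkTooBig
		}
	}
	return size, sig, nil
}

func main() {
	r := bufio.NewReader(strings.NewReader("400;chunk-signature=deadbeef\r\n"))
	fmt.Println(parseChunkHeader(r)) // 1024 deadbeef <nil>
}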
@@ -205,21 +224,6 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
 		buf = buf[n:]
 	}
 
-	// Now, we read one chunk from the underlying reader.
-	// A chunk has the following format:
-	// <chunk-size-as-hex> + ";chunk-signature=" + <signature-as-hex> + "\r\n" + <payload> + "\r\n"
-	//
-	// Frist, we read the chunk size but fail if it is larger
-	// than 1 MB. We must not accept arbitrary large chunks.
-	// One 1 MB is a reasonable max limit.
-	//
-	// Then we read the signature and payload data. We compute the SHA256 checksum
-	// of the payload and verify that it matches the expected signature value.
-	//
-	// The last chunk is *always* 0-sized. So, we must only return io.EOF if we have encountered
-	// a chunk with a chunk size = 0. However, this chunk still has a signature and we must
-	// verify it.
-	const MaxSize = 1 << 20 // 1 MB
 	var size int
 	for {
 		b, err := cr.reader.ReadByte()
@@ -249,8 +253,8 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
 			cr.err = errMalformedEncoding
 			return n, cr.err
 		}
-		if size > MaxSize {
-			cr.err = errMalformedEncoding
+		if size > maxChunkSize {
+			cr.err = errChunkTooBig
 			return n, cr.err
 		}
 	}