Mirror of https://github.com/minio/minio.git
fix: reduce using memory and temporary files. (#17206)
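The change drops multipart.Reader.ReadForm, which buffers up to its memory limit and spills the rest of the form into temporary files on disk, and instead walks the POST body part by part, keeping small text fields in memory and streaming the file part directly. The sketch below shows the general shape of that pattern using only the Go standard library; the handler name, the 1 MiB per-field cap, and the io.Discard destination are illustrative assumptions, not MinIO code.

package main

import (
	"errors"
	"fmt"
	"io"
	"net/http"
)

// handlePostForm is a hypothetical handler illustrating the approach taken by
// this commit: iterate the multipart body with MultipartReader instead of
// ReadForm, so text fields stay in memory and the file part is streamed to its
// destination without temporary files.
func handlePostForm(w http.ResponseWriter, r *http.Request) {
	const maxFieldSize = 1 << 20 // illustrative 1 MiB cap per text field

	mr, err := r.MultipartReader()
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	fields := make(map[string]string)
	for {
		part, err := mr.NextPart()
		if errors.Is(err, io.EOF) {
			break // no more parts
		}
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		if part.FileName() == "" {
			// Plain form field: read a bounded amount into memory.
			buf, err := io.ReadAll(io.LimitReader(part, maxFieldSize))
			part.Close()
			if err != nil {
				http.Error(w, err.Error(), http.StatusBadRequest)
				return
			}
			fields[part.FormName()] = string(buf)
			continue
		}
		// File part: stream it directly instead of buffering it to disk.
		// Here it is discarded; a real handler would write it to storage.
		n, err := io.Copy(io.Discard, part)
		part.Close()
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		fmt.Fprintf(w, "fields=%d fileBytes=%d\n", len(fields), n)
		return
	}
	http.Error(w, "file part is missing", http.StatusBadRequest)
}

func main() {
	http.HandleFunc("/upload", handlePostForm)
	http.ListenAndServe(":8080", nil)
}

The PostPolicyBucketHandler hunks below apply the same idea inside MinIO, with stricter accounting (maxParts, maxMemoryBytes, maxFormFieldSize) and S3-specific error responses.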
@@ -22,8 +22,10 @@ import (
 	"context"
 	"encoding/base64"
 	"encoding/xml"
+	"errors"
 	"fmt"
 	"io"
+	"mime/multipart"
 	"net/http"
 	"net/textproto"
 	"net/url"
@@ -912,41 +914,111 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 	reader, err := r.MultipartReader()
 	if err != nil {
 		apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
-		apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
+		apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
 		writeErrorResponse(ctx, w, apiErr, r.URL)
 		return
 	}

-	// Read multipart data and save in memory and in the disk if needed
-	form, err := reader.ReadForm(maxFormMemory)
-	if err != nil {
+	var (
+		fileBody io.ReadCloser
+		fileName string
+	)
+
+	maxParts := 1000
+	// Canonicalize the form values into http.Header.
+	formValues := make(http.Header)
+	for {
+		part, err := reader.NextRawPart()
+		if errors.Is(err, io.EOF) {
+			break
+		}
+		if err != nil {
+			apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
+			apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
+			writeErrorResponse(ctx, w, apiErr, r.URL)
+			return
+		}
+		if maxParts <= 0 {
+			apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
+			apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, multipart.ErrMessageTooLarge)
+			writeErrorResponse(ctx, w, apiErr, r.URL)
+			return
+		}
+		maxParts--
+
+		name := part.FormName()
+		if name == "" {
+			continue
+		}
+
+		fileName = part.FileName()
+
+		// Multiple values for the same key (one map entry, longer slice) are cheaper
+		// than the same number of values for different keys (many map entries), but
+		// using a consistent per-value cost for overhead is simpler.
+		const mapEntryOverhead = 200
+		maxMemoryBytes := 2 * int64(10<<20)
+		maxMemoryBytes -= int64(len(name))
+		maxMemoryBytes -= mapEntryOverhead
+		if maxMemoryBytes < 0 {
+			// We can't actually take this path, since nextPart would already have
+			// rejected the MIME headers for being too large. Check anyway.
+			apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
+			apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, multipart.ErrMessageTooLarge)
+			writeErrorResponse(ctx, w, apiErr, r.URL)
+			return
+		}
+
+		var b bytes.Buffer
+		if fileName == "" {
+			// value, store as string in memory
+			n, err := io.CopyN(&b, part, maxMemoryBytes+1)
+			part.Close()
+			if err != nil && err != io.EOF {
+				apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
+				apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
+				writeErrorResponse(ctx, w, apiErr, r.URL)
+				return
+			}
+			maxMemoryBytes -= n
+			if maxMemoryBytes < 0 {
+				apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
+				apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, multipart.ErrMessageTooLarge)
+				writeErrorResponse(ctx, w, apiErr, r.URL)
+				return
+			}
+			if n > maxFormFieldSize {
+				apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
+				apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, multipart.ErrMessageTooLarge)
+				writeErrorResponse(ctx, w, apiErr, r.URL)
+				return
+			}
+			formValues[http.CanonicalHeaderKey(name)] = append(formValues[http.CanonicalHeaderKey(name)], b.String())
+			continue
+		}
+
+		// In accordance with https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html
+		// The file or text content.
+		// The file or text content must be the last field in the form.
+		// You cannot upload more than one file at a time.
+		fileBody = part // we have found the File part of the request breakout
+		defer part.Close()
+		break
+	}
+
+	if _, ok := formValues["Key"]; !ok {
 		apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
-		apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
+		apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The name of the uploaded key is missing"))
 		writeErrorResponse(ctx, w, apiErr, r.URL)
 		return
 	}

-	// Remove all tmp files created during multipart upload
-	defer form.RemoveAll()
-
-	// Extract all form fields
-	fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(ctx, form)
-	if err != nil {
+	if fileName == "" {
 		apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
-		apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
+		apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The file or text content is missing"))
 		writeErrorResponse(ctx, w, apiErr, r.URL)
 		return
 	}

 	// Check if file is provided, error out otherwise.
 	if fileBody == nil {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPOSTFileRequired), r.URL)
 		return
 	}

 	// Close multipart file
 	defer fileBody.Close()

 	formValues.Set("Bucket", bucket)
 	if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") {
 		// S3 feature to replace ${filename} found in Key form field
@@ -995,6 +1067,13 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		return
 	}

+	hashReader, err := hash.NewReader(fileBody, -1, "", "", -1)
+	if err != nil {
+		logger.LogIf(ctx, err)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
+		return
+	}
+
 	// Handle policy if it is set.
 	if len(policyBytes) > 0 {
 		postPolicyForm, err := parsePostPolicyForm(bytes.NewReader(policyBytes))
@@ -1015,15 +1094,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		// should not exceed the maximum single Put size (5 GiB)
 		lengthRange := postPolicyForm.Conditions.ContentLengthRange
 		if lengthRange.Valid {
-			if fileSize < lengthRange.Min {
-				writeErrorResponse(ctx, w, toAPIError(ctx, errDataTooSmall), r.URL)
-				return
-			}
-
-			if fileSize > lengthRange.Max || isMaxObjectSize(fileSize) {
-				writeErrorResponse(ctx, w, toAPIError(ctx, errDataTooLarge), r.URL)
-				return
-			}
+			hashReader.SetExpectedMin(lengthRange.Min)
+			hashReader.SetExpectedMax(lengthRange.Max)
 		}
 	}

@@ -1035,12 +1107,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		return
 	}

-	hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize)
-	if err != nil {
-		logger.LogIf(ctx, err)
-		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
-		return
-	}
 	rawReader := hashReader
 	pReader := NewPutObjReader(rawReader)
 	var objectEncryptionKey crypto.ObjectKey
@@ -1095,9 +1161,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 			return
 		}
-		info := ObjectInfo{Size: fileSize}
-		// do not try to verify encrypted content
-		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize)
+		// do not try to verify encrypted content/
+		hashReader, err = hash.NewReader(reader, -1, "", "", -1)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 			return