2016-04-28 23:01:11 -04:00
|
|
|
/*
|
2017-01-18 15:24:34 -05:00
|
|
|
* Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
|
2016-04-28 23:01:11 -04:00
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
2016-08-18 19:23:42 -04:00
|
|
|
package cmd
|
2016-04-28 23:01:11 -04:00
|
|
|
|
|
|
|
import (
|
2018-08-01 17:19:11 -04:00
|
|
|
"bytes"
|
2018-04-05 18:04:40 -04:00
|
|
|
"context"
|
2016-04-28 23:01:11 -04:00
|
|
|
"io"
|
2018-08-01 17:19:11 -04:00
|
|
|
"io/ioutil"
|
2016-07-24 01:51:12 -04:00
|
|
|
"mime/multipart"
|
2017-11-14 19:56:24 -05:00
|
|
|
"net"
|
2016-07-19 00:20:17 -04:00
|
|
|
"net/http"
|
2017-03-13 17:41:13 -04:00
|
|
|
"net/url"
|
2016-07-22 23:31:45 -04:00
|
|
|
"strings"
|
2017-10-24 22:04:51 -04:00
|
|
|
|
2018-04-05 18:04:40 -04:00
|
|
|
"github.com/minio/minio/cmd/logger"
|
2018-07-02 17:40:18 -04:00
|
|
|
"github.com/minio/minio/pkg/handlers"
|
2017-10-24 22:04:51 -04:00
|
|
|
httptracer "github.com/minio/minio/pkg/handlers"
|
2016-04-28 23:01:11 -04:00
|
|
|
)
|
|
|
|
|
2017-04-03 17:50:09 -04:00
|
|
|
// Parses location constraint from the incoming reader.
|
|
|
|
func parseLocationConstraint(r *http.Request) (location string, s3Error APIErrorCode) {
|
2016-07-19 00:20:17 -04:00
|
|
|
// If the request has no body with content-length set to 0,
|
|
|
|
// we do not have to validate location constraint. Bucket will
|
|
|
|
// be created at default region.
|
|
|
|
locationConstraint := createBucketLocationConfiguration{}
|
2016-09-29 18:51:00 -04:00
|
|
|
err := xmlDecoder(r.Body, &locationConstraint, r.ContentLength)
|
2017-04-03 17:50:09 -04:00
|
|
|
if err != nil && err != io.EOF {
|
2018-04-05 18:04:40 -04:00
|
|
|
logger.LogIf(context.Background(), err)
|
2017-04-03 17:50:09 -04:00
|
|
|
// Treat all other failures as XML parsing errors.
|
|
|
|
return "", ErrMalformedXML
|
|
|
|
} // else for both err as nil or io.EOF
|
|
|
|
location = locationConstraint.Location
|
|
|
|
if location == "" {
|
2017-11-29 16:12:47 -05:00
|
|
|
location = globalServerConfig.GetRegion()
|
2016-04-28 23:01:11 -04:00
|
|
|
}
|
2017-04-03 17:50:09 -04:00
|
|
|
return location, ErrNone
|
|
|
|
}
|
|
|
|
|
|
|
|
// Validates input location is same as configured region
|
|
|
|
// of Minio server.
|
|
|
|
func isValidLocation(location string) bool {
|
2017-11-29 16:12:47 -05:00
|
|
|
return globalServerConfig.GetRegion() == "" || globalServerConfig.GetRegion() == location
|
2016-04-28 23:01:11 -04:00
|
|
|
}
|
2016-07-22 23:31:45 -04:00
|
|
|
|
|
|
|
// Supported headers that needs to be extracted.
// Keys are lowercase; lookups canonicalize them as needed.
var supportedHeaders = []string{
	"content-type",
	"cache-control",
	"content-language",
	"content-encoding",
	"content-disposition",
	// Storage class header (x-amz-storage-class).
	amzStorageClass,
	"expires",
	// Add more supported headers here.
}
|
|
|
|
|
2016-12-26 19:29:26 -05:00
|
|
|
// isMetadataDirectiveValid - check if metadata-directive is valid.
|
|
|
|
func isMetadataDirectiveValid(h http.Header) bool {
|
|
|
|
_, ok := h[http.CanonicalHeaderKey("X-Amz-Metadata-Directive")]
|
|
|
|
if ok {
|
|
|
|
// Check atleast set metadata-directive is valid.
|
|
|
|
return (isMetadataCopy(h) || isMetadataReplace(h))
|
|
|
|
}
|
|
|
|
// By default if x-amz-metadata-directive is not we
|
|
|
|
// treat it as 'COPY' this function returns true.
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check if the metadata COPY is requested.
|
|
|
|
func isMetadataCopy(h http.Header) bool {
|
|
|
|
return h.Get("X-Amz-Metadata-Directive") == "COPY"
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check if the metadata REPLACE is requested.
|
|
|
|
func isMetadataReplace(h http.Header) bool {
|
|
|
|
return h.Get("X-Amz-Metadata-Directive") == "REPLACE"
|
|
|
|
}
|
|
|
|
|
|
|
|
// Splits an incoming path into bucket and object components.
func path2BucketAndObject(path string) (bucket, object string) {
	// Drop any leading '/' before splitting.
	trimmed := strings.TrimPrefix(path, "/")
	// Everything before the first '/' is the bucket, the rest is the object.
	if idx := strings.Index(trimmed, "/"); idx >= 0 {
		return trimmed[:idx], trimmed[idx+1:]
	}
	// No separator: the whole path is the bucket name.
	return trimmed, ""
}
|
|
|
|
|
2017-08-22 19:53:35 -04:00
|
|
|
// userMetadataKeyPrefixes contains the prefixes of used-defined metadata keys.
// All values stored with a key starting with one of the following prefixes
// must be extracted from the header.
// Matching is case-insensitive (see extractMetadataFromMap).
var userMetadataKeyPrefixes = []string{
	"X-Amz-Meta-",
	"X-Minio-Meta-",
}
|
|
|
|
|
2018-07-10 23:27:10 -04:00
|
|
|
// extractMetadata extracts metadata from HTTP header and HTTP queryString.
|
|
|
|
func extractMetadata(ctx context.Context, r *http.Request) (metadata map[string]string, err error) {
|
|
|
|
query := r.URL.Query()
|
|
|
|
header := r.Header
|
|
|
|
metadata = make(map[string]string)
|
|
|
|
// Extract all query values.
|
|
|
|
err = extractMetadataFromMap(ctx, query, metadata)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Extract all header values.
|
|
|
|
err = extractMetadataFromMap(ctx, header, metadata)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2017-03-13 17:41:13 -04:00
|
|
|
}
|
2017-12-22 06:28:13 -05:00
|
|
|
|
2018-07-10 23:27:10 -04:00
|
|
|
// Success.
|
|
|
|
return metadata, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// extractMetadata extracts metadata from map values.
|
|
|
|
func extractMetadataFromMap(ctx context.Context, v map[string][]string, m map[string]string) error {
|
|
|
|
if v == nil {
|
|
|
|
logger.LogIf(ctx, errInvalidArgument)
|
|
|
|
return errInvalidArgument
|
|
|
|
}
|
2017-12-22 06:28:13 -05:00
|
|
|
// Save all supported headers.
|
2016-07-22 23:31:45 -04:00
|
|
|
for _, supportedHeader := range supportedHeaders {
|
2018-07-10 23:27:10 -04:00
|
|
|
if value, ok := v[http.CanonicalHeaderKey(supportedHeader)]; ok {
|
|
|
|
m[supportedHeader] = value[0]
|
|
|
|
} else if value, ok := v[supportedHeader]; ok {
|
|
|
|
m[supportedHeader] = value[0]
|
2016-07-22 23:31:45 -04:00
|
|
|
}
|
|
|
|
}
|
2018-07-10 23:27:10 -04:00
|
|
|
for key := range v {
|
2017-08-22 19:53:35 -04:00
|
|
|
for _, prefix := range userMetadataKeyPrefixes {
|
2018-07-10 23:27:10 -04:00
|
|
|
if !strings.HasPrefix(strings.ToLower(key), strings.ToLower(prefix)) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
value, ok := v[key]
|
|
|
|
if ok {
|
2018-07-12 12:40:14 -04:00
|
|
|
m[key] = strings.Join(value, ",")
|
2017-08-22 19:53:35 -04:00
|
|
|
break
|
|
|
|
}
|
2016-07-22 23:31:45 -04:00
|
|
|
}
|
|
|
|
}
|
2018-07-10 23:27:10 -04:00
|
|
|
return nil
|
2016-12-19 19:14:04 -05:00
|
|
|
}
|
|
|
|
|
2017-03-13 17:41:13 -04:00
|
|
|
// The Query string for the redirect URL the client is
|
|
|
|
// redirected on successful upload.
|
|
|
|
func getRedirectPostRawQuery(objInfo ObjectInfo) string {
|
|
|
|
redirectValues := make(url.Values)
|
|
|
|
redirectValues.Set("bucket", objInfo.Bucket)
|
|
|
|
redirectValues.Set("key", objInfo.Name)
|
2017-05-14 15:05:51 -04:00
|
|
|
redirectValues.Set("etag", "\""+objInfo.ETag+"\"")
|
2017-03-13 17:41:13 -04:00
|
|
|
return redirectValues.Encode()
|
|
|
|
}
|
|
|
|
|
2018-11-02 21:40:08 -04:00
|
|
|
// Returns access key in the request Authorization header.
|
|
|
|
func getReqAccessKey(r *http.Request, region string) (accessKey string) {
|
2018-11-07 09:40:03 -05:00
|
|
|
cred, _, _ := getReqAccessKeyV4(r, region)
|
|
|
|
if cred.AccessKey == "" {
|
|
|
|
cred, _, _ = getReqAccessKeyV2(r)
|
2018-11-02 21:40:08 -04:00
|
|
|
}
|
2018-11-07 09:40:03 -05:00
|
|
|
return cred.AccessKey
|
2018-11-02 21:40:08 -04:00
|
|
|
}
|
|
|
|
|
2017-03-13 17:41:13 -04:00
|
|
|
// Extract request params to be sent with event notifiation.
|
|
|
|
func extractReqParams(r *http.Request) map[string]string {
|
|
|
|
if r == nil {
|
|
|
|
return nil
|
2016-12-19 19:14:04 -05:00
|
|
|
}
|
2017-03-13 17:41:13 -04:00
|
|
|
|
2018-11-02 21:40:08 -04:00
|
|
|
region := globalServerConfig.GetRegion()
|
2017-03-13 17:41:13 -04:00
|
|
|
// Success.
|
|
|
|
return map[string]string{
|
2018-11-02 21:40:08 -04:00
|
|
|
"region": region,
|
|
|
|
"accessKey": getReqAccessKey(r, region),
|
2018-07-02 17:40:18 -04:00
|
|
|
"sourceIPAddress": handlers.GetSourceIP(r),
|
2017-03-13 17:41:13 -04:00
|
|
|
// Add more fields here.
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-23 17:40:54 -04:00
|
|
|
// Extract response elements to be sent with event notifiation.
|
|
|
|
func extractRespElements(w http.ResponseWriter) map[string]string {
|
|
|
|
|
|
|
|
return map[string]string{
|
2018-11-02 21:40:08 -04:00
|
|
|
"requestId": w.Header().Get(responseRequestIDKey),
|
2018-08-23 17:40:54 -04:00
|
|
|
"content-length": w.Header().Get("Content-Length"),
|
|
|
|
// Add more fields here.
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-27 20:02:04 -04:00
|
|
|
// Trims away `aws-chunked` from the content-encoding header if present.
|
|
|
|
// Streaming signature clients can have custom content-encoding such as
|
|
|
|
// `aws-chunked,gzip` here we need to only save `gzip`.
|
|
|
|
// For more refer http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
|
|
|
|
func trimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string) {
|
|
|
|
if contentEnc == "" {
|
|
|
|
return contentEnc
|
|
|
|
}
|
|
|
|
var newEncs []string
|
|
|
|
for _, enc := range strings.Split(contentEnc, ",") {
|
|
|
|
if enc != streamingContentEncoding {
|
|
|
|
newEncs = append(newEncs, enc)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return strings.Join(newEncs, ",")
|
|
|
|
}
|
|
|
|
|
2017-03-13 17:41:13 -04:00
|
|
|
// Validate form field size for s3 specification requirement.
|
2018-04-05 18:04:40 -04:00
|
|
|
func validateFormFieldSize(ctx context.Context, formValues http.Header) error {
|
2017-03-13 17:41:13 -04:00
|
|
|
// Iterate over form values
|
|
|
|
for k := range formValues {
|
|
|
|
// Check if value's field exceeds S3 limit
|
|
|
|
if int64(len(formValues.Get(k))) > maxFormFieldSize {
|
2018-04-05 18:04:40 -04:00
|
|
|
logger.LogIf(ctx, errSizeUnexpected)
|
|
|
|
return errSizeUnexpected
|
2016-12-19 19:14:04 -05:00
|
|
|
}
|
|
|
|
}
|
2017-03-13 17:41:13 -04:00
|
|
|
|
|
|
|
// Success.
|
|
|
|
return nil
|
2016-07-22 23:31:45 -04:00
|
|
|
}
|
2016-07-24 01:51:12 -04:00
|
|
|
|
2016-07-28 15:02:22 -04:00
|
|
|
// Extract form fields and file data from a HTTP POST Policy
|
2018-04-05 18:04:40 -04:00
|
|
|
func extractPostPolicyFormValues(ctx context.Context, form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) {
|
2016-07-24 01:51:12 -04:00
|
|
|
/// HTML Form values
|
2016-07-28 15:02:22 -04:00
|
|
|
fileName = ""
|
2017-02-02 13:45:00 -05:00
|
|
|
|
2017-03-13 17:41:13 -04:00
|
|
|
// Canonicalize the form values into http.Header.
|
|
|
|
formValues = make(http.Header)
|
2017-02-02 13:45:00 -05:00
|
|
|
for k, v := range form.Value {
|
2017-03-13 17:41:13 -04:00
|
|
|
formValues[http.CanonicalHeaderKey(k)] = v
|
|
|
|
}
|
|
|
|
|
|
|
|
// Validate form values.
|
2018-04-05 18:04:40 -04:00
|
|
|
if err = validateFormFieldSize(ctx, formValues); err != nil {
|
2017-03-13 17:41:13 -04:00
|
|
|
return nil, "", 0, nil, err
|
2017-02-02 13:45:00 -05:00
|
|
|
}
|
|
|
|
|
2018-08-01 17:19:11 -04:00
|
|
|
// this means that filename="" was not specified for file key and Go has
|
|
|
|
// an ugly way of handling this situation. Refer here
|
|
|
|
// https://golang.org/src/mime/multipart/formdata.go#L61
|
|
|
|
if len(form.File) == 0 {
|
|
|
|
var b = &bytes.Buffer{}
|
|
|
|
for _, v := range formValues["File"] {
|
|
|
|
b.WriteString(v)
|
|
|
|
}
|
|
|
|
fileSize = int64(b.Len())
|
|
|
|
filePart = ioutil.NopCloser(b)
|
|
|
|
return filePart, fileName, fileSize, formValues, nil
|
|
|
|
}
|
|
|
|
|
2017-02-02 13:45:00 -05:00
|
|
|
// Iterator until we find a valid File field and break
|
|
|
|
for k, v := range form.File {
|
|
|
|
canonicalFormName := http.CanonicalHeaderKey(k)
|
|
|
|
if canonicalFormName == "File" {
|
|
|
|
if len(v) == 0 {
|
2018-04-05 18:04:40 -04:00
|
|
|
logger.LogIf(ctx, errInvalidArgument)
|
|
|
|
return nil, "", 0, nil, errInvalidArgument
|
2016-07-24 01:51:12 -04:00
|
|
|
}
|
2017-02-02 13:45:00 -05:00
|
|
|
// Fetch fileHeader which has the uploaded file information
|
|
|
|
fileHeader := v[0]
|
|
|
|
// Set filename
|
|
|
|
fileName = fileHeader.Filename
|
|
|
|
// Open the uploaded part
|
|
|
|
filePart, err = fileHeader.Open()
|
2017-02-09 15:37:32 -05:00
|
|
|
if err != nil {
|
2018-04-05 18:04:40 -04:00
|
|
|
logger.LogIf(ctx, err)
|
|
|
|
return nil, "", 0, nil, err
|
2017-02-09 15:37:32 -05:00
|
|
|
}
|
2017-02-02 13:45:00 -05:00
|
|
|
// Compute file size
|
|
|
|
fileSize, err = filePart.(io.Seeker).Seek(0, 2)
|
|
|
|
if err != nil {
|
2018-04-05 18:04:40 -04:00
|
|
|
logger.LogIf(ctx, err)
|
|
|
|
return nil, "", 0, nil, err
|
2017-02-02 13:45:00 -05:00
|
|
|
}
|
|
|
|
// Reset Seek to the beginning
|
|
|
|
_, err = filePart.(io.Seeker).Seek(0, 0)
|
|
|
|
if err != nil {
|
2018-04-05 18:04:40 -04:00
|
|
|
logger.LogIf(ctx, err)
|
|
|
|
return nil, "", 0, nil, err
|
2017-02-02 13:45:00 -05:00
|
|
|
}
|
|
|
|
// File found and ready for reading
|
|
|
|
break
|
2016-07-24 01:51:12 -04:00
|
|
|
}
|
|
|
|
}
|
2017-02-02 13:45:00 -05:00
|
|
|
return filePart, fileName, fileSize, formValues, nil
|
2016-07-24 01:51:12 -04:00
|
|
|
}
|
2017-10-24 22:04:51 -04:00
|
|
|
|
|
|
|
// Log headers and body.
|
|
|
|
func httpTraceAll(f http.HandlerFunc) http.HandlerFunc {
|
2018-01-05 14:24:31 -05:00
|
|
|
if globalHTTPTraceFile == nil {
|
2017-10-24 22:04:51 -04:00
|
|
|
return f
|
|
|
|
}
|
2018-01-05 14:24:31 -05:00
|
|
|
return httptracer.TraceReqHandlerFunc(f, globalHTTPTraceFile, true)
|
2017-10-24 22:04:51 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Log only the headers.
|
|
|
|
func httpTraceHdrs(f http.HandlerFunc) http.HandlerFunc {
|
2018-01-05 14:24:31 -05:00
|
|
|
if globalHTTPTraceFile == nil {
|
2017-10-24 22:04:51 -04:00
|
|
|
return f
|
|
|
|
}
|
2018-01-05 14:24:31 -05:00
|
|
|
return httptracer.TraceReqHandlerFunc(f, globalHTTPTraceFile, false)
|
2017-10-24 22:04:51 -04:00
|
|
|
}
|
2017-11-14 19:56:24 -05:00
|
|
|
|
|
|
|
// Returns "/bucketName/objectName" for path-style or virtual-host-style requests.
|
|
|
|
func getResource(path string, host string, domain string) (string, error) {
|
|
|
|
if domain == "" {
|
|
|
|
return path, nil
|
|
|
|
}
|
|
|
|
// If virtual-host-style is enabled construct the "resource" properly.
|
|
|
|
if strings.Contains(host, ":") {
|
|
|
|
// In bucket.mydomain.com:9000, strip out :9000
|
|
|
|
var err error
|
|
|
|
if host, _, err = net.SplitHostPort(host); err != nil {
|
2018-04-05 18:04:40 -04:00
|
|
|
reqInfo := (&logger.ReqInfo{}).AppendTags("host", host)
|
|
|
|
reqInfo.AppendTags("path", path)
|
|
|
|
ctx := logger.SetReqInfo(context.Background(), reqInfo)
|
|
|
|
logger.LogIf(ctx, err)
|
2017-11-14 19:56:24 -05:00
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !strings.HasSuffix(host, "."+domain) {
|
|
|
|
return path, nil
|
|
|
|
}
|
|
|
|
bucket := strings.TrimSuffix(host, "."+domain)
|
|
|
|
return slashSeparator + pathJoin(bucket, path), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// If none of the http routes match respond with MethodNotAllowed
|
|
|
|
func notFoundHandler(w http.ResponseWriter, r *http.Request) {
|
2018-11-26 15:15:12 -05:00
|
|
|
writeErrorResponse(w, ErrMethodNotAllowed, r.URL, guessIsBrowserReq(r))
|
2017-11-14 19:56:24 -05:00
|
|
|
return
|
|
|
|
}
|