/*
 * Minio Cloud Storage, (C) 2015-2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
    "context"
    "crypto/hmac"
    "encoding/binary"
    "encoding/hex"
    "encoding/xml"
    "io"
    goioutil "io/ioutil"
    "net"
    "net/http"
    "net/url"
    "sort"
    "strconv"
    "strings"

    snappy "github.com/golang/snappy"
    "github.com/gorilla/mux"
    miniogo "github.com/minio/minio-go"
    "github.com/minio/minio/cmd/crypto"
    "github.com/minio/minio/cmd/logger"
    "github.com/minio/minio/pkg/dns"
    "github.com/minio/minio/pkg/event"
    "github.com/minio/minio/pkg/handlers"
    "github.com/minio/minio/pkg/hash"
    "github.com/minio/minio/pkg/ioutil"
    "github.com/minio/minio/pkg/policy"
    "github.com/minio/minio/pkg/s3select"
    sha256 "github.com/minio/sha256-simd"
    "github.com/minio/sio"
)

// supportedHeadGetReqParams - supported request parameters for GET and HEAD presigned request.
var supportedHeadGetReqParams = map[string]string{
    "response-expires":             "Expires",
    "response-content-type":        "Content-Type",
    "response-cache-control":       "Cache-Control",
    "response-content-encoding":    "Content-Encoding",
    "response-content-language":    "Content-Language",
    "response-content-disposition": "Content-Disposition",
}

const (
    compressionAlgorithmV1 = "golang/snappy/LZ77"
)
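
// Objects compressed with this algorithm are marked by storing this value
// under ReservedMetadataPrefix+"compression", along with the uncompressed
// size under ReservedMetadataPrefix+"actual-size" (see CopyObjectHandler below).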

// setHeadGetRespHeaders - set any requested parameters as response headers.
func setHeadGetRespHeaders(w http.ResponseWriter, reqParams url.Values) {
    for k, v := range reqParams {
        if header, ok := supportedHeadGetReqParams[k]; ok {
            w.Header()[header] = v
        }
    }
}
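
// For example, a presigned GET of the form (bucket and object names are
// illustrative only):
//
//  GET /mybucket/myobject?response-content-type=application/json&response-content-disposition=attachment
//
// makes the response carry "Content-Type: application/json" and
// "Content-Disposition: attachment", per the supportedHeadGetReqParams map above.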

// SelectObjectContentHandler - GET Object?select
// ----------
// This implementation of the GET operation retrieves object content based
// on an SQL expression. In the request, along with the sql expression, you must
// also specify a data serialization format (JSON, CSV) of the object.
func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "SelectObject")

    defer logger.AuditLog(ctx, r)

    // Fetch object stat info.
    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(w, ErrServerNotInitialized, r.URL)
        return
    }
    if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
        writeErrorResponse(w, ErrNotImplemented, r.URL)
        return
    }
    vars := mux.Vars(r)
    bucket := vars["bucket"]
    object := vars["object"]

    // Check for auth type to return S3 compatible error.
    // Use the auth type to return the correct error (NoSuchKey vs AccessDenied).
    if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
        if getRequestAuthType(r) == authTypeAnonymous {
            // As per "Permission" section in
            // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
            // If the object you request does not exist,
            // the error Amazon S3 returns depends on
            // whether you also have the s3:ListBucket
            // permission.
            // * If you have the s3:ListBucket permission
            // on the bucket, Amazon S3 will return an
            // HTTP status code 404 ("no such key")
            // error.
            // * if you don’t have the s3:ListBucket
            // permission, Amazon S3 will return an HTTP
            // status code 403 ("access denied") error.
            if globalPolicySys.IsAllowed(policy.Args{
                Action:          policy.ListBucketAction,
                BucketName:      bucket,
                ConditionValues: getConditionValues(r, ""),
                IsOwner:         false,
            }) {
                getObjectInfo := objectAPI.GetObjectInfo
                if api.CacheAPI() != nil {
                    getObjectInfo = api.CacheAPI().GetObjectInfo
                }

                _, err := getObjectInfo(ctx, bucket, object, ObjectOptions{})
                if toAPIErrorCode(err) == ErrNoSuchKey {
                    s3Error = ErrNoSuchKey
                }
            }
        }
        writeErrorResponse(w, s3Error, r.URL)
        return
    }

    // Get request range.
    rangeHeader := r.Header.Get("Range")
    if rangeHeader != "" {
        writeErrorResponse(w, ErrUnsupportedRangeHeader, r.URL)
        return
    }

    if r.ContentLength <= 0 {
        writeErrorResponse(w, ErrEmptyRequestBody, r.URL)
        return
    }
    var selectReq s3select.ObjectSelectRequest
    if err := xmlDecoder(r.Body, &selectReq, r.ContentLength); err != nil {
        writeErrorResponse(w, ErrMalformedXML, r.URL)
        return
    }

    if !strings.EqualFold(string(selectReq.ExpressionType), "SQL") {
        writeErrorResponse(w, ErrInvalidExpressionType, r.URL)
        return
    }
    if len(selectReq.Expression) >= s3select.MaxExpressionLength {
        writeErrorResponse(w, ErrExpressionTooLong, r.URL)
        return
    }

    getObjectNInfo := objectAPI.GetObjectNInfo
    if api.CacheAPI() != nil {
        getObjectNInfo = api.CacheAPI().GetObjectNInfo
    }

    gr, err := getObjectNInfo(ctx, bucket, object, nil, r.Header, readLock, ObjectOptions{})
    if err != nil {
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        return
    }
    defer gr.Close()

    objInfo := gr.ObjInfo

    if selectReq.InputSerialization.CompressionType == s3select.SelectCompressionGZIP {
        if !strings.Contains(objInfo.ContentType, "gzip") {
            writeErrorResponse(w, ErrInvalidDataSource, r.URL)
            return
        }
    }
    if selectReq.InputSerialization.CompressionType == s3select.SelectCompressionBZIP {
        if !strings.Contains(objInfo.ContentType, "bzip") {
            writeErrorResponse(w, ErrInvalidDataSource, r.URL)
            return
        }
    }
    if selectReq.InputSerialization.CompressionType == "" {
        selectReq.InputSerialization.CompressionType = s3select.SelectCompressionNONE
        if !strings.Contains(objInfo.ContentType, "text/csv") && !strings.Contains(objInfo.ContentType, "application/json") {
            writeErrorResponse(w, ErrInvalidDataSource, r.URL)
            return
        }
    }
    if !strings.EqualFold(string(selectReq.ExpressionType), "SQL") {
        writeErrorResponse(w, ErrInvalidExpressionType, r.URL)
        return
    }
    if len(selectReq.Expression) >= s3select.MaxExpressionLength {
        writeErrorResponse(w, ErrExpressionTooLong, r.URL)
        return
    }
    if selectReq.InputSerialization.CSV == nil && selectReq.InputSerialization.JSON == nil {
        writeErrorResponse(w, ErrInvalidRequestParameter, r.URL)
        return
    }
    if selectReq.OutputSerialization.CSV == nil && selectReq.OutputSerialization.JSON == nil {
        writeErrorResponse(w, ErrInvalidRequestParameter, r.URL)
        return
    }

    if selectReq.InputSerialization.CSV != nil {
        if selectReq.InputSerialization.CSV.FileHeaderInfo != s3select.CSVFileHeaderInfoUse &&
            selectReq.InputSerialization.CSV.FileHeaderInfo != s3select.CSVFileHeaderInfoNone &&
            selectReq.InputSerialization.CSV.FileHeaderInfo != s3select.CSVFileHeaderInfoIgnore &&
            selectReq.InputSerialization.CSV.FileHeaderInfo != "" {
            writeErrorResponse(w, ErrInvalidFileHeaderInfo, r.URL)
            return
        }
        if selectReq.OutputSerialization.CSV.QuoteFields != s3select.CSVQuoteFieldsAlways &&
            selectReq.OutputSerialization.CSV.QuoteFields != s3select.CSVQuoteFieldsAsNeeded &&
            selectReq.OutputSerialization.CSV.QuoteFields != "" {
            writeErrorResponse(w, ErrInvalidQuoteFields, r.URL)
            return
        }
        if len(selectReq.InputSerialization.CSV.RecordDelimiter) > 2 {
            writeErrorResponse(w, ErrInvalidRequestParameter, r.URL)
            return
        }
    }
    if selectReq.InputSerialization.JSON != nil {
        if selectReq.InputSerialization.JSON.Type != s3select.JSONTypeDocument &&
            selectReq.InputSerialization.JSON.Type != s3select.JSONLinesType &&
            selectReq.InputSerialization.JSON.Type != "" {
            writeErrorResponse(w, ErrInvalidJSONType, r.URL)
            return
        }
    }

    // Set encryption response headers
    if objectAPI.IsEncryptionSupported() {
        if crypto.IsEncrypted(objInfo.UserDefined) {
            switch {
            case crypto.S3.IsEncrypted(objInfo.UserDefined):
                w.Header().Set(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
            case crypto.SSEC.IsEncrypted(objInfo.UserDefined):
                w.Header().Set(crypto.SSECAlgorithm, r.Header.Get(crypto.SSECAlgorithm))
                w.Header().Set(crypto.SSECKeyMD5, r.Header.Get(crypto.SSECKeyMD5))
            }
        }
    }

    s3s, err := s3select.New(gr, objInfo.Size, selectReq)
    if err != nil {
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        return
    }

    // Parses the select query and checks for an error
    _, _, _, _, _, _, err = s3select.ParseSelect(s3s)
    if err != nil {
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        return
    }

    // Executes the query on data-set
    if err = s3select.Execute(w, s3s); err != nil {
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        return
    }

    for k, v := range objInfo.UserDefined {
        logger.GetReqInfo(ctx).SetTags(k, v)
    }

    logger.GetReqInfo(ctx).SetTags("etag", objInfo.ETag)
}
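
// An illustrative (hypothetical) request body for the handler above supplies an
// SQL expression such as "SELECT * FROM S3Object s WHERE s.age > 25" with
// ExpressionType "SQL", plus InputSerialization (CSV or JSON, optionally
// gzip/bzip compressed) and OutputSerialization blocks - exactly the fields
// validated above before the query is parsed and executed by the s3select package.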

// GetObjectHandler - GET Object
// ----------
// This implementation of the GET operation retrieves object. To use GET,
// you must have READ access to the object.
func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "GetObject")

    defer logger.AuditLog(ctx, r)

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(w, ErrServerNotInitialized, r.URL)
        return
    }
    if crypto.S3.IsRequested(r.Header) || crypto.S3KMS.IsRequested(r.Header) { // If SSE-S3 or SSE-KMS present -> AWS fails with undefined error
        writeErrorResponse(w, ErrBadRequest, r.URL)
        return
    }

    vars := mux.Vars(r)
    bucket := vars["bucket"]
    object := vars["object"]
    var opts ObjectOptions

    // Check for auth type to return S3 compatible error.
    // Use the auth type to return the correct error (NoSuchKey vs AccessDenied).
    if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
        if getRequestAuthType(r) == authTypeAnonymous {
            // As per "Permission" section in
            // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
            // If the object you request does not exist,
            // the error Amazon S3 returns depends on
            // whether you also have the s3:ListBucket
            // permission.
            // * If you have the s3:ListBucket permission
            // on the bucket, Amazon S3 will return an
            // HTTP status code 404 ("no such key")
            // error.
            // * if you don’t have the s3:ListBucket
            // permission, Amazon S3 will return an HTTP
            // status code 403 ("access denied") error.
            if globalPolicySys.IsAllowed(policy.Args{
                Action:          policy.ListBucketAction,
                BucketName:      bucket,
                ConditionValues: getConditionValues(r, ""),
                IsOwner:         false,
            }) {
                getObjectInfo := objectAPI.GetObjectInfo
                if api.CacheAPI() != nil {
                    getObjectInfo = api.CacheAPI().GetObjectInfo
                }

                _, err := getObjectInfo(ctx, bucket, object, opts)
                if toAPIErrorCode(err) == ErrNoSuchKey {
                    s3Error = ErrNoSuchKey
                }
            }
        }
        writeErrorResponse(w, s3Error, r.URL)
        return
    }

    getObjectNInfo := objectAPI.GetObjectNInfo
    if api.CacheAPI() != nil {
        getObjectNInfo = api.CacheAPI().GetObjectNInfo
    }

    // Get request range.
    var rs *HTTPRangeSpec
    rangeHeader := r.Header.Get("Range")
    if rangeHeader != "" {
        var err error
        if rs, err = parseRequestRangeSpec(rangeHeader); err != nil {
            // Handle only errInvalidRange. Ignore other
            // parse error and treat it as regular Get
            // request like Amazon S3.
            if err == errInvalidRange {
                writeErrorResponse(w, ErrInvalidRange, r.URL)
                return
            }

            logger.LogIf(ctx, err)
        }
    }

    gr, err := getObjectNInfo(ctx, bucket, object, rs, r.Header, readLock, opts)
    if err != nil {
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        return
    }
    defer gr.Close()
    objInfo := gr.ObjInfo

    if objectAPI.IsEncryptionSupported() {
        if _, err = DecryptObjectInfo(objInfo, r.Header); err != nil {
            writeErrorResponse(w, toAPIErrorCode(err), r.URL)
            return
        }
    }

    // Validate pre-conditions if any.
    if checkPreconditions(w, r, objInfo) {
        return
    }

    // Set encryption response headers
    if objectAPI.IsEncryptionSupported() {
        if crypto.IsEncrypted(objInfo.UserDefined) {
            switch {
            case crypto.S3.IsEncrypted(objInfo.UserDefined):
                w.Header().Set(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
            case crypto.SSEC.IsEncrypted(objInfo.UserDefined):
                w.Header().Set(crypto.SSECAlgorithm, r.Header.Get(crypto.SSECAlgorithm))
                w.Header().Set(crypto.SSECKeyMD5, r.Header.Get(crypto.SSECKeyMD5))
            }
        }
    }

    if hErr := setObjectHeaders(w, objInfo, rs); hErr != nil {
        writeErrorResponse(w, toAPIErrorCode(hErr), r.URL)
        return
    }

    setHeadGetRespHeaders(w, r.URL.Query())

    statusCodeWritten := false
    httpWriter := ioutil.WriteOnClose(w)
    if rs != nil {
        statusCodeWritten = true
        w.WriteHeader(http.StatusPartialContent)
    }

    // Write object content to response body
    if _, err = io.Copy(httpWriter, gr); err != nil {
        if !httpWriter.HasWritten() && !statusCodeWritten { // write error response only if no data or headers have been written to client yet
            writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        }
        return
    }

    if err = httpWriter.Close(); err != nil {
        if !httpWriter.HasWritten() && !statusCodeWritten { // write error response only if no data or headers have been written to client yet
            writeErrorResponse(w, toAPIErrorCode(err), r.URL)
            return
        }
    }

    // Get host and port from Request.RemoteAddr.
    host, port, err := net.SplitHostPort(handlers.GetSourceIP(r))
    if err != nil {
        host, port = "", ""
    }

    // Notify object accessed via a GET request.
    sendEvent(eventArgs{
        EventName:    event.ObjectAccessedGet,
        BucketName:   bucket,
        Object:       objInfo,
        ReqParams:    extractReqParams(r),
        RespElements: extractRespElements(w),
        UserAgent:    r.UserAgent(),
        Host:         host,
        Port:         port,
    })

    for k, v := range objInfo.UserDefined {
        logger.GetReqInfo(ctx).SetTags(k, v)
    }

    logger.GetReqInfo(ctx).SetTags("etag", objInfo.ETag)
}
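
// Range requests are honored by the handler above via parseRequestRangeSpec:
// for example "Range: bytes=0-99" yields a 206 Partial Content response with
// the first 100 bytes, while a malformed Range header (other than an invalid
// range) is ignored and the full object is returned, matching Amazon S3 behavior.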

// HeadObjectHandler - HEAD Object
// -----------
// The HEAD operation retrieves metadata from an object without returning the object itself.
func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "HeadObject")

    defer logger.AuditLog(ctx, r)

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponseHeadersOnly(w, ErrServerNotInitialized)
        return
    }
    if crypto.S3.IsRequested(r.Header) || crypto.S3KMS.IsRequested(r.Header) { // If SSE-S3 or SSE-KMS present -> AWS fails with undefined error
        writeErrorResponse(w, ErrBadRequest, r.URL)
        return
    }

    vars := mux.Vars(r)
    bucket := vars["bucket"]
    object := vars["object"]

    getObjectInfo := objectAPI.GetObjectInfo
    if api.CacheAPI() != nil {
        getObjectInfo = api.CacheAPI().GetObjectInfo
    }

    var opts ObjectOptions
    if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
        if getRequestAuthType(r) == authTypeAnonymous {
            // As per "Permission" section in
            // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html
            // If the object you request does not exist,
            // the error Amazon S3 returns depends on
            // whether you also have the s3:ListBucket
            // permission.
            // * If you have the s3:ListBucket permission
            // on the bucket, Amazon S3 will return an
            // HTTP status code 404 ("no such key")
            // error.
            // * if you don’t have the s3:ListBucket
            // permission, Amazon S3 will return an HTTP
            // status code 403 ("access denied") error.
            if globalPolicySys.IsAllowed(policy.Args{
                Action:          policy.ListBucketAction,
                BucketName:      bucket,
                ConditionValues: getConditionValues(r, ""),
                IsOwner:         false,
            }) {
                _, err := getObjectInfo(ctx, bucket, object, opts)
                if toAPIErrorCode(err) == ErrNoSuchKey {
                    s3Error = ErrNoSuchKey
                }
            }
        }
        writeErrorResponseHeadersOnly(w, s3Error)
        return
    }

    // Get request range.
    var rs *HTTPRangeSpec
    rangeHeader := r.Header.Get("Range")
    if rangeHeader != "" {
        var err error
        if rs, err = parseRequestRangeSpec(rangeHeader); err != nil {
            // Handle only errInvalidRange. Ignore other
            // parse error and treat it as regular Get
            // request like Amazon S3.
            if err == errInvalidRange {
                writeErrorResponseHeadersOnly(w, ErrInvalidRange)
                return
            }

            logger.LogIf(ctx, err)
        }
    }

    objInfo, err := getObjectInfo(ctx, bucket, object, opts)
    if err != nil {
        writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
        return
    }

    if objectAPI.IsEncryptionSupported() {
        if _, err = DecryptObjectInfo(objInfo, r.Header); err != nil {
            writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
            return
        }
    }

    // Set encryption response headers
    if objectAPI.IsEncryptionSupported() {
        if crypto.IsEncrypted(objInfo.UserDefined) {
            switch {
            case crypto.S3.IsEncrypted(objInfo.UserDefined):
                w.Header().Set(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
            case crypto.SSEC.IsEncrypted(objInfo.UserDefined):
                // Validate the SSE-C Key set in the header.
                if _, err = crypto.SSEC.UnsealObjectKey(r.Header, objInfo.UserDefined, bucket, object); err != nil {
                    writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
                    return
                }
                w.Header().Set(crypto.SSECAlgorithm, r.Header.Get(crypto.SSECAlgorithm))
                w.Header().Set(crypto.SSECKeyMD5, r.Header.Get(crypto.SSECKeyMD5))
            }
        }
    }

    // Validate pre-conditions if any.
    if checkPreconditions(w, r, objInfo) {
        return
    }

    // Set standard object headers.
    if hErr := setObjectHeaders(w, objInfo, rs); hErr != nil {
        writeErrorResponseHeadersOnly(w, toAPIErrorCode(hErr))
        return
    }

    // Set any additional requested response headers.
    setHeadGetRespHeaders(w, r.URL.Query())

    // Successful response.
    if rs != nil {
        w.WriteHeader(http.StatusPartialContent)
    } else {
        w.WriteHeader(http.StatusOK)
    }

    // Get host and port from Request.RemoteAddr.
    host, port, err := net.SplitHostPort(handlers.GetSourceIP(r))
    if err != nil {
        host, port = "", ""
    }

    // Notify object accessed via a HEAD request.
    sendEvent(eventArgs{
        EventName:    event.ObjectAccessedHead,
        BucketName:   bucket,
        Object:       objInfo,
        ReqParams:    extractReqParams(r),
        RespElements: extractRespElements(w),
        UserAgent:    r.UserAgent(),
        Host:         host,
        Port:         port,
    })

    for k, v := range objInfo.UserDefined {
        logger.GetReqInfo(ctx).SetTags(k, v)
    }

    logger.GetReqInfo(ctx).SetTags("etag", objInfo.ETag)
}

// Extract metadata relevant for a CopyObject operation based on conditional
// header values specified in X-Amz-Metadata-Directive.
func getCpObjMetadataFromHeader(ctx context.Context, r *http.Request, userMeta map[string]string) (map[string]string, error) {
    // Make a copy of the supplied metadata to avoid
    // changing the original one.
    defaultMeta := make(map[string]string, len(userMeta))
    for k, v := range userMeta {
        defaultMeta[k] = v
    }

    // if x-amz-metadata-directive says REPLACE then
    // we extract metadata from the input headers.
    if isMetadataReplace(r.Header) {
        return extractMetadata(ctx, r)
    }

    // if x-amz-metadata-directive says COPY then we
    // return the default metadata.
    if isMetadataCopy(r.Header) {
        return defaultMeta, nil
    }

    // Copy is the default behavior if x-amz-metadata-directive is not set.
    return defaultMeta, nil
}
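
// For example, a CopyObject request carrying "X-Amz-Metadata-Directive: REPLACE"
// takes its user metadata from the request headers, while "COPY" (or an absent
// directive) preserves the source object's metadata unchanged.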

// CopyObjectHandler - Copy Object
// ----------
// This implementation of the PUT operation adds an object to a bucket
// while reading the object from another source.
// Notice: The S3 client can send secret keys in headers for encryption related jobs,
// the handler should ensure to remove these keys before sending them to the object layer.
// Currently these keys are:
//   - X-Amz-Server-Side-Encryption-Customer-Key
//   - X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key
func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "CopyObject")

    defer logger.AuditLog(ctx, r)

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(w, ErrServerNotInitialized, r.URL)
        return
    }
    if crypto.S3KMS.IsRequested(r.Header) {
        writeErrorResponse(w, ErrNotImplemented, r.URL) // SSE-KMS is not supported
        return
    }

    vars := mux.Vars(r)
    dstBucket := vars["bucket"]
    dstObject := vars["object"]

    if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, dstBucket, dstObject); s3Error != ErrNone {
        writeErrorResponse(w, s3Error, r.URL)
        return
    }

    // TODO: Reject requests where body/payload is present, for now we don't even read it.

    // Copy source path.
    cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
    if err != nil {
        // Save unescaped string as is.
        cpSrcPath = r.Header.Get("X-Amz-Copy-Source")
    }

    srcBucket, srcObject := path2BucketAndObject(cpSrcPath)
    // If source object is empty or bucket is empty, reply back invalid copy source.
    if srcObject == "" || srcBucket == "" {
        writeErrorResponse(w, ErrInvalidCopySource, r.URL)
        return
    }

    // Check if metadata directive is valid.
    if !isMetadataDirectiveValid(r.Header) {
        writeErrorResponse(w, ErrInvalidMetadataDirective, r.URL)
        return
    }

    var srcOpts, dstOpts ObjectOptions

    // Deny if WORM is enabled
    if globalWORMEnabled {
        if _, err = objectAPI.GetObjectInfo(ctx, dstBucket, dstObject, dstOpts); err == nil {
            writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
            return
        }
    }

    cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))

    getObjectNInfo := objectAPI.GetObjectNInfo
    if api.CacheAPI() != nil {
        getObjectNInfo = api.CacheAPI().GetObjectNInfo
    }

    var lock = noLock
    if !cpSrcDstSame {
        lock = readLock
    }

    var rs *HTTPRangeSpec
    gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, lock, srcOpts)
    if err != nil {
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        return
    }
    defer gr.Close()
    srcInfo := gr.ObjInfo

    // Verify x-amz-copy-source preconditions before continuing with CopyObject.
    if checkCopyObjectPreconditions(w, r, srcInfo) {
        return
    }

    /// maximum Upload size for object in a single CopyObject operation.
    if isMaxObjectSize(srcInfo.Size) {
        writeErrorResponse(w, ErrEntityTooLarge, r.URL)
        return
    }

    // We have to copy metadata only if source and destination are same.
    // This changes for encryption, which can be observed below.
    if cpSrcDstSame {
        srcInfo.metadataOnly = true
    }

    // Checks if a remote putobject call is needed for CopyObject operation
    // 1. If source and destination bucket names are same, it means no call needed to etcd to get destination info
    // 2. If destination bucket doesn't exist locally, only then an etcd call is needed
    var isRemoteCallRequired = func(ctx context.Context, src, dst string, objAPI ObjectLayer) bool {
        if src == dst {
            return false
        }
        _, berr := objAPI.GetBucketInfo(ctx, dst)
        return berr == toObjectErr(errVolumeNotFound, dst)
    }

    var reader io.Reader
    var length = srcInfo.Size

    // Set the actual size to the decrypted size if encrypted.
    actualSize := srcInfo.Size
    if crypto.IsEncrypted(srcInfo.UserDefined) {
        actualSize, err = srcInfo.DecryptedSize()
        if err != nil {
            writeErrorResponse(w, toAPIErrorCode(err), r.URL)
            return
        }
    }
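
    // The block below decides whether the copied data is re-compressed on the
    // fly: when compression applies, the source stream is piped through a
    // snappy writer and length is set to -1 so hash.NewReader treats the
    // stream length as unknown, while the original size is preserved in the
    // "actual-size" metadata entry.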
    // No need to compress for remote etcd calls
    // Pass the decompressed stream to such calls.
    isCompressed := objectAPI.IsCompressionSupported() && isCompressible(r.Header, srcObject) && !isRemoteCallRequired(ctx, srcBucket, dstBucket, objectAPI)
    if isCompressed {
        // Storing the compression metadata.
        srcInfo.UserDefined[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV1
        srcInfo.UserDefined[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(actualSize, 10)
        // Remove all source encrypted related metadata to
        // avoid copying them in target object.
        crypto.RemoveInternalEntries(srcInfo.UserDefined)
        // Open a pipe for compression.
        // Where pipeWriter is piped to srcInfo.Reader.
        // gr writes to pipeWriter.
        pipeReader, pipeWriter := io.Pipe()
        reader = pipeReader
        length = -1

        snappyWriter := snappy.NewWriter(pipeWriter)

        go func() {
            // Compress the decompressed source object.
            _, cerr := io.Copy(snappyWriter, gr)
            snappyWriter.Close()
            pipeWriter.CloseWithError(cerr)
        }()
    } else {
        // Remove the metadata for remote calls.
        delete(srcInfo.UserDefined, ReservedMetadataPrefix+"compression")
        delete(srcInfo.UserDefined, ReservedMetadataPrefix+"actual-size")
        reader = gr
    }

    srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualSize)
    if err != nil {
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        return
    }

    var encMetadata = make(map[string]string)
    if objectAPI.IsEncryptionSupported() && !isCompressed {
        // Encryption parameters not applicable for this object.
        if !crypto.IsEncrypted(srcInfo.UserDefined) && crypto.SSECopy.IsRequested(r.Header) {
            writeErrorResponse(w, toAPIErrorCode(errInvalidEncryptionParameters), r.URL)
            return
        }
        // Encryption parameters not present for this object.
        if crypto.SSEC.IsEncrypted(srcInfo.UserDefined) && !crypto.SSECopy.IsRequested(r.Header) {
            writeErrorResponse(w, ErrInvalidSSECustomerAlgorithm, r.URL)
            return
        }
        var oldKey, newKey []byte
        sseCopyS3 := crypto.S3.IsEncrypted(srcInfo.UserDefined)
        sseCopyC := crypto.SSEC.IsEncrypted(srcInfo.UserDefined) && crypto.SSECopy.IsRequested(r.Header)
        sseC := crypto.SSEC.IsRequested(r.Header)
        sseS3 := crypto.S3.IsRequested(r.Header)

        isSourceEncrypted := sseCopyC || sseCopyS3
        isTargetEncrypted := sseC || sseS3

        if sseC {
            newKey, err = ParseSSECustomerRequest(r)
            if err != nil {
                writeErrorResponse(w, toAPIErrorCode(err), r.URL)
                return
            }
        }
        // AWS S3 implementation requires us to only rotate keys
        // when both keys are provided and destination is same,
        // otherwise we proceed to encrypt/decrypt.
        if sseCopyC && sseC && cpSrcDstSame {
            // Get the old key which needs to be rotated.
            oldKey, err = ParseSSECopyCustomerRequest(r.Header, srcInfo.UserDefined)
            if err != nil {
                writeErrorResponse(w, toAPIErrorCode(err), r.URL)
                return
            }
            for k, v := range srcInfo.UserDefined {
                encMetadata[k] = v
            }
            if err = rotateKey(oldKey, newKey, srcBucket, srcObject, encMetadata); err != nil {
                writeErrorResponse(w, toAPIErrorCode(err), r.URL)
                return
            }

            // Since we are rotating the keys, make sure to update the metadata.
            srcInfo.metadataOnly = true
        } else {
            if isSourceEncrypted || isTargetEncrypted {
                // We are not just copying metadata; we are creating a new
                // object at this point, even if source and destination are
                // the same object.
                srcInfo.metadataOnly = false
            }

            // Calculate the size of the target object
            var targetSize int64

            switch {
            case !isSourceEncrypted && !isTargetEncrypted:
                fallthrough
            case isSourceEncrypted && isTargetEncrypted:
                targetSize = srcInfo.Size
            // Source not encrypted and target encrypted
            case !isSourceEncrypted && isTargetEncrypted:
                targetSize = srcInfo.EncryptedSize()
            case isSourceEncrypted && !isTargetEncrypted:
                targetSize, _ = srcInfo.DecryptedSize()
            }

            if isTargetEncrypted {
                reader, err = newEncryptReader(reader, newKey, dstBucket, dstObject, encMetadata, sseS3)
                if err != nil {
                    writeErrorResponse(w, toAPIErrorCode(err), r.URL)
                    return
                }
            }

            if isSourceEncrypted {
                // Remove all source encrypted related metadata to
                // avoid copying them in target object.
                crypto.RemoveInternalEntries(srcInfo.UserDefined)
            }

            srcInfo.Reader, err = hash.NewReader(reader, targetSize, "", "", targetSize) // do not try to verify encrypted content
            if err != nil {
                writeErrorResponse(w, toAPIErrorCode(err), r.URL)
                return
            }
        }
    }

    srcInfo.UserDefined, err = getCpObjMetadataFromHeader(ctx, r, srcInfo.UserDefined)
    if err != nil {
        writeErrorResponse(w, ErrInternalError, r.URL)
        return
    }

    // We need to preserve the encryption headers set in EncryptRequest,
    // so we do not want to override them, copy them instead.
    for k, v := range encMetadata {
        srcInfo.UserDefined[k] = v
    }

    // Ensure that metadata does not contain sensitive information
    crypto.RemoveSensitiveEntries(srcInfo.UserDefined)

    // Check if x-amz-metadata-directive was not set to REPLACE and source,
    // destination are same objects. Apply this restriction also when
    // metadataOnly is true indicating that we are not overwriting the object.
    // If encryption is enabled we do not need explicit "REPLACE" metadata to
    // be enabled as well - this is to allow for key-rotation.
    if !isMetadataReplace(r.Header) && srcInfo.metadataOnly && !crypto.SSEC.IsEncrypted(srcInfo.UserDefined) {
        // If x-amz-metadata-directive is not set to REPLACE then we need
        // to error out if source and destination are same.
        writeErrorResponse(w, ErrInvalidCopyDest, r.URL)
        return
    }

    var objInfo ObjectInfo

    // Returns a minio-go Client configured to access remote host described by destDNSRecord
    // Applicable only in a federated deployment
    var getRemoteInstanceClient = func(host string, port int) (*miniogo.Core, error) {
        // In a federated deployment, all the instances share config files and hence are expected
        // to have the same credentials. So, use the current instance's credentials to create a
        // client for the remote instance.
        endpoint := net.JoinHostPort(host, strconv.Itoa(port))
        accessKey := globalServerConfig.Credential.AccessKey
        secretKey := globalServerConfig.Credential.SecretKey
        return miniogo.NewCore(endpoint, accessKey, secretKey, globalIsSSL)
    }

    if isRemoteCallRequired(ctx, srcBucket, dstBucket, objectAPI) {
        if globalDNSConfig == nil {
            writeErrorResponse(w, ErrNoSuchBucket, r.URL)
            return
        }
        var dstRecords []dns.SrvRecord
        if dstRecords, err = globalDNSConfig.Get(dstBucket); err == nil {
            // Send PutObject request to appropriate instance (in federated deployment)
            host, port := getRandomHostPort(dstRecords)
            client, rerr := getRemoteInstanceClient(host, port)
            if rerr != nil {
                writeErrorResponse(w, ErrInternalError, r.URL)
                return
            }
            remoteObjInfo, rerr := client.PutObject(dstBucket, dstObject, srcInfo.Reader, srcInfo.Size, "", "", srcInfo.UserDefined, dstOpts.ServerSideEncryption)
            if rerr != nil {
                writeErrorResponse(w, ErrInternalError, r.URL)
                return
            }
            objInfo.ETag = remoteObjInfo.ETag
            objInfo.ModTime = remoteObjInfo.LastModified
        }
    } else {
        // Copy source object to destination, if source and destination
        // object is same then only metadata is updated.
        objInfo, err = objectAPI.CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
        if err != nil {
            writeErrorResponse(w, toAPIErrorCode(err), r.URL)
            return
        }
    }

    response := generateCopyObjectResponse(objInfo.ETag, objInfo.ModTime)
    encodedSuccessResponse := encodeResponse(response)

    // Write success response.
    writeSuccessResponseXML(w, encodedSuccessResponse)

    // Get host and port from Request.RemoteAddr.
    host, port, err := net.SplitHostPort(handlers.GetSourceIP(r))
    if err != nil {
        host, port = "", ""
    }

    if objInfo.IsCompressed() {
        objInfo.Size = actualSize
    }

    // Notify object created event.
    sendEvent(eventArgs{
        EventName:  event.ObjectCreatedCopy,
        BucketName: dstBucket,
        Object:     objInfo,
        ReqParams:  extractReqParams(r),
        UserAgent:  r.UserAgent(),
        Host:       host,
        Port:       port,
    })

    for k, v := range objInfo.UserDefined {
        logger.GetReqInfo(ctx).SetTags(k, v)
    }

    logger.GetReqInfo(ctx).SetTags("etag", objInfo.ETag)
}
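
// The copy source is read from the X-Amz-Copy-Source header and split into
// bucket and object by path2BucketAndObject; for example (illustrative names
// only) "X-Amz-Copy-Source: srcbucket/path/to/object" copies from bucket
// "srcbucket" and object "path/to/object".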

// PutObjectHandler - PUT Object
// ----------
// This implementation of the PUT operation adds an object to a bucket.
// Notice: The S3 client can send secret keys in headers for encryption related jobs,
// the handler should ensure to remove these keys before sending them to the object layer.
// Currently these keys are:
//   - X-Amz-Server-Side-Encryption-Customer-Key
//   - X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key
func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, w, "PutObject")

    defer logger.AuditLog(ctx, r)

    objectAPI := api.ObjectAPI()
    if objectAPI == nil {
        writeErrorResponse(w, ErrServerNotInitialized, r.URL)
        return
    }
    if crypto.S3KMS.IsRequested(r.Header) {
        writeErrorResponse(w, ErrNotImplemented, r.URL) // SSE-KMS is not supported
        return
    }

    vars := mux.Vars(r)
    bucket := vars["bucket"]
    object := vars["object"]

    // X-Amz-Copy-Source shouldn't be set for this call.
    if _, ok := r.Header["X-Amz-Copy-Source"]; ok {
        writeErrorResponse(w, ErrInvalidCopySource, r.URL)
        return
    }

    // Validate storage class metadata if present
    if _, ok := r.Header[amzStorageClassCanonical]; ok {
        if !isValidStorageClassMeta(r.Header.Get(amzStorageClassCanonical)) {
            writeErrorResponse(w, ErrInvalidStorageClass, r.URL)
            return
        }
    }

    // Get Content-Md5 sent by client and verify if valid
    md5Bytes, err := checkValidMD5(r.Header)
    if err != nil {
        writeErrorResponse(w, ErrInvalidDigest, r.URL)
        return
    }
2016-09-30 17:32:13 -04:00
|
|
|
|
|
2015-12-28 02:00:36 -05:00
|
|
|
|
/// if Content-Length is unknown/missing, deny the request
|
2016-02-15 20:42:39 -05:00
|
|
|
|
size := r.ContentLength
|
2016-08-08 23:56:29 -04:00
|
|
|
|
rAuthType := getRequestAuthType(r)
|
|
|
|
|
if rAuthType == authTypeStreamingSigned {
|
2018-03-16 14:22:34 -04:00
|
|
|
|
if sizeStr, ok := r.Header["X-Amz-Decoded-Content-Length"]; ok {
|
|
|
|
|
if sizeStr[0] == "" {
|
|
|
|
|
writeErrorResponse(w, ErrMissingContentLength, r.URL)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
size, err = strconv.ParseInt(sizeStr[0], 10, 64)
|
|
|
|
|
if err != nil {
|
|
|
|
|
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
|
|
|
|
return
|
|
|
|
|
}
|
2016-08-08 23:56:29 -04:00
|
|
|
|
}
|
|
|
|
|
}
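
	// Note (editorial, illustrative): for streaming-signed (aws-chunked) uploads the
	// Content-Length header covers the wire format including per-chunk signatures, so
	// the plaintext object size must be taken from X-Amz-Decoded-Content-Length
	// instead. Each chunk on the wire looks roughly like:
	//
	//	<hex-chunk-size>;chunk-signature=<hmac-sha256>\r\n
	//	<chunk-data>\r\n
	//
	// terminated by a final zero-length chunk. The exact framing is defined by the
	// AWS SigV4 streaming specification, not by this handler.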
	if size == -1 {
		writeErrorResponse(w, ErrMissingContentLength, r.URL)
		return
	}

	/// maximum Upload size for objects in a single operation
	if isMaxObjectSize(size) {
		writeErrorResponse(w, ErrEntityTooLarge, r.URL)
		return
	}

	metadata, err := extractMetadata(ctx, r)
	if err != nil {
		writeErrorResponse(w, ErrInternalError, r.URL)
		return
	}

	if rAuthType == authTypeStreamingSigned {
		if contentEncoding, ok := metadata["content-encoding"]; ok {
			contentEncoding = trimAwsChunkedContentEncoding(contentEncoding)
			if contentEncoding != "" {
				// Make sure to trim and save the content-encoding
				// parameter for a streaming signature which is set
				// to a custom value for example: "aws-chunked,gzip".
				metadata["content-encoding"] = contentEncoding
			} else {
				// Trimmed content encoding is empty when the header
				// value is set to "aws-chunked" only.

				// Make sure to delete the content-encoding parameter
				// for a streaming signature which is set to value
				// for example: "aws-chunked"
				delete(metadata, "content-encoding")
			}
		}
	}
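
	// Illustrative examples of the trimming above (assuming trimAwsChunkedContentEncoding
	// only strips the "aws-chunked" token):
	//
	//	"aws-chunked,gzip" -> "gzip"  (saved back into metadata)
	//	"aws-chunked"      -> ""      (key deleted, the object has no real encoding)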

	var (
		md5hex    = hex.EncodeToString(md5Bytes)
		sha256hex = ""
		reader    io.Reader
		s3Err     APIErrorCode
		putObject = objectAPI.PutObject
	)
	reader = r.Body

	// Check if put is allowed
	if s3Err = isPutAllowed(rAuthType, bucket, object, r); s3Err != ErrNone {
		writeErrorResponse(w, s3Err, r.URL)
		return
	}

	switch rAuthType {
	case authTypeStreamingSigned:
		// Initialize stream signature verifier.
		reader, s3Err = newSignV4ChunkedReader(r)
		if s3Err != ErrNone {
			writeErrorResponse(w, s3Err, r.URL)
			return
		}
	case authTypeSignedV2, authTypePresignedV2:
		s3Err = isReqAuthenticatedV2(r)
		if s3Err != ErrNone {
			writeErrorResponse(w, s3Err, r.URL)
			return
		}
	case authTypePresigned, authTypeSigned:
		if s3Err = reqSignatureV4Verify(r, globalServerConfig.GetRegion()); s3Err != ErrNone {
			writeErrorResponse(w, s3Err, r.URL)
			return
		}
		if !skipContentSha256Cksum(r) {
			sha256hex = getContentSha256Cksum(r)
		}
	}
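
	// Note (editorial): md5hex and sha256hex act as optional expected digests for the
	// hash.Reader constructed below; an empty string disables the corresponding check.
	// For V4-signed (non-streaming) requests the x-amz-content-sha256 value is verified
	// against the body, while for streaming requests the chunked reader verifies
	// signatures per chunk instead.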

	actualSize := size

	if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) && size > 0 {
		// Storing the compression metadata.
		metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV1
		metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10)

		pipeReader, pipeWriter := io.Pipe()
		snappyWriter := snappy.NewWriter(pipeWriter)

		var actualReader *hash.Reader
		actualReader, err = hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
		if err != nil {
			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
			return
		}

		go func() {
			// Writing to the compressed writer.
			_, cerr := io.CopyN(snappyWriter, actualReader, actualSize)
			snappyWriter.Close()
			pipeWriter.CloseWithError(cerr)
		}()

		// Set compression metrics.
		size = -1   // Since compressed size is un-predictable.
		md5hex = "" // Do not try to verify the content.
		sha256hex = ""
		reader = pipeReader
	}

	hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}
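
	// Note (editorial): the pipe above decouples compression from upload. A goroutine
	// streams the digest-verified request body through the snappy writer into
	// pipeWriter while putObject consumes pipeReader, so the object is compressed on
	// the fly without buffering it in memory. Since the compressed length cannot be
	// known up front, size becomes -1 and the original length survives only in the
	// "actual-size" metadata entry, which the read path uses to report the real size.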

	opts := ObjectOptions{}
	// Deny if WORM is enabled
	if globalWORMEnabled {
		if _, err = objectAPI.GetObjectInfo(ctx, bucket, object, opts); err == nil {
			writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
			return
		}
	}

	if objectAPI.IsEncryptionSupported() {
		if hasServerSideEncryptionHeader(r.Header) && !hasSuffix(object, slashSeparator) { // handle SSE requests
			reader, err = EncryptRequest(hashReader, r, bucket, object, metadata)
			if err != nil {
				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
				return
			}
			info := ObjectInfo{Size: size}
			hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", size) // do not try to verify encrypted content
			if err != nil {
				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
				return
			}
		}
	}
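
	// Note (editorial): EncryptedSize() accounts for the authenticated-encryption
	// framing added on top of the plaintext (the sio/DARE format adds a small
	// per-package header and tag), so the new hash.Reader is sized for the
	// ciphertext. Digest checks are disabled here because the client-supplied
	// MD5/SHA256 describe the plaintext, which was already verified upstream.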

	// Ensure that metadata does not contain sensitive information
	crypto.RemoveSensitiveEntries(metadata)

	if api.CacheAPI() != nil && !hasServerSideEncryptionHeader(r.Header) {
		putObject = api.CacheAPI().PutObject
	}

	// Create the object.
	objInfo, err := putObject(ctx, bucket, object, hashReader, metadata, opts)
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	if objInfo.IsCompressed() {
		// Mark the ETag as belonging to a compressed object.
		objInfo.ETag = objInfo.ETag + "-1"
	}

	w.Header().Set("ETag", "\""+objInfo.ETag+"\"")
	if objectAPI.IsEncryptionSupported() {
		if crypto.IsEncrypted(objInfo.UserDefined) {
			switch {
			case crypto.S3.IsEncrypted(objInfo.UserDefined):
				w.Header().Set(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
			case crypto.SSEC.IsRequested(r.Header):
				w.Header().Set(crypto.SSECAlgorithm, r.Header.Get(crypto.SSECAlgorithm))
				w.Header().Set(crypto.SSECKeyMD5, r.Header.Get(crypto.SSECKeyMD5))
			}
		}
	}

	writeSuccessResponseHeadersOnly(w)

	// Get host and port from Request.RemoteAddr.
	host, port, err := net.SplitHostPort(handlers.GetSourceIP(r))
	if err != nil {
		host, port = "", ""
	}

	// Notify object created event.
	sendEvent(eventArgs{
		EventName:  event.ObjectCreatedPut,
		BucketName: bucket,
		Object:     objInfo,
		ReqParams:  extractReqParams(r),
		UserAgent:  r.UserAgent(),
		Host:       host,
		Port:       port,
	})

	for k, v := range objInfo.UserDefined {
		logger.GetReqInfo(ctx).SetTags(k, v)
	}

	logger.GetReqInfo(ctx).SetTags("etag", objInfo.ETag)
}

/// Multipart objectAPIHandlers

// NewMultipartUploadHandler - New multipart upload.
// Notice: The S3 client can send secret keys in headers for encryption related jobs,
// the handler should ensure to remove these keys before sending them to the object layer.
// Currently these keys are:
//   - X-Amz-Server-Side-Encryption-Customer-Key
//   - X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key
func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "NewMultipartUpload")

	defer logger.AuditLog(ctx, r)

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}
	if crypto.S3KMS.IsRequested(r.Header) {
		writeErrorResponse(w, ErrNotImplemented, r.URL) // SSE-KMS is not supported
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object := vars["object"]
	opts := ObjectOptions{}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, bucket, object); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	// Deny if WORM is enabled
	if globalWORMEnabled {
		if _, err := objectAPI.GetObjectInfo(ctx, bucket, object, opts); err == nil {
			writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
			return
		}
	}

	// Validate storage class metadata if present.
	if _, ok := r.Header[amzStorageClassCanonical]; ok {
		if !isValidStorageClassMeta(r.Header.Get(amzStorageClassCanonical)) {
			writeErrorResponse(w, ErrInvalidStorageClass, r.URL)
			return
		}
	}

	var encMetadata = map[string]string{}

	if objectAPI.IsEncryptionSupported() {
		if hasServerSideEncryptionHeader(r.Header) {
			if err := setEncryptionMetadata(r, bucket, object, encMetadata); err != nil {
				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
				return
			}
			// Set this for multipart only operations, we need to differentiate during
			// decryption if the file was actually multipart or not.
			encMetadata[ReservedMetadataPrefix+"Encrypted-Multipart"] = ""
		}
	}
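
	// Note (editorial): encryption metadata has to be pinned down here, at initiate
	// time, because every subsequent UploadPart/CopyObjectPart derives its per-part
	// key from the object key stored in this upload's metadata. Preparing it lazily
	// on the first part would risk racing concurrent part uploads.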

	// Extract metadata that needs to be saved.
	metadata, err := extractMetadata(ctx, r)
	if err != nil {
		writeErrorResponse(w, ErrInternalError, r.URL)
		return
	}

	// We need to preserve the encryption headers set in EncryptRequest,
	// so we do not want to override them, copy them instead.
	for k, v := range encMetadata {
		metadata[k] = v
	}

	// Ensure that metadata does not contain sensitive information
	crypto.RemoveSensitiveEntries(metadata)

	if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) {
		// Storing the compression metadata.
		metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV1
	}

	newMultipartUpload := objectAPI.NewMultipartUpload
	if api.CacheAPI() != nil && !hasServerSideEncryptionHeader(r.Header) {
		newMultipartUpload = api.CacheAPI().NewMultipartUpload
	}
	uploadID, err := newMultipartUpload(ctx, bucket, object, metadata, opts)
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	response := generateInitiateMultipartUploadResponse(bucket, object, uploadID)
	encodedSuccessResponse := encodeResponse(response)
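
	// Illustrative response body (standard S3 shape, field values are examples):
	//
	//	<InitiateMultipartUploadResult>
	//	  <Bucket>mybucket</Bucket>
	//	  <Key>myobject</Key>
	//	  <UploadId>example-upload-id</UploadId>
	//	</InitiateMultipartUploadResult>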

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}

// CopyObjectPartHandler - uploads a part by copying data from an existing object as data source.
func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "CopyObjectPart")

	defer logger.AuditLog(ctx, r)

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}
	if crypto.S3KMS.IsRequested(r.Header) {
		writeErrorResponse(w, ErrNotImplemented, r.URL) // SSE-KMS is not supported
		return
	}

	vars := mux.Vars(r)
	dstBucket := vars["bucket"]
	dstObject := vars["object"]

	if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, dstBucket, dstObject); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	// Copy source path.
	cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
	if err != nil {
		// Save unescaped string as is.
		cpSrcPath = r.Header.Get("X-Amz-Copy-Source")
	}

	srcBucket, srcObject := path2BucketAndObject(cpSrcPath)
	// If source object is empty or bucket is empty, reply back invalid copy source.
	if srcObject == "" || srcBucket == "" {
		writeErrorResponse(w, ErrInvalidCopySource, r.URL)
		return
	}

	uploadID := r.URL.Query().Get("uploadId")
	partIDString := r.URL.Query().Get("partNumber")

	partID, err := strconv.Atoi(partIDString)
	if err != nil {
		writeErrorResponse(w, ErrInvalidPart, r.URL)
		return
	}

	// check partID with maximum part ID for multipart objects
	if isMaxPartID(partID) {
		writeErrorResponse(w, ErrInvalidMaxParts, r.URL)
		return
	}
	var srcOpts, dstOpts ObjectOptions

	// Deny if WORM is enabled
	if globalWORMEnabled {
		if _, err = objectAPI.GetObjectInfo(ctx, dstBucket, dstObject, dstOpts); err == nil {
			writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
			return
		}
	}

	getObjectNInfo := objectAPI.GetObjectNInfo
	if api.CacheAPI() != nil {
		getObjectNInfo = api.CacheAPI().GetObjectNInfo
	}

	// Get request range.
	var rs *HTTPRangeSpec
	rangeHeader := r.Header.Get("x-amz-copy-source-range")
	if rangeHeader != "" {
		var parseRangeErr error
		if rs, parseRangeErr = parseCopyPartRangeSpec(rangeHeader); parseRangeErr != nil {
			// Handle only errInvalidRange
			// Ignore other parse error and treat it as regular Get request like Amazon S3.
			logger.GetReqInfo(ctx).AppendTags("rangeHeader", rangeHeader)
			logger.LogIf(ctx, parseRangeErr)
			writeCopyPartErr(w, parseRangeErr, r.URL)
			return
		}
	}
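
	// Illustrative header format (standard S3, inclusive byte positions):
	//
	//	x-amz-copy-source-range: bytes=first-last
	//
	// e.g. "bytes=0-9" selects the first ten bytes of the source object.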

	gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, readLock, srcOpts)
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}
	defer gr.Close()
	srcInfo := gr.ObjInfo

	actualPartSize := srcInfo.Size
	if crypto.IsEncrypted(srcInfo.UserDefined) {
		actualPartSize, err = srcInfo.DecryptedSize()
		if err != nil {
			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
			return
		}
	}

	// Special care for CopyObjectPart
	if partRangeErr := checkCopyPartRangeWithSize(rs, actualPartSize); partRangeErr != nil {
		writeCopyPartErr(w, partRangeErr, r.URL)
		return
	}

	// Verify x-amz-copy-source preconditions before continuing with CopyObject.
	if checkCopyObjectPartPreconditions(w, r, srcInfo) {
		return
	}

	// Get the object offset & length
	startOffset, length, err := rs.GetOffsetLength(actualPartSize)
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	/// maximum copy size for multipart objects in a single operation
	if isMaxAllowedPartSize(length) {
		writeErrorResponse(w, ErrEntityTooLarge, r.URL)
		return
	}

	actualPartSize = length
	var reader io.Reader

	var li ListPartsInfo
	li, err = objectAPI.ListObjectParts(ctx, dstBucket, dstObject, uploadID, 0, 1)
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	// Read compression metadata preserved in the init multipart for the decision.
	_, compressPart := li.UserDefined[ReservedMetadataPrefix+"compression"]
	isCompressed := compressPart
	// Compress only if the compression is enabled during initial multipart.
	if isCompressed {
		// Open a pipe for compression.
		// Where pipeWriter is piped to srcInfo.Reader.
		// gr writes to pipeWriter.
		pipeReader, pipeWriter := io.Pipe()
		reader = pipeReader
		length = -1

		snappyWriter := snappy.NewWriter(pipeWriter)

		go func() {
			// Compress the decompressed source object.
			_, cerr := io.Copy(snappyWriter, gr)
			snappyWriter.Close()
			pipeWriter.CloseWithError(cerr)
		}()
	} else {
		reader = gr
	}

	srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualPartSize)
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	if objectAPI.IsEncryptionSupported() && !isCompressed {
		if crypto.IsEncrypted(li.UserDefined) {
			if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(li.UserDefined) {
				writeErrorResponse(w, ErrSSEMultipartEncrypted, r.URL)
				return
			}
			var key []byte
			if crypto.SSEC.IsRequested(r.Header) {
				key, err = ParseSSECustomerRequest(r)
				if err != nil {
					writeErrorResponse(w, toAPIErrorCode(err), r.URL)
					return
				}
			}
			var objectEncryptionKey []byte
			objectEncryptionKey, err = decryptObjectInfo(key, dstBucket, dstObject, li.UserDefined)
			if err != nil {
				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
				return
			}

			var partIDbin [4]byte
			binary.LittleEndian.PutUint32(partIDbin[:], uint32(partID)) // marshal part ID

			mac := hmac.New(sha256.New, objectEncryptionKey) // derive part encryption key from part ID and object key
			mac.Write(partIDbin[:])
			partEncryptionKey := mac.Sum(nil)
			reader, err = sio.EncryptReader(reader, sio.Config{Key: partEncryptionKey})
			if err != nil {
				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
				return
			}

			info := ObjectInfo{Size: length}
			srcInfo.Reader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", length)
			if err != nil {
				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
				return
			}
		}
	}
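
	// Note (editorial): the per-part key is HMAC-SHA256(objectEncryptionKey, partID)
	// with the part ID encoded as 4 little-endian bytes. Deriving an independent key
	// per part keeps parts independently decryptable and ensures re-uploading or
	// copying one part never reuses the key stream of another part.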

	// Copy source object to destination, if source and destination
	// object is same then only metadata is updated.
	partInfo, err := objectAPI.CopyObjectPart(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID,
		startOffset, length, srcInfo, srcOpts, dstOpts)
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	response := generateCopyObjectPartResponse(partInfo.ETag, partInfo.LastModified)
	encodedSuccessResponse := encodeResponse(response)

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}

// PutObjectPartHandler - uploads an incoming part for an ongoing multipart operation.
func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PutObjectPart")

	defer logger.AuditLog(ctx, r)

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}
	if crypto.S3KMS.IsRequested(r.Header) {
		writeErrorResponse(w, ErrNotImplemented, r.URL) // SSE-KMS is not supported
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object := vars["object"]

	// X-Amz-Copy-Source shouldn't be set for this call.
	if _, ok := r.Header["X-Amz-Copy-Source"]; ok {
		writeErrorResponse(w, ErrInvalidCopySource, r.URL)
		return
	}

	// get Content-Md5 sent by client and verify if valid
	md5Bytes, err := checkValidMD5(r.Header)
	if err != nil {
		writeErrorResponse(w, ErrInvalidDigest, r.URL)
		return
	}

	/// if Content-Length is unknown/missing, deny the request
	size := r.ContentLength

	rAuthType := getRequestAuthType(r)
	// For auth type streaming signature, we need to gather a different content length.
	if rAuthType == authTypeStreamingSigned {
		if sizeStr, ok := r.Header["X-Amz-Decoded-Content-Length"]; ok {
			if sizeStr[0] == "" {
				writeErrorResponse(w, ErrMissingContentLength, r.URL)
				return
			}
			size, err = strconv.ParseInt(sizeStr[0], 10, 64)
			if err != nil {
				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
				return
			}
		}
	}
	if size == -1 {
		writeErrorResponse(w, ErrMissingContentLength, r.URL)
		return
	}

	/// maximum Upload size for multipart objects in a single operation
	if isMaxAllowedPartSize(size) {
		writeErrorResponse(w, ErrEntityTooLarge, r.URL)
		return
	}

	uploadID := r.URL.Query().Get("uploadId")
	partIDString := r.URL.Query().Get("partNumber")

	partID, err := strconv.Atoi(partIDString)
	if err != nil {
		writeErrorResponse(w, ErrInvalidPart, r.URL)
		return
	}

	// check partID with maximum part ID for multipart objects
	if isMaxPartID(partID) {
		writeErrorResponse(w, ErrInvalidMaxParts, r.URL)
		return
	}

	var (
		md5hex    = hex.EncodeToString(md5Bytes)
		sha256hex = ""
		reader    io.Reader
		s3Error   APIErrorCode
	)
	reader = r.Body

	if s3Error = isPutAllowed(rAuthType, bucket, object, r); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	switch rAuthType {
	case authTypeStreamingSigned:
		// Initialize stream signature verifier.
		reader, s3Error = newSignV4ChunkedReader(r)
		if s3Error != ErrNone {
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
	case authTypeSignedV2, authTypePresignedV2:
		if s3Error = isReqAuthenticatedV2(r); s3Error != ErrNone {
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
	case authTypePresigned, authTypeSigned:
		if s3Error = reqSignatureV4Verify(r, globalServerConfig.GetRegion()); s3Error != ErrNone {
			writeErrorResponse(w, s3Error, r.URL)
			return
		}

		if !skipContentSha256Cksum(r) {
			sha256hex = getContentSha256Cksum(r)
		}
	}

	actualSize := size
	var pipeReader *io.PipeReader
	var pipeWriter *io.PipeWriter

	var li ListPartsInfo
	li, err = objectAPI.ListObjectParts(ctx, bucket, object, uploadID, 0, 1)
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}
	// Read compression metadata preserved in the init multipart for the decision.
	_, compressPart := li.UserDefined[ReservedMetadataPrefix+"compression"]

	isCompressed := false
	if objectAPI.IsCompressionSupported() && compressPart {
		pipeReader, pipeWriter = io.Pipe()
		snappyWriter := snappy.NewWriter(pipeWriter)

		var actualReader *hash.Reader
		actualReader, err = hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
		if err != nil {
			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
			return
		}

		go func() {
			// Writing to the compressed writer.
			_, cerr := io.CopyN(snappyWriter, actualReader, actualSize)
			snappyWriter.Close()
			pipeWriter.CloseWithError(cerr)
		}()

		// Set compression metrics.
		size = -1   // Since compressed size is un-predictable.
		md5hex = "" // Do not try to verify the content.
		sha256hex = ""
		reader = pipeReader
		isCompressed = true
	}

	hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	opts := ObjectOptions{}
	// Deny if WORM is enabled
	if globalWORMEnabled {
		if _, err = objectAPI.GetObjectInfo(ctx, bucket, object, opts); err == nil {
			writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
			return
		}
	}

	isEncrypted := false
	if objectAPI.IsEncryptionSupported() && !isCompressed {
		var li ListPartsInfo
		li, err = objectAPI.ListObjectParts(ctx, bucket, object, uploadID, 0, 1)
		if err != nil {
			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
			return
		}
		if crypto.IsEncrypted(li.UserDefined) {
			isEncrypted = true
			if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(li.UserDefined) {
				writeErrorResponse(w, ErrSSEMultipartEncrypted, r.URL)
				return
			}
			var key []byte
			if crypto.SSEC.IsRequested(r.Header) {
				key, err = ParseSSECustomerRequest(r)
				if err != nil {
					writeErrorResponse(w, toAPIErrorCode(err), r.URL)
					return
				}
			}

			// Calculating object encryption key
			var objectEncryptionKey []byte
			objectEncryptionKey, err = decryptObjectInfo(key, bucket, object, li.UserDefined)
			if err != nil {
				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
				return
			}

			var partIDbin [4]byte
			binary.LittleEndian.PutUint32(partIDbin[:], uint32(partID)) // marshal part ID

			mac := hmac.New(sha256.New, objectEncryptionKey) // derive part encryption key from part ID and object key
			mac.Write(partIDbin[:])
			partEncryptionKey := mac.Sum(nil)

			reader, err = sio.EncryptReader(hashReader, sio.Config{Key: partEncryptionKey})
			if err != nil {
				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
				return
			}

			info := ObjectInfo{Size: size}
			hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", size) // do not try to verify encrypted content
			if err != nil {
				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
				return
			}
		}
	}
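
	// Note (editorial): sio.EncryptReader wraps the digest-verifying hashReader, so the
	// client-supplied MD5/SHA256 are checked against the plaintext as it is consumed;
	// the outer hash.Reader is then rebuilt for the ciphertext length with verification
	// disabled, mirroring the single-shot PutObject path above.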

	putObjectPart := objectAPI.PutObjectPart
	if api.CacheAPI() != nil && !isEncrypted {
		putObjectPart = api.CacheAPI().PutObjectPart
	}
	partInfo, err := putObjectPart(ctx, bucket, object, uploadID, partID, hashReader, opts)
	if err != nil {
		// Verify if the underlying error is signature mismatch.
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}
	if isCompressed {
		pipeWriter.Close()
		// Mark the ETag as belonging to a compressed part.
		partInfo.ETag = partInfo.ETag + "-1"
	}
	if partInfo.ETag != "" {
		w.Header().Set("ETag", "\""+partInfo.ETag+"\"")
	}

	writeSuccessResponseHeadersOnly(w)
}

// AbortMultipartUploadHandler - Abort multipart upload
func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "AbortMultipartUpload")

	defer logger.AuditLog(ctx, r)

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object := vars["object"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}
	abortMultipartUpload := objectAPI.AbortMultipartUpload
	if api.CacheAPI() != nil {
		abortMultipartUpload = api.CacheAPI().AbortMultipartUpload
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.AbortMultipartUploadAction, bucket, object); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	// Deny if WORM is enabled
	if globalWORMEnabled {
		if _, err := objectAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{}); err == nil {
			writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
			return
		}
	}

	uploadID, _, _, _, s3Error := getObjectResources(r.URL.Query())
	if s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}
	if err := abortMultipartUpload(ctx, bucket, object, uploadID); err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}
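
	// Note (editorial): a successful abort has no body to return; S3 responds with
	// 204 No Content, which is what writeSuccessNoContent emits below.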

	writeSuccessNoContent(w)
}

// ListObjectPartsHandler - List object parts
func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListObjectParts")

	defer logger.AuditLog(ctx, r)

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object := vars["object"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.ListMultipartUploadPartsAction, bucket, object); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	uploadID, partNumberMarker, maxParts, _, s3Error := getObjectResources(r.URL.Query())
	if s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}
	if partNumberMarker < 0 {
		writeErrorResponse(w, ErrInvalidPartNumberMarker, r.URL)
		return
	}
	if maxParts < 0 {
		writeErrorResponse(w, ErrInvalidMaxParts, r.URL)
		return
	}
	listPartsInfo, err := objectAPI.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts)
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}
	response := generateListPartsResponse(listPartsInfo)
	encodedSuccessResponse := encodeResponse(response)

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}

// CompleteMultipartUploadHandler - Complete multipart upload.
func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "CompleteMultipartUpload")

	defer logger.AuditLog(ctx, r)

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object := vars["object"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, bucket, object); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	// Deny if WORM is enabled
	if globalWORMEnabled {
		if _, err := objectAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{}); err == nil {
			writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
			return
		}
	}

	// Get upload id.
	uploadID, _, _, _, s3Error := getObjectResources(r.URL.Query())
	if s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	completeMultipartBytes, err := goioutil.ReadAll(r.Body)
	if err != nil {
		writeErrorResponse(w, ErrInternalError, r.URL)
		return
	}
	complMultipartUpload := &CompleteMultipartUpload{}
	if err = xml.Unmarshal(completeMultipartBytes, complMultipartUpload); err != nil {
		writeErrorResponse(w, ErrMalformedXML, r.URL)
		return
	}
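
	// Illustrative request body (standard S3 shape, values are examples):
	//
	//	<CompleteMultipartUpload>
	//	  <Part>
	//	    <PartNumber>1</PartNumber>
	//	    <ETag>"a54357aff0632cce46d942af68356b38"</ETag>
	//	  </Part>
	//	  ...
	//	</CompleteMultipartUpload>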

	if len(complMultipartUpload.Parts) == 0 {
		writeErrorResponse(w, ErrMalformedXML, r.URL)
		return
	}
	if !sort.IsSorted(CompletedParts(complMultipartUpload.Parts)) {
		writeErrorResponse(w, ErrInvalidPartOrder, r.URL)
		return
	}

	// Complete parts.
	var completeParts []CompletePart
	for _, part := range complMultipartUpload.Parts {
		// Strip the '-1' marker appended to compressed part ETags; this also avoids it for gateway parts.
		// `strings.TrimPrefix` does not work here as intended. So `Replace` is used instead.
		if objectAPI.IsCompressionSupported() {
			part.ETag = strings.Replace(part.ETag, "-1", "", -1) // For compressed multiparts, we append '-1' to part.ETag.
		}
		part.ETag = canonicalizeETag(part.ETag)
		completeParts = append(completeParts, part)
	}
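
	// Note (editorial): canonicalizeETag is expected to normalize client-sent ETags
	// (e.g. strip surrounding double quotes) so the comparison against the stored
	// part ETags is byte-exact regardless of how the client quoted them.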
	completeMultiPartUpload := objectAPI.CompleteMultipartUpload
	if api.CacheAPI() != nil {
		completeMultiPartUpload = api.CacheAPI().CompleteMultipartUpload
	}
	objInfo, err := completeMultiPartUpload(ctx, bucket, object, uploadID, completeParts)
	if err != nil {
		switch oErr := err.(type) {
		case PartTooSmall:
			// Write part too small error.
			writePartSmallErrorResponse(w, r, oErr)
		default:
			// Handle all other generic issues.
			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		}
		return
	}

	// Get object location.
	location := getObjectLocation(r, globalDomainName, bucket, object)
	// Generate complete multipart response.
	response := generateCompleteMultpartUploadResponse(bucket, object, location, objInfo.ETag)
	encodedSuccessResponse := encodeResponse(response)
	if err != nil {
		writeErrorResponse(w, ErrInternalError, r.URL)
		return
	}

	// Set etag.
	w.Header().Set("ETag", "\""+objInfo.ETag+"\"")

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)

	// Get host and port from Request.RemoteAddr.
	host, port, err := net.SplitHostPort(handlers.GetSourceIP(r))
	if err != nil {
		host, port = "", ""
	}

	// Notify object created event.
	sendEvent(eventArgs{
		EventName:  event.ObjectCreatedCompleteMultipartUpload,
		BucketName: bucket,
		Object:     objInfo,
		ReqParams:  extractReqParams(r),
		UserAgent:  r.UserAgent(),
		Host:       host,
		Port:       port,
	})
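
	// Record the object's user-defined metadata as request tags for logging.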
	for k, v := range objInfo.UserDefined {
		logger.GetReqInfo(ctx).SetTags(k, v)
	}

	logger.GetReqInfo(ctx).SetTags("etag", objInfo.ETag)
}

/// Delete objectAPIHandlers

// DeleteObjectHandler - delete an object
func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DeleteObject")
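
	// Defer an audit log entry for this request.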
	defer logger.AuditLog(ctx, r)

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object := vars["object"]
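
	// Fail early if the object layer is not yet initialized.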
	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}
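
	// Verify the caller is authorized for s3:DeleteObject on this resource.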
	if s3Error := checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	// Deny if WORM is enabled.
	if globalWORMEnabled {
		// Not required to check whether the given object exists or not, because
		// DeleteObject is always successful irrespective of object existence.
		writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
		return
	}
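
	// Confirm the bucket exists before deleting: consult the DNS bucket store
	// when configured, otherwise ask the object layer (or cache) for bucket info.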
	if globalDNSConfig != nil {
		_, err := globalDNSConfig.Get(bucket)
		if err != nil {
			if err == dns.ErrNoEntriesFound {
				writeErrorResponse(w, ErrNoSuchBucket, r.URL)
			} else {
				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
			}
			return
		}
	} else {
		getBucketInfo := objectAPI.GetBucketInfo
		if api.CacheAPI() != nil {
			getBucketInfo = api.CacheAPI().GetBucketInfo
		}
		if _, err := getBucketInfo(ctx, bucket); err != nil {
			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
			return
		}
	}

	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html
	// Ignore delete object errors while replying to client, since we are
	// supposed to reply only 204. Additionally log the error for
	// investigation.
	deleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r)
	writeSuccessNoContent(w)
}