// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"bufio"
	"context"
	"encoding/hex"
	"encoding/xml"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/google/uuid"
	"github.com/gorilla/mux"
	miniogo "github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/minio/minio-go/v7/pkg/encrypt"
	"github.com/minio/minio-go/v7/pkg/tags"
	"github.com/minio/minio/cmd/config/dns"
	"github.com/minio/minio/cmd/config/storageclass"
	"github.com/minio/minio/cmd/crypto"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/bucket/lifecycle"
	objectlock "github.com/minio/minio/pkg/bucket/object/lock"
	"github.com/minio/minio/pkg/bucket/replication"
	"github.com/minio/minio/pkg/etag"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/fips"
	"github.com/minio/minio/pkg/handlers"
	"github.com/minio/minio/pkg/hash"
	"github.com/minio/minio/pkg/ioutil"
	"github.com/minio/minio/pkg/kms"
	xnet "github.com/minio/minio/pkg/net"
	"github.com/minio/minio/pkg/s3select"
	"github.com/minio/pkg/bucket/policy"
	iampolicy "github.com/minio/pkg/iam/policy"
	"github.com/minio/sio"
)

// supportedHeadGetReqParams - supported request parameters for GET and HEAD presigned request.
var supportedHeadGetReqParams = map[string]string{
	"response-expires":             xhttp.Expires,
	"response-content-type":        xhttp.ContentType,
	"response-cache-control":       xhttp.CacheControl,
	"response-content-encoding":    xhttp.ContentEncoding,
	"response-content-language":    xhttp.ContentLanguage,
	"response-content-disposition": xhttp.ContentDisposition,
}
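
// For example, a presigned request of the form (illustrative names)
//   GET /mybucket/report.pdf?response-content-disposition=attachment&response-content-type=application/pdf
// asks the server, via the mapping above, to echo Content-Disposition and
// Content-Type back on the response; unsupported response-* parameters are ignored.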

const (
	compressionAlgorithmV1 = "golang/snappy/LZ77"
	compressionAlgorithmV2 = "klauspost/compress/s2"

	// When an upload exceeds encryptBufferThreshold ...
	encryptBufferThreshold = 1 << 20
	// add an input buffer of this size.
	encryptBufferSize = 1 << 20
)
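
// A reading of these constants: 1 << 20 is 1 MiB, so upload bodies larger than
// encryptBufferThreshold are expected to be wrapped in a buffered reader of
// encryptBufferSize before encryption, reducing the number of small reads. The
// wrapping itself happens in the upload path, outside the handlers below.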

// setHeadGetRespHeaders - set any requested parameters as response headers.
func setHeadGetRespHeaders(w http.ResponseWriter, reqParams url.Values) {
	for k, v := range reqParams {
		if header, ok := supportedHeadGetReqParams[strings.ToLower(k)]; ok {
			w.Header()[header] = v
		}
	}
}

// SelectObjectContentHandler - GET Object?select
// ----------
// This implementation of the GET operation retrieves object content based
// on an SQL expression. In the request, along with the SQL expression, you must
// also specify a data serialization format (JSON, CSV) of the object.
func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SelectObject")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	// Fetch object stat info.
	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if crypto.S3.IsRequested(r.Header) || crypto.S3KMS.IsRequested(r.Header) { // If SSE-S3 or SSE-KMS present -> AWS fails with undefined error
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL, guessIsBrowserReq(r))
		return
	}

	if _, ok := crypto.IsRequested(r.Header); ok && !objectAPI.IsEncryptionSupported() {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL, guessIsBrowserReq(r))
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// get gateway encryption options
	opts, err := getOpts(ctx, r, bucket, object)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	getObjectInfo := objectAPI.GetObjectInfo
	if api.CacheAPI() != nil {
		getObjectInfo = api.CacheAPI().GetObjectInfo
	}

	// Check for auth type to return an S3 compatible error:
	// the auth type decides which error to return (NoSuchKey vs AccessDenied).
	if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
		if getRequestAuthType(r) == authTypeAnonymous {
			// As per "Permission" section in
			// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
			// If the object you request does not exist,
			// the error Amazon S3 returns depends on
			// whether you also have the s3:ListBucket
			// permission.
			// * If you have the s3:ListBucket permission
			//   on the bucket, Amazon S3 will return an
			//   HTTP status code 404 ("no such key")
			//   error.
			// * If you don't have the s3:ListBucket
			//   permission, Amazon S3 will return an HTTP
			//   status code 403 ("access denied") error.
			if globalPolicySys.IsAllowed(policy.Args{
				Action:          policy.ListBucketAction,
				BucketName:      bucket,
				ConditionValues: getConditionValues(r, "", "", nil),
				IsOwner:         false,
			}) {
				_, err = getObjectInfo(ctx, bucket, object, opts)
				if toAPIError(ctx, err).Code == "NoSuchKey" {
					s3Error = ErrNoSuchKey
				}
			}
		}
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Get request range.
	rangeHeader := r.Header.Get(xhttp.Range)
	if rangeHeader != "" {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrUnsupportedRangeHeader), r.URL, guessIsBrowserReq(r))
		return
	}

	if r.ContentLength <= 0 {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEmptyRequestBody), r.URL, guessIsBrowserReq(r))
		return
	}

	getObjectNInfo := objectAPI.GetObjectNInfo
	if api.CacheAPI() != nil {
		getObjectNInfo = api.CacheAPI().GetObjectNInfo
	}

	getObject := func(offset, length int64) (rc io.ReadCloser, err error) {
		isSuffixLength := false
		if offset < 0 {
			isSuffixLength = true
		}

		if length > 0 {
			length--
		}

		rs := &HTTPRangeSpec{
			IsSuffixLength: isSuffixLength,
			Start:          offset,
			End:            offset + length,
		}

		return getObjectNInfo(ctx, bucket, object, rs, r.Header, readLock, opts)
	}
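
	// A sketch of the closure's range math: getObject(100, 50) builds a spec for
	// bytes 100 through 149 (length is decremented because HTTPRangeSpec.End is
	// inclusive), while a negative offset switches to a suffix-length request.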

	objInfo, err := getObjectInfo(ctx, bucket, object, opts)
	if err != nil {
		if globalBucketVersioningSys.Enabled(bucket) {
			// Versioning is enabled, so the object was quite possibly replaced by a
			// delete marker; if so, set these headers the way AWS S3 does.
			if objInfo.VersionID != "" && objInfo.DeleteMarker {
				w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
				w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(objInfo.DeleteMarker)}
			}
		}
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// filter object lock metadata if permission does not permit
	getRetPerms := checkRequestAuthType(ctx, r, policy.GetObjectRetentionAction, bucket, object)
	legalHoldPerms := checkRequestAuthType(ctx, r, policy.GetObjectLegalHoldAction, bucket, object)

	// filter object lock metadata if permission does not permit
	objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)

	if objectAPI.IsEncryptionSupported() {
		if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	s3Select, err := s3select.NewS3Select(r.Body)
	if err != nil {
		if serr, ok := err.(s3select.SelectError); ok {
			encodedErrorResponse := encodeResponse(APIErrorResponse{
				Code:       serr.ErrorCode(),
				Message:    serr.ErrorMessage(),
				BucketName: bucket,
				Key:        object,
				Resource:   r.URL.Path,
				RequestID:  w.Header().Get(xhttp.AmzRequestID),
				HostID:     globalDeploymentID,
			})
			writeResponse(w, serr.HTTPStatusCode(), encodedErrorResponse, mimeXML)
		} else {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		}
		return
	}
	defer s3Select.Close()
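
	// The body parsed above is the S3 SelectObjectContent XML request, roughly:
	//   <SelectObjectContentRequest>
	//     <Expression>SELECT s.name FROM S3Object s WHERE s.age > 30</Expression>
	//     <ExpressionType>SQL</ExpressionType>
	//     <InputSerialization><CSV><FileHeaderInfo>USE</FileHeaderInfo></CSV></InputSerialization>
	//     <OutputSerialization><JSON/></OutputSerialization>
	//   </SelectObjectContentRequest>
	// (shape shown for illustration; see the s3select package for the exact schema).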

	if err = s3Select.Open(getObject); err != nil {
		if serr, ok := err.(s3select.SelectError); ok {
			encodedErrorResponse := encodeResponse(APIErrorResponse{
				Code:       serr.ErrorCode(),
				Message:    serr.ErrorMessage(),
				BucketName: bucket,
				Key:        object,
				Resource:   r.URL.Path,
				RequestID:  w.Header().Get(xhttp.AmzRequestID),
				HostID:     globalDeploymentID,
			})
			writeResponse(w, serr.HTTPStatusCode(), encodedErrorResponse, mimeXML)
		} else {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		}
		return
	}

	// Set encryption response headers
	if objectAPI.IsEncryptionSupported() {
		switch kind, _ := crypto.IsEncrypted(objInfo.UserDefined); kind {
		case crypto.S3:
			w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
		case crypto.S3KMS:
			w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
			w.Header().Set(xhttp.AmzServerSideEncryptionKmsID, objInfo.UserDefined[crypto.MetaKeyID])
			if kmsCtx, ok := objInfo.UserDefined[crypto.MetaContext]; ok {
				w.Header().Set(xhttp.AmzServerSideEncryptionKmsContext, kmsCtx)
			}
		case crypto.SSEC:
			// Validate the SSE-C Key set in the header.
			if _, err = crypto.SSEC.UnsealObjectKey(r.Header, objInfo.UserDefined, bucket, object); err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
		}
	}

	s3Select.Evaluate(w)

	// Notify object accessed via a GET request.
	sendEvent(eventArgs{
		EventName:    event.ObjectAccessedGet,
		BucketName:   bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})
}

// GetObjectHandler - GET Object
// ----------
// This implementation of the GET operation retrieves an object. To use GET,
// you must have READ access to the object.
func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetObject")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if crypto.S3.IsRequested(r.Header) || crypto.S3KMS.IsRequested(r.Header) { // If SSE-S3 or SSE-KMS present -> AWS fails with undefined error
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL, guessIsBrowserReq(r))
		return
	}
	if _, ok := crypto.IsRequested(r.Header); !objectAPI.IsEncryptionSupported() && ok {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL, guessIsBrowserReq(r))
		return
	}
	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// get gateway encryption options
	opts, err := getOpts(ctx, r, bucket, object)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check for auth type to return an S3 compatible error:
	// the auth type decides which error to return (NoSuchKey vs AccessDenied).
	if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
		if getRequestAuthType(r) == authTypeAnonymous {
			// As per "Permission" section in
			// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
			// If the object you request does not exist,
			// the error Amazon S3 returns depends on
			// whether you also have the s3:ListBucket
			// permission.
			// * If you have the s3:ListBucket permission
			//   on the bucket, Amazon S3 will return an
			//   HTTP status code 404 ("no such key")
			//   error.
			// * If you don't have the s3:ListBucket
			//   permission, Amazon S3 will return an HTTP
			//   status code 403 ("access denied") error.
			if globalPolicySys.IsAllowed(policy.Args{
				Action:          policy.ListBucketAction,
				BucketName:      bucket,
				ConditionValues: getConditionValues(r, "", "", nil),
				IsOwner:         false,
			}) {
				getObjectInfo := objectAPI.GetObjectInfo
				if api.CacheAPI() != nil {
					getObjectInfo = api.CacheAPI().GetObjectInfo
				}

				_, err = getObjectInfo(ctx, bucket, object, opts)
				if toAPIError(ctx, err).Code == "NoSuchKey" {
					s3Error = ErrNoSuchKey
				}
			}
		}
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	getObjectNInfo := objectAPI.GetObjectNInfo
	if api.CacheAPI() != nil {
		getObjectNInfo = api.CacheAPI().GetObjectNInfo
	}

	// Get request range.
	var rs *HTTPRangeSpec
	var rangeErr error
	rangeHeader := r.Header.Get(xhttp.Range)
	if rangeHeader != "" {
		rs, rangeErr = parseRequestRangeSpec(rangeHeader)
		// Handle only errInvalidRange. Ignore other
		// parse errors and treat the request as a
		// regular GET, like Amazon S3.
		if rangeErr == errInvalidRange {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRange), r.URL, guessIsBrowserReq(r))
			return
		}
		if rangeErr != nil {
			logger.LogIf(ctx, rangeErr, logger.Application)
		}
	}
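
	// Behavior of the block above: "bytes=0-99" selects the first 100 bytes and
	// "bytes=-100" the last 100; a header that fails to parse with anything other
	// than errInvalidRange is only logged and the object is served in full, as S3 does.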

	// Both 'bytes' and 'partNumber' cannot be specified at the same time
	if rs != nil && opts.PartNumber > 0 {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRangePartNumber), r.URL, guessIsBrowserReq(r))
		return
	}

	// Validate pre-conditions if any.
	opts.CheckPrecondFn = func(oi ObjectInfo) bool {
		if objectAPI.IsEncryptionSupported() {
			if _, err := DecryptObjectInfo(&oi, r); err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return true
			}
		}

		return checkPreconditions(ctx, w, r, oi, opts)
	}
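
	// The callback above returns true to signal "a response was already written,
	// stop processing"; checkPreconditions evaluates If-Match, If-None-Match,
	// If-Modified-Since and If-Unmodified-Since against the object's metadata.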

	gr, err := getObjectNInfo(ctx, bucket, object, rs, r.Header, readLock, opts)
	if err != nil {
		var (
			reader *GetObjectReader
			proxy  bool
		)
		if isProxyable(ctx, bucket) {
			// proxy to replication target if active-active replication is in place.
			reader, proxy = proxyGetToReplicationTarget(ctx, bucket, object, rs, r.Header, opts)
			if reader != nil && proxy {
				gr = reader
			}
		}
		if reader == nil || !proxy {
			if isErrPreconditionFailed(err) {
				return
			}
			if globalBucketVersioningSys.Enabled(bucket) && gr != nil {
				if !gr.ObjInfo.VersionPurgeStatus.Empty() {
					// Shows the replication status of a permanent delete of a version
					w.Header()[xhttp.MinIODeleteReplicationStatus] = []string{string(gr.ObjInfo.VersionPurgeStatus)}
				}
				if !gr.ObjInfo.ReplicationStatus.Empty() && gr.ObjInfo.DeleteMarker {
					w.Header()[xhttp.MinIODeleteMarkerReplicationStatus] = []string{string(gr.ObjInfo.ReplicationStatus)}
				}

				// Versioning is enabled, so the object was quite possibly replaced by a
				// delete marker; if so, set these headers the way AWS S3 does.
				if gr.ObjInfo.VersionID != "" && gr.ObjInfo.DeleteMarker {
					w.Header()[xhttp.AmzVersionID] = []string{gr.ObjInfo.VersionID}
					w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(gr.ObjInfo.DeleteMarker)}
				}
			}
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
	}
	defer gr.Close()

	objInfo := gr.ObjInfo

	// Automatically remove the object/version if an expiry lifecycle rule can be applied
	if lc, err := globalLifecycleSys.Get(bucket); err == nil {
		action := evalActionFromLifecycle(ctx, *lc, objInfo, false)
		if action == lifecycle.DeleteAction || action == lifecycle.DeleteVersionAction {
			globalExpiryState.queueExpiryTask(objInfo, action == lifecycle.DeleteVersionAction)
			writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrNoSuchKey))
			return
		}
	}
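
	// The block above treats an object whose expiry is already due as gone: the
	// deletion is queued for the background expiry workers and the client gets a
	// 404, mirroring how S3 hides expired objects before the actual removal runs.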

	// filter object lock metadata if permission does not permit
	getRetPerms := checkRequestAuthType(ctx, r, policy.GetObjectRetentionAction, bucket, object)
	legalHoldPerms := checkRequestAuthType(ctx, r, policy.GetObjectLegalHoldAction, bucket, object)

	// filter object lock metadata if permission does not permit
	objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)

	// Set encryption response headers
	if objectAPI.IsEncryptionSupported() {
		switch kind, _ := crypto.IsEncrypted(objInfo.UserDefined); kind {
		case crypto.S3:
			w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
		case crypto.S3KMS:
			w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
			w.Header().Set(xhttp.AmzServerSideEncryptionKmsID, objInfo.UserDefined[crypto.MetaKeyID])
			if kmsCtx, ok := objInfo.UserDefined[crypto.MetaContext]; ok {
				w.Header().Set(xhttp.AmzServerSideEncryptionKmsContext, kmsCtx)
			}
		case crypto.SSEC:
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
		}
	}

	if err = setObjectHeaders(w, objInfo, rs, opts); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Set Parts Count Header
	if opts.PartNumber > 0 && len(objInfo.Parts) > 0 {
		setPartsCountHeaders(w, objInfo)
	}

	setHeadGetRespHeaders(w, r.URL.Query())

	statusCodeWritten := false
	httpWriter := ioutil.WriteOnClose(w)
	if rs != nil || opts.PartNumber > 0 {
		statusCodeWritten = true
		w.WriteHeader(http.StatusPartialContent)
	}

	// Write object content to response body
	if _, err = io.Copy(httpWriter, gr); err != nil {
		if !httpWriter.HasWritten() && !statusCodeWritten {
			// write error response only if no data or headers have been written to client yet
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients
			logger.LogIf(ctx, fmt.Errorf("Unable to write all the data to client %w", err))
		}
		return
	}

	if err = httpWriter.Close(); err != nil {
		if !httpWriter.HasWritten() && !statusCodeWritten { // write error response only if no data or headers have been written to client yet
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients
			logger.LogIf(ctx, fmt.Errorf("Unable to write all the data to client %w", err))
		}
		return
	}

	// Notify object accessed via a GET request.
	sendEvent(eventArgs{
		EventName:    event.ObjectAccessedGet,
		BucketName:   bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})
}

// HeadObjectHandler - HEAD Object
// -----------
// The HEAD operation retrieves metadata from an object without returning the object itself.
func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "HeadObject")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrServerNotInitialized))
		return
	}

	if crypto.S3.IsRequested(r.Header) || crypto.S3KMS.IsRequested(r.Header) { // If SSE-S3 or SSE-KMS present -> AWS fails with undefined error
		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrBadRequest))
		return
	}
	if _, ok := crypto.IsRequested(r.Header); !objectAPI.IsEncryptionSupported() && ok {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL, guessIsBrowserReq(r))
		return
	}
	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	getObjectInfo := objectAPI.GetObjectInfo
	if api.CacheAPI() != nil {
		getObjectInfo = api.CacheAPI().GetObjectInfo
	}

	opts, err := getOpts(ctx, r, bucket, object)
	if err != nil {
		writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
		if getRequestAuthType(r) == authTypeAnonymous {
			// As per "Permission" section in
			// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html
			// If the object you request does not exist,
			// the error Amazon S3 returns depends on
			// whether you also have the s3:ListBucket
			// permission.
			// * If you have the s3:ListBucket permission
			//   on the bucket, Amazon S3 will return an
			//   HTTP status code 404 ("no such key")
			//   error.
			// * If you don't have the s3:ListBucket
			//   permission, Amazon S3 will return an HTTP
			//   status code 403 ("access denied") error.
			if globalPolicySys.IsAllowed(policy.Args{
				Action:          policy.ListBucketAction,
				BucketName:      bucket,
				ConditionValues: getConditionValues(r, "", "", nil),
				IsOwner:         false,
			}) {
				_, err = getObjectInfo(ctx, bucket, object, opts)
				if toAPIError(ctx, err).Code == "NoSuchKey" {
					s3Error = ErrNoSuchKey
				}
			}
		}
		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
		return
	}

	objInfo, err := getObjectInfo(ctx, bucket, object, opts)
	if err != nil {
		var (
			proxy bool
			perr  error
			oi    ObjectInfo
		)
		// proxy HEAD to replication target if active-active replication configured on bucket
		if isProxyable(ctx, bucket) {
			oi, proxy, perr = proxyHeadToReplicationTarget(ctx, bucket, object, opts)
			if proxy && perr == nil {
				objInfo = oi
			}
		}
		if !proxy || perr != nil {
			if globalBucketVersioningSys.Enabled(bucket) {
				if !objInfo.VersionPurgeStatus.Empty() {
					// Shows the replication status of a permanent delete of a version
					w.Header()[xhttp.MinIODeleteReplicationStatus] = []string{string(objInfo.VersionPurgeStatus)}
				}
				if !objInfo.ReplicationStatus.Empty() && objInfo.DeleteMarker {
					w.Header()[xhttp.MinIODeleteMarkerReplicationStatus] = []string{string(objInfo.ReplicationStatus)}
				}
				// Versioning is enabled, so the object was quite possibly replaced by a
				// delete marker; if so, set these headers the way AWS S3 does.
				if objInfo.VersionID != "" && objInfo.DeleteMarker {
					w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
					w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(objInfo.DeleteMarker)}
				}
			}
			writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
			return
		}
	}

	// Automatically remove the object/version if an expiry lifecycle rule can be applied
	if lc, err := globalLifecycleSys.Get(bucket); err == nil {
		action := evalActionFromLifecycle(ctx, *lc, objInfo, false)
		if action == lifecycle.DeleteAction || action == lifecycle.DeleteVersionAction {
			globalExpiryState.queueExpiryTask(objInfo, action == lifecycle.DeleteVersionAction)
			writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrNoSuchKey))
			return
		}
	}

	// filter object lock metadata if permission does not permit
	getRetPerms := checkRequestAuthType(ctx, r, policy.GetObjectRetentionAction, bucket, object)
	legalHoldPerms := checkRequestAuthType(ctx, r, policy.GetObjectLegalHoldAction, bucket, object)

	// filter object lock metadata if permission does not permit
	objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)

	if objectAPI.IsEncryptionSupported() {
		if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
			writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
			return
		}
	}

	// Validate pre-conditions if any.
	if checkPreconditions(ctx, w, r, objInfo, opts) {
		return
	}

	// Get request range.
	var rs *HTTPRangeSpec
	rangeHeader := r.Header.Get(xhttp.Range)
	if rangeHeader != "" {
		if rs, err = parseRequestRangeSpec(rangeHeader); err != nil {
			// Handle only errInvalidRange. Ignore other
			// parse errors and treat the request as a
			// regular HEAD, like Amazon S3.
			if err == errInvalidRange {
				writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrInvalidRange))
				return
			}

			logger.LogIf(ctx, err)
		}
	}

	// Both 'bytes' and 'partNumber' cannot be specified at the same time
	if rs != nil && opts.PartNumber > 0 {
		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrInvalidRangePartNumber))
		return
	}
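
	// A HEAD with ?partNumber=N reports on a single part of a multipart upload,
	// which is why it cannot be combined with a Range header; the success path
	// below answers it with 206 Partial Content and the parts-count headers.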

	// Set encryption response headers
	if objectAPI.IsEncryptionSupported() {
		switch kind, _ := crypto.IsEncrypted(objInfo.UserDefined); kind {
		case crypto.S3:
			w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
		case crypto.S3KMS:
			w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
			w.Header().Set(xhttp.AmzServerSideEncryptionKmsID, objInfo.UserDefined[crypto.MetaKeyID])
			if kmsCtx, ok := objInfo.UserDefined[crypto.MetaContext]; ok {
				w.Header().Set(xhttp.AmzServerSideEncryptionKmsContext, kmsCtx)
			}
		case crypto.SSEC:
			// Validate the SSE-C Key set in the header.
			if _, err = crypto.SSEC.UnsealObjectKey(r.Header, objInfo.UserDefined, bucket, object); err != nil {
				writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
				return
			}
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
		}
	}

	// Set standard object headers.
	if err = setObjectHeaders(w, objInfo, rs, opts); err != nil {
		writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
		return
	}

	// Set Parts Count Header
	if opts.PartNumber > 0 && len(objInfo.Parts) > 0 {
		setPartsCountHeaders(w, objInfo)
	}

	// Set any additional requested response headers.
	setHeadGetRespHeaders(w, r.URL.Query())

	// Successful response.
	if rs != nil || opts.PartNumber > 0 {
		w.WriteHeader(http.StatusPartialContent)
	} else {
		w.WriteHeader(http.StatusOK)
	}

	// Notify object accessed via a HEAD request.
	sendEvent(eventArgs{
		EventName:    event.ObjectAccessedHead,
		BucketName:   bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})
}

// Extract metadata relevant for a CopyObject operation based on conditional
// header values specified in X-Amz-Metadata-Directive.
func getCpObjMetadataFromHeader(ctx context.Context, r *http.Request, userMeta map[string]string) (map[string]string, error) {
	// Make a copy of the supplied metadata to avoid
	// changing the original one.
	defaultMeta := make(map[string]string, len(userMeta))
	for k, v := range userMeta {
		defaultMeta[k] = v
	}

	// remove SSE Headers from source info
	crypto.RemoveSSEHeaders(defaultMeta)

	// Storage class is special: it can be replaced regardless of the
	// metadata directive, and if set it should be preserved and copied
	// to the destination metadata.
	sc := r.Header.Get(xhttp.AmzStorageClass)
	if sc == "" {
		sc = r.URL.Query().Get(xhttp.AmzStorageClass)
	}

	// if x-amz-metadata-directive says REPLACE then
	// we extract metadata from the input headers.
	if isDirectiveReplace(r.Header.Get(xhttp.AmzMetadataDirective)) {
		emetadata, err := extractMetadata(ctx, r)
		if err != nil {
			return nil, err
		}
		if sc != "" {
			emetadata[xhttp.AmzStorageClass] = sc
		}
		return emetadata, nil
	}

	if sc != "" {
		defaultMeta[xhttp.AmzStorageClass] = sc
	}

	// if x-amz-metadata-directive says COPY then we
	// return the default metadata.
	if isDirectiveCopy(r.Header.Get(xhttp.AmzMetadataDirective)) {
		return defaultMeta, nil
	}

	// COPY is the default behavior if no x-amz-metadata-directive is set.
	return defaultMeta, nil
}
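
// For example, a CopyObject request carrying "X-Amz-Metadata-Directive: REPLACE"
// ends up with only the freshly supplied headers (plus any storage class), while
// the default COPY directive keeps the source object's metadata untouched.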

// getRemoteInstanceTransport contains a singleton roundtripper.
var (
	getRemoteInstanceTransport     *http.Transport
	getRemoteInstanceTransportOnce sync.Once
)
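
// getRemoteInstanceTransportOnce presumably guards lazy, one-time construction of
// the shared transport (the usual sync.Once pattern), so federation proxy calls
// reuse a single connection pool; the initialization site is outside this excerpt.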

// Returns a minio-go Client configured to access remote host described by destDNSRecord
// Applicable only in a federated deployment
var getRemoteInstanceClient = func(r *http.Request, host string) (*miniogo.Core, error) {
	if newObjectLayerFn() == nil {
		return nil, errServerNotInitialized
	}

	cred := getReqAccessCred(r, globalServerRegion)
	// In a federated deployment, all the instances share config files
	// and hence are expected to have the same credentials.
	core, err := miniogo.NewCore(host, &miniogo.Options{
		Creds:     credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, ""),
		Secure:    globalIsTLS,
		Transport: getRemoteInstanceTransport,
	})
	if err != nil {
		return nil, err
	}
	return core, nil
}

// Check if the destination bucket is on a remote site, this code only gets executed
// when federation is enabled, ie when globalDNSConfig is non 'nil'.
//
// This function is similar to isRemoteCallRequired but specifically for COPY object API
// if destination and source are same we do not need to check for destination bucket
// to exist locally.
func isRemoteCopyRequired(ctx context.Context, srcBucket, dstBucket string, objAPI ObjectLayer) bool {
	if srcBucket == dstBucket {
		return false
	}
	return isRemoteCallRequired(ctx, dstBucket, objAPI)
}

// Check if the bucket is on a remote site, this code only gets executed when federation is enabled.
func isRemoteCallRequired(ctx context.Context, bucket string, objAPI ObjectLayer) bool {
	if globalDNSConfig == nil {
		return false
	}
	if globalBucketFederation {
		_, err := objAPI.GetBucketInfo(ctx, bucket)
		return err == toObjectErr(errVolumeNotFound, bucket)
	}
	return false
}
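
// In a federated deployment a bucket may live on another cluster: GetBucketInfo
// failing locally with "volume not found" is taken as the signal that the call
// has to be proxied to the bucket's DNS target rather than served here.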

// CopyObjectHandler - Copy Object
// ----------
// This implementation of the PUT operation adds an object to a bucket
// while reading the object from another source.
// Notice: The S3 client can send secret keys in headers for encryption related jobs,
// the handler should ensure to remove these keys before sending them to the object layer.
// Currently these keys are:
//   - X-Amz-Server-Side-Encryption-Customer-Key
//   - X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key
func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "CopyObject")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}
	if _, ok := crypto.IsRequested(r.Header); ok {
		if globalIsGateway {
			if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() {
				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
				return
			}
		} else {
			if !objectAPI.IsEncryptionSupported() {
				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
				return
			}
		}
	}

	vars := mux.Vars(r)
	dstBucket := vars["bucket"]
	dstObject, err := unescapePath(vars["object"])
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, dstBucket, dstObject); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Read escaped copy source path to check for parameters.
	cpSrcPath := r.Header.Get(xhttp.AmzCopySource)
	var vid string
	if u, err := url.Parse(cpSrcPath); err == nil {
		vid = strings.TrimSpace(u.Query().Get(xhttp.VersionID))
		// Note that url.Parse does the unescaping
		cpSrcPath = u.Path
	}

	srcBucket, srcObject := path2BucketObject(cpSrcPath)
	// If source object is empty or bucket is empty, reply back invalid copy source.
	if srcObject == "" || srcBucket == "" {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL, guessIsBrowserReq(r))
		return
	}

	if vid != "" && vid != nullVersionID {
		_, err := uuid.Parse(vid)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, VersionNotFound{
				Bucket:    srcBucket,
				Object:    srcObject,
				VersionID: vid,
			}), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, srcBucket, srcObject); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if metadata directive is valid.
	if !isDirectiveValid(r.Header.Get(xhttp.AmzMetadataDirective)) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMetadataDirective), r.URL, guessIsBrowserReq(r))
		return
	}

	// check if tag directive is valid
	if !isDirectiveValid(r.Header.Get(xhttp.AmzTagDirective)) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidTagDirective), r.URL, guessIsBrowserReq(r))
		return
	}

	// Validate storage class metadata if present
	dstSc := r.Header.Get(xhttp.AmzStorageClass)
	if dstSc != "" && !storageclass.IsValid(dstSc) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidStorageClass), r.URL, guessIsBrowserReq(r))
		return
	}
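
	// Apply the bucket's default encryption configuration (and the global
	// auto-encryption setting, if enabled) to the request headers, so the copy
	// target is encrypted even when the client did not explicitly ask for SSE.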
	// Check if bucket encryption is enabled
	sseConfig, _ := globalBucketSSEConfigSys.Get(dstBucket)
	sseConfig.Apply(r.Header, globalAutoEncryption)

	var srcOpts, dstOpts ObjectOptions
	srcOpts, err = copySrcOpts(ctx, r, srcBucket, srcObject)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	srcOpts.VersionID = vid

	// convert copy src encryption options for GET calls
	var getOpts = ObjectOptions{VersionID: srcOpts.VersionID, Versioned: srcOpts.Versioned}
	getSSE := encrypt.SSE(srcOpts.ServerSideEncryption)
	if getSSE != srcOpts.ServerSideEncryption {
		getOpts.ServerSideEncryption = getSSE
	}

	dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, nil)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))

	getObjectNInfo := objectAPI.GetObjectNInfo
	if api.CacheAPI() != nil {
		getObjectNInfo = api.CacheAPI().GetObjectNInfo
	}
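
	// checkCopyPrecondFn lets GetObjectNInfo evaluate the
	// x-amz-copy-source-if-* preconditions against the (decrypted) source
	// ObjectInfo before any data is streamed; returning true means the error
	// response has already been written and the copy is aborted.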
	checkCopyPrecondFn := func(o ObjectInfo) bool {
		if objectAPI.IsEncryptionSupported() {
			if _, err := DecryptObjectInfo(&o, r); err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return true
			}
		}
		return checkCopyObjectPreconditions(ctx, w, r, o)
	}
	getOpts.CheckPrecondFn = checkCopyPrecondFn

	// FIXME: a possible race exists between a parallel
	// GetObject v/s CopyObject with metadata updates, ideally
	// we should be holding write lock here but it is not
	// possible due to other constraints such as knowing
	// the type of source content etc.
	lock := noLock
	if !cpSrcDstSame {
		lock = readLock
	}

	var rs *HTTPRangeSpec
	gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, lock, getOpts)
	if err != nil {
		if isErrPreconditionFailed(err) {
			return
		}
		if globalBucketVersioningSys.Enabled(srcBucket) && gr != nil {
			// Versioning is enabled, so the object may have been deleted and
			// replaced by a delete marker; if so, set the version headers the
			// way AWS S3 does.
			if gr.ObjInfo.VersionID != "" && gr.ObjInfo.DeleteMarker {
				w.Header()[xhttp.AmzVersionID] = []string{gr.ObjInfo.VersionID}
				w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(gr.ObjInfo.DeleteMarker)}
			}
		}
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	defer gr.Close()
	srcInfo := gr.ObjInfo

	// Maximum upload size for an object in a single CopyObject operation.
	if isMaxObjectSize(srcInfo.Size) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL, guessIsBrowserReq(r))
		return
	}

	// We have to copy metadata only if source and destination are the same;
	// this changes for encryption, which can be observed below.
	if cpSrcDstSame {
		srcInfo.metadataOnly = true
	}

	var chStorageClass bool
	if dstSc != "" {
		chStorageClass = true
		srcInfo.metadataOnly = false
	}

	var reader io.Reader = gr

	// Set the actual size to the compressed/decrypted size if encrypted.
	actualSize, err := srcInfo.GetActualSize()
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	length := actualSize

	if !cpSrcDstSame {
		if err := enforceBucketQuota(ctx, dstBucket, actualSize); err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	// Check if either the source is encrypted or the destination will be encrypted.
	_, objectEncryption := crypto.IsRequested(r.Header)
	objectEncryption = objectEncryption || crypto.IsSourceEncrypted(srcInfo.UserDefined)
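
	// Destination-side compression is attempted only when the object layer
	// supports it, the destination is compressible, the copy stays on this
	// cluster, source and destination differ, and no encryption is involved;
	// otherwise the object bytes are streamed through unchanged.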
	var compressMetadata map[string]string
	// No need to compress for remote etcd calls
	// Pass the decompressed stream to such calls.
	isDstCompressed := objectAPI.IsCompressionSupported() &&
		isCompressible(r.Header, dstObject) &&
		!isRemoteCopyRequired(ctx, srcBucket, dstBucket, objectAPI) && !cpSrcDstSame && !objectEncryption
	if isDstCompressed {
		compressMetadata = make(map[string]string, 2)
		// Preserving the compression metadata.
		compressMetadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
		compressMetadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(actualSize, 10)

		reader = etag.NewReader(reader, nil)
		s2c := newS2CompressReader(reader, actualSize)
		defer s2c.Close()
		reader = etag.Wrap(s2c, reader)
		length = -1
	} else {
		delete(srcInfo.UserDefined, ReservedMetadataPrefix+"compression")
		delete(srcInfo.UserDefined, ReservedMetadataPrefix+"actual-size")
		reader = gr
	}
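
	// The reader assembled above follows the layering described by the etag
	// package, roughly Compress(ETag(content)) here and, further below,
	// possibly Encrypt(...) on top: the innermost etag.Reader records the ETag
	// of the original bytes and etag.Wrap propagates it through the outer
	// wrappers. The hash.Reader below does no md5/sha256 verification
	// (presumably the source content was verified when originally uploaded);
	// length is -1 when compression makes the final size unknown.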
	srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualSize)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	pReader := NewPutObjReader(srcInfo.Reader)

	// Handle encryption
	var encMetadata = make(map[string]string)
	if objectAPI.IsEncryptionSupported() {
		// Encryption parameters not applicable for this object.
		if _, ok := crypto.IsEncrypted(srcInfo.UserDefined); !ok && crypto.SSECopy.IsRequested(r.Header) {
			writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL, guessIsBrowserReq(r))
			return
		}
		// Encryption parameters not present for this object.
		if crypto.SSEC.IsEncrypted(srcInfo.UserDefined) && !crypto.SSECopy.IsRequested(r.Header) {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidSSECustomerAlgorithm), r.URL, guessIsBrowserReq(r))
			return
		}

		var oldKey, newKey []byte
		var newKeyID string
		var kmsCtx kms.Context
		var objEncKey crypto.ObjectKey
		sseCopyKMS := crypto.S3KMS.IsEncrypted(srcInfo.UserDefined)
		sseCopyS3 := crypto.S3.IsEncrypted(srcInfo.UserDefined)
		sseCopyC := crypto.SSEC.IsEncrypted(srcInfo.UserDefined) && crypto.SSECopy.IsRequested(r.Header)
		sseC := crypto.SSEC.IsRequested(r.Header)
		sseS3 := crypto.S3.IsRequested(r.Header)
		sseKMS := crypto.S3KMS.IsRequested(r.Header)

		isSourceEncrypted := sseCopyC || sseCopyS3 || sseCopyKMS
		isTargetEncrypted := sseC || sseS3 || sseKMS
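
		// The sseCopy* flags describe how the source object is currently
		// encrypted, while sseC/sseS3/sseKMS describe what the client requested
		// for the destination. Their combination selects between a pure SSE-C
		// key rotation (same object, SSE-C on both sides) and a full
		// decrypt/re-encrypt of the copied data below.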
		if sseC {
			newKey, err = ParseSSECustomerRequest(r)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
		}
		if crypto.S3KMS.IsRequested(r.Header) {
			newKeyID, kmsCtx, err = crypto.S3KMS.ParseHTTP(r.Header)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
		}

		// If src == dst and either
		// - the object is encrypted using SSE-C and two different SSE-C keys are present
		// - the object is encrypted using SSE-S3 and the SSE-S3 header is present
		// - the object storage class is not changing
		// then execute a key rotation.
		if cpSrcDstSame && (sseCopyC && sseC) && !chStorageClass {
			oldKey, err = ParseSSECopyCustomerRequest(r.Header, srcInfo.UserDefined)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}

			for k, v := range srcInfo.UserDefined {
				if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
					encMetadata[k] = v
				}
			}

			if err = rotateKey(oldKey, newKeyID, newKey, srcBucket, srcObject, encMetadata, kmsCtx); err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}

			// Since we are rotating the keys, make sure to update the metadata.
			srcInfo.metadataOnly = true
			srcInfo.keyRotation = true
		} else {
			if isSourceEncrypted || isTargetEncrypted {
				// We are not merely copying metadata: at this point a new
				// object is created, even if source and destination are the
				// same object.
				if !srcInfo.keyRotation {
					srcInfo.metadataOnly = false
				}
			}

			// Calculate the size of the target object
			var targetSize int64
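
			// The target size feeds the hash.Reader below: -1 when the
			// destination is compressed (final size unknown), the plain size
			// when neither side is encrypted, and EncryptedSize/DecryptedSize
			// when the copy crosses an encryption boundary.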
			switch {
			case isDstCompressed:
				targetSize = -1
			case !isSourceEncrypted && !isTargetEncrypted:
				targetSize, _ = srcInfo.GetActualSize()
			case isSourceEncrypted && isTargetEncrypted:
				objInfo := ObjectInfo{Size: actualSize}
				targetSize = objInfo.EncryptedSize()
			case !isSourceEncrypted && isTargetEncrypted:
				targetSize = srcInfo.EncryptedSize()
			case isSourceEncrypted && !isTargetEncrypted:
				targetSize, _ = srcInfo.DecryptedSize()
			}

			if isTargetEncrypted {
				var encReader io.Reader
				kind, _ := crypto.IsRequested(r.Header)
				encReader, objEncKey, err = newEncryptReader(srcInfo.Reader, kind, newKeyID, newKey, dstBucket, dstObject, encMetadata, kmsCtx)
				if err != nil {
					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
					return
				}
				reader = etag.Wrap(encReader, srcInfo.Reader)
			}

			if isSourceEncrypted {
				// Remove all source encrypted related metadata to
				// avoid copying them in target object.
				crypto.RemoveInternalEntries(srcInfo.UserDefined)
			}

			// do not try to verify encrypted content
			srcInfo.Reader, err = hash.NewReader(reader, targetSize, "", "", actualSize)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}

			if isTargetEncrypted {
				pReader, err = pReader.WithEncryption(srcInfo.Reader, &objEncKey)
				if err != nil {
					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
					return
				}
			}
		}
	}

	srcInfo.PutObjReader = pReader

	srcInfo.UserDefined, err = getCpObjMetadataFromHeader(ctx, r, srcInfo.UserDefined)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	objTags := srcInfo.UserTags
	// If x-amz-tagging-directive header is REPLACE, get passed tags.
	if isDirectiveReplace(r.Header.Get(xhttp.AmzTagDirective)) {
		objTags = r.Header.Get(xhttp.AmzObjectTagging)
		if _, err := tags.ParseObjectTags(objTags); err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		if globalIsGateway {
			srcInfo.UserDefined[xhttp.AmzTagDirective] = replaceDirective
		}
	}

	if objTags != "" {
		srcInfo.UserDefined[xhttp.AmzObjectTagging] = objTags
	}
	srcInfo.UserDefined = filterReplicationStatusMetadata(srcInfo.UserDefined)

	srcInfo.UserDefined = objectlock.FilterObjectLockMetadata(srcInfo.UserDefined, true, true)
	retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), dstBucket, dstObject, r, iampolicy.PutObjectRetentionAction)
	holdPerms := isPutActionAllowed(ctx, getRequestAuthType(r), dstBucket, dstObject, r, iampolicy.PutObjectLegalHoldAction)
	getObjectInfo := objectAPI.GetObjectInfo
	if api.CacheAPI() != nil {
		getObjectInfo = api.CacheAPI().GetObjectInfo
	}

	// apply default bucket configuration/governance headers for dest side.
	retentionMode, retentionDate, legalHold, s3Err := checkPutObjectLockAllowed(ctx, r, dstBucket, dstObject, getObjectInfo, retPerms, holdPerms)
	if s3Err == ErrNone && retentionMode.Valid() {
		srcInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockMode)] = string(retentionMode)
		srcInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = retentionDate.UTC().Format(iso8601TimeFormat)
	}
	if s3Err == ErrNone && legalHold.Status.Valid() {
		srcInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = string(legalHold.Status)
	}
	if s3Err != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
		return
	}
	if rs := r.Header.Get(xhttp.AmzBucketReplicationStatus); rs != "" {
		srcInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = rs
	}
	if ok, _ := mustReplicate(ctx, r, dstBucket, dstObject, srcInfo.UserDefined, srcInfo.ReplicationStatus.String(), srcInfo.metadataOnly); ok {
		srcInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
	}
	// Store the preserved compression metadata.
	for k, v := range compressMetadata {
		srcInfo.UserDefined[k] = v
	}

	// We need to preserve the encryption headers set in EncryptRequest,
	// so we do not want to override them, copy them instead.
	for k, v := range encMetadata {
		srcInfo.UserDefined[k] = v
	}

	// Ensure that metadata does not contain sensitive information
	crypto.RemoveSensitiveEntries(srcInfo.UserDefined)

	// If we see legacy source, metadataOnly we have to overwrite the content.
	if srcInfo.Legacy {
		srcInfo.metadataOnly = false
	}

	// Check if x-amz-metadata-directive or x-amz-tagging-directive was not set to REPLACE and source,
	// destination are same objects. Apply this restriction also when
	// metadataOnly is true indicating that we are not overwriting the object.
	// if encryption is enabled we do not need explicit "REPLACE" metadata to
	// be enabled as well - this is to allow for key-rotation.
	if !isDirectiveReplace(r.Header.Get(xhttp.AmzMetadataDirective)) && !isDirectiveReplace(r.Header.Get(xhttp.AmzTagDirective)) &&
		srcInfo.metadataOnly && srcOpts.VersionID == "" && !objectEncryption {
		// If x-amz-metadata-directive is not set to REPLACE then we need
		// to error out if source and destination are same.
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopyDest), r.URL, guessIsBrowserReq(r))
		return
	}

	var objInfo ObjectInfo

	if isRemoteCopyRequired(ctx, srcBucket, dstBucket, objectAPI) {
		var dstRecords []dns.SrvRecord
		dstRecords, err = globalDNSConfig.Get(dstBucket)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
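
		// Federated copy: look up the owning instance from the DNS records and
		// stream the prepared source reader to it with a regular PutObject
		// call; the remote response supplies the ETag and modification time
		// reported back to the client.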
		// Send PutObject request to appropriate instance (in federated deployment)
		core, rerr := getRemoteInstanceClient(r, getHostFromSrv(dstRecords))
		if rerr != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, rerr), r.URL, guessIsBrowserReq(r))
			return
		}
		tag, err := tags.ParseObjectTags(objTags)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		// Remove the metadata for remote calls.
		delete(srcInfo.UserDefined, ReservedMetadataPrefix+"compression")
		delete(srcInfo.UserDefined, ReservedMetadataPrefix+"actual-size")
		opts := miniogo.PutObjectOptions{
			UserMetadata:         srcInfo.UserDefined,
			ServerSideEncryption: dstOpts.ServerSideEncryption,
			UserTags:             tag.ToMap(),
		}
		remoteObjInfo, rerr := core.PutObject(ctx, dstBucket, dstObject, srcInfo.Reader,
			srcInfo.Size, "", "", opts)
		if rerr != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, rerr), r.URL, guessIsBrowserReq(r))
			return
		}
		objInfo.ETag = remoteObjInfo.ETag
		objInfo.ModTime = remoteObjInfo.LastModified
	} else {
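
		// Local copy: remember any transitioned (tiered) version of the
		// destination that this copy will overwrite, so its remote data can be
		// swept once the copy succeeds (see os.Sweep below).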
		os := newObjSweeper(dstBucket, dstObject)
		// Get appropriate object info to identify the remote object to delete
		if !srcInfo.metadataOnly {
			goiOpts := os.GetOpts()
			if goi, gerr := getObjectInfo(ctx, dstBucket, dstObject, goiOpts); gerr == nil {
				os.SetTransitionState(goi)
			}
		}

		copyObjectFn := objectAPI.CopyObject
		if api.CacheAPI() != nil {
			copyObjectFn = api.CacheAPI().CopyObject
		}

		// Copy source object to destination, if source and destination
		// object is same then only metadata is updated.
		objInfo, err = copyObjectFn(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

		// Remove the transitioned object whose object version is being overwritten.
		logger.LogIf(ctx, os.Sweep())
	}
	objInfo.ETag = getDecryptedETag(r.Header, objInfo, false)
	response := generateCopyObjectResponse(objInfo.ETag, objInfo.ModTime)
	encodedSuccessResponse := encodeResponse(response)
	if replicate, sync := mustReplicate(ctx, r, dstBucket, dstObject, objInfo.UserDefined, objInfo.ReplicationStatus.String(), objInfo.metadataOnly); replicate {
		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
	}

	setPutObjHeaders(w, objInfo, false)
	// We must not use the http.Header().Set method here because some (broken)
	// clients expect the x-amz-copy-source-version-id header key to be literally
	// "x-amz-copy-source-version-id"- not in canonicalized form, preserve it.
	if srcOpts.VersionID != "" {
		w.Header()[strings.ToLower(xhttp.AmzCopySourceVersionID)] = []string{srcOpts.VersionID}
	}

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)

	// Notify object created event.
	sendEvent(eventArgs{
		EventName:    event.ObjectCreatedCopy,
		BucketName:   dstBucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})
}

// PutObjectHandler - PUT Object
// ----------
// This implementation of the PUT operation adds an object to a bucket.
// Notice: The S3 client can send secret keys in headers for encryption related jobs,
// the handler should ensure to remove these keys before sending them to the object layer.
// Currently these keys are:
//   - X-Amz-Server-Side-Encryption-Customer-Key
//   - X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key
func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PutObject")
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if _, ok := crypto.IsRequested(r.Header); ok {
		if globalIsGateway {
			if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() {
				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
				return
			}
		} else {
			if !objectAPI.IsEncryptionSupported() {
				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
				return
			}
		}
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// X-Amz-Copy-Source shouldn't be set for this call.
	if _, ok := r.Header[xhttp.AmzCopySource]; ok {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL, guessIsBrowserReq(r))
		return
	}

	// Validate storage class metadata if present
	if sc := r.Header.Get(xhttp.AmzStorageClass); sc != "" {
		if !storageclass.IsValid(sc) {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidStorageClass), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	clientETag, err := etag.FromContentMD5(r.Header)
	if err != nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDigest), r.URL, guessIsBrowserReq(r))
		return
	}

	/// if Content-Length is unknown/missing, deny the request
	size := r.ContentLength
	rAuthType := getRequestAuthType(r)
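
	// For streaming-signed (aws-chunked) uploads the Content-Length covers the
	// chunk signatures and framing, so the real object size is taken from the
	// x-amz-decoded-content-length header instead.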
	if rAuthType == authTypeStreamingSigned {
		if sizeStr, ok := r.Header[xhttp.AmzDecodedContentLength]; ok {
			if sizeStr[0] == "" {
				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
				return
			}
			size, err = strconv.ParseInt(sizeStr[0], 10, 64)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
		}
	}
	if size == -1 {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
		return
	}

	/// maximum Upload size for objects in a single operation
	if isMaxObjectSize(size) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL, guessIsBrowserReq(r))
		return
	}

	metadata, err := extractMetadata(ctx, r)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if objTags := r.Header.Get(xhttp.AmzObjectTagging); objTags != "" {
		if !objectAPI.IsTaggingSupported() {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
			return
		}

		if _, err := tags.ParseObjectTags(objTags); err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

		metadata[xhttp.AmzObjectTagging] = objTags
	}

	var (
		md5hex              = clientETag.String()
		sha256hex           = ""
		reader    io.Reader = r.Body
		s3Err     APIErrorCode
		putObject = objectAPI.PutObject
	)

	// Check if put is allowed
	if s3Err = isPutActionAllowed(ctx, rAuthType, bucket, object, r, iampolicy.PutObjectAction); s3Err != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
		return
	}
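
	// Each auth type gets its own verification path: streaming-signed bodies
	// are wrapped in a chunked-signature verifying reader, V2 requests are
	// verified up front, and V4 (pre)signed requests record the expected
	// content sha256 so the hash.Reader can verify the payload as it streams.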
	switch rAuthType {
	case authTypeStreamingSigned:
		// Initialize stream signature verifier.
		reader, s3Err = newSignV4ChunkedReader(r)
		if s3Err != ErrNone {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
			return
		}
	case authTypeSignedV2, authTypePresignedV2:
		s3Err = isReqAuthenticatedV2(r)
		if s3Err != ErrNone {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
			return
		}

	case authTypePresigned, authTypeSigned:
		if s3Err = reqSignatureV4Verify(r, globalServerRegion, serviceS3); s3Err != ErrNone {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
			return
		}
		if !skipContentSha256Cksum(r) {
			sha256hex = getContentSha256Cksum(r, serviceS3)
		}
	}

	if err := enforceBucketQuota(ctx, bucket, size); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if bucket encryption is enabled
	sseConfig, _ := globalBucketSSEConfigSys.Get(bucket)
	sseConfig.Apply(r.Header, globalAutoEncryption)
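
	// When compression applies, the original size is preserved in the object
	// metadata, the body is compressed with S2, and the client-supplied
	// MD5/SHA-256 are verified against the uncompressed stream by the inner
	// hash.Reader rather than the compressed one; the ETag is carried across
	// via the etag wrapper.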
2018-09-27 23:36:17 -04:00
|
|
|
|
actualSize := size
|
|
|
|
|
if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) && size > 0 {
|
|
|
|
|
// Storing the compression metadata.
|
2019-09-26 02:08:24 -04:00
|
|
|
|
metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
|
2018-09-27 23:36:17 -04:00
|
|
|
|
metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10)
|
|
|
|
|
|
pkg/etag: add new package for S3 ETag handling (#11577)
This commit adds a new package `etag` for dealing
with S3 ETags.
Even though ETag is often viewed as MD5 checksum of
an object, handling S3 ETags correctly is a surprisingly
complex task. While it is true that the ETag corresponds
to the MD5 for the most basic S3 API operations, there are
many exceptions in case of multipart uploads or encryption.
In worse, some S3 clients expect very specific behavior when
it comes to ETags. For example, some clients expect that the
ETag is a double-quoted string and fail otherwise.
Non-AWS compliant ETag handling has been a source of many bugs
in the past.
Therefore, this commit adds a dedicated `etag` package that provides
functionality for parsing, generating and converting S3 ETags.
Further, this commit removes the ETag computation from the `hash`
package. Instead, the `hash` package (i.e. `hash.Reader`) should
focus only on computing and verifying the content-sha256.
One core feature of this commit is to provide a mechanism to
communicate a computed ETag from a low-level `io.Reader` to
a high-level `io.Reader`.
This problem occurs when an S3 server receives a request and
has to compute the ETag of the content. However, the server
may also wrap the initial body with several other `io.Reader`,
e.g. when encrypting or compressing the content:
```
reader := Encrypt(Compress(ETag(content)))
```
In such a case, the ETag should be accessible by the high-level
`io.Reader`.
The `etag` provides a mechanism to wrap `io.Reader` implementations
such that the `ETag` can be accessed by a type-check.
This technique is applied to the PUT, COPY and Upload handlers.
2021-02-23 15:31:53 -05:00
|
|
|
|
actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
|
2018-09-27 23:36:17 -04:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2018-09-27 23:36:17 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2018-09-28 03:44:59 -04:00
|
|
|
|
|
2018-09-27 23:36:17 -04:00
|
|
|
|
// Set compression metrics.
|
2021-01-22 15:09:24 -05:00
|
|
|
|
s2c := newS2CompressReader(actualReader, actualSize)
|
2019-09-26 02:08:24 -04:00
|
|
|
|
defer s2c.Close()
|
pkg/etag: add new package for S3 ETag handling (#11577)
This commit adds a new package `etag` for dealing
with S3 ETags.
Even though ETag is often viewed as MD5 checksum of
an object, handling S3 ETags correctly is a surprisingly
complex task. While it is true that the ETag corresponds
to the MD5 for the most basic S3 API operations, there are
many exceptions in case of multipart uploads or encryption.
In worse, some S3 clients expect very specific behavior when
it comes to ETags. For example, some clients expect that the
ETag is a double-quoted string and fail otherwise.
Non-AWS compliant ETag handling has been a source of many bugs
in the past.
Therefore, this commit adds a dedicated `etag` package that provides
functionality for parsing, generating and converting S3 ETags.
Further, this commit removes the ETag computation from the `hash`
package. Instead, the `hash` package (i.e. `hash.Reader`) should
focus only on computing and verifying the content-sha256.
One core feature of this commit is to provide a mechanism to
communicate a computed ETag from a low-level `io.Reader` to
a high-level `io.Reader`.
This problem occurs when an S3 server receives a request and
has to compute the ETag of the content. However, the server
may also wrap the initial body with several other `io.Reader`,
e.g. when encrypting or compressing the content:
```
reader := Encrypt(Compress(ETag(content)))
```
In such a case, the ETag should be accessible by the high-level
`io.Reader`.
The `etag` provides a mechanism to wrap `io.Reader` implementations
such that the `ETag` can be accessed by a type-check.
This technique is applied to the PUT, COPY and Upload handlers.
2021-02-23 15:31:53 -05:00
|
|
|
|
reader = etag.Wrap(s2c, actualReader)
|
2018-09-27 23:36:17 -04:00
|
|
|
|
size = -1 // Since compressed size is un-predictable.
|
|
|
|
|
md5hex = "" // Do not try to verify the content.
|
|
|
|
|
sha256hex = ""
|
|
|
|
|
}
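
	// At this point "reader" is either the raw request body or the S2-compressed
	// stream wrapping it; pass it through a hash.Reader so any client supplied
	// MD5/SHA-256 checksum is verified while the data streams through.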
	hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	rawReader := hashReader
	pReader := NewPutObjReader(rawReader)

	// get gateway encryption options
	var opts ObjectOptions
	opts, err = putOpts(ctx, r, bucket, object, metadata)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if api.CacheAPI() != nil {
		putObject = api.CacheAPI().PutObject
	}

	retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectRetentionAction)
	holdPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectLegalHoldAction)

	getObjectInfo := objectAPI.GetObjectInfo
	if api.CacheAPI() != nil {
		getObjectInfo = api.CacheAPI().GetObjectInfo
	}
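
	// Validate any object-lock retention / legal-hold headers against the bucket
	// configuration and the caller's permissions; when valid they are persisted
	// as object metadata below.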
	retentionMode, retentionDate, legalHold, s3Err := checkPutObjectLockAllowed(ctx, r, bucket, object, getObjectInfo, retPerms, holdPerms)
	if s3Err == ErrNone && retentionMode.Valid() {
		metadata[strings.ToLower(xhttp.AmzObjectLockMode)] = string(retentionMode)
		metadata[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = retentionDate.UTC().Format(iso8601TimeFormat)
	}
	if s3Err == ErrNone && legalHold.Status.Valid() {
		metadata[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = string(legalHold.Status)
	}
	if s3Err != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
		return
	}

	if ok, _ := mustReplicate(ctx, r, bucket, object, metadata, "", false); ok {
		metadata[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
	}
	if r.Header.Get(xhttp.AmzBucketReplicationStatus) == replication.Replica.String() {
		if s3Err = isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.ReplicateObjectAction); s3Err != ErrNone {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
			return
		}
	}
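
	// When server-side encryption applies, wrap the plaintext hash reader with an
	// encrypting reader: the expected on-disk size becomes the encrypted size and
	// content checksums can no longer be verified at this layer.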
	var objectEncryptionKey crypto.ObjectKey
	if objectAPI.IsEncryptionSupported() {
		if _, ok := crypto.IsRequested(r.Header); ok && !HasSuffix(object, SlashSeparator) { // handle SSE requests
			if crypto.SSECopy.IsRequested(r.Header) {
				writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL, guessIsBrowserReq(r))
				return
			}

			reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}

			wantSize := int64(-1)
			if size >= 0 {
				info := ObjectInfo{Size: size}
				wantSize = info.EncryptedSize()
			}

			// do not try to verify encrypted content
			hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
			pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
		}
	}

	// Ensure that metadata does not contain sensitive information
	crypto.RemoveSensitiveEntries(metadata)

	oc := newObjSweeper(bucket, object)
	// Get appropriate object info to identify the remote object to delete
	goiOpts := oc.GetOpts()
	if goi, gerr := getObjectInfo(ctx, bucket, object, goiOpts); gerr == nil {
		oc.SetTransitionState(goi)
	}

	// Create the object.
	objInfo, err := putObject(ctx, bucket, object, pReader, opts)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
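
	// For encrypted objects the stored ETag is not the plaintext MD5: decrypt it
	// for SSE-S3, or trim it to its trailing 32 hex characters for SSE-KMS/SSE-C,
	// and set the matching SSE response headers.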
	if kind, encrypted := crypto.IsEncrypted(objInfo.UserDefined); encrypted {
		switch kind {
		case crypto.S3:
			w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
			objInfo.ETag, _ = DecryptETag(objectEncryptionKey, ObjectInfo{ETag: objInfo.ETag})
		case crypto.S3KMS:
			w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
			w.Header().Set(xhttp.AmzServerSideEncryptionKmsID, objInfo.UserDefined[crypto.MetaKeyID])
			if kmsCtx, ok := objInfo.UserDefined[crypto.MetaContext]; ok {
				w.Header().Set(xhttp.AmzServerSideEncryptionKmsContext, kmsCtx)
			}
			if len(objInfo.ETag) >= 32 && strings.Count(objInfo.ETag, "-") != 1 {
				objInfo.ETag = objInfo.ETag[len(objInfo.ETag)-32:]
			}
		case crypto.SSEC:
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))

			if len(objInfo.ETag) >= 32 && strings.Count(objInfo.ETag, "-") != 1 {
				objInfo.ETag = objInfo.ETag[len(objInfo.ETag)-32:]
			}
		}
	}

	if replicate, sync := mustReplicate(ctx, r, bucket, object, metadata, "", false); replicate {
		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
	}

	// Remove the transitioned object whose object version is being overwritten.
	logger.LogIf(ctx, oc.Sweep())

	setPutObjHeaders(w, objInfo, false)

	writeSuccessResponseHeadersOnly(w)

	// Notify object created event.
	sendEvent(eventArgs{
		EventName:    event.ObjectCreatedPut,
		BucketName:   bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})
}

// PutObjectExtractHandler - PUT Object extract is an extended API, based off the
// AWS Snowball feature, to auto-extract a compressed stream: the archive is
// extracted at the location it is uploaded to, and the folder structure inside it
// is built out accordingly.
func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PutObjectExtract")
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
		return
	}

	if _, ok := crypto.IsRequested(r.Header); ok {
		if globalIsGateway {
			if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() {
				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
				return
			}
		} else {
			if !objectAPI.IsEncryptionSupported() {
				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
				return
			}
		}
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// X-Amz-Copy-Source shouldn't be set for this call.
	if _, ok := r.Header[xhttp.AmzCopySource]; ok {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL, guessIsBrowserReq(r))
		return
	}

	// Validate storage class metadata if present
	sc := r.Header.Get(xhttp.AmzStorageClass)
	if sc != "" {
		if !storageclass.IsValid(sc) {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidStorageClass), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	clientETag, err := etag.FromContentMD5(r.Header)
	if err != nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDigest), r.URL, guessIsBrowserReq(r))
		return
	}

	/// if Content-Length is unknown/missing, deny the request
	size := r.ContentLength
	rAuthType := getRequestAuthType(r)
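	// For streaming signed (aws-chunked) uploads the wire Content-Length includes
	// the chunk signature overhead, so the real object size is taken from the
	// X-Amz-Decoded-Content-Length header instead.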
	if rAuthType == authTypeStreamingSigned {
		if sizeStr, ok := r.Header[xhttp.AmzDecodedContentLength]; ok {
			if sizeStr[0] == "" {
				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
				return
			}
			size, err = strconv.ParseInt(sizeStr[0], 10, 64)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
		}
	}
	if size == -1 {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
		return
	}

	/// maximum Upload size for objects in a single operation
	if isMaxObjectSize(size) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL, guessIsBrowserReq(r))
		return
	}

	var (
		md5hex              = clientETag.String()
		sha256hex           = ""
		reader    io.Reader = r.Body
		s3Err     APIErrorCode
		putObject = objectAPI.PutObject
	)

	// Check if put is allowed
	if s3Err = isPutActionAllowed(ctx, rAuthType, bucket, object, r, iampolicy.PutObjectAction); s3Err != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
		return
	}

	switch rAuthType {
	case authTypeStreamingSigned:
		// Initialize stream signature verifier.
		reader, s3Err = newSignV4ChunkedReader(r)
		if s3Err != ErrNone {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
			return
		}
	case authTypeSignedV2, authTypePresignedV2:
		s3Err = isReqAuthenticatedV2(r)
		if s3Err != ErrNone {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
			return
		}

	case authTypePresigned, authTypeSigned:
		if s3Err = reqSignatureV4Verify(r, globalServerRegion, serviceS3); s3Err != ErrNone {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
			return
		}
		if !skipContentSha256Cksum(r) {
			sha256hex = getContentSha256Cksum(r, serviceS3)
		}
	}

	hreader, err := hash.NewReader(reader, size, md5hex, sha256hex, size)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if err := enforceBucketQuota(ctx, bucket, size); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if bucket encryption is enabled
	sseConfig, _ := globalBucketSSEConfigSys.Get(bucket)
	sseConfig.Apply(r.Header, globalAutoEncryption)

	retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectRetentionAction)
	holdPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectLegalHoldAction)

	if api.CacheAPI() != nil {
		putObject = api.CacheAPI().PutObject
	}

	getObjectInfo := objectAPI.GetObjectInfo
	if api.CacheAPI() != nil {
		getObjectInfo = api.CacheAPI().GetObjectInfo
	}
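
	// putObjectTar runs essentially the single-object PUT pipeline (compression,
	// encryption, object-lock and replication checks) for one file extracted from
	// the uploaded archive.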
	putObjectTar := func(reader io.Reader, info os.FileInfo, object string) {
		size := info.Size()
		metadata := map[string]string{
			xhttp.AmzStorageClass: sc,
		}

		actualSize := size
		if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) && size > 0 {
			// Storing the compression metadata.
			metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
			metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10)

			actualReader, err := hash.NewReader(reader, size, "", "", actualSize)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}

			// Set compression metrics.
			s2c := newS2CompressReader(actualReader, actualSize)
			defer s2c.Close()
			reader = etag.Wrap(s2c, actualReader)
			size = -1 // Since compressed size is unpredictable.
		}

		hashReader, err := hash.NewReader(reader, size, "", "", actualSize)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

		rawReader := hashReader
		pReader := NewPutObjReader(rawReader)

		// get encryption options
		opts, err := putOpts(ctx, r, bucket, object, metadata)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		opts.MTime = info.ModTime()

		retentionMode, retentionDate, legalHold, s3Err := checkPutObjectLockAllowed(ctx, r, bucket, object, getObjectInfo, retPerms, holdPerms)
		if s3Err == ErrNone && retentionMode.Valid() {
			metadata[strings.ToLower(xhttp.AmzObjectLockMode)] = string(retentionMode)
			metadata[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = retentionDate.UTC().Format(iso8601TimeFormat)
		}

		if s3Err == ErrNone && legalHold.Status.Valid() {
			metadata[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = string(legalHold.Status)
		}

		if s3Err != ErrNone {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
			return
		}

		if ok, _ := mustReplicate(ctx, r, bucket, object, metadata, "", false); ok {
			metadata[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
		}

		if r.Header.Get(xhttp.AmzBucketReplicationStatus) == replication.Replica.String() {
			if s3Err = isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.ReplicateObjectAction); s3Err != ErrNone {
				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
				return
			}
		}

		var objectEncryptionKey crypto.ObjectKey
		if objectAPI.IsEncryptionSupported() {
			if _, ok := crypto.IsRequested(r.Header); ok && !HasSuffix(object, SlashSeparator) { // handle SSE requests
				if crypto.SSECopy.IsRequested(r.Header) {
					writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL, guessIsBrowserReq(r))
					return
				}

				reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata)
				if err != nil {
					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
					return
				}

				wantSize := int64(-1)
				if size >= 0 {
					info := ObjectInfo{Size: size}
					wantSize = info.EncryptedSize()
				}

				// do not try to verify encrypted content
				hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize)
				if err != nil {
					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
					return
				}

				pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
				if err != nil {
					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
					return
				}
			}
		}

		// Ensure that metadata does not contain sensitive information
		crypto.RemoveSensitiveEntries(metadata)

		// Create the object.
		objInfo, err := putObject(ctx, bucket, object, pReader, opts)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

		if replicate, sync := mustReplicate(ctx, r, bucket, object, metadata, "", false); replicate {
			scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
		}
	}
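
	// untar streams through the uploaded archive and invokes putObjectTar for
	// every file entry it finds.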
	untar(hreader, putObjectTar)

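	// The ETag returned for the whole request is the MD5 of the uploaded archive
	// stream itself, not of any individual extracted object.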
	w.Header()[xhttp.ETag] = []string{`"` + hex.EncodeToString(hreader.MD5Current()) + `"`}
	writeSuccessResponseHeadersOnly(w)
}

/// Multipart objectAPIHandlers

// NewMultipartUploadHandler - New multipart upload.
// Notice: The S3 client can send secret keys in headers for encryption related jobs,
// the handler should ensure to remove these keys before sending them to the object layer.
// Currently these keys are:
// - X-Amz-Server-Side-Encryption-Customer-Key
// - X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key
func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "NewMultipartUpload")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if _, ok := crypto.IsRequested(r.Header); ok {
		if globalIsGateway {
			if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() {
				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
				return
			}
		} else {
			if !objectAPI.IsEncryptionSupported() {
				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
				return
			}
		}
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, bucket, object); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if bucket encryption is enabled
	sseConfig, _ := globalBucketSSEConfigSys.Get(bucket)
	sseConfig.Apply(r.Header, globalAutoEncryption)

	// Validate storage class metadata if present
	if sc := r.Header.Get(xhttp.AmzStorageClass); sc != "" {
		if !storageclass.IsValid(sc) {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidStorageClass), r.URL, guessIsBrowserReq(r))
			return
		}
	}
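
	// Encryption parameters are resolved once at initiate time and stored in
	// encMetadata, so that every subsequent UploadPart can use the same object
	// encryption key.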
	var encMetadata = map[string]string{}

	if objectAPI.IsEncryptionSupported() {
		if _, ok := crypto.IsRequested(r.Header); ok {
			if err = setEncryptionMetadata(r, bucket, object, encMetadata); err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
			// Set this for multipart only operations; we need to differentiate during
			// decryption whether the file was actually uploaded as multipart or not.
			encMetadata[ReservedMetadataPrefix+"Encrypted-Multipart"] = ""
		}
	}

	// Extract metadata that needs to be saved.
	metadata, err := extractMetadata(ctx, r)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectRetentionAction)
	holdPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectLegalHoldAction)

	getObjectInfo := objectAPI.GetObjectInfo
	if api.CacheAPI() != nil {
		getObjectInfo = api.CacheAPI().GetObjectInfo
	}

	retentionMode, retentionDate, legalHold, s3Err := checkPutObjectLockAllowed(ctx, r, bucket, object, getObjectInfo, retPerms, holdPerms)
	if s3Err == ErrNone && retentionMode.Valid() {
		metadata[strings.ToLower(xhttp.AmzObjectLockMode)] = string(retentionMode)
		metadata[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = retentionDate.UTC().Format(iso8601TimeFormat)
	}
	if s3Err == ErrNone && legalHold.Status.Valid() {
		metadata[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = string(legalHold.Status)
	}
	if s3Err != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
		return
	}
	if ok, _ := mustReplicate(ctx, r, bucket, object, metadata, "", false); ok {
		metadata[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
	}
	// We need to preserve the encryption headers set in EncryptRequest,
	// so we do not override them; copy them instead.
	for k, v := range encMetadata {
		metadata[k] = v
	}

	// Ensure that metadata does not contain sensitive information
	crypto.RemoveSensitiveEntries(metadata)

	if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) {
		// Storing the compression metadata.
		metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
	}

	opts, err := putOpts(ctx, r, bucket, object, metadata)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	newMultipartUpload := objectAPI.NewMultipartUpload

	uploadID, err := newMultipartUpload(ctx, bucket, object, opts)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	response := generateInitiateMultipartUploadResponse(bucket, object, uploadID)
	encodedSuccessResponse := encodeResponse(response)

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}

// CopyObjectPartHandler - uploads a part by copying data from an existing object as data source.
func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "CopyObjectPart")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
		return
	}

	if _, ok := crypto.IsRequested(r.Header); !objectAPI.IsEncryptionSupported() && ok {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
		return
	}

	vars := mux.Vars(r)
	dstBucket := vars["bucket"]
	dstObject, err := unescapePath(vars["object"])
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, dstBucket, dstObject); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Read escaped copy source path to check for parameters.
	cpSrcPath := r.Header.Get(xhttp.AmzCopySource)
	var vid string
	if u, err := url.Parse(cpSrcPath); err == nil {
		vid = strings.TrimSpace(u.Query().Get(xhttp.VersionID))
		// Note that url.Parse does the unescaping
		cpSrcPath = u.Path
	}

	srcBucket, srcObject := path2BucketObject(cpSrcPath)
	// If source object is empty or bucket is empty, reply back invalid copy source.
	if srcObject == "" || srcBucket == "" {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL, guessIsBrowserReq(r))
		return
	}

	if vid != "" && vid != nullVersionID {
		_, err := uuid.Parse(vid)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, VersionNotFound{
				Bucket:    srcBucket,
				Object:    srcObject,
				VersionID: vid,
			}), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, srcBucket, srcObject); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	uploadID := r.URL.Query().Get(xhttp.UploadID)
	partIDString := r.URL.Query().Get(xhttp.PartNumber)

	partID, err := strconv.Atoi(partIDString)
	if err != nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPart), r.URL, guessIsBrowserReq(r))
		return
	}

	// check partID with maximum part ID for multipart objects
	if isMaxPartID(partID) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxParts), r.URL, guessIsBrowserReq(r))
		return
	}

	var srcOpts, dstOpts ObjectOptions
	srcOpts, err = copySrcOpts(ctx, r, srcBucket, srcObject)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	srcOpts.VersionID = vid

	// convert copy src and dst encryption options for GET/PUT calls
	var getOpts = ObjectOptions{VersionID: srcOpts.VersionID}
	if srcOpts.ServerSideEncryption != nil {
		getOpts.ServerSideEncryption = encrypt.SSE(srcOpts.ServerSideEncryption)
	}

	dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, nil)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	getObjectNInfo := objectAPI.GetObjectNInfo
	if api.CacheAPI() != nil {
		getObjectNInfo = api.CacheAPI().GetObjectNInfo
	}

	// Get request range.
	var rs *HTTPRangeSpec
	var parseRangeErr error
	if rangeHeader := r.Header.Get(xhttp.AmzCopySourceRange); rangeHeader != "" {
		rs, parseRangeErr = parseCopyPartRangeSpec(rangeHeader)
	}
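
	// checkCopyPartPrecondFn is evaluated by getObjectNInfo below once the source
	// object's info has been read; returning true aborts the copy as a failed
	// precondition.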
	checkCopyPartPrecondFn := func(o ObjectInfo) bool {
		if objectAPI.IsEncryptionSupported() {
			if _, err := DecryptObjectInfo(&o, r); err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return true
			}
		}
		if checkCopyObjectPartPreconditions(ctx, w, r, o) {
			return true
		}
		if parseRangeErr != nil {
			logger.LogIf(ctx, parseRangeErr)
			writeCopyPartErr(ctx, w, parseRangeErr, r.URL, guessIsBrowserReq(r))
			// A Range header mismatch is a precondition-like failure,
			// so return true to indicate the Range precondition failed.
			return true
		}
		return false
	}
	getOpts.CheckPrecondFn = checkCopyPartPrecondFn
	gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, readLock, getOpts)
	if err != nil {
		if isErrPreconditionFailed(err) {
			return
		}
		if globalBucketVersioningSys.Enabled(srcBucket) && gr != nil {
			// Versioning is enabled, so the object may have been deleted and replaced
			// by a delete-marker; if present set the version headers the way AWS S3 does.
			if gr.ObjInfo.VersionID != "" && gr.ObjInfo.DeleteMarker {
				w.Header()[xhttp.AmzVersionID] = []string{gr.ObjInfo.VersionID}
				w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(gr.ObjInfo.DeleteMarker)}
			}
		}
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	defer gr.Close()
	srcInfo := gr.ObjInfo

	actualPartSize := srcInfo.Size
	if _, ok := crypto.IsEncrypted(srcInfo.UserDefined); ok {
		actualPartSize, err = srcInfo.GetActualSize()
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	if err := enforceBucketQuota(ctx, dstBucket, actualPartSize); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Special care for CopyObjectPart
	if partRangeErr := checkCopyPartRangeWithSize(rs, actualPartSize); partRangeErr != nil {
		writeCopyPartErr(ctx, w, partRangeErr, r.URL, guessIsBrowserReq(r))
		return
	}

	// Get the object offset & length
	startOffset, length, err := rs.GetOffsetLength(actualPartSize)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	/// maximum copy size for multipart objects in a single operation
	if isMaxAllowedPartSize(length) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL, guessIsBrowserReq(r))
		return
	}
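
	// In a federated deployment the destination bucket may be served by a
	// different cluster; when that is the case the part is streamed to that
	// cluster through a remote client instead of the local object layer.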
	if isRemoteCopyRequired(ctx, srcBucket, dstBucket, objectAPI) {
		var dstRecords []dns.SrvRecord
		dstRecords, err = globalDNSConfig.Get(dstBucket)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

		// Send PutObject request to appropriate instance (in federated deployment)
		core, rerr := getRemoteInstanceClient(r, getHostFromSrv(dstRecords))
		if rerr != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, rerr), r.URL, guessIsBrowserReq(r))
			return
		}

		partInfo, err := core.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, gr, length, "", "", dstOpts.ServerSideEncryption)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

		response := generateCopyObjectPartResponse(partInfo.ETag, partInfo.LastModified)
		encodedSuccessResponse := encodeResponse(response)

		// Write success response.
		writeSuccessResponseXML(w, encodedSuccessResponse)
		return
	}

	actualPartSize = length
	var reader io.Reader = etag.NewReader(gr, nil)

	mi, err := objectAPI.GetMultipartInfo(ctx, dstBucket, dstObject, uploadID, dstOpts)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Read compression metadata preserved in the init multipart for the decision.
	_, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"]
	// Compress only if the compression is enabled during initial multipart.
	if isCompressed {
		s2c := newS2CompressReader(reader, actualPartSize)
		defer s2c.Close()
		reader = etag.Wrap(s2c, reader)
		length = -1
	}

	srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualPartSize)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, mi.UserDefined)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	rawReader := srcInfo.Reader
	pReader := NewPutObjReader(rawReader)

	_, isEncrypted := crypto.IsEncrypted(mi.UserDefined)
	var objectEncryptionKey crypto.ObjectKey
	if objectAPI.IsEncryptionSupported() && isEncrypted {
		if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(mi.UserDefined) {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL, guessIsBrowserReq(r))
			return
		}
		if crypto.S3.IsEncrypted(mi.UserDefined) && crypto.SSEC.IsRequested(r.Header) {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL, guessIsBrowserReq(r))
			return
		}
		var key []byte
		if crypto.SSEC.IsRequested(r.Header) {
			key, err = ParseSSECustomerRequest(r)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
		}
		key, err = decryptObjectInfo(key, dstBucket, dstObject, mi.UserDefined)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		copy(objectEncryptionKey[:], key)

partEncryptionKey := objectEncryptionKey.DerivePartKey(uint32(partID))
|
2021-04-14 11:29:56 -04:00
|
|
|
|
encReader, err := sio.EncryptReader(reader, sio.Config{Key: partEncryptionKey[:], CipherSuites: fips.CipherSuitesDARE()})
|
2020-05-15 11:06:45 -04:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-05-15 11:06:45 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
pkg/etag: add new package for S3 ETag handling (#11577)
2021-02-23 15:31:53 -05:00
|
|
|
|
reader = etag.Wrap(encReader, reader)
|
2018-03-01 14:37:57 -05:00
|
|
|
|
|
2021-01-05 23:08:35 -05:00
|
|
|
|
wantSize := int64(-1)
|
|
|
|
|
if length >= 0 {
|
|
|
|
|
info := ObjectInfo{Size: length}
|
|
|
|
|
wantSize = info.EncryptedSize()
|
|
|
|
|
}
|
|
|
|
|
|
pkg/etag: add new package for S3 ETag handling (#11577)
2021-02-23 15:31:53 -05:00
|
|
|
|
srcInfo.Reader, err = hash.NewReader(reader, wantSize, "", "", actualPartSize)
|
2020-05-15 11:06:45 -04:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-05-15 11:06:45 -04:00
|
|
|
|
return
|
2018-03-01 14:37:57 -05:00
|
|
|
|
}
|
2021-02-10 11:52:50 -05:00
|
|
|
|
pReader, err = pReader.WithEncryption(srcInfo.Reader, &objectEncryptionKey)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2021-02-10 11:52:50 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2018-03-01 14:37:57 -05:00
|
|
|
|
}
|
2020-05-15 11:06:45 -04:00
|
|
|
|
|
2018-11-14 20:36:41 -05:00
|
|
|
|
srcInfo.PutObjReader = pReader
|
2017-01-31 12:38:34 -05:00
|
|
|
|
// Copy source object to destination. If the source and destination
|
|
|
|
|
// objects are the same, only the metadata is updated.
|
2018-10-25 11:50:06 -04:00
|
|
|
|
partInfo, err := objectAPI.CopyObjectPart(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID,
|
|
|
|
|
startOffset, length, srcInfo, srcOpts, dstOpts)
|
2017-01-31 12:38:34 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2017-01-31 12:38:34 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2018-11-14 20:36:41 -05:00
|
|
|
|
if isEncrypted {
|
2020-04-09 20:01:45 -04:00
|
|
|
|
partInfo.ETag = tryDecryptETag(objectEncryptionKey[:], partInfo.ETag, crypto.SSEC.IsRequested(r.Header))
|
2018-11-14 20:36:41 -05:00
|
|
|
|
}
|
|
|
|
|
|
2017-01-31 12:38:34 -05:00
|
|
|
|
response := generateCopyObjectPartResponse(partInfo.ETag, partInfo.LastModified)
|
|
|
|
|
encodedSuccessResponse := encodeResponse(response)
|
|
|
|
|
|
|
|
|
|
// Write success response.
|
|
|
|
|
writeSuccessResponseXML(w, encodedSuccessResponse)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// PutObjectPartHandler - uploads an incoming part for an ongoing multipart operation.
|
2016-04-12 15:45:15 -04:00
|
|
|
|
func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
|
2018-07-20 21:46:32 -04:00
|
|
|
|
ctx := newContext(r, w, "PutObjectPart")
|
2018-03-14 15:01:47 -04:00
|
|
|
|
|
2021-01-26 16:21:51 -05:00
|
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2018-10-12 15:25:59 -04:00
|
|
|
|
|
2016-08-10 21:47:49 -04:00
|
|
|
|
objectAPI := api.ObjectAPI()
|
|
|
|
|
if objectAPI == nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
2016-08-10 21:47:49 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-09-15 16:57:15 -04:00
|
|
|
|
|
2021-04-08 19:40:38 -04:00
|
|
|
|
if _, ok := crypto.IsRequested(r.Header); ok {
|
|
|
|
|
if globalIsGateway {
|
|
|
|
|
if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
|
2021-04-08 19:40:38 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
if !objectAPI.IsEncryptionSupported() {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
|
2021-04-08 19:40:38 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
}
|
2018-12-15 00:39:59 -05:00
|
|
|
|
}
|
2021-04-08 19:40:38 -04:00
|
|
|
|
|
2018-08-18 00:07:19 -04:00
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
|
bucket := vars["bucket"]
|
2021-03-09 15:58:22 -05:00
|
|
|
|
object, err := unescapePath(vars["object"])
|
2020-02-11 22:38:02 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-02-11 22:38:02 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2016-08-10 21:47:49 -04:00
|
|
|
|
|
2017-03-03 19:32:04 -05:00
|
|
|
|
// X-Amz-Copy-Source shouldn't be set for this call.
|
2019-07-03 01:34:32 -04:00
|
|
|
|
if _, ok := r.Header[xhttp.AmzCopySource]; ok {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL, guessIsBrowserReq(r))
|
2017-03-03 19:32:04 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-03 15:58:28 -05:00
|
|
|
|
clientETag, err := etag.FromContentMD5(r.Header)
|
2016-03-12 19:08:15 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDigest), r.URL, guessIsBrowserReq(r))
|
2015-10-16 22:09:35 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2015-12-28 02:00:36 -05:00
|
|
|
|
/// if Content-Length is unknown/missing, reject the request
|
2016-02-15 20:42:39 -05:00
|
|
|
|
size := r.ContentLength
|
2016-08-08 23:56:29 -04:00
|
|
|
|
|
|
|
|
|
rAuthType := getRequestAuthType(r)
|
|
|
|
|
// For auth type streaming signature, we need to gather a different content length.
|
|
|
|
|
if rAuthType == authTypeStreamingSigned {
|
2019-07-03 01:34:32 -04:00
|
|
|
|
if sizeStr, ok := r.Header[xhttp.AmzDecodedContentLength]; ok {
|
2018-03-16 14:22:34 -04:00
|
|
|
|
if sizeStr[0] == "" {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
|
2018-03-16 14:22:34 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
size, err = strconv.ParseInt(sizeStr[0], 10, 64)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2018-03-16 14:22:34 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2016-08-08 23:56:29 -04:00
|
|
|
|
}
|
|
|
|
|
}
|
2015-12-28 02:00:36 -05:00
|
|
|
|
if size == -1 {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
|
2015-12-28 02:00:36 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2015-05-08 01:43:19 -04:00
|
|
|
|
/// maximum size allowed for a single part uploaded in a multipart operation
|
2017-03-03 13:14:17 -05:00
|
|
|
|
if isMaxAllowedPartSize(size) {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL, guessIsBrowserReq(r))
|
2015-05-07 22:55:30 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2015-04-30 19:29:03 -04:00
|
|
|
|
|
2020-07-08 20:36:56 -04:00
|
|
|
|
uploadID := r.URL.Query().Get(xhttp.UploadID)
|
|
|
|
|
partIDString := r.URL.Query().Get(xhttp.PartNumber)
|
2015-05-09 22:39:00 -04:00
|
|
|
|
|
2016-04-29 17:24:10 -04:00
|
|
|
|
partID, err := strconv.Atoi(partIDString)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPart), r.URL, guessIsBrowserReq(r))
|
2016-03-02 14:22:58 -05:00
|
|
|
|
return
|
2015-05-07 22:55:30 -04:00
|
|
|
|
}
|
2015-07-02 23:31:22 -04:00
|
|
|
|
|
2016-05-24 04:52:47 -04:00
|
|
|
|
// check partID against the maximum part ID allowed for multipart objects
|
|
|
|
|
if isMaxPartID(partID) {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxParts), r.URL, guessIsBrowserReq(r))
|
2016-05-24 04:52:47 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2017-10-22 01:30:34 -04:00
|
|
|
|
var (
|
2021-03-03 15:58:28 -05:00
|
|
|
|
md5hex = clientETag.String()
|
pkg/etag: add new package for S3 ETag handling (#11577)
2021-02-23 15:31:53 -05:00
|
|
|
|
sha256hex = ""
|
|
|
|
|
reader io.Reader = r.Body
|
2018-10-09 17:00:01 -04:00
|
|
|
|
s3Error APIErrorCode
|
2017-10-22 01:30:34 -04:00
|
|
|
|
)
|
2020-11-04 12:13:34 -05:00
|
|
|
|
if s3Error = isPutActionAllowed(ctx, rAuthType, bucket, object, r, iampolicy.PutObjectAction); s3Error != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
2018-10-09 17:00:01 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2016-08-08 23:56:29 -04:00
|
|
|
|
switch rAuthType {
|
|
|
|
|
case authTypeStreamingSigned:
|
|
|
|
|
// Initialize stream signature verifier.
|
2017-10-22 01:30:34 -04:00
|
|
|
|
reader, s3Error = newSignV4ChunkedReader(r)
|
2016-08-08 23:56:29 -04:00
|
|
|
|
if s3Error != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
2016-08-08 23:56:29 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2016-09-30 17:32:13 -04:00
|
|
|
|
case authTypeSignedV2, authTypePresignedV2:
|
2018-10-09 17:00:01 -04:00
|
|
|
|
if s3Error = isReqAuthenticatedV2(r); s3Error != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
2016-09-30 17:32:13 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2016-04-07 06:04:18 -04:00
|
|
|
|
case authTypePresigned, authTypeSigned:
|
2019-10-23 01:59:13 -04:00
|
|
|
|
if s3Error = reqSignatureV4Verify(r, globalServerRegion, serviceS3); s3Error != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
2016-10-02 18:51:49 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if !skipContentSha256Cksum(r) {
|
2019-02-27 20:46:55 -05:00
|
|
|
|
sha256hex = getContentSha256Cksum(r, serviceS3)
|
2016-10-02 18:51:49 -04:00
|
|
|
|
}
|
2016-02-16 21:50:36 -05:00
|
|
|
|
}
|
2017-10-22 01:30:34 -04:00
|
|
|
|
|
2020-04-30 18:55:54 -04:00
|
|
|
|
if err := enforceBucketQuota(ctx, bucket, size); err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-04-30 18:55:54 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-27 23:36:17 -04:00
|
|
|
|
actualSize := size
|
|
|
|
|
|
2019-01-05 17:16:43 -05:00
|
|
|
|
// get encryption options
|
|
|
|
|
var opts ObjectOptions
|
|
|
|
|
if crypto.SSEC.IsRequested(r.Header) {
|
2020-06-12 23:04:01 -04:00
|
|
|
|
opts, err = getOpts(ctx, r, bucket, object)
|
2019-01-05 17:16:43 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2019-01-05 17:16:43 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
}
|
2020-05-28 15:36:20 -04:00
|
|
|
|
|
|
|
|
|
mi, err := objectAPI.GetMultipartInfo(ctx, bucket, object, uploadID, opts)
|
2018-09-27 23:36:17 -04:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2018-09-27 23:36:17 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-05-15 11:06:45 -04:00
|
|
|
|
|
2018-09-27 23:36:17 -04:00
|
|
|
|
// Read the compression metadata preserved at multipart initialization to decide whether to compress this part.
|
2020-05-28 15:36:20 -04:00
|
|
|
|
_, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"]
|
2018-09-27 23:36:17 -04:00
|
|
|
|
|
2020-05-15 11:06:45 -04:00
|
|
|
|
if objectAPI.IsCompressionSupported() && isCompressed {
|
pkg/etag: add new package for S3 ETag handling (#11577)
2021-02-23 15:31:53 -05:00
|
|
|
|
actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
|
2018-09-27 23:36:17 -04:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2018-09-27 23:36:17 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2018-09-28 03:44:59 -04:00
|
|
|
|
|
2018-09-27 23:36:17 -04:00
|
|
|
|
// Set compression metrics.
|
2021-01-22 15:09:24 -05:00
|
|
|
|
s2c := newS2CompressReader(actualReader, actualSize)
|
2019-09-26 02:08:24 -04:00
|
|
|
|
defer s2c.Close()
|
pkg/etag: add new package for S3 ETag handling (#11577)
2021-02-23 15:31:53 -05:00
|
|
|
|
reader = etag.Wrap(s2c, actualReader)
|
2018-09-27 23:36:17 -04:00
|
|
|
|
size = -1 // Since the compressed size is unpredictable.
|
|
|
|
|
md5hex = "" // Do not try to verify the content.
|
|
|
|
|
sha256hex = ""
|
|
|
|
|
}
|
|
|
|
|
|
pkg/etag: add new package for S3 ETag handling (#11577)
2021-02-23 15:31:53 -05:00
|
|
|
|
hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
|
2017-10-22 01:30:34 -04:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2017-10-22 01:30:34 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2018-11-14 20:36:41 -05:00
|
|
|
|
rawReader := hashReader
|
2021-02-10 11:52:50 -05:00
|
|
|
|
pReader := NewPutObjReader(rawReader)
|
2018-11-14 20:36:41 -05:00
|
|
|
|
|
2021-02-03 18:19:08 -05:00
|
|
|
|
_, isEncrypted := crypto.IsEncrypted(mi.UserDefined)
|
2020-04-09 20:01:45 -04:00
|
|
|
|
var objectEncryptionKey crypto.ObjectKey
|
2021-01-05 23:08:35 -05:00
|
|
|
|
if objectAPI.IsEncryptionSupported() && isEncrypted {
|
2020-05-28 15:36:20 -04:00
|
|
|
|
if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(mi.UserDefined) {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL, guessIsBrowserReq(r))
|
2020-05-15 11:06:45 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-28 15:36:20 -04:00
|
|
|
|
opts, err = putOpts(ctx, r, bucket, object, mi.UserDefined)
|
2018-03-01 14:37:57 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2018-03-01 14:37:57 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2019-01-05 17:16:43 -05:00
|
|
|
|
|
2020-05-15 11:06:45 -04:00
|
|
|
|
var key []byte
|
|
|
|
|
if crypto.SSEC.IsRequested(r.Header) {
|
|
|
|
|
key, err = ParseSSECustomerRequest(r)
|
2019-01-05 17:16:43 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2019-01-05 17:16:43 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-05-15 11:06:45 -04:00
|
|
|
|
}
|
2019-01-05 17:16:43 -05:00
|
|
|
|
|
2020-05-15 11:06:45 -04:00
|
|
|
|
// Calculating object encryption key
|
2020-05-28 15:36:20 -04:00
|
|
|
|
key, err = decryptObjectInfo(key, bucket, object, mi.UserDefined)
|
2020-05-15 11:06:45 -04:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-05-15 11:06:45 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
copy(objectEncryptionKey[:], key)
|
2018-03-01 14:37:57 -05:00
|
|
|
|
|
2020-05-15 11:06:45 -04:00
|
|
|
|
partEncryptionKey := objectEncryptionKey.DerivePartKey(uint32(partID))
|
|
|
|
|
in := io.Reader(hashReader)
|
|
|
|
|
if size > encryptBufferThreshold {
|
|
|
|
|
// The encryption reads in blocks of 64KB.
|
|
|
|
|
// We add a buffer on bigger files to reduce the number of syscalls upstream.
|
|
|
|
|
in = bufio.NewReaderSize(hashReader, encryptBufferSize)
|
2018-03-01 14:37:57 -05:00
|
|
|
|
}
|
2021-04-14 11:29:56 -04:00
|
|
|
|
reader, err = sio.EncryptReader(in, sio.Config{Key: partEncryptionKey[:], CipherSuites: fips.CipherSuitesDARE()})
|
2020-05-15 11:06:45 -04:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-05-15 11:06:45 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2021-01-05 23:08:35 -05:00
|
|
|
|
wantSize := int64(-1)
|
|
|
|
|
if size >= 0 {
|
|
|
|
|
info := ObjectInfo{Size: size}
|
|
|
|
|
wantSize = info.EncryptedSize()
|
|
|
|
|
}
|
2020-05-15 11:06:45 -04:00
|
|
|
|
// do not try to verify encrypted content
|
pkg/etag: add new package for S3 ETag handling (#11577)
2021-02-23 15:31:53 -05:00
|
|
|
|
hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize)
|
2020-05-15 11:06:45 -04:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-05-15 11:06:45 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2021-02-10 11:52:50 -05:00
|
|
|
|
pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2021-02-10 11:52:50 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2018-03-01 14:37:57 -05:00
|
|
|
|
}
|
|
|
|
|
|
2018-03-28 17:14:06 -04:00
|
|
|
|
putObjectPart := objectAPI.PutObjectPart
|
2019-08-09 20:09:08 -04:00
|
|
|
|
|
2018-11-14 20:36:41 -05:00
|
|
|
|
partInfo, err := putObjectPart(ctx, bucket, object, uploadID, partID, pReader, opts)
|
2015-09-19 06:20:07 -04:00
|
|
|
|
if err != nil {
|
2016-03-12 19:08:15 -05:00
|
|
|
|
// Verify if the underlying error is signature mismatch.
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2015-08-03 19:17:21 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2018-12-19 17:12:53 -05:00
|
|
|
|
|
|
|
|
|
etag := partInfo.ETag
|
2021-04-07 17:50:28 -04:00
|
|
|
|
switch kind, encrypted := crypto.IsEncrypted(mi.UserDefined); {
|
|
|
|
|
case encrypted:
|
|
|
|
|
switch kind {
|
|
|
|
|
case crypto.S3:
|
|
|
|
|
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
|
|
|
|
|
etag = tryDecryptETag(objectEncryptionKey[:], etag, false)
|
|
|
|
|
case crypto.SSEC:
|
|
|
|
|
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
|
|
|
|
|
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
|
|
|
|
|
|
|
|
|
|
if len(etag) >= 32 && strings.Count(etag, "-") != 1 {
|
|
|
|
|
etag = etag[len(etag)-32:]
|
|
|
|
|
}
|
|
|
|
|
}
|
2016-02-01 15:19:54 -05:00
|
|
|
|
}
|
2020-06-12 23:04:01 -04:00
|
|
|
|
|
|
|
|
|
// We must not use the http.Header().Set method here because some (broken)
|
|
|
|
|
// clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive).
|
|
|
|
|
// Therefore, we have to set the ETag directly as map entry.
|
2019-07-03 01:34:32 -04:00
|
|
|
|
w.Header()[xhttp.ETag] = []string{"\"" + etag + "\""}
|
2017-01-06 03:37:00 -05:00
|
|
|
|
|
|
|
|
|
writeSuccessResponseHeadersOnly(w)
|
2015-05-07 22:55:30 -04:00
|
|
|
|
}
|
|
|
|
|
|
2015-06-30 23:15:48 -04:00
|
|
|
|
// AbortMultipartUploadHandler - Abort multipart upload
|
2016-04-12 15:45:15 -04:00
|
|
|
|
func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
|
2018-07-20 21:46:32 -04:00
|
|
|
|
ctx := newContext(r, w, "AbortMultipartUpload")
|
2018-03-14 15:01:47 -04:00
|
|
|
|
|
2021-01-26 16:21:51 -05:00
|
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2018-10-12 15:25:59 -04:00
|
|
|
|
|
2016-02-15 20:42:39 -05:00
|
|
|
|
vars := mux.Vars(r)
|
2015-05-09 19:06:35 -04:00
|
|
|
|
bucket := vars["bucket"]
|
2021-03-09 15:58:22 -05:00
|
|
|
|
object, err := unescapePath(vars["object"])
|
2020-02-11 22:38:02 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-02-11 22:38:02 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2015-05-09 19:06:35 -04:00
|
|
|
|
|
2016-08-10 21:47:49 -04:00
|
|
|
|
objectAPI := api.ObjectAPI()
|
|
|
|
|
if objectAPI == nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
2016-08-10 21:47:49 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2018-03-28 17:14:06 -04:00
|
|
|
|
abortMultipartUpload := objectAPI.AbortMultipartUpload
|
2018-04-24 18:53:30 -04:00
|
|
|
|
|
|
|
|
|
if s3Error := checkRequestAuthType(ctx, r, policy.AbortMultipartUploadAction, bucket, object); s3Error != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
2016-03-12 19:08:15 -05:00
|
|
|
|
return
|
2016-02-15 20:42:39 -05:00
|
|
|
|
}
|
|
|
|
|
|
2018-10-18 10:31:46 -04:00
|
|
|
|
uploadID, _, _, _, s3Error := getObjectResources(r.URL.Query())
|
|
|
|
|
if s3Error != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
2018-10-18 10:31:46 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-09-14 18:57:13 -04:00
|
|
|
|
opts := ObjectOptions{}
|
|
|
|
|
if err := abortMultipartUpload(ctx, bucket, object, uploadID, opts); err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2015-08-03 19:17:21 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2018-11-02 21:40:08 -04:00
|
|
|
|
|
2015-10-16 23:02:37 -04:00
|
|
|
|
writeSuccessNoContent(w)
|
2015-05-09 19:06:35 -04:00
|
|
|
|
}
|
|
|
|
|
|
2015-06-30 23:15:48 -04:00
|
|
|
|
// ListObjectPartsHandler - List object parts
|
2016-04-12 15:45:15 -04:00
|
|
|
|
func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) {
|
2018-07-20 21:46:32 -04:00
|
|
|
|
ctx := newContext(r, w, "ListObjectParts")
|
2018-03-14 15:01:47 -04:00
|
|
|
|
|
2021-01-26 16:21:51 -05:00
|
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2018-10-12 15:25:59 -04:00
|
|
|
|
|
2016-02-15 20:42:39 -05:00
|
|
|
|
vars := mux.Vars(r)
|
2015-10-16 22:09:35 -04:00
|
|
|
|
bucket := vars["bucket"]
|
2021-03-09 15:58:22 -05:00
|
|
|
|
object, err := unescapePath(vars["object"])
|
2020-02-11 22:38:02 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-02-11 22:38:02 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2015-10-16 22:09:35 -04:00
|
|
|
|
|
2016-08-10 21:47:49 -04:00
|
|
|
|
objectAPI := api.ObjectAPI()
|
|
|
|
|
if objectAPI == nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
2016-08-10 21:47:49 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-24 18:53:30 -04:00
|
|
|
|
if s3Error := checkRequestAuthType(ctx, r, policy.ListMultipartUploadPartsAction, bucket, object); s3Error != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
2016-03-12 19:08:15 -05:00
|
|
|
|
return
|
2016-02-15 20:42:39 -05:00
|
|
|
|
}
|
|
|
|
|
|
2019-02-24 01:14:24 -05:00
|
|
|
|
uploadID, partNumberMarker, maxParts, encodingType, s3Error := getObjectResources(r.URL.Query())
|
2018-10-18 10:31:46 -04:00
|
|
|
|
if s3Error != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
2018-10-18 10:31:46 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
objectAPI: Fix object API interface, remove unnecessary structs.
ObjectAPI changes.
```
ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, *probe.Error)
ListMultipartUploads(bucket, objectPrefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, *probe.Error)
ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, *probe.Error)
CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (ObjectInfo, *probe.Error)
```
2016-04-03 04:34:20 -04:00
|
|
|
|
if partNumberMarker < 0 {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPartNumberMarker), r.URL, guessIsBrowserReq(r))
|
2015-07-16 20:22:45 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
objectAPI: Fix object API interface, remove unnecessary structs.
2016-04-03 04:34:20 -04:00
|
|
|
|
if maxParts < 0 {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxParts), r.URL, guessIsBrowserReq(r))
|
2015-07-16 20:22:45 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-05-15 11:06:45 -04:00
|
|
|
|
|
2020-09-14 18:57:13 -04:00
|
|
|
|
opts := ObjectOptions{}
|
2019-01-05 17:16:43 -05:00
|
|
|
|
listPartsInfo, err := objectAPI.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
|
2015-09-19 06:20:07 -04:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2015-08-03 19:17:21 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-05-15 11:06:45 -04:00
|
|
|
|
|
2018-11-14 20:36:41 -05:00
|
|
|
|
var ssec bool
|
2021-02-03 18:19:08 -05:00
|
|
|
|
if _, ok := crypto.IsEncrypted(listPartsInfo.UserDefined); ok && objectAPI.IsEncryptionSupported() {
|
2020-05-15 11:06:45 -04:00
|
|
|
|
var key []byte
|
|
|
|
|
if crypto.SSEC.IsEncrypted(listPartsInfo.UserDefined) {
|
|
|
|
|
ssec = true
|
2018-11-14 20:36:41 -05:00
|
|
|
|
}
|
2020-05-15 11:06:45 -04:00
|
|
|
|
var objectEncryptionKey []byte
|
|
|
|
|
if crypto.S3.IsEncrypted(listPartsInfo.UserDefined) {
|
|
|
|
|
// Calculating object encryption key
|
|
|
|
|
objectEncryptionKey, err = decryptObjectInfo(key, bucket, object, listPartsInfo.UserDefined)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-05-15 11:06:45 -04:00
|
|
|
|
return
|
2018-11-14 20:36:41 -05:00
|
|
|
|
}
|
|
|
|
|
}
|
2020-07-03 22:27:13 -04:00
|
|
|
|
for i := range listPartsInfo.Parts {
|
|
|
|
|
curp := listPartsInfo.Parts[i]
|
|
|
|
|
curp.ETag = tryDecryptETag(objectEncryptionKey, curp.ETag, ssec)
|
|
|
|
|
if !ssec {
|
|
|
|
|
var partSize uint64
|
|
|
|
|
partSize, err = sio.DecryptedSize(uint64(curp.Size))
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-07-03 22:27:13 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
curp.Size = int64(partSize)
|
|
|
|
|
}
|
|
|
|
|
listPartsInfo.Parts[i] = curp
|
2020-05-15 11:06:45 -04:00
|
|
|
|
}
|
2018-11-14 20:36:41 -05:00
|
|
|
|
}
|
|
|
|
|
|
2019-02-24 01:14:24 -05:00
|
|
|
|
response := generateListPartsResponse(listPartsInfo, encodingType)
|
2016-03-06 15:16:22 -05:00
|
|
|
|
encodedSuccessResponse := encodeResponse(response)
|
2017-01-06 03:37:00 -05:00
|
|
|
|
|
accessPolicy: Implement Put, Get, Delete access policy.
This patch implements Get, Put, and Delete bucket policies
Supporting - http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
Currently supports the following actions:
"*": true,
"s3:*": true,
"s3:GetObject": true,
"s3:ListBucket": true,
"s3:PutObject": true,
"s3:CreateBucket": true,
"s3:GetBucketLocation": true,
"s3:DeleteBucket": true,
"s3:DeleteObject": true,
"s3:AbortMultipartUpload": true,
"s3:ListBucketMultipartUploads": true,
"s3:ListMultipartUploadParts": true,
and the following conditions for "StringEquals" and "StringNotEquals":
"s3:prefix", "s3:max-keys"
2016-02-03 19:46:56 -05:00
|
|
|
|
// Write success response.
|
2017-01-06 03:37:00 -05:00
|
|
|
|
writeSuccessResponseXML(w, encodedSuccessResponse)
|
2015-05-09 14:41:26 -04:00
|
|
|
|
}
|
|
|
|
|
|
2019-02-21 01:20:15 -05:00
|
|
|
|
type whiteSpaceWriter struct {
|
|
|
|
|
http.ResponseWriter
|
|
|
|
|
http.Flusher
|
|
|
|
|
written bool
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (w *whiteSpaceWriter) Write(b []byte) (n int, err error) {
|
|
|
|
|
n, err = w.ResponseWriter.Write(b)
|
|
|
|
|
w.written = true
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (w *whiteSpaceWriter) WriteHeader(statusCode int) {
|
|
|
|
|
if !w.written {
|
|
|
|
|
w.ResponseWriter.WriteHeader(statusCode)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Send empty whitespaces every 10 seconds to the client till completeMultiPartUpload() is
|
|
|
|
|
// done so that the client does not time out. The downside is that we might send 200 OK and
|
|
|
|
|
// then send error XML. But according to the S3 spec the client is supposed to check
|
|
|
|
|
// for error XML even if it received 200 OK. But for erasure this is not a problem
|
|
|
|
|
// as completeMultiPartUpload() is quick. Even for FS, it would not be an issue as
|
|
|
|
|
// we do background append as and when the parts arrive and completeMultiPartUpload
|
|
|
|
|
// is quick. Only in a rare case where parts would be out of order will
|
|
|
|
|
// FS:completeMultiPartUpload() take a longer time.
|
2019-10-15 21:35:41 -04:00
|
|
|
|
func sendWhiteSpace(w http.ResponseWriter) <-chan bool {
|
2019-02-21 01:20:15 -05:00
|
|
|
|
doneCh := make(chan bool)
|
2019-02-05 23:58:09 -05:00
|
|
|
|
go func() {
|
|
|
|
|
ticker := time.NewTicker(time.Second * 10)
|
2019-02-21 01:20:15 -05:00
|
|
|
|
headerWritten := false
|
2019-02-05 23:58:09 -05:00
|
|
|
|
for {
|
|
|
|
|
select {
|
|
|
|
|
case <-ticker.C:
|
2019-02-21 01:20:15 -05:00
|
|
|
|
// Write header if not written yet.
|
|
|
|
|
if !headerWritten {
|
|
|
|
|
w.Write([]byte(xml.Header))
|
|
|
|
|
headerWritten = true
|
2019-02-05 23:58:09 -05:00
|
|
|
|
}
|
2019-02-21 01:20:15 -05:00
|
|
|
|
|
|
|
|
|
// Once the header is written, keep writing empty spaces
|
|
|
|
|
// which are ignored by client SDK XML parsers.
|
|
|
|
|
// This occurs when the server takes a long time to completeMultiPartUpload()
|
|
|
|
|
w.Write([]byte(" "))
|
2019-02-05 23:58:09 -05:00
|
|
|
|
w.(http.Flusher).Flush()
|
2019-02-21 01:20:15 -05:00
|
|
|
|
case doneCh <- headerWritten:
|
2019-02-05 23:58:09 -05:00
|
|
|
|
ticker.Stop()
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
}()
|
|
|
|
|
return doneCh
|
|
|
|
|
}
|
|
|
|
|
|
2016-09-21 23:08:08 -04:00
|
|
|
|
// CompleteMultipartUploadHandler - Complete multipart upload.
|
2016-04-12 15:45:15 -04:00
|
|
|
|
func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
|
2018-07-20 21:46:32 -04:00
|
|
|
|
ctx := newContext(r, w, "CompleteMultipartUpload")
|
2018-03-14 15:01:47 -04:00
|
|
|
|
|
2021-01-26 16:21:51 -05:00
|
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2018-10-12 15:25:59 -04:00
|
|
|
|
|
2016-02-15 20:42:39 -05:00
|
|
|
|
vars := mux.Vars(r)
|
2015-05-07 22:55:30 -04:00
|
|
|
|
bucket := vars["bucket"]
|
2021-03-09 15:58:22 -05:00
|
|
|
|
object, err := unescapePath(vars["object"])
|
2020-02-11 22:38:02 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-02-11 22:38:02 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2015-06-30 17:42:29 -04:00
|
|
|
|
|
2016-08-10 21:47:49 -04:00
|
|
|
|
objectAPI := api.ObjectAPI()
|
|
|
|
|
if objectAPI == nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
2016-08-10 21:47:49 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-24 18:53:30 -04:00
|
|
|
|
if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, bucket, object); s3Error != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
2016-11-21 16:51:05 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-02-22 22:36:46 -05:00
|
|
|
|
// Content-Length is required and should be non-zero
|
|
|
|
|
if r.ContentLength <= 0 {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
|
2020-02-22 22:36:46 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
fs: Break fs package to top-level and introduce ObjectAPI interface.
ObjectAPI interface brings in changes needed for XL ObjectAPI layer.
The new interface for any ObjectAPI layer is as below
```
// ObjectAPI interface.
type ObjectAPI interface {
// Bucket resource API.
DeleteBucket(bucket string) *probe.Error
ListBuckets() ([]BucketInfo, *probe.Error)
MakeBucket(bucket string) *probe.Error
GetBucketInfo(bucket string) (BucketInfo, *probe.Error)
// Bucket query API.
ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsResult, *probe.Error)
ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error)
// Object resource API.
GetObject(bucket, object string, startOffset int64) (io.ReadCloser, *probe.Error)
GetObjectInfo(bucket, object string) (ObjectInfo, *probe.Error)
PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string) (ObjectInfo, *probe.Error)
DeleteObject(bucket, object string) *probe.Error
// Object query API.
NewMultipartUpload(bucket, object string) (string, *probe.Error)
PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, *probe.Error)
ListObjectParts(bucket, object string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error)
CompleteMultipartUpload(bucket string, object string, uploadID string, parts []CompletePart) (ObjectInfo, *probe.Error)
AbortMultipartUpload(bucket, object, uploadID string) *probe.Error
}
```
2016-03-30 19:15:28 -04:00
|
|
|
|
// Get upload id.
|
2018-10-18 10:31:46 -04:00
|
|
|
|
uploadID, _, _, _, s3Error := getObjectResources(r.URL.Query())
|
|
|
|
|
if s3Error != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
2018-10-18 10:31:46 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2016-03-02 14:22:58 -05:00
|
|
|
|
|
2017-11-14 03:25:10 -05:00
|
|
|
|
complMultipartUpload := &CompleteMultipartUpload{}
|
2020-02-22 22:36:46 -05:00
|
|
|
|
if err = xmlDecoder(r.Body, complMultipartUpload, r.ContentLength); err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
fs: Break fs package to top-level and introduce ObjectAPI interface.
2016-03-30 19:15:28 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2016-05-25 15:11:26 -04:00
|
|
|
|
if len(complMultipartUpload.Parts) == 0 {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
|
2016-05-25 15:11:26 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2017-11-14 03:25:10 -05:00
|
|
|
|
if !sort.IsSorted(CompletedParts(complMultipartUpload.Parts)) {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPartOrder), r.URL, guessIsBrowserReq(r))
|
fs: Break fs package to top-level and introduce ObjectAPI interface.
2016-03-30 19:15:28 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-04-06 16:44:16 -04:00
|
|
|
|
|
|
|
|
|
// Reject retention or governance headers if set; the CompleteMultipartUpload spec
|
|
|
|
|
// does not use these headers, and they should not be passed down to checkPutObjectLockAllowed
|
|
|
|
|
if objectlock.IsObjectLockRequested(r.Header) || objectlock.IsObjectLockGovernanceBypassSet(r.Header) {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL, guessIsBrowserReq(r))
|
2020-04-06 16:44:16 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-04-13 17:03:23 -04:00
|
|
|
|
if _, _, _, s3Err := checkPutObjectLockAllowed(ctx, r, bucket, object, objectAPI.GetObjectInfo, ErrNone, ErrNone); s3Err != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
|
2020-04-06 16:44:16 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2018-11-14 20:36:41 -05:00
|
|
|
|
var objectEncryptionKey []byte
|
|
|
|
|
var isEncrypted, ssec bool
|
|
|
|
|
if objectAPI.IsEncryptionSupported() {
|
2020-09-14 18:57:13 -04:00
|
|
|
|
mi, err := objectAPI.GetMultipartInfo(ctx, bucket, object, uploadID, ObjectOptions{})
|
2018-11-14 20:36:41 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2018-11-14 20:36:41 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2021-02-03 18:19:08 -05:00
|
|
|
|
if _, ok := crypto.IsEncrypted(mi.UserDefined); ok {
|
2018-11-14 20:36:41 -05:00
|
|
|
|
var key []byte
|
2019-01-05 17:16:43 -05:00
|
|
|
|
isEncrypted = true
|
2020-05-28 15:36:20 -04:00
|
|
|
|
ssec = crypto.SSEC.IsEncrypted(mi.UserDefined)
|
|
|
|
|
if crypto.S3.IsEncrypted(mi.UserDefined) {
|
2018-11-14 20:36:41 -05:00
|
|
|
|
// Calculating object encryption key
|
2020-05-28 15:36:20 -04:00
|
|
|
|
objectEncryptionKey, err = decryptObjectInfo(key, bucket, object, mi.UserDefined)
|
2018-11-14 20:36:41 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2018-11-14 20:36:41 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
partsMap := make(map[string]PartInfo)
|
|
|
|
|
if isEncrypted {
|
2020-05-28 15:36:20 -04:00
|
|
|
|
maxParts := 10000
|
2020-09-14 18:57:13 -04:00
|
|
|
|
listPartsInfo, err := objectAPI.ListObjectParts(ctx, bucket, object, uploadID, 0, maxParts, ObjectOptions{})
|
2020-05-28 15:36:20 -04:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-05-28 15:36:20 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
for _, part := range listPartsInfo.Parts {
|
|
|
|
|
partsMap[strconv.Itoa(part.PartNumber)] = part
|
2018-11-14 20:36:41 -05:00
|
|
|
|
}
|
|
|
|
|
}
|
2019-01-05 17:16:43 -05:00
|
|
|
|
|
fs: Break fs package to top-level and introduce ObjectAPI interface.
2016-03-30 19:15:28 -04:00
|
|
|
|
// Complete parts.
|
2020-09-14 23:44:18 -04:00
|
|
|
|
completeParts := make([]CompletePart, 0, len(complMultipartUpload.Parts))
|
2016-04-11 04:29:18 -04:00
|
|
|
|
for _, part := range complMultipartUpload.Parts {
|
2017-03-15 23:48:49 -04:00
|
|
|
|
part.ETag = canonicalizeETag(part.ETag)
|
2018-11-14 20:36:41 -05:00
|
|
|
|
if isEncrypted {
|
|
|
|
|
// ETag is stored in the backend in encrypted form. Validate client sent ETag with
|
|
|
|
|
// decrypted ETag.
|
|
|
|
|
if bkPartInfo, ok := partsMap[strconv.Itoa(part.PartNumber)]; ok {
|
|
|
|
|
bkETag := tryDecryptETag(objectEncryptionKey, bkPartInfo.ETag, ssec)
|
|
|
|
|
if bkETag != part.ETag {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPart), r.URL, guessIsBrowserReq(r))
|
2018-11-14 20:36:41 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
part.ETag = bkPartInfo.ETag
|
|
|
|
|
}
|
|
|
|
|
}
|
2016-04-11 04:29:18 -04:00
|
|
|
|
completeParts = append(completeParts, part)
|
|
|
|
|
}
|
2016-08-10 21:47:49 -04:00
|
|
|
|
|
2018-03-28 17:14:06 -04:00
|
|
|
|
completeMultiPartUpload := objectAPI.CompleteMultipartUpload
|
2019-02-21 01:20:15 -05:00
|
|
|
|
|
|
|
|
|
// This code is specifically to handle the requirements for slow
|
|
|
|
|
// complete multipart upload operations on FS mode.
|
|
|
|
|
writeErrorResponseWithoutXMLHeader := func(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
|
|
|
|
|
switch err.Code {
|
|
|
|
|
case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
|
2020-06-12 23:04:01 -04:00
|
|
|
|
// Set the Retry-After header to indicate to user-agents to retry the request after 120 seconds.
|
2019-02-21 01:20:15 -05:00
|
|
|
|
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
|
2019-07-03 01:34:32 -04:00
|
|
|
|
w.Header().Set(xhttp.RetryAfter, "120")
|
2019-02-21 01:20:15 -05:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Generate error response.
|
|
|
|
|
errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path,
|
2019-07-03 01:34:32 -04:00
|
|
|
|
w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
|
2019-02-21 01:20:15 -05:00
|
|
|
|
encodedErrorResponse, _ := xml.Marshal(errorResponse)
|
|
|
|
|
setCommonHeaders(w)
|
2019-07-03 01:34:32 -04:00
|
|
|
|
w.Header().Set(xhttp.ContentType, string(mimeXML))
|
2019-02-21 01:20:15 -05:00
|
|
|
|
w.Write(encodedErrorResponse)
|
|
|
|
|
w.(http.Flusher).Flush()
|
|
|
|
|
}
|
2020-07-30 22:45:12 -04:00
|
|
|
|
|
2021-04-19 13:30:42 -04:00
|
|
|
|
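// Record any transitioned (tiered) version of the object being overwritten so its
// remote copy can be swept once the upload completes.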
os := newObjSweeper(bucket, object)
|
|
|
|
|
// Get appropriate object info to identify the remote object to delete
|
|
|
|
|
goiOpts := os.GetOpts()
|
|
|
|
|
if goi, gerr := objectAPI.GetObjectInfo(ctx, bucket, object, goiOpts); gerr == nil {
|
|
|
|
|
os.SetTransitionState(goi)
|
|
|
|
|
}
|
|
|
|
|
|
2020-07-30 22:45:12 -04:00
|
|
|
|
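// CompleteMultipartUpload can take a long time on the backend (especially in FS mode).
// Stream whitespace to the client to keep the connection alive; if any whitespace was
// already flushed, the final XML response is written without the XML declaration.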
setEventStreamHeaders(w)
|
|
|
|
|
|
2019-02-21 01:20:15 -05:00
|
|
|
|
w = &whiteSpaceWriter{ResponseWriter: w, Flusher: w.(http.Flusher)}
|
2019-10-15 21:35:41 -04:00
|
|
|
|
completeDoneCh := sendWhiteSpace(w)
|
2020-09-14 18:57:13 -04:00
|
|
|
|
objInfo, err := completeMultiPartUpload(ctx, bucket, object, uploadID, completeParts, ObjectOptions{})
|
2019-02-05 23:58:09 -05:00
|
|
|
|
// Stop writing white spaces to the client. Note that close(doneCh) style is not used as it
|
|
|
|
|
// can cause white space to be written after we send the XML response, due to a race condition.
|
2019-02-21 01:20:15 -05:00
|
|
|
|
headerWritten := <-completeDoneCh
|
2015-09-19 06:20:07 -04:00
|
|
|
|
if err != nil {
|
2019-02-21 01:20:15 -05:00
|
|
|
|
if headerWritten {
|
|
|
|
|
writeErrorResponseWithoutXMLHeader(ctx, w, toAPIError(ctx, err), r.URL)
|
|
|
|
|
} else {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2016-06-28 17:51:49 -04:00
|
|
|
|
}
|
2015-08-03 19:17:21 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2016-06-15 23:31:06 -04:00
|
|
|
|
|
2016-03-12 19:08:15 -05:00
|
|
|
|
// Get object location.
|
2019-02-22 22:18:01 -05:00
|
|
|
|
location := getObjectLocation(r, globalDomainNames, bucket, object)
|
2016-03-01 23:01:40 -05:00
|
|
|
|
// Generate complete multipart response.
|
2017-05-14 15:05:51 -04:00
|
|
|
|
response := generateCompleteMultpartUploadResponse(bucket, object, location, objInfo.ETag)
|
2019-02-21 01:20:15 -05:00
|
|
|
|
var encodedSuccessResponse []byte
|
|
|
|
|
if !headerWritten {
|
|
|
|
|
encodedSuccessResponse = encodeResponse(response)
|
|
|
|
|
} else {
|
|
|
|
|
encodedSuccessResponse, err = xml.Marshal(response)
|
|
|
|
|
if err != nil {
|
|
|
|
|
writeErrorResponseWithoutXMLHeader(ctx, w, toAPIError(ctx, err), r.URL)
|
|
|
|
|
return
|
|
|
|
|
}
|
2016-06-30 21:48:50 -04:00
|
|
|
|
}
|
2016-07-24 01:51:12 -04:00
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
|
setPutObjHeaders(w, objInfo, false)
|
2021-05-13 22:20:45 -04:00
|
|
|
|
if replicate, sync := mustReplicate(ctx, r, bucket, object, objInfo.UserDefined, objInfo.ReplicationStatus.String(), false); replicate {
|
2021-04-03 12:03:42 -04:00
|
|
|
|
scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
|
2020-07-21 20:49:56 -04:00
|
|
|
|
}
|
2021-01-26 16:21:51 -05:00
|
|
|
|
|
2021-04-19 13:30:42 -04:00
|
|
|
|
// Remove the transitioned object whose object version is being overwritten.
|
|
|
|
|
logger.LogIf(ctx, os.Sweep())
|
|
|
|
|
|
2016-07-24 01:51:12 -04:00
|
|
|
|
// Write success response.
|
2017-01-06 03:37:00 -05:00
|
|
|
|
writeSuccessResponseXML(w, encodedSuccessResponse)
|
2016-07-24 01:51:12 -04:00
|
|
|
|
|
2016-09-29 01:46:19 -04:00
|
|
|
|
// Notify object created event.
|
2018-03-15 16:03:41 -04:00
|
|
|
|
sendEvent(eventArgs{
|
2018-11-02 21:40:08 -04:00
|
|
|
|
EventName: event.ObjectCreatedCompleteMultipartUpload,
|
|
|
|
|
BucketName: bucket,
|
|
|
|
|
Object: objInfo,
|
|
|
|
|
ReqParams: extractReqParams(r),
|
|
|
|
|
RespElements: extractRespElements(w),
|
|
|
|
|
UserAgent: r.UserAgent(),
|
2019-03-25 14:45:42 -04:00
|
|
|
|
Host: handlers.GetSourceIP(r),
|
2016-09-29 01:46:19 -04:00
|
|
|
|
})
|
2015-05-07 22:55:30 -04:00
|
|
|
|
}
|
2015-06-08 14:06:06 -04:00
|
|
|
|
|
2016-04-12 15:45:15 -04:00
|
|
|
|
/// Delete objectAPIHandlers
|
2015-06-08 14:06:06 -04:00
|
|
|
|
|
2016-03-05 19:43:48 -05:00
|
|
|
|
// DeleteObjectHandler - delete an object
|
2016-04-12 15:45:15 -04:00
|
|
|
|
func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
2018-07-20 21:46:32 -04:00
|
|
|
|
ctx := newContext(r, w, "DeleteObject")
|
2018-03-14 15:01:47 -04:00
|
|
|
|
|
2021-01-26 16:21:51 -05:00
|
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2018-10-12 15:25:59 -04:00
|
|
|
|
|
2016-02-15 20:42:39 -05:00
|
|
|
|
vars := mux.Vars(r)
|
2015-10-16 14:26:01 -04:00
|
|
|
|
bucket := vars["bucket"]
|
2021-03-09 15:58:22 -05:00
|
|
|
|
object, err := unescapePath(vars["object"])
|
2020-02-11 22:38:02 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-02-11 22:38:02 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2015-10-16 14:26:01 -04:00
|
|
|
|
|
2016-08-10 21:47:49 -04:00
|
|
|
|
objectAPI := api.ObjectAPI()
|
|
|
|
|
if objectAPI == nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
2016-08-10 21:47:49 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-24 18:53:30 -04:00
|
|
|
|
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object); s3Error != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
2016-02-03 19:46:56 -05:00
|
|
|
|
return
|
2016-02-04 15:52:25 -05:00
|
|
|
|
}
|
2016-11-21 16:51:05 -05:00
|
|
|
|
|
2019-11-20 16:18:09 -05:00
|
|
|
|
getObjectInfo := objectAPI.GetObjectInfo
|
|
|
|
|
if api.CacheAPI() != nil {
|
|
|
|
|
getObjectInfo = api.CacheAPI().GetObjectInfo
|
2018-03-27 19:44:45 -04:00
|
|
|
|
}
|
|
|
|
|
|
2018-07-01 01:35:43 -04:00
|
|
|
|
if globalDNSConfig != nil {
|
|
|
|
|
_, err := globalDNSConfig.Get(bucket)
|
2020-09-09 15:20:49 -04:00
|
|
|
|
if err != nil && err != dns.ErrNotImplemented {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2018-07-01 01:35:43 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-06-16 10:54:27 -04:00
|
|
|
|
opts, err := delOpts(ctx, r, bucket, object)
|
2020-06-12 23:04:01 -04:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-06-12 23:04:01 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-11-25 14:24:50 -05:00
|
|
|
|
var (
|
2021-04-19 13:30:42 -04:00
|
|
|
|
goi ObjectInfo
|
|
|
|
|
gerr error
|
2020-11-25 14:24:50 -05:00
|
|
|
|
)
|
2021-04-19 13:30:42 -04:00
|
|
|
|
|
|
|
|
|
var goiOpts ObjectOptions
|
|
|
|
|
os := newObjSweeper(bucket, object).WithVersion(singleDelete(*r))
|
|
|
|
|
// Mutations of objects on versioning suspended buckets
|
|
|
|
|
// affect their null version. Through the opts below we select
|
|
|
|
|
// the null version's remote object to delete if
|
|
|
|
|
// transitioned.
|
|
|
|
|
goiOpts = os.GetOpts()
|
|
|
|
|
goi, gerr = getObjectInfo(ctx, bucket, object, goiOpts)
|
|
|
|
|
if gerr == nil {
|
|
|
|
|
os.SetTransitionState(goi)
|
2020-11-25 14:24:50 -05:00
|
|
|
|
}
|
2021-03-30 20:15:36 -04:00
|
|
|
|
|
|
|
|
|
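// Determine whether this delete must be replicated to the configured remote target; if so,
// mark the version purge or delete marker replication status as pending.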
replicateDel, replicateSync := checkReplicateDelete(ctx, bucket, ObjectToDelete{ObjectName: object, VersionID: opts.VersionID}, goi, gerr)
|
2020-11-19 21:43:58 -05:00
|
|
|
|
if replicateDel {
|
|
|
|
|
if opts.VersionID != "" {
|
|
|
|
|
opts.VersionPurgeStatus = Pending
|
|
|
|
|
} else {
|
|
|
|
|
opts.DeleteMarkerReplicationStatus = string(replication.Pending)
|
|
|
|
|
}
|
|
|
|
|
}
|
2021-03-30 20:15:36 -04:00
|
|
|
|
|
2020-11-25 14:24:50 -05:00
|
|
|
|
vID := opts.VersionID
|
2020-11-19 21:43:58 -05:00
|
|
|
|
if r.Header.Get(xhttp.AmzBucketReplicationStatus) == replication.Replica.String() {
|
|
|
|
|
// check if replica has permission to be deleted.
|
|
|
|
|
if apiErrCode := checkRequestAuthType(ctx, r, policy.ReplicateDeleteAction, bucket, object); apiErrCode != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErrCode), r.URL, guessIsBrowserReq(r))
|
2020-11-19 21:43:58 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
opts.DeleteMarkerReplicationStatus = replication.Replica.String()
|
2020-11-25 14:24:50 -05:00
|
|
|
|
if opts.VersionPurgeStatus.Empty() {
|
|
|
|
|
// opts.VersionID holds the delete marker version ID to replicate; it is not yet present on disk
|
|
|
|
|
vID = ""
|
|
|
|
|
}
|
2020-11-19 21:43:58 -05:00
|
|
|
|
}
|
2020-11-25 14:24:50 -05:00
|
|
|
|
|
2020-04-06 16:44:16 -04:00
|
|
|
|
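// When object locking is enabled on the bucket, enforce retention and governance-bypass
// rules before deleting a specific version.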
apiErr := ErrNone
|
2020-05-19 16:53:54 -04:00
|
|
|
|
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
|
2020-11-12 19:33:22 -05:00
|
|
|
|
if vID != "" {
|
2020-06-12 23:04:01 -04:00
|
|
|
|
apiErr = enforceRetentionBypassForDelete(ctx, r, bucket, ObjectToDelete{
|
|
|
|
|
ObjectName: object,
|
2020-11-12 19:33:22 -05:00
|
|
|
|
VersionID: vID,
|
2020-11-25 14:24:50 -05:00
|
|
|
|
}, goi, gerr)
|
2020-06-12 23:04:01 -04:00
|
|
|
|
if apiErr != ErrNone && apiErr != ErrNoSuchKey {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErr), r.URL, guessIsBrowserReq(r))
|
2020-06-12 23:04:01 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-04-06 16:44:16 -04:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-09-09 21:23:08 -04:00
|
|
|
|
if apiErr == ErrNoSuchKey {
|
|
|
|
|
writeSuccessNoContent(w)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2021-02-11 01:00:42 -05:00
|
|
|
|
deleteObject := objectAPI.DeleteObject
|
|
|
|
|
if api.CacheAPI() != nil {
|
|
|
|
|
deleteObject = api.CacheAPI().DeleteObject
|
|
|
|
|
}
|
|
|
|
|
|
2020-09-09 21:23:08 -04:00
|
|
|
|
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html
|
2021-02-11 01:00:42 -05:00
|
|
|
|
objInfo, err := deleteObject(ctx, bucket, object, opts)
|
2020-09-09 21:23:08 -04:00
|
|
|
|
if err != nil {
|
|
|
|
|
switch err.(type) {
|
|
|
|
|
case BucketNotFound:
|
|
|
|
|
// When the bucket doesn't exist, handle it specially.
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-09-09 21:23:08 -04:00
|
|
|
|
return
|
2020-04-06 16:44:16 -04:00
|
|
|
|
}
|
|
|
|
|
}
|
2020-11-12 14:43:04 -05:00
|
|
|
|
|
2021-02-11 01:00:42 -05:00
|
|
|
|
if objInfo.Name == "" {
|
|
|
|
|
writeSuccessNoContent(w)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
setPutObjHeaders(w, objInfo, true)
|
|
|
|
|
writeSuccessNoContent(w)
|
|
|
|
|
|
|
|
|
|
eventName := event.ObjectRemovedDelete
|
|
|
|
|
if objInfo.DeleteMarker {
|
|
|
|
|
eventName = event.ObjectRemovedDeleteMarkerCreated
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Notify object deleted event.
|
|
|
|
|
sendEvent(eventArgs{
|
|
|
|
|
EventName: eventName,
|
|
|
|
|
BucketName: bucket,
|
|
|
|
|
Object: objInfo,
|
|
|
|
|
ReqParams: extractReqParams(r),
|
|
|
|
|
RespElements: extractRespElements(w),
|
|
|
|
|
UserAgent: r.UserAgent(),
|
|
|
|
|
Host: handlers.GetSourceIP(r),
|
|
|
|
|
})
|
|
|
|
|
|
2020-11-19 21:43:58 -05:00
|
|
|
|
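// Queue replication of the delete (delete marker or versioned delete) to the remote target.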
if replicateDel {
|
|
|
|
|
dmVersionID := ""
|
|
|
|
|
versionID := ""
|
|
|
|
|
if objInfo.DeleteMarker {
|
|
|
|
|
dmVersionID = objInfo.VersionID
|
|
|
|
|
} else {
|
|
|
|
|
versionID = objInfo.VersionID
|
|
|
|
|
}
|
2021-01-12 01:36:51 -05:00
|
|
|
|
dobj := DeletedObjectVersionInfo{
|
2020-11-19 21:43:58 -05:00
|
|
|
|
DeletedObject: DeletedObject{
|
|
|
|
|
ObjectName: object,
|
|
|
|
|
VersionID: versionID,
|
|
|
|
|
DeleteMarkerVersionID: dmVersionID,
|
|
|
|
|
DeleteMarkerReplicationStatus: string(objInfo.ReplicationStatus),
|
2020-11-29 00:15:45 -05:00
|
|
|
|
DeleteMarkerMTime: DeleteMarkerMTime{objInfo.ModTime},
|
2020-11-19 21:43:58 -05:00
|
|
|
|
DeleteMarker: objInfo.DeleteMarker,
|
|
|
|
|
VersionPurgeStatus: objInfo.VersionPurgeStatus,
|
|
|
|
|
},
|
|
|
|
|
Bucket: bucket,
|
2021-01-12 01:36:51 -05:00
|
|
|
|
}
|
|
|
|
|
scheduleReplicationDelete(ctx, dobj, objectAPI, replicateSync)
|
2020-11-19 21:43:58 -05:00
|
|
|
|
}
|
2020-11-29 00:15:45 -05:00
|
|
|
|
|
2021-04-19 13:30:42 -04:00
|
|
|
|
// Remove the transitioned object whose object version is being overwritten.
|
|
|
|
|
logger.LogIf(ctx, os.Sweep())
|
|
|
|
|
|
2015-06-08 14:06:06 -04:00
|
|
|
|
}
|
2019-11-12 17:50:18 -05:00
|
|
|
|
|
|
|
|
|
// PutObjectLegalHoldHandler - set legal hold configuration on an object.
|
|
|
|
|
func (api objectAPIHandlers) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
|
ctx := newContext(r, w, "PutObjectLegalHold")
|
|
|
|
|
|
2021-01-26 16:21:51 -05:00
|
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2019-11-12 17:50:18 -05:00
|
|
|
|
|
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
|
bucket := vars["bucket"]
|
2021-03-09 15:58:22 -05:00
|
|
|
|
object, err := unescapePath(vars["object"])
|
2020-02-11 22:38:02 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-02-11 22:38:02 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2019-11-12 17:50:18 -05:00
|
|
|
|
|
|
|
|
|
objectAPI := api.ObjectAPI()
|
|
|
|
|
if objectAPI == nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
2019-11-12 17:50:18 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-16 18:41:56 -05:00
|
|
|
|
// Check permissions to perform this legal hold operation
|
2020-03-04 10:04:12 -05:00
|
|
|
|
if s3Err := checkRequestAuthType(ctx, r, policy.PutObjectLegalHoldAction, bucket, object); s3Err != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
|
2020-01-16 18:41:56 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-01-16 18:41:56 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-03-04 10:04:12 -05:00
|
|
|
|
if !hasContentMD5(r.Header) {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
|
2020-01-16 18:41:56 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-02-16 01:07:52 -05:00
|
|
|
|
|
2020-05-19 16:53:54 -04:00
|
|
|
|
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); !rcfg.LockEnabled {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketObjectLockConfiguration), r.URL, guessIsBrowserReq(r))
|
2020-01-16 18:41:56 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
legalHold, err := objectlock.ParseObjectLegalHold(r.Body)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-01-16 18:41:56 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-12 17:50:18 -05:00
|
|
|
|
getObjectInfo := objectAPI.GetObjectInfo
|
|
|
|
|
if api.CacheAPI() != nil {
|
|
|
|
|
getObjectInfo = api.CacheAPI().GetObjectInfo
|
|
|
|
|
}
|
2020-06-12 23:04:01 -04:00
|
|
|
|
|
2019-11-12 17:50:18 -05:00
|
|
|
|
opts, err := getOpts(ctx, r, bucket, object)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2019-11-12 17:50:18 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-16 18:41:56 -05:00
|
|
|
|
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2019-11-12 17:50:18 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2021-04-04 16:32:31 -04:00
|
|
|
|
if objInfo.DeleteMarker {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
|
2021-04-04 16:32:31 -04:00
|
|
|
|
return
|
2020-06-10 11:16:30 -04:00
|
|
|
|
}
|
2021-04-04 16:32:31 -04:00
|
|
|
|
objInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = strings.ToUpper(string(legalHold.Status))
|
2021-05-13 22:20:45 -04:00
|
|
|
|
replicate, sync := mustReplicate(ctx, r, bucket, object, objInfo.UserDefined, objInfo.ReplicationStatus.String(), true)
|
2020-11-19 14:50:22 -05:00
|
|
|
|
if replicate {
|
|
|
|
|
objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
|
|
|
|
|
}
|
2021-04-04 16:32:31 -04:00
|
|
|
|
// If version-id is not specified, the legal hold is applied to the latest version of the object.
|
|
|
|
|
if opts.VersionID == "" {
|
|
|
|
|
opts.VersionID = objInfo.VersionID
|
|
|
|
|
}
|
|
|
|
|
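// Persist the updated legal hold status via a metadata-only update on the selected object version.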
popts := ObjectOptions{
|
|
|
|
|
MTime: opts.MTime,
|
|
|
|
|
VersionID: opts.VersionID,
|
|
|
|
|
UserDefined: make(map[string]string, len(objInfo.UserDefined)),
|
|
|
|
|
}
|
|
|
|
|
for k, v := range objInfo.UserDefined {
|
|
|
|
|
popts.UserDefined[k] = v
|
|
|
|
|
}
|
|
|
|
|
if _, err = objectAPI.PutObjectMetadata(ctx, bucket, object, popts); err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-01-16 18:41:56 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-11-19 14:50:22 -05:00
|
|
|
|
if replicate {
|
2021-04-03 12:03:42 -04:00
|
|
|
|
scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.MetadataReplicationType)
|
2020-11-19 14:50:22 -05:00
|
|
|
|
}
|
2020-02-18 21:45:48 -05:00
|
|
|
|
writeSuccessResponseHeadersOnly(w)
|
2021-02-03 23:41:33 -05:00
|
|
|
|
|
|
|
|
|
// Notify object event.
|
2020-01-16 18:41:56 -05:00
|
|
|
|
sendEvent(eventArgs{
|
|
|
|
|
EventName: event.ObjectCreatedPutLegalHold,
|
|
|
|
|
BucketName: bucket,
|
|
|
|
|
Object: objInfo,
|
|
|
|
|
ReqParams: extractReqParams(r),
|
|
|
|
|
RespElements: extractRespElements(w),
|
|
|
|
|
UserAgent: r.UserAgent(),
|
|
|
|
|
Host: handlers.GetSourceIP(r),
|
|
|
|
|
})
|
2019-11-12 17:50:18 -05:00
|
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetObjectLegalHoldHandler - get legal hold configuration of an object.
|
|
|
|
|
func (api objectAPIHandlers) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
|
ctx := newContext(r, w, "GetObjectLegalHold")
|
|
|
|
|
|
2021-01-26 16:21:51 -05:00
|
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2019-11-12 17:50:18 -05:00
|
|
|
|
|
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
|
bucket := vars["bucket"]
|
2021-03-09 15:58:22 -05:00
|
|
|
|
object, err := unescapePath(vars["object"])
|
2020-02-11 22:38:02 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-02-11 22:38:02 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2019-11-12 17:50:18 -05:00
|
|
|
|
|
|
|
|
|
objectAPI := api.ObjectAPI()
|
|
|
|
|
if objectAPI == nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
2019-11-12 17:50:18 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-01-16 18:41:56 -05:00
|
|
|
|
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectLegalHoldAction, bucket, object); s3Error != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
2020-01-16 18:41:56 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2019-11-12 17:50:18 -05:00
|
|
|
|
|
|
|
|
|
getObjectInfo := objectAPI.GetObjectInfo
|
|
|
|
|
if api.CacheAPI() != nil {
|
|
|
|
|
getObjectInfo = api.CacheAPI().GetObjectInfo
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-19 16:53:54 -04:00
|
|
|
|
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); !rcfg.LockEnabled {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketObjectLockConfiguration), r.URL, guessIsBrowserReq(r))
|
2020-04-06 16:44:16 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-12 17:50:18 -05:00
|
|
|
|
opts, err := getOpts(ctx, r, bucket, object)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2019-11-12 17:50:18 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-16 18:41:56 -05:00
|
|
|
|
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2019-11-12 17:50:18 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-16 18:41:56 -05:00
|
|
|
|
legalHold := objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
|
2020-02-18 21:45:48 -05:00
|
|
|
|
if legalHold.IsEmpty() {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchObjectLockConfiguration), r.URL, guessIsBrowserReq(r))
|
2020-02-18 21:45:48 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-04-06 16:44:16 -04:00
|
|
|
|
|
2020-01-16 18:41:56 -05:00
|
|
|
|
writeSuccessResponseXML(w, encodeResponse(legalHold))
|
|
|
|
|
// Notify object legal hold accessed via a GET request.
|
|
|
|
|
sendEvent(eventArgs{
|
|
|
|
|
EventName: event.ObjectAccessedGetLegalHold,
|
|
|
|
|
BucketName: bucket,
|
|
|
|
|
Object: objInfo,
|
|
|
|
|
ReqParams: extractReqParams(r),
|
|
|
|
|
RespElements: extractRespElements(w),
|
|
|
|
|
UserAgent: r.UserAgent(),
|
|
|
|
|
Host: handlers.GetSourceIP(r),
|
|
|
|
|
})
|
2019-11-12 17:50:18 -05:00
|
|
|
|
}
|
|
|
|
|
|
2019-11-20 16:18:09 -05:00
|
|
|
|
// PutObjectRetentionHandler - set object retention configuration on an object.
|
2019-11-12 17:50:18 -05:00
|
|
|
|
func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
|
ctx := newContext(r, w, "PutObjectRetention")
|
|
|
|
|
|
2021-01-26 16:21:51 -05:00
|
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2019-11-12 17:50:18 -05:00
|
|
|
|
|
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
|
bucket := vars["bucket"]
|
2021-03-09 15:58:22 -05:00
|
|
|
|
object, err := unescapePath(vars["object"])
|
2020-02-11 22:38:02 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-02-11 22:38:02 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2019-11-12 17:50:18 -05:00
|
|
|
|
|
|
|
|
|
objectAPI := api.ObjectAPI()
|
|
|
|
|
if objectAPI == nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
2019-11-12 17:50:18 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-04-06 16:44:16 -04:00
|
|
|
|
|
|
|
|
|
cred, owner, claims, s3Err := validateSignature(getRequestAuthType(r), r)
|
|
|
|
|
if s3Err != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
|
2019-11-20 16:18:09 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-25 13:58:39 -05:00
|
|
|
|
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2019-11-25 13:58:39 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-04-06 16:44:16 -04:00
|
|
|
|
|
2020-03-04 10:04:12 -05:00
|
|
|
|
if !hasContentMD5(r.Header) {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
|
2019-11-20 16:18:09 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-02-16 01:07:52 -05:00
|
|
|
|
|
2020-05-19 16:53:54 -04:00
|
|
|
|
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); !rcfg.LockEnabled {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketObjectLockConfiguration), r.URL, guessIsBrowserReq(r))
|
2019-11-20 16:18:09 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2019-11-12 17:50:18 -05:00
|
|
|
|
|
2020-01-16 18:41:56 -05:00
|
|
|
|
objRetention, err := objectlock.ParseObjectRetention(r.Body)
|
2019-11-25 13:58:39 -05:00
|
|
|
|
if err != nil {
|
|
|
|
|
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
|
|
|
|
|
apiErr.Description = err.Error()
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
|
2019-11-25 13:58:39 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-04-06 16:44:16 -04:00
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
|
opts, err := getOpts(ctx, r, bucket, object)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-06-12 23:04:01 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-12 17:50:18 -05:00
|
|
|
|
getObjectInfo := objectAPI.GetObjectInfo
|
|
|
|
|
if api.CacheAPI() != nil {
|
|
|
|
|
getObjectInfo = api.CacheAPI().GetObjectInfo
|
|
|
|
|
}
|
|
|
|
|
|
2020-04-06 16:44:16 -04:00
|
|
|
|
objInfo, s3Err := enforceRetentionBypassForPut(ctx, r, bucket, object, getObjectInfo, objRetention, cred, owner, claims)
|
2019-11-20 16:18:09 -05:00
|
|
|
|
if s3Err != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
|
2019-11-20 16:18:09 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2021-04-04 16:32:31 -04:00
|
|
|
|
if objInfo.DeleteMarker {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
|
2021-04-04 16:32:31 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-06-30 19:44:24 -04:00
|
|
|
|
if objRetention.Mode.Valid() {
|
|
|
|
|
objInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockMode)] = string(objRetention.Mode)
|
|
|
|
|
objInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = objRetention.RetainUntilDate.UTC().Format(time.RFC3339)
|
|
|
|
|
} else {
|
2021-04-04 16:32:31 -04:00
|
|
|
|
objInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockMode)] = ""
|
|
|
|
|
objInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = ""
|
2020-06-10 11:16:30 -04:00
|
|
|
|
}
|
2021-05-13 22:20:45 -04:00
|
|
|
|
replicate, sync := mustReplicate(ctx, r, bucket, object, objInfo.UserDefined, objInfo.ReplicationStatus.String(), true)
|
2020-11-19 14:50:22 -05:00
|
|
|
|
if replicate {
|
|
|
|
|
objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
|
|
|
|
|
}
|
2021-04-04 16:32:31 -04:00
|
|
|
|
// If version-id is not specified, retention is applied to the latest version of the object.
|
|
|
|
|
if opts.VersionID == "" {
|
|
|
|
|
opts.VersionID = objInfo.VersionID
|
|
|
|
|
}
|
|
|
|
|
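// Persist the updated retention mode and retain-until date via a metadata-only update on the
// selected object version.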
popts := ObjectOptions{
|
|
|
|
|
MTime: opts.MTime,
|
|
|
|
|
VersionID: opts.VersionID,
|
|
|
|
|
UserDefined: make(map[string]string, len(objInfo.UserDefined)),
|
|
|
|
|
}
|
|
|
|
|
for k, v := range objInfo.UserDefined {
|
|
|
|
|
popts.UserDefined[k] = v
|
|
|
|
|
}
|
|
|
|
|
if _, err = objectAPI.PutObjectMetadata(ctx, bucket, object, popts); err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2019-11-12 17:50:18 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-11-19 14:50:22 -05:00
|
|
|
|
if replicate {
|
2021-04-03 12:03:42 -04:00
|
|
|
|
scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.MetadataReplicationType)
|
2020-11-19 14:50:22 -05:00
|
|
|
|
}
|
2019-11-12 17:50:18 -05:00
|
|
|
|
|
2019-11-20 16:18:09 -05:00
|
|
|
|
writeSuccessNoContent(w)
|
|
|
|
|
// Notify object event.
|
|
|
|
|
sendEvent(eventArgs{
|
|
|
|
|
EventName: event.ObjectCreatedPutRetention,
|
|
|
|
|
BucketName: bucket,
|
|
|
|
|
Object: objInfo,
|
|
|
|
|
ReqParams: extractReqParams(r),
|
|
|
|
|
RespElements: extractRespElements(w),
|
|
|
|
|
UserAgent: r.UserAgent(),
|
|
|
|
|
Host: handlers.GetSourceIP(r),
|
|
|
|
|
})
|
2019-11-12 17:50:18 -05:00
|
|
|
|
}
|
|
|
|
|
|
2019-11-20 16:18:09 -05:00
|
|
|
|
// GetObjectRetentionHandler - get retention configuration of an object.
|
2019-11-12 17:50:18 -05:00
|
|
|
|
func (api objectAPIHandlers) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
|
ctx := newContext(r, w, "GetObjectRetention")
|
2021-01-26 16:21:51 -05:00
|
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2019-11-12 17:50:18 -05:00
|
|
|
|
|
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
|
bucket := vars["bucket"]
|
2021-03-09 15:58:22 -05:00
|
|
|
|
object, err := unescapePath(vars["object"])
|
2020-02-11 22:38:02 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-02-11 22:38:02 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2019-11-12 17:50:18 -05:00
|
|
|
|
|
|
|
|
|
objectAPI := api.ObjectAPI()
|
|
|
|
|
if objectAPI == nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
2019-11-12 17:50:18 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2019-11-20 16:18:09 -05:00
|
|
|
|
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectRetentionAction, bucket, object); s3Error != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
2019-11-20 16:18:09 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2019-11-12 17:50:18 -05:00
|
|
|
|
|
|
|
|
|
getObjectInfo := objectAPI.GetObjectInfo
|
|
|
|
|
if api.CacheAPI() != nil {
|
|
|
|
|
getObjectInfo = api.CacheAPI().GetObjectInfo
|
|
|
|
|
}
|
|
|
|
|
|
2021-04-14 03:01:53 -04:00
|
|
|
|
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); !rcfg.LockEnabled {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketObjectLockConfiguration), r.URL, guessIsBrowserReq(r))
|
2021-04-14 03:01:53 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-12 17:50:18 -05:00
|
|
|
|
opts, err := getOpts(ctx, r, bucket, object)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2019-11-12 17:50:18 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-20 16:18:09 -05:00
|
|
|
|
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2019-11-12 17:50:18 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-16 18:41:56 -05:00
|
|
|
|
retention := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
|
2020-06-30 19:44:24 -04:00
|
|
|
|
if !retention.Mode.Valid() {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchObjectLockConfiguration), r.URL, guessIsBrowserReq(r))
|
2020-06-30 19:44:24 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-20 16:18:09 -05:00
|
|
|
|
writeSuccessResponseXML(w, encodeResponse(retention))
|
|
|
|
|
// Notify object retention accessed via a GET request.
|
|
|
|
|
sendEvent(eventArgs{
|
|
|
|
|
EventName: event.ObjectAccessedGetRetention,
|
|
|
|
|
BucketName: bucket,
|
|
|
|
|
Object: objInfo,
|
|
|
|
|
ReqParams: extractReqParams(r),
|
|
|
|
|
RespElements: extractRespElements(w),
|
|
|
|
|
UserAgent: r.UserAgent(),
|
|
|
|
|
Host: handlers.GetSourceIP(r),
|
|
|
|
|
})
|
2019-11-12 17:50:18 -05:00
|
|
|
|
}
|
2020-01-20 11:45:59 -05:00
|
|
|
|
|
|
|
|
|
// GetObjectTaggingHandler - GET object tagging
|
|
|
|
|
func (api objectAPIHandlers) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
|
ctx := newContext(r, w, "GetObjectTagging")
|
2021-01-26 16:21:51 -05:00
|
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2020-01-20 11:45:59 -05:00
|
|
|
|
|
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
|
bucket := vars["bucket"]
|
2021-03-09 15:58:22 -05:00
|
|
|
|
object, err := unescapePath(vars["object"])
|
2020-02-11 22:38:02 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-02-11 22:38:02 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-01-20 11:45:59 -05:00
|
|
|
|
|
|
|
|
|
objAPI := api.ObjectAPI()
|
|
|
|
|
if objAPI == nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
2020-01-20 11:45:59 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-23 14:09:35 -04:00
|
|
|
|
if !objAPI.IsTaggingSupported() {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
|
2020-05-23 14:09:35 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-07-17 01:38:58 -04:00
|
|
|
|
|
2020-01-20 11:45:59 -05:00
|
|
|
|
// Allow getObjectTagging if policy action is set.
|
|
|
|
|
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectTaggingAction, bucket, object); s3Error != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
2020-01-20 11:45:59 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
|
opts, err := getOpts(ctx, r, bucket, object)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-06-12 23:04:01 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-20 11:45:59 -05:00
|
|
|
|
// Get object tags
|
2020-06-12 23:04:01 -04:00
|
|
|
|
tags, err := objAPI.GetObjectTags(ctx, bucket, object, opts)
|
2020-01-20 11:45:59 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-01-20 11:45:59 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-07-17 01:38:58 -04:00
|
|
|
|
if opts.VersionID != "" {
|
|
|
|
|
w.Header()[xhttp.AmzVersionID] = []string{opts.VersionID}
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-20 11:45:59 -05:00
|
|
|
|
writeSuccessResponseXML(w, encodeResponse(tags))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// PutObjectTaggingHandler - PUT object tagging
|
|
|
|
|
func (api objectAPIHandlers) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
|
ctx := newContext(r, w, "PutObjectTagging")
|
2021-01-26 16:21:51 -05:00
|
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2020-01-20 11:45:59 -05:00
|
|
|
|
|
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
|
bucket := vars["bucket"]
|
2021-03-09 15:58:22 -05:00
|
|
|
|
object, err := unescapePath(vars["object"])
|
2020-02-11 22:38:02 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-02-11 22:38:02 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-01-20 11:45:59 -05:00
|
|
|
|
|
|
|
|
|
objAPI := api.ObjectAPI()
|
|
|
|
|
if objAPI == nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
2020-01-20 11:45:59 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-05-23 14:09:35 -04:00
|
|
|
|
if !objAPI.IsTaggingSupported() {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
|
2020-05-23 14:09:35 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-01-20 11:45:59 -05:00
|
|
|
|
|
|
|
|
|
// Allow putObjectTagging if policy action is set
|
|
|
|
|
if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectTaggingAction, bucket, object); s3Error != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
2020-01-20 11:45:59 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-05 17:18:13 -04:00
|
|
|
|
tags, err := tags.ParseObjectXML(io.LimitReader(r.Body, r.ContentLength))
|
2020-01-20 11:45:59 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-01-20 11:45:59 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
|
opts, err := getOpts(ctx, r, bucket, object)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-06-12 23:04:01 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2021-05-13 22:20:45 -04:00
|
|
|
|
objInfo, err := objAPI.GetObjectInfo(ctx, bucket, object, opts)
|
|
|
|
|
if err != nil {
|
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
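// Determine whether the tag change must be replicated; if so, mark the replication status
// as pending before persisting the tags.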
replicate, sync := mustReplicate(ctx, r, bucket, object, map[string]string{xhttp.AmzObjectTagging: tags.String()}, objInfo.ReplicationStatus.String(), true)
|
2020-11-19 14:50:22 -05:00
|
|
|
|
if replicate {
|
|
|
|
|
opts.UserDefined = make(map[string]string)
|
|
|
|
|
opts.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
|
|
|
|
|
}
|
|
|
|
|
|
2021-02-01 16:52:51 -05:00
|
|
|
|
tagsStr := tags.String()
|
|
|
|
|
|
2020-01-20 11:45:59 -05:00
|
|
|
|
// Put object tags
|
2021-05-13 22:20:45 -04:00
|
|
|
|
objInfo, err = objAPI.PutObjectTags(ctx, bucket, object, tagsStr, opts)
|
2020-01-20 11:45:59 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-01-20 11:45:59 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-11-19 14:50:22 -05:00
|
|
|
|
if replicate {
|
2021-04-03 12:03:42 -04:00
|
|
|
|
scheduleReplication(ctx, objInfo.Clone(), objAPI, sync, replication.MetadataReplicationType)
|
2020-11-19 14:50:22 -05:00
|
|
|
|
}
|
|
|
|
|
|
2021-02-01 16:52:51 -05:00
|
|
|
|
if objInfo.VersionID != "" {
|
|
|
|
|
w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
|
2020-07-17 01:38:58 -04:00
|
|
|
|
}
|
|
|
|
|
|
2020-01-20 11:45:59 -05:00
|
|
|
|
writeSuccessResponseHeadersOnly(w)
|
2021-02-01 16:52:51 -05:00
|
|
|
|
|
|
|
|
|
sendEvent(eventArgs{
|
|
|
|
|
EventName: event.ObjectCreatedPutTagging,
|
|
|
|
|
BucketName: bucket,
|
|
|
|
|
Object: objInfo,
|
|
|
|
|
ReqParams: extractReqParams(r),
|
|
|
|
|
RespElements: extractRespElements(w),
|
|
|
|
|
UserAgent: r.UserAgent(),
|
|
|
|
|
Host: handlers.GetSourceIP(r),
|
|
|
|
|
})
|
|
|
|
|
|
2020-01-20 11:45:59 -05:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DeleteObjectTaggingHandler - DELETE object tagging
|
|
|
|
|
func (api objectAPIHandlers) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
|
ctx := newContext(r, w, "DeleteObjectTagging")
|
2021-01-26 16:21:51 -05:00
|
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2020-01-20 11:45:59 -05:00
|
|
|
|
|
|
|
|
|
objAPI := api.ObjectAPI()
|
|
|
|
|
if objAPI == nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
2020-01-20 11:45:59 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-05-23 14:09:35 -04:00
|
|
|
|
if !objAPI.IsTaggingSupported() {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
|
2020-05-23 14:09:35 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-01-20 11:45:59 -05:00
|
|
|
|
|
2020-02-11 22:38:02 -05:00
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
|
bucket := vars["bucket"]
|
2021-03-09 15:58:22 -05:00
|
|
|
|
object, err := unescapePath(vars["object"])
|
2020-02-11 22:38:02 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-02-11 22:38:02 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-20 11:45:59 -05:00
|
|
|
|
// Allow deleteObjectTagging if policy action is set
|
|
|
|
|
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteObjectTaggingAction, bucket, object); s3Error != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
2020-01-20 11:45:59 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
|
opts, err := getOpts(ctx, r, bucket, object)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-06-12 23:04:01 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
2021-02-03 23:41:33 -05:00
|
|
|
|
|
2020-11-19 14:50:22 -05:00
|
|
|
|
oi, err := objAPI.GetObjectInfo(ctx, bucket, object, opts)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-11-19 14:50:22 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2021-05-13 22:20:45 -04:00
|
|
|
|
replicate, sync := mustReplicate(ctx, r, bucket, object, map[string]string{xhttp.AmzObjectTagging: oi.UserTags}, oi.ReplicationStatus.String(), true)
|
2020-11-19 14:50:22 -05:00
|
|
|
|
if replicate {
|
|
|
|
|
opts.UserDefined = make(map[string]string)
|
|
|
|
|
opts.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
|
|
|
|
|
}
|
2021-02-01 16:52:51 -05:00
|
|
|
|
|
2021-02-03 23:41:33 -05:00
|
|
|
|
oi, err = objAPI.DeleteObjectTags(ctx, bucket, object, opts)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-01-20 11:45:59 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2020-07-17 01:38:58 -04:00
|
|
|
|
|
2020-11-19 14:50:22 -05:00
|
|
|
|
if replicate {
|
2021-04-03 12:03:42 -04:00
|
|
|
|
scheduleReplication(ctx, oi.Clone(), objAPI, sync, replication.MetadataReplicationType)
|
2020-11-19 14:50:22 -05:00
|
|
|
|
}
|
|
|
|
|
|
2021-02-01 16:52:51 -05:00
|
|
|
|
if oi.VersionID != "" {
|
|
|
|
|
w.Header()[xhttp.AmzVersionID] = []string{oi.VersionID}
|
|
|
|
|
}
|
2020-03-17 02:21:24 -04:00
|
|
|
|
writeSuccessNoContent(w)
|
2021-02-01 16:52:51 -05:00
|
|
|
|
|
|
|
|
|
sendEvent(eventArgs{
|
|
|
|
|
EventName: event.ObjectCreatedDeleteTagging,
|
|
|
|
|
BucketName: bucket,
|
|
|
|
|
Object: oi,
|
|
|
|
|
ReqParams: extractReqParams(r),
|
|
|
|
|
RespElements: extractRespElements(w),
|
|
|
|
|
UserAgent: r.UserAgent(),
|
|
|
|
|
Host: handlers.GetSourceIP(r),
|
|
|
|
|
})
|
2020-01-20 11:45:59 -05:00
|
|
|
|
}
|
2020-11-12 15:12:09 -05:00
|
|
|
|
|
|
|
|
|
// PostRestoreObjectHandler - POST restore object handler.
|
|
|
|
|
// ----------
|
|
|
|
|
func (api objectAPIHandlers) PostRestoreObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
|
ctx := newContext(r, w, "PostRestoreObject")
|
2021-01-26 16:21:51 -05:00
|
|
|
|
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
2020-11-12 15:12:09 -05:00
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
|
bucket := vars["bucket"]
|
2021-03-09 15:58:22 -05:00
|
|
|
|
object, err := unescapePath(vars["object"])
|
2020-11-12 15:12:09 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-11-12 15:12:09 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Fetch object stat info.
|
|
|
|
|
objectAPI := api.ObjectAPI()
|
|
|
|
|
if objectAPI == nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
2020-11-12 15:12:09 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
getObjectInfo := objectAPI.GetObjectInfo
|
|
|
|
|
if api.CacheAPI() != nil {
|
|
|
|
|
getObjectInfo = api.CacheAPI().GetObjectInfo
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Check for auth type to return S3 compatible error.
|
|
|
|
|
if s3Error := checkRequestAuthType(ctx, r, policy.RestoreObjectAction, bucket, object); s3Error != ErrNone {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
2020-11-12 15:12:09 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if r.ContentLength <= 0 {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEmptyRequestBody), r.URL, guessIsBrowserReq(r))
|
2020-11-12 15:12:09 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
2021-04-19 13:30:42 -04:00
|
|
|
|
opts, err := postRestoreOpts(ctx, r, bucket, object)
|
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2021-04-19 13:30:42 -04:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
|
2020-11-12 15:12:09 -05:00
|
|
|
|
if err != nil {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-11-12 15:12:09 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if objInfo.TransitionStatus != lifecycle.TransitionComplete {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidObjectState), r.URL, guessIsBrowserReq(r))
|
2020-11-12 15:12:09 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
rreq, err := parseRestoreRequest(io.LimitReader(r.Body, r.ContentLength))
|
|
|
|
|
if err != nil {
|
|
|
|
|
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
|
|
|
|
|
apiErr.Description = err.Error()
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
|
2020-11-12 15:12:09 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
// validate the request
|
|
|
|
|
if err := rreq.validate(ctx, objectAPI); err != nil {
|
|
|
|
|
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
|
|
|
|
|
apiErr.Description = err.Error()
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
|
2020-11-12 15:12:09 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
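// If a restore is already in progress, reject the request; if the object was already restored
// (and the restore has not expired), return 202 Accepted and only refresh the restore expiry.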
statusCode := http.StatusOK
|
|
|
|
|
alreadyRestored := false
|
|
|
|
|
if err == nil {
|
|
|
|
|
if objInfo.RestoreOngoing && rreq.Type != SelectRestoreRequest {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrObjectRestoreAlreadyInProgress), r.URL, guessIsBrowserReq(r))
|
2020-11-12 15:12:09 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
if !objInfo.RestoreOngoing && !objInfo.RestoreExpires.IsZero() {
|
|
|
|
|
statusCode = http.StatusAccepted
|
|
|
|
|
alreadyRestored = true
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
// set or upgrade restore expiry
|
|
|
|
|
restoreExpiry := lifecycle.ExpectedExpiryTime(time.Now(), rreq.Days)
|
|
|
|
|
metadata := cloneMSS(objInfo.UserDefined)
|
|
|
|
|
|
|
|
|
|
// update self with restore metadata
|
|
|
|
|
if rreq.Type != SelectRestoreRequest {
|
|
|
|
|
objInfo.metadataOnly = true // Perform only metadata updates.
|
|
|
|
|
metadata[xhttp.AmzRestoreExpiryDays] = strconv.Itoa(rreq.Days)
|
|
|
|
|
metadata[xhttp.AmzRestoreRequestDate] = time.Now().UTC().Format(http.TimeFormat)
|
|
|
|
|
if alreadyRestored {
|
2021-04-19 13:30:42 -04:00
|
|
|
|
metadata[xhttp.AmzRestore] = completedRestoreObj(restoreExpiry).String()
|
2020-11-12 15:12:09 -05:00
|
|
|
|
} else {
|
2021-04-19 13:30:42 -04:00
|
|
|
|
metadata[xhttp.AmzRestore] = ongoingRestoreObj().String()
|
2020-11-12 15:12:09 -05:00
|
|
|
|
}
|
|
|
|
|
objInfo.UserDefined = metadata
|
|
|
|
|
if _, err := objectAPI.CopyObject(GlobalContext, bucket, object, bucket, object, objInfo, ObjectOptions{
|
|
|
|
|
VersionID: objInfo.VersionID,
|
|
|
|
|
}, ObjectOptions{
|
|
|
|
|
VersionID: objInfo.VersionID,
|
|
|
|
|
}); err != nil {
|
|
|
|
|
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s: %s", objInfo.VersionID, err))
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidObjectState), r.URL, guessIsBrowserReq(r))
|
2020-11-12 15:12:09 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
// For a previously restored object, just update the restore expiry.
|
|
|
|
|
if alreadyRestored {
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
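// Generate a unique name for the restore output and advertise its path when an S3 output
// location is requested.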
restoreObject := mustGetUUID()
|
|
|
|
|
if rreq.OutputLocation.S3.BucketName != "" {
|
|
|
|
|
w.Header()[xhttp.AmzRestoreOutputPath] = []string{pathJoin(rreq.OutputLocation.S3.BucketName, rreq.OutputLocation.S3.Prefix, restoreObject)}
|
|
|
|
|
}
|
|
|
|
|
w.WriteHeader(statusCode)
|
|
|
|
|
// Notify object restore started via a POST request.
|
|
|
|
|
sendEvent(eventArgs{
|
|
|
|
|
EventName: event.ObjectRestorePostInitiated,
|
|
|
|
|
BucketName: bucket,
|
|
|
|
|
Object: objInfo,
|
|
|
|
|
ReqParams: extractReqParams(r),
|
|
|
|
|
UserAgent: r.UserAgent(),
|
|
|
|
|
Host: handlers.GetSourceIP(r),
|
|
|
|
|
})
|
|
|
|
|
// Now process the restore in the background.
|
|
|
|
|
go func() {
|
|
|
|
|
rctx := GlobalContext
|
|
|
|
|
if !rreq.SelectParameters.IsEmpty() {
|
|
|
|
|
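// getObject streams the requested byte range of the transitioned object from the remote
// tier; it is used as the input for the S3 Select evaluation below.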
getObject := func(offset, length int64) (rc io.ReadCloser, err error) {
|
|
|
|
|
isSuffixLength := false
|
|
|
|
|
if offset < 0 {
|
|
|
|
|
isSuffixLength = true
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
rs := &HTTPRangeSpec{
|
|
|
|
|
IsSuffixLength: isSuffixLength,
|
|
|
|
|
Start: offset,
|
|
|
|
|
End: offset + length,
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return getTransitionedObjectReader(rctx, bucket, object, rs, r.Header, objInfo, ObjectOptions{
|
|
|
|
|
VersionID: objInfo.VersionID,
|
|
|
|
|
})
|
|
|
|
|
}
|
|
|
|
|
if err = rreq.SelectParameters.Open(getObject); err != nil {
|
|
|
|
|
if serr, ok := err.(s3select.SelectError); ok {
|
|
|
|
|
encodedErrorResponse := encodeResponse(APIErrorResponse{
|
|
|
|
|
Code: serr.ErrorCode(),
|
|
|
|
|
Message: serr.ErrorMessage(),
|
|
|
|
|
BucketName: bucket,
|
|
|
|
|
Key: object,
|
|
|
|
|
Resource: r.URL.Path,
|
|
|
|
|
RequestID: w.Header().Get(xhttp.AmzRequestID),
|
|
|
|
|
HostID: globalDeploymentID,
|
|
|
|
|
})
|
|
|
|
|
writeResponse(w, serr.HTTPStatusCode(), encodedErrorResponse, mimeXML)
|
|
|
|
|
} else {
|
2021-04-29 22:01:43 -04:00
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
2020-11-12 15:12:09 -05:00
|
|
|
|
}
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
nr := httptest.NewRecorder()
|
|
|
|
|
rw := logger.NewResponseWriter(nr)
|
|
|
|
|
rw.LogErrBody = true
|
|
|
|
|
rw.LogAllBody = true
|
|
|
|
|
rreq.SelectParameters.Evaluate(rw)
|
|
|
|
|
rreq.SelectParameters.Close()
|
|
|
|
|
return
|
|
|
|
|
}
|
2021-04-19 13:30:42 -04:00
|
|
|
|
opts := ObjectOptions{
|
|
|
|
|
Transition: TransitionOptions{
|
|
|
|
|
RestoreRequest: rreq,
|
|
|
|
|
RestoreExpiry: restoreExpiry,
|
|
|
|
|
},
|
|
|
|
|
VersionID: objInfo.VersionID,
|
|
|
|
|
}
|
|
|
|
|
if err := objectAPI.RestoreTransitionedObject(rctx, bucket, object, opts); err != nil {
|
|
|
|
|
logger.LogIf(ctx, err)
|
2020-11-12 15:12:09 -05:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Notify object restore completed via a POST request.
|
|
|
|
|
sendEvent(eventArgs{
|
|
|
|
|
EventName: event.ObjectRestorePostCompleted,
|
|
|
|
|
BucketName: bucket,
|
|
|
|
|
Object: objInfo,
|
|
|
|
|
ReqParams: extractReqParams(r),
|
|
|
|
|
UserAgent: r.UserAgent(),
|
|
|
|
|
Host: handlers.GetSourceIP(r),
|
|
|
|
|
})
|
|
|
|
|
}()
|
|
|
|
|
}
|