// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd
import (
	"bytes"
	"context"
	"crypto/md5"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"encoding/xml"
	"errors"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"net/textproto"
"net/url"
2017-01-11 16:26:42 -05:00
"path"
2023-05-25 01:51:07 -04:00
"runtime"
2021-01-28 14:44:48 -05:00
"sort"
2020-07-08 20:36:56 -04:00
"strconv"
"strings"
2021-01-28 14:44:48 -05:00
"sync"
2015-06-30 17:42:29 -04:00
2020-12-11 23:44:08 -05:00
"github.com/google/uuid"
2023-01-23 06:12:47 -05:00
"github.com/minio/mux"
2023-05-25 01:51:07 -04:00
"github.com/valyala/bytebufferpool"
2018-05-11 15:02:30 -04:00
2023-06-19 20:53:08 -04:00
"github.com/minio/madmin-go/v3"
2023-05-25 01:51:07 -04:00
"github.com/minio/minio-go/v7"
2020-07-14 12:38:05 -04:00
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/tags"
2023-02-06 12:27:29 -05:00
"github.com/minio/minio/internal/auth"
2021-09-21 12:02:15 -04:00
sse "github.com/minio/minio/internal/bucket/encryption"
2021-06-01 17:59:40 -04:00
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/config/dns"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/handlers"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
2023-05-25 01:51:07 -04:00
"github.com/minio/minio/internal/ioutil"
2021-06-01 17:59:40 -04:00
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
2021-05-30 00:16:42 -04:00
"github.com/minio/pkg/bucket/policy"
iampolicy "github.com/minio/pkg/iam/policy"
2023-04-26 01:57:40 -04:00
"github.com/minio/pkg/sync/errgroup"
2015-06-30 17:42:29 -04:00
)

const (
	objectLockConfig        = "object-lock.xml"
	bucketTaggingConfig     = "tagging.xml"
	bucketReplicationConfig = "replication.xml"

	xMinIOErrCodeHeader = "x-minio-error-code"
	xMinIOErrDescHeader = "x-minio-error-desc"
)

// Check if there are buckets on the server without a corresponding entry in the
// etcd backend and create entries for them. The general flow is:
//   - Range over all the available buckets
//   - Check if the bucket has an entry in the etcd backend
//   - If not, make an entry
//   - If yes, check whether the entry matches the local IPs and update it if needed
//   - If the IPs of the entry match the local IPs, the entry is for this instance
//     and is left as-is
//   - If the IPs of the entry don't match, the entry belongs to another instance;
//     log an error to the console
func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
	if len(buckets) == 0 {
		return
	}

	// Get buckets in the DNS
	dnsBuckets, err := globalDNSConfig.List()
	if err != nil && !IsErrIgnored(err, dns.ErrNoEntriesFound, dns.ErrNotImplemented, dns.ErrDomainMissing) {
		logger.LogIf(GlobalContext, err)
		return
	}

	bucketsSet := set.NewStringSet()
	bucketsToBeUpdated := set.NewStringSet()
	bucketsInConflict := set.NewStringSet()

	// This means that the domain is updated, we should update
	// all bucket entries with the new domain name.
	domainMissing := err == dns.ErrDomainMissing
	if dnsBuckets != nil {
		for _, bucket := range buckets {
			bucketsSet.Add(bucket.Name)
			r, ok := dnsBuckets[bucket.Name]
			if !ok {
				bucketsToBeUpdated.Add(bucket.Name)
				continue
			}
			if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
				if globalDomainIPs.Difference(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() && !domainMissing {
					// No difference in terms of domainIPs and nothing
					// has changed, so we don't change anything on the etcd.
					//
					// Additionally also check if the domain is updated/missing with more
					// entries, if that is the case we should update the
					// new domain entries as well.
					continue
				}

				// If the domain IPs intersect, the intersection won't be an empty set,
				// which means that the bucket exists on etcd. But if we do see a
				// difference between the local domain IPs and the hosts from etcd,
				// we should update etcd with the newer domain IPs, which we proceed
				// to do here.
				bucketsToBeUpdated.Add(bucket.Name)
				continue
			}

			// No IPs seem to intersect, this means that the bucket exists but has
			// different IP addresses, perhaps from a different deployment.
			// Bucket names are globally unique in federation at a given
			// path prefix, name collision is not allowed. We simply log
			// an error and continue.
			bucketsInConflict.Add(bucket.Name)
		}
	}

	// Add/update buckets that are not registered with the DNS
	bucketsToBeUpdatedSlice := bucketsToBeUpdated.ToSlice()
	g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice)).WithConcurrency(50)

	for index := range bucketsToBeUpdatedSlice {
		index := index
		g.Go(func() error {
			return globalDNSConfig.Put(bucketsToBeUpdatedSlice[index])
		}, index)
	}

	ctx := GlobalContext
	for _, err := range g.Wait() {
		if err != nil {
			logger.LogIf(ctx, err)
			return
		}
	}

	for _, bucket := range bucketsInConflict.ToSlice() {
		logger.LogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice()))
	}

	var wg sync.WaitGroup
	// Remove buckets that are in DNS for this server, but aren't local
	for bucket, records := range dnsBuckets {
		if bucketsSet.Contains(bucket) {
			continue
		}

		if globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(records)...)).IsEmpty() {
			// This is not for our server, so we can continue
			continue
		}

		wg.Add(1)
		go func(bucket string) {
			defer wg.Done()
			// We got here, so we know the bucket no longer exists,
			// but is registered in DNS to this server
			if err := globalDNSConfig.Delete(bucket); err != nil {
				logger.LogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
					bucket, err))
			}
		}(bucket)
	}
	wg.Wait()
}

// GetBucketLocationHandler - GET Bucket location.
// -------------------------
// This operation returns bucket location.
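//
// A successful response is, roughly, an XML document of the form (sketch only;
// the value is empty when the configured region is the default one):
//
//	<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">us-east-1</LocationConstraint>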
func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetBucketLocation")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketLocationAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	getBucketInfo := objectAPI.GetBucketInfo

	if _, err := getBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Generate response.
	encodedSuccessResponse := encodeResponse(LocationResponse{})
	// Get current region.
	region := globalSite.Region
	if region != globalMinioDefaultRegion {
		encodedSuccessResponse = encodeResponse(LocationResponse{
			Location: region,
		})
	}

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}

// ListMultipartUploadsHandler - GET Bucket (List Multipart uploads)
// -------------------------
// This operation lists in-progress multipart uploads. An in-progress
// multipart upload is a multipart upload that has been initiated,
// using the Initiate Multipart Upload request, but has not yet been
// completed or aborted. This operation returns at most 1,000 multipart
// uploads in the response.
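//
// A typical request is of the form (sketch; every query parameter other than
// "uploads" is optional):
//
//	GET /mybucket?uploads&prefix=photos/&delimiter=/&max-uploads=1000 HTTP/1.1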
func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListMultipartUploads")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketMultipartUploadsAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType, errCode := getBucketMultipartResources(r.Form)
	if errCode != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
		return
	}
	if maxUploads < 0 {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxUploads), r.URL)
		return
	}

	if keyMarker != "" {
		// A key marker that does not share the prefix is not implemented.
		if !HasPrefix(keyMarker, prefix) {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
			return
		}
	}

	listMultipartsInfo, err := objectAPI.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Generate response.
	response := generateListMultipartUploadsResponse(bucket, listMultipartsInfo, encodingType)
	encodedSuccessResponse := encodeResponse(response)

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}

// ListBucketsHandler - GET Service.
// -----------
// This implementation of the GET operation returns a list of all buckets
// owned by the authenticated sender of the request.
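//
// A successful response is, roughly, an XML document of the form (sketch only):
//
//	<ListAllMyBucketsResult>
//	  <Owner>...</Owner>
//	  <Buckets>
//	    <Bucket><Name>mybucket</Name><CreationDate>2023-01-01T00:00:00.000Z</CreationDate></Bucket>
//	  </Buckets>
//	</ListAllMyBucketsResult>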
func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListBuckets")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	listBuckets := objectAPI.ListBuckets

	cred, owner, s3Error := checkRequestAuthTypeCredential(ctx, r, policy.ListAllMyBucketsAction)
	if s3Error != ErrNone && s3Error != ErrAccessDenied {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	// Anonymous users should be rejected.
	if cred.AccessKey == "" {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
		return
	}

	// If etcd/DNS federation is configured, list buckets from etcd.
	var bucketsInfo []BucketInfo
	if globalDNSConfig != nil && globalBucketFederation {
		dnsBuckets, err := globalDNSConfig.List()
		if err != nil && !IsErrIgnored(err,
			dns.ErrNoEntriesFound,
			dns.ErrDomainMissing) {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
		for _, dnsRecords := range dnsBuckets {
			bucketsInfo = append(bucketsInfo, BucketInfo{
				Name:    dnsRecords[0].Key,
				Created: dnsRecords[0].CreationDate,
			})
		}
		sort.Slice(bucketsInfo, func(i, j int) bool {
			return bucketsInfo[i].Name < bucketsInfo[j].Name
		})
	} else {
		// Invoke the list buckets.
		var err error
		bucketsInfo, err = listBuckets(ctx, BucketOptions{})
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
	}

	if s3Error == ErrAccessDenied {
		// Set prefix value for "s3:prefix" policy conditionals.
		r.Header.Set("prefix", "")
		// Set delimiter value for "s3:delimiter" policy conditionals.
		r.Header.Set("delimiter", SlashSeparator)
		n := 0
		// Use the following trick to filter in place
		// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
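		//
		// A bucket stays in the listing if the caller is allowed either
		// s3:ListBucket or s3:GetBucketLocation on it; everything else is
		// filtered out below.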
		for _, bucketInfo := range bucketsInfo {
			if globalIAMSys.IsAllowed(iampolicy.Args{
				AccountName:     cred.AccessKey,
				Groups:          cred.Groups,
				Action:          iampolicy.ListBucketAction,
				BucketName:      bucketInfo.Name,
				ConditionValues: getConditionValues(r, "", cred),
				IsOwner:         owner,
				ObjectName:      "",
				Claims:          cred.Claims,
			}) {
				bucketsInfo[n] = bucketInfo
				n++
			} else if globalIAMSys.IsAllowed(iampolicy.Args{
				AccountName:     cred.AccessKey,
				Groups:          cred.Groups,
				Action:          iampolicy.GetBucketLocationAction,
				BucketName:      bucketInfo.Name,
				ConditionValues: getConditionValues(r, "", cred),
				IsOwner:         owner,
				ObjectName:      "",
				Claims:          cred.Claims,
			}) {
				bucketsInfo[n] = bucketInfo
				n++
			}
		}
		bucketsInfo = bucketsInfo[:n]
		// If no buckets are left after filtering, return an access denied error.
		if len(bucketsInfo) == 0 {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
			return
		}
	}

	// Generate response.
	response := generateListBucketsResponse(bucketsInfo)
	encodedSuccessResponse := encodeResponse(response)

	// Write response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}

// DeleteMultipleObjectsHandler - deletes multiple objects.
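//
// The request body is, roughly, an XML document of the form (sketch only;
// <VersionId> and <Quiet> are optional):
//
//	<Delete>
//	  <Quiet>false</Quiet>
//	  <Object><Key>photos/a.jpg</Key></Object>
//	  <Object><Key>photos/b.jpg</Key><VersionId>...</VersionId></Object>
//	</Delete>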
func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DeleteMultipleObjects")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	// Content-Md5 is required and should be set
	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
	if _, ok := r.Header[xhttp.ContentMD5]; !ok {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL)
		return
	}

	// Content-Length is required and should be non-zero
	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
	if r.ContentLength <= 0 {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
		return
	}

	// The max. XML contains 100000 object names (each at most 1024 bytes long) + XML overhead
	const maxBodySize = 2 * 100000 * 1024
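	// That works out to roughly 195 MiB (2 * 100000 * 1024 bytes); the factor
	// of two is a rough allowance for the XML envelope around each key, not an
	// exact bound from the S3 specification.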

	if r.ContentLength > maxBodySize {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
		return
	}

	// Unmarshal list of keys to be deleted.
	deleteObjectsReq := &DeleteObjectsRequest{}
	if err := xmlDecoder(r.Body, deleteObjectsReq, maxBodySize); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	objects := make([]ObjectV, len(deleteObjectsReq.Objects))
	// Normalize object names in the delete request by trimming a leading `/`.
	for i := range deleteObjectsReq.Objects {
		deleteObjectsReq.Objects[i].ObjectName = trimLeadingSlash(deleteObjectsReq.Objects[i].ObjectName)
		objects[i] = deleteObjectsReq.Objects[i].ObjectV
	}

	// Make sure to update context to print ObjectNames for multi objects.
	ctx = updateReqContext(ctx, objects...)

	// Call checkRequestAuthType to populate ReqInfo.AccessKey before GetBucketInfo()
	// Ignore errors here to preserve the S3 error behavior of GetBucketInfo()
	checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, "")

	// Before proceeding validate if bucket exists.
	_, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{})
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	deleteObjectsFn := objectAPI.DeleteObjects
	if api.CacheAPI() != nil {
		deleteObjectsFn = api.CacheAPI().DeleteObjects
	}

	// Return Malformed XML, as per the S3 spec, if the list of objects is empty or too large.
	if len(deleteObjectsReq.Objects) == 0 || len(deleteObjectsReq.Objects) > maxDeleteList {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL)
		return
	}

	objectsToDelete := map[ObjectToDelete]int{}
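	// The map value is the index of the object in the original request, so
	// that per-object results and errors can be reported back in request
	// order after duplicates are collapsed.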

	getObjectInfoFn := objectAPI.GetObjectInfo
	if api.CacheAPI() != nil {
		getObjectInfoFn = api.CacheAPI().GetObjectInfo
	}

	var (
		hasLockEnabled bool
		dsc            ReplicateDecision
		goi            ObjectInfo
		gerr           error
	)

	replicateDeletes := hasReplicationRules(ctx, bucket, deleteObjectsReq.Objects)
	if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
		hasLockEnabled = true
	}

	type deleteResult struct {
		delInfo DeletedObject
		errInfo DeleteError
	}
	deleteResults := make([]deleteResult, len(deleteObjectsReq.Objects))

	vc, _ := globalBucketVersioningSys.Get(bucket)
	oss := make([]*objSweeper, len(deleteObjectsReq.Objects))
	for index, object := range deleteObjectsReq.Objects {
		if apiErrCode := checkRequestAuthTypeWithVID(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName, object.VersionID); apiErrCode != ErrNone {
			if apiErrCode == ErrSignatureDoesNotMatch || apiErrCode == ErrInvalidAccessKeyID {
				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErrCode), r.URL)
				return
			}
			apiErr := errorCodes.ToAPIErr(apiErrCode)
			deleteResults[index].errInfo = DeleteError{
				Code:      apiErr.Code,
				Message:   apiErr.Description,
				Key:       object.ObjectName,
				VersionID: object.VersionID,
			}
			continue
		}

		if object.VersionID != "" && object.VersionID != nullVersionID {
			if _, err := uuid.Parse(object.VersionID); err != nil {
				apiErr := errorCodes.ToAPIErr(ErrNoSuchVersion)
				deleteResults[index].errInfo = DeleteError{
					Code:      apiErr.Code,
					Message:   fmt.Sprintf("%s (%s)", apiErr.Description, err),
					Key:       object.ObjectName,
					VersionID: object.VersionID,
				}
				continue
			}
		}

		opts := ObjectOptions{
			VersionID:        object.VersionID,
			Versioned:        vc.PrefixEnabled(object.ObjectName),
			VersionSuspended: vc.Suspended(),
		}

		if replicateDeletes || object.VersionID != "" && hasLockEnabled || !globalTierConfigMgr.Empty() {
			if !globalTierConfigMgr.Empty() && object.VersionID == "" && opts.VersionSuspended {
				opts.VersionID = nullVersionID
			}
			goi, gerr = getObjectInfoFn(ctx, bucket, object.ObjectName, opts)
		}
		if !globalTierConfigMgr.Empty() {
			oss[index] = newObjSweeper(bucket, object.ObjectName).WithVersion(opts.VersionID).WithVersioning(opts.Versioned, opts.VersionSuspended)
			oss[index].SetTransitionState(goi.TransitionedObject)
		}

		// All deletes on directory objects need to be for `nullVersionID`
		if isDirObject(object.ObjectName) && object.VersionID == "" {
			object.VersionID = nullVersionID
		}
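		// If the bucket has applicable replication rules, mark the delete
		// (or the resulting delete marker) as Pending so that the
		// replication workers can later propagate it to the targets.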
		if replicateDeletes {
			dsc = checkReplicateDelete(ctx, bucket, ObjectToDelete{
				ObjectV: ObjectV{
					ObjectName: object.ObjectName,
					VersionID:  object.VersionID,
				},
			}, goi, opts, gerr)
			if dsc.ReplicateAny() {
				if object.VersionID != "" {
					object.VersionPurgeStatus = Pending
					object.VersionPurgeStatuses = dsc.PendingStatus()
				} else {
					object.DeleteMarkerReplicationStatus = dsc.PendingStatus()
				}
				object.ReplicateDecisionStr = dsc.String()
			}
		}

		if object.VersionID != "" && hasLockEnabled {
			if apiErrCode := enforceRetentionBypassForDelete(ctx, r, bucket, object, goi, gerr); apiErrCode != ErrNone {
				apiErr := errorCodes.ToAPIErr(apiErrCode)
				deleteResults[index].errInfo = DeleteError{
					Code:      apiErr.Code,
					Message:   apiErr.Description,
					Key:       object.ObjectName,
					VersionID: object.VersionID,
				}
				continue
			}
		}

		// Avoid duplicate objects, we use map to filter them out.
		if _, ok := objectsToDelete[object]; !ok {
			objectsToDelete[object] = index
		}
	}

	toNames := func(input map[ObjectToDelete]int) (output []ObjectToDelete) {
		output = make([]ObjectToDelete, len(input))
		idx := 0
		for obj := range input {
			output[idx] = obj
			idx++
		}
		return
	}

	// Disable timeouts and cancellation
	ctx = bgContext(ctx)

	deleteList := toNames(objectsToDelete)
	dObjects, errs := deleteObjectsFn(ctx, bucket, deleteList, ObjectOptions{
		PrefixEnabledFn:  vc.PrefixEnabled,
		VersionSuspended: vc.Suspended(),
	})

	for i := range errs {
		// DeleteMarkerVersionID is not used specifically to avoid
		// lookup errors, since DeleteMarkerVersionID is only
		// created during DeleteMarker creation when client didn't
		// specify a versionID.
		objToDel := ObjectToDelete{
			ObjectV: ObjectV{
				ObjectName: dObjects[i].ObjectName,
				VersionID:  dObjects[i].VersionID,
			},
			VersionPurgeStatus:            dObjects[i].VersionPurgeStatus(),
			VersionPurgeStatuses:          dObjects[i].ReplicationState.VersionPurgeStatusInternal,
			DeleteMarkerReplicationStatus: dObjects[i].ReplicationState.ReplicationStatusInternal,
			ReplicateDecisionStr:          dObjects[i].ReplicationState.ReplicateDecisionStr,
		}
		dindex := objectsToDelete[objToDel]
		if errs[i] == nil || isErrObjectNotFound(errs[i]) || isErrVersionNotFound(errs[i]) {
			if replicateDeletes {
				dObjects[i].ReplicationState = deleteList[i].ReplicationState()
			}
			deleteResults[dindex].delInfo = dObjects[i]
			continue
		}
		apiErr := toAPIError(ctx, errs[i])
		deleteResults[dindex].errInfo = DeleteError{
			Code:      apiErr.Code,
			Message:   apiErr.Description,
			Key:       deleteList[i].ObjectName,
			VersionID: deleteList[i].VersionID,
		}
	}

	// Generate response
	deleteErrors := make([]DeleteError, 0, len(deleteObjectsReq.Objects))
	deletedObjects := make([]DeletedObject, 0, len(deleteObjectsReq.Objects))
	for _, deleteResult := range deleteResults {
		if deleteResult.errInfo.Code != "" {
			deleteErrors = append(deleteErrors, deleteResult.errInfo)
		} else {
			// All deletes on directory objects were done with `nullVersionID`.
			// Remove it from the response.
			if isDirObject(deleteResult.delInfo.ObjectName) && deleteResult.delInfo.VersionID == nullVersionID {
				deleteResult.delInfo.VersionID = ""
			}
			deletedObjects = append(deletedObjects, deleteResult.delInfo)
		}
	}

	response := generateMultiDeleteResponse(deleteObjectsReq.Quiet, deletedObjects, deleteErrors)
	encodedSuccessResponse := encodeResponse(response)

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)

	for _, dobj := range deletedObjects {
		if dobj.ObjectName == "" {
			continue
		}

		if replicateDeletes && (dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == Pending) {
			// copy so we can re-add null ID.
			dobj := dobj
			if isDirObject(dobj.ObjectName) && dobj.VersionID == "" {
				dobj.VersionID = nullVersionID
			}
			dv := DeletedObjectReplicationInfo{
				DeletedObject: dobj,
				Bucket:        bucket,
				EventType:     ReplicateIncomingDelete,
			}
			scheduleReplicationDelete(ctx, dv, objectAPI)
		}

		eventName := event.ObjectRemovedDelete
		objInfo := ObjectInfo{
			Name:         dobj.ObjectName,
			VersionID:    dobj.VersionID,
			DeleteMarker: dobj.DeleteMarker,
		}

		if objInfo.DeleteMarker {
			objInfo.VersionID = dobj.DeleteMarkerVersionID
			eventName = event.ObjectRemovedDeleteMarkerCreated
		}

		sendEvent(eventArgs{
			EventName:    eventName,
			BucketName:   bucket,
			Object:       objInfo,
			ReqParams:    extractReqParams(r),
			RespElements: extractRespElements(w),
			UserAgent:    r.UserAgent(),
			Host:         handlers.GetSourceIP(r),
		})
	}

	// Clean up transitioned objects from remote tier
	for _, os := range oss {
		if os == nil { // skip objects that weren't deleted due to invalid versionID etc.
			continue
		}
		logger.LogIf(ctx, os.Sweep())
	}
}

// PutBucketHandler - PUT Bucket
// ----------
// This implementation of the PUT operation creates a new bucket for an authenticated request.
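//
// Two optional request headers are honored here (both accept only "true" or
// "false"): the object-lock header (xhttp.AmzObjectLockEnabled, i.e.
// "x-amz-bucket-object-lock-enabled") to create the bucket with object locking
// enabled, and the MinIO-specific force-create header (xhttp.MinIOForceCreate).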
func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PutBucket")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectLockEnabled := false
	if vs := r.Header.Get(xhttp.AmzObjectLockEnabled); len(vs) > 0 {
		v := strings.ToLower(vs)
		switch v {
		case "true", "false":
			objectLockEnabled = v == "true"
		default:
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
			return
		}
	}

	forceCreate := false
	if vs := r.Header.Get(xhttp.MinIOForceCreate); len(vs) > 0 {
		v := strings.ToLower(vs)
		switch v {
		case "true", "false":
			forceCreate = v == "true"
		default:
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
			return
		}
	}

	cred, owner, s3Error := checkRequestAuthTypeCredential(ctx, r, policy.CreateBucketAction)
	if s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	if objectLockEnabled {
		// Creating a bucket with object locking enabled requires additional permissions.
		for _, action := range []iampolicy.Action{iampolicy.PutBucketObjectLockConfigurationAction, iampolicy.PutBucketVersioningAction} {
			if !globalIAMSys.IsAllowed(iampolicy.Args{
				AccountName:     cred.AccessKey,
				Groups:          cred.Groups,
				Action:          action,
				ConditionValues: getConditionValues(r, "", cred),
				BucketName:      bucket,
				IsOwner:         owner,
				Claims:          cred.Claims,
			}) {
				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
				return
			}
		}
	}

	// Parse incoming location constraint.
	_, s3Error = parseLocationConstraint(r)
	if s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	// Check if the client is attempting to create more buckets than recommended; complain about it.
	if currBuckets := globalBucketMetadataSys.Count(); currBuckets+1 > maxBuckets {
		logger.LogIf(ctx, fmt.Errorf("An attempt to create %d buckets beyond recommended %d", currBuckets+1, maxBuckets))
	}

	opts := MakeBucketOptions{
		LockEnabled: objectLockEnabled,
		ForceCreate: forceCreate,
	}

	if globalDNSConfig != nil {
		sr, err := globalDNSConfig.Get(bucket)
		if err != nil {
			// ErrNotImplemented indicates a DNS backend that doesn't need to check if bucket already
			// exists elsewhere
			if err == dns.ErrNoEntriesFound || err == dns.ErrNotImplemented {
				// Proceed to creating a bucket.
				if err = objectAPI.MakeBucket(ctx, bucket, opts); err != nil {
					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
					return
				}

				if err = globalDNSConfig.Put(bucket); err != nil {
					objectAPI.DeleteBucket(context.Background(), bucket, DeleteBucketOptions{
						Force:      true,
						SRDeleteOp: getSRBucketDeleteOp(globalSiteReplicationSys.isEnabled()),
					})
					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
					return
				}

				// Load updated bucket metadata into memory.
				globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)

				// Make sure to add Location information here only for bucket
				w.Header().Set(xhttp.Location, pathJoin(SlashSeparator, bucket))

				writeSuccessResponseHeadersOnly(w)

				sendEvent(eventArgs{
					EventName:    event.BucketCreated,
					BucketName:   bucket,
					ReqParams:    extractReqParams(r),
					RespElements: extractRespElements(w),
					UserAgent:    r.UserAgent(),
					Host:         handlers.GetSourceIP(r),
				})

				return
			}
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}

		apiErr := ErrBucketAlreadyExists
		if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(sr)...)).IsEmpty() {
			apiErr = ErrBucketAlreadyOwnedByYou
		}
		// No IPs seem to intersect, this means that bucket exists but has
		// different IP addresses perhaps from a different deployment.
		// bucket names are globally unique in federation at a given
		// path prefix, name collision is not allowed. Return appropriate error.
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErr), r.URL)
		return
	}

	// Proceed to creating a bucket.
	if err := objectAPI.MakeBucket(ctx, bucket, opts); err != nil {
		if _, ok := err.(BucketExists); ok {
			// Though bucket exists locally, we send the site-replication
			// hook to ensure all sites have this bucket. If the hook
			// succeeds, the client will still receive a bucket exists
			// message.
			globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts)
		}
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Load updated bucket metadata into memory.
	globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)

	// Call site replication hook
	logger.LogIf(ctx, globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts))

	// Make sure to add Location information here only for bucket
	w.Header().Set(xhttp.Location, pathJoin(SlashSeparator, bucket))

	writeSuccessResponseHeadersOnly(w)

	sendEvent(eventArgs{
		EventName:    event.BucketCreated,
		BucketName:   bucket,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})
}

// PostPolicyBucketHandler - POST policy
// ----------
// This implementation of the POST operation handles object creation with a specified
// signature policy in multipart/form-data
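//
// The multipart form typically carries (sketch; the exact set of fields depends
// on the client) a "key", the base64-encoded "policy", the signature fields such
// as "x-amz-algorithm", "x-amz-credential", "x-amz-date" and "x-amz-signature",
// optional metadata and success_action_* fields, and finally the "file" part,
// which must be the last field in the form.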
func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PostPolicyBucket")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	bucket := mux.Vars(r)["bucket"]

	resource, err := getResource(r.URL.Path, r.Host, globalDomainNames)
	if err != nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}

	// Make sure that the URL does not contain object name.
	if bucket != path.Clean(resource[1:]) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
		return
	}

	// Here the parameter is the size of the form data that should
	// be loaded in memory, the remaining being put in temporary files.
	mp, err := r.MultipartReader()
	if err != nil {
		apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
		apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
		writeErrorResponse(ctx, w, apiErr, r.URL)
		return
	}

	const mapEntryOverhead = 200
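	// Rough per-entry allowance (in bytes) charged against the in-memory form
	// budget for every stored key/value, to account for map and header
	// bookkeeping overhead (an approximation, not a measured value).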

	var (
		reader        io.Reader
		fileSize      int64 = -1
		fileName      string
		fanOutEntries = make([]minio.PutObjectFanOutEntry, 0, 100)
	)

	maxParts := 1000
	// Canonicalize the form values into http.Header.
	formValues := make(http.Header)
	for {
		part, err := mp.NextRawPart()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
			apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
			writeErrorResponse(ctx, w, apiErr, r.URL)
			return
		}
		if maxParts <= 0 {
			apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
			apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, multipart.ErrMessageTooLarge)
			writeErrorResponse(ctx, w, apiErr, r.URL)
			return
		}
		maxParts--

		name := part.FormName()
		if name == "" {
			continue
		}

		fileName = part.FileName()

		// Multiple values for the same key (one map entry, longer slice) are cheaper
		// than the same number of values for different keys (many map entries), but
		// using a consistent per-value cost for overhead is simpler.
		maxMemoryBytes := 2 * int64(10<<20)
		maxMemoryBytes -= int64(len(name))
		maxMemoryBytes -= mapEntryOverhead
		if maxMemoryBytes < 0 {
			// We can't actually take this path, since nextPart would already have
			// rejected the MIME headers for being too large. Check anyway.
			apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
			apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, multipart.ErrMessageTooLarge)
			writeErrorResponse(ctx, w, apiErr, r.URL)
			return
		}

		var b bytes.Buffer
		if fileName == "" {
			if http.CanonicalHeaderKey(name) == http.CanonicalHeaderKey("x-minio-fanout-list") {
				dec := json.NewDecoder(part)
				// while the array contains values
				for dec.More() {
					var m minio.PutObjectFanOutEntry
					if err := dec.Decode(&m); err != nil {
						part.Close()
						apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
						apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, multipart.ErrMessageTooLarge)
						writeErrorResponse(ctx, w, apiErr, r.URL)
						return
					}
					fanOutEntries = append(fanOutEntries, m)
				}
				part.Close()
				continue
			}

			// Plain value, store as a string in memory.
			n, err := io.CopyN(&b, part, maxMemoryBytes+1)
			part.Close()

			if err != nil && err != io.EOF {
				apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
				apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
				writeErrorResponse(ctx, w, apiErr, r.URL)
				return
			}
			maxMemoryBytes -= n
			if maxMemoryBytes < 0 {
				apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
				apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, multipart.ErrMessageTooLarge)
				writeErrorResponse(ctx, w, apiErr, r.URL)
				return
			}
			if n > maxFormFieldSize {
				apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
				apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, multipart.ErrMessageTooLarge)
				writeErrorResponse(ctx, w, apiErr, r.URL)
				return
			}
			formValues[http.CanonicalHeaderKey(name)] = append(formValues[http.CanonicalHeaderKey(name)], b.String())
			continue
		}

		// In accordance with https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html
		// the file or text content must be the last field in the form, and
		// only one file can be uploaded at a time.
		reader = part
		// We have found the file part of the request, we are done processing the multipart form.
		break
	}

	if _, ok := formValues["Key"]; !ok {
		apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
		apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The name of the uploaded key is missing"))
		writeErrorResponse(ctx, w, apiErr, r.URL)
		return
	}

	if fileName == "" {
		apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
		apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The file or text content is missing"))
		writeErrorResponse(ctx, w, apiErr, r.URL)
		return
	}

	checksum, err := hash.GetContentChecksum(formValues)
	if err != nil {
		apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
		apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, fmt.Errorf("Invalid checksum: %w", err))
		writeErrorResponse(ctx, w, apiErr, r.URL)
		return
	}
	if checksum != nil && checksum.Type.Trailing() {
		// Not officially supported in POST requests.
		apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
		apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("Trailing checksums not available for POST operations"))
		writeErrorResponse(ctx, w, apiErr, r.URL)
		return
	}

	formValues.Set("Bucket", bucket)
	if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") {
		// S3 feature to replace ${filename} found in Key form field
		// by the filename attribute passed in multipart
		formValues.Set("Key", strings.ReplaceAll(formValues.Get("Key"), "${filename}", fileName))
	}
	object := trimLeadingSlash(formValues.Get("Key"))

	successRedirect := formValues.Get("success_action_redirect")
	successStatus := formValues.Get("success_action_status")
	var redirectURL *url.URL
	if successRedirect != "" {
		redirectURL, err = url.Parse(successRedirect)
		if err != nil {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
			return
		}
	}

	// Verify policy signature.
	cred, errCode := doesPolicySignatureMatch(formValues)
	if errCode != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
		return
	}

	if len(fanOutEntries) > 0 {
		// Once the signature is validated, check if the user has
		// explicit permission for the fan-out action.
		if !globalIAMSys.IsAllowed(iampolicy.Args{
			AccountName:     cred.AccessKey,
			Groups:          cred.Groups,
			Action:          iampolicy.PutObjectFanOutAction,
			ConditionValues: getConditionValues(r, "", cred),
			BucketName:      bucket,
			ObjectName:      object,
			IsOwner:         globalActiveCred.AccessKey == cred.AccessKey,
			Claims:          cred.Claims,
		}) {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
			return
		}
	} else {
		// Once the signature is validated, check if the user has
		// explicit permission to put the object.
		if !globalIAMSys.IsAllowed(iampolicy.Args{
			AccountName:     cred.AccessKey,
			Groups:          cred.Groups,
			Action:          iampolicy.PutObjectAction,
			ConditionValues: getConditionValues(r, "", cred),
			BucketName:      bucket,
			ObjectName:      object,
			IsOwner:         globalActiveCred.AccessKey == cred.AccessKey,
			Claims:          cred.Claims,
		}) {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
			return
		}
	}

	policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
	if err != nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
		return
	}

	hashReader, err := hash.NewReader(reader, fileSize, "", "", fileSize)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	if checksum != nil && checksum.Valid() {
		if err = hashReader.AddChecksumNoTrailer(formValues, false); err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
	}
2018-12-03 15:01:28 -05:00
// Handle policy if it is set.
if len ( policyBytes ) > 0 {
2021-03-25 16:57:57 -04:00
postPolicyForm , err := parsePostPolicyForm ( bytes . NewReader ( policyBytes ) )
2018-12-03 15:01:28 -05:00
if err != nil {
2021-03-25 16:57:57 -04:00
errAPI := errorCodes . ToAPIErr ( ErrPostPolicyConditionInvalidFormat )
errAPI . Description = fmt . Sprintf ( "%s '(%s)'" , errAPI . Description , err )
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errAPI , r . URL )
2017-02-02 13:45:00 -05:00
return
2016-11-21 07:15:26 -05:00
}
2017-02-02 13:45:00 -05:00
2018-12-03 15:01:28 -05:00
// Make sure formValues adhere to policy restrictions.
2019-03-05 15:10:47 -05:00
if err = checkPostPolicy ( formValues , postPolicyForm ) ; err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErrWithErr ( ErrAccessDenied , err ) , r . URL )
2017-02-02 13:45:00 -05:00
return
2016-11-21 07:15:26 -05:00
}
2018-12-03 15:01:28 -05:00
// Ensure that the object size is within expected range, also the file size
// should not exceed the maximum single Put size (5 GiB)
lengthRange := postPolicyForm . Conditions . ContentLengthRange
if lengthRange . Valid {
2023-05-15 17:08:54 -04:00
hashReader . SetExpectedMin ( lengthRange . Min )
hashReader . SetExpectedMax ( lengthRange . Max )
2018-12-03 15:01:28 -05:00
}
2016-10-25 02:47:03 -04:00
}
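	// As an illustration (values are hypothetical), a decoded POST policy may
	// constrain the upload size with a condition such as:
	//
	//	["content-length-range", 1, 10485760]
	//
	// which the hash reader enforces above via SetExpectedMin/SetExpectedMax.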

	// Extract metadata to be saved from received Form.
	metadata := make(map[string]string)
	err = extractMetadataFromMime(ctx, textproto.MIMEHeader(formValues), metadata)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	rawReader := hashReader
	pReader := NewPutObjReader(rawReader)
	var objectEncryptionKey crypto.ObjectKey

	// Check if bucket encryption is enabled
	sseConfig, _ := globalBucketSSEConfigSys.Get(bucket)
	sseConfig.Apply(formValues, sse.ApplyOptions{
		AutoEncrypt: globalAutoEncryption,
	})

	var opts ObjectOptions
	opts, err = putOpts(ctx, r, bucket, object, metadata)
	if err != nil {
		writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
		return
	}

	fanOutOpts := fanOutOptions{Checksum: checksum}
	if crypto.Requested(formValues) {
		if crypto.SSECopy.IsRequested(r.Header) {
			writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL)
			return
		}

		if crypto.SSEC.IsRequested(r.Header) && crypto.S3.IsRequested(r.Header) {
			writeErrorResponse(ctx, w, toAPIError(ctx, crypto.ErrIncompatibleEncryptionMethod), r.URL)
			return
		}
		if crypto.SSEC.IsRequested(r.Header) && crypto.S3KMS.IsRequested(r.Header) {
			writeErrorResponse(ctx, w, toAPIError(ctx, crypto.ErrIncompatibleEncryptionMethod), r.URL)
			return
		}

		if crypto.SSEC.IsRequested(r.Header) && isReplicationEnabled(ctx, bucket) {
			writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParametersSSEC), r.URL)
			return
		}

		var (
			reader io.Reader
			keyID  string
			key    []byte
			kmsCtx kms.Context
		)
		kind, _ := crypto.IsRequested(formValues)
		switch kind {
		case crypto.SSEC:
			key, err = ParseSSECustomerHeader(formValues)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}
		case crypto.S3KMS:
			keyID, kmsCtx, err = crypto.S3KMS.ParseHTTP(formValues)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}
		}

		if len(fanOutEntries) == 0 {
			reader, objectEncryptionKey, err = newEncryptReader(ctx, hashReader, kind, keyID, key, bucket, object, metadata, kmsCtx)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}
			// Do not try to verify encrypted content.
			hashReader, err = hash.NewReader(reader, -1, "", "", -1)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}
			if checksum != nil && checksum.Valid() {
				if err = hashReader.AddChecksumNoTrailer(formValues, true); err != nil {
					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
					return
				}
			}
			pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}
		} else {
			fanOutOpts = fanOutOptions{
				Key:      key,
				Kind:     kind,
				KeyID:    keyID,
				KmsCtx:   kmsCtx,
				Checksum: checksum,
			}
		}
	}
	if len(fanOutEntries) > 0 {
		// Fan-out must be carried out from the original source: the incoming
		// stream is buffered entirely in memory because we cannot re-read what
		// has already been written to disk - that would amount to "copying"
		// from a "copy" rather than from the source. Buffering the payload
		// also lets the fan-out calls run concurrently.
		buf := bytebufferpool.Get()
		defer bytebufferpool.Put(buf)

		md5w := md5.New()

		// Maximum allowed fan-out object size.
		const maxFanOutSize = 16 << 20

		n, err := io.Copy(io.MultiWriter(buf, md5w), ioutil.HardLimitReader(pReader, maxFanOutSize))
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}

		// Set the correct hex md5sum for the fan-out stream.
		fanOutOpts.MD5Hex = hex.EncodeToString(md5w.Sum(nil))

		concurrentSize := 100
		if runtime.GOMAXPROCS(0) < concurrentSize {
			concurrentSize = runtime.GOMAXPROCS(0)
		}
		fanOutResp := make([]minio.PutObjectFanOutResponse, 0, len(fanOutEntries))
		eventArgsList := make([]eventArgs, 0, len(fanOutEntries))
		for {
			var objInfos []ObjectInfo
			var errs []error
			var done bool
			if len(fanOutEntries) < concurrentSize {
				objInfos, errs = fanOutPutObject(ctx, bucket, objectAPI, fanOutEntries, buf.Bytes()[:n], fanOutOpts)
				done = true
			} else {
				objInfos, errs = fanOutPutObject(ctx, bucket, objectAPI, fanOutEntries[:concurrentSize], buf.Bytes()[:n], fanOutOpts)
				fanOutEntries = fanOutEntries[concurrentSize:]
			}

			for i, objInfo := range objInfos {
				if errs[i] != nil {
					fanOutResp = append(fanOutResp, minio.PutObjectFanOutResponse{
						Key:   objInfo.Name,
						Error: errs[i].Error(),
					})

					eventArgsList = append(eventArgsList, eventArgs{
						EventName:    event.ObjectCreatedPost,
						BucketName:   objInfo.Bucket,
						Object:       ObjectInfo{Name: objInfo.Name},
						ReqParams:    extractReqParams(r),
						RespElements: extractRespElements(w),
						UserAgent:    fmt.Sprintf("%s MinIO-Fan-Out (failed: %v)", r.UserAgent(), errs[i]),
						Host:         handlers.GetSourceIP(r),
					})

					continue
				}

				fanOutResp = append(fanOutResp, minio.PutObjectFanOutResponse{
					Key:          objInfo.Name,
					ETag:         getDecryptedETag(formValues, objInfo, false),
					VersionID:    objInfo.VersionID,
					LastModified: &objInfo.ModTime,
				})

				eventArgsList = append(eventArgsList, eventArgs{
					EventName:    event.ObjectCreatedPost,
					BucketName:   objInfo.Bucket,
					Object:       objInfo,
					ReqParams:    extractReqParams(r),
					RespElements: extractRespElements(w),
					UserAgent:    r.UserAgent() + " " + "MinIO-Fan-Out",
					Host:         handlers.GetSourceIP(r),
				})
			}

			if done {
				break
			}
		}
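		// The fan-out response body is written as a stream of JSON objects,
		// one per fan-out entry (roughly newline-delimited JSON). The exact
		// field names follow the minio-go PutObjectFanOutResponse struct; a
		// successful entry resembles (illustrative):
		//
		//	{"key":"events/target-1.json","etag":"...","versionId":"..."}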
		enc := json.NewEncoder(w)
		for i, fanOutResp := range fanOutResp {
			if err = enc.Encode(&fanOutResp); err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}

			// Notify object created events.
			sendEvent(eventArgsList[i])

			if eventArgsList[i].Object.NumVersions > dataScannerExcessiveVersionsThreshold {
				// Send events for excessive versions.
				sendEvent(eventArgs{
					EventName:    event.ObjectManyVersions,
					BucketName:   eventArgsList[i].Object.Bucket,
					Object:       eventArgsList[i].Object,
					ReqParams:    extractReqParams(r),
					RespElements: extractRespElements(w),
					UserAgent:    r.UserAgent() + " " + "MinIO-Fan-Out",
					Host:         handlers.GetSourceIP(r),
				})
			}
		}
		return
	}

	objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, opts)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// We must not use the http.Header().Set method here because some (broken)
	// clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive).
	// Therefore, we have to set the ETag directly as map entry.
	w.Header()[xhttp.ETag] = []string{`"` + objInfo.ETag + `"`}

	// Set the relevant version ID as part of the response header.
	if objInfo.VersionID != "" && objInfo.VersionID != nullVersionID {
		w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
	}

	if obj := getObjectLocation(r, globalDomainNames, bucket, object); obj != "" {
		w.Header().Set(xhttp.Location, obj)
	}

	// Notify object created event.
	defer sendEvent(eventArgs{
		EventName:    event.ObjectCreatedPost,
		BucketName:   objInfo.Bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})

	if objInfo.NumVersions > dataScannerExcessiveVersionsThreshold {
		defer sendEvent(eventArgs{
			EventName:    event.ObjectManyVersions,
			BucketName:   objInfo.Bucket,
			Object:       objInfo,
			ReqParams:    extractReqParams(r),
			RespElements: extractRespElements(w),
			UserAgent:    r.UserAgent(),
			Host:         handlers.GetSourceIP(r),
		})
	}
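
	// For example (illustrative values), when the form supplies
	// success_action_redirect=https://example.com/uploaded, the client is
	// redirected (303 See Other) to a URL of the form:
	//
	//	https://example.com/uploaded?bucket=mybucket&etag=%22...%22&key=photo.jpg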
	if redirectURL != nil { // success_action_redirect is valid and set.
		v := redirectURL.Query()
		v.Add("bucket", objInfo.Bucket)
		v.Add("key", objInfo.Name)
		v.Add("etag", "\""+objInfo.ETag+"\"")
		redirectURL.RawQuery = v.Encode()
		writeRedirectSeeOther(w, redirectURL.String())
		return
	}

	// Add checksum header.
	if checksum != nil && checksum.Valid() {
		hash.AddChecksumHeader(w, checksum.AsMap())
	}
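
	// Behavior of success_action_status, for reference (any other value, or no
	// value at all, falls through to 204 No Content):
	//
	//	"201" -> 201 Created with an XML PostResponse body
	//	"200" -> 200 OK with no body
	//	other -> 204 No Content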
	// Decide what http response to send depending on success_action_status parameter
	switch successStatus {
	case "201":
		resp := encodeResponse(PostResponse{
			Bucket:   objInfo.Bucket,
			Key:      objInfo.Name,
			ETag:     `"` + objInfo.ETag + `"`,
			Location: w.Header().Get(xhttp.Location),
		})
		writeResponse(w, http.StatusCreated, resp, mimeXML)
	case "200":
		writeSuccessResponseHeadersOnly(w)
	default:
		writeSuccessNoContent(w)
	}
}

// GetBucketPolicyStatusHandler - Retrieves the policy status
// for a MinIO bucket, indicating whether the bucket is public.
func (api objectAPIHandlers) GetBucketPolicyStatusHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetBucketPolicyStatus")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrServerNotInitialized))
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyStatusAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
		return
	}

	// Check if bucket exists.
	if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Check if anonymous (non-owner) has access to list objects.
	readable := globalPolicySys.IsAllowed(policy.Args{
		Action:          policy.ListBucketAction,
		BucketName:      bucket,
		ConditionValues: getConditionValues(r, "", auth.AnonymousCredentials),
		IsOwner:         false,
	})

	// Check if anonymous (non-owner) has access to upload objects.
	writable := globalPolicySys.IsAllowed(policy.Args{
		Action:          policy.PutObjectAction,
		BucketName:      bucket,
		ConditionValues: getConditionValues(r, "", auth.AnonymousCredentials),
		IsOwner:         false,
	})

	encodedSuccessResponse := encodeResponse(PolicyStatus{
		IsPublic: func() string {
			// The response carries string "boolean" values rather than real
			// booleans; comply with that (odd) convention.
			// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html
			if readable && writable {
				return "TRUE"
			}
			return "FALSE"
		}(),
	})
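
	// The XML written below resembles (illustrative):
	//
	//	<PolicyStatus>
	//	   <IsPublic>TRUE</IsPublic>
	//	</PolicyStatus>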
	writeSuccessResponseXML(w, encodedSuccessResponse)
}

// HeadBucketHandler - HEAD Bucket
// ----------
// This operation is useful to determine if a bucket exists.
// The operation returns a 200 OK if the bucket exists and you
// have permission to access it. Otherwise, the operation might
// return responses such as 404 Not Found and 403 Forbidden.
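//
// For example (illustrative request/response pairs):
//
//	HEAD /mybucket HTTP/1.1       ->  200 OK
//	HEAD /no-such-bucket HTTP/1.1 ->  404 Not Found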
func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "HeadBucket")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrServerNotInitialized))
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
		return
	}

	getBucketInfo := objectAPI.GetBucketInfo

	if _, err := getBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
		writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
		return
	}

	writeResponse(w, http.StatusOK, nil, mimeXML)
}

// DeleteBucketHandler - Delete bucket
func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DeleteBucket")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	// Verify if the caller has sufficient permissions.
	if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}
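
	// MinIO extension: deletion of a non-empty bucket may be forced by sending
	// the header named by xhttp.MinIOForceDelete with a boolean value, e.g.
	// "x-minio-force-delete: true" (illustrative). Force deletion is still
	// refused below when object locking or active replication rules are
	// configured on the bucket.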
	forceDelete := false
	if value := r.Header.Get(xhttp.MinIOForceDelete); value != "" {
		var err error
		forceDelete, err = strconv.ParseBool(value)
		if err != nil {
			apiErr := errorCodes.ToAPIErr(ErrInvalidRequest)
			apiErr.Description = err.Error()
			writeErrorResponse(ctx, w, apiErr, r.URL)
			return
		}

		// If the force delete header is set, we need to evaluate the policy
		// anyway, regardless of whether its value is true or false.
		if s3Error := checkRequestAuthType(ctx, r, policy.ForceDeleteBucketAction, bucket, ""); s3Error != ErrNone {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
			return
		}

		if forceDelete {
			if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
				return
			}

			rcfg, err := getReplicationConfig(ctx, bucket)
			switch {
			case err != nil:
				if _, ok := err.(BucketReplicationConfigNotFound); !ok {
					writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
					return
				}
			case rcfg.HasActiveRules("", true):
				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
				return
			}
		}
	}

	deleteBucket := objectAPI.DeleteBucket

	// Attempt to delete bucket.
	if err := deleteBucket(ctx, bucket, DeleteBucketOptions{
		Force:      forceDelete,
		SRDeleteOp: getSRBucketDeleteOp(globalSiteReplicationSys.isEnabled()),
	}); err != nil {
		apiErr := toAPIError(ctx, err)
		if _, ok := err.(BucketNotEmpty); ok {
			if globalBucketVersioningSys.Enabled(bucket) || globalBucketVersioningSys.Suspended(bucket) {
				apiErr.Description = "The bucket you tried to delete is not empty. You must delete all versions in the bucket."
			}
		}
		writeErrorResponse(ctx, w, apiErr, r.URL)
		return
	}

	if globalDNSConfig != nil {
		if err := globalDNSConfig.Delete(bucket); err != nil {
			logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually, bucket on MinIO no longer exists", err))
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
	}

	globalNotificationSys.DeleteBucketMetadata(ctx, bucket)
	globalReplicationPool.deleteResyncMetadata(ctx, bucket)

	// Call site replication hook.
	logger.LogIf(ctx, globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete))

	// Write success response.
	writeSuccessNoContent(w)

	sendEvent(eventArgs{
		EventName:    event.BucketRemoved,
		BucketName:   bucket,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})
}

// PutBucketObjectLockConfigHandler - PUT Bucket object lock configuration.
// ----------
// Places an Object Lock configuration on the specified bucket. The rule
// specified in the Object Lock configuration will be applied by default
// to every new object placed in the specified bucket.
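//
// The request body is an ObjectLockConfiguration XML document, for example
// (illustrative values):
//
//	<ObjectLockConfiguration>
//	   <ObjectLockEnabled>Enabled</ObjectLockEnabled>
//	   <Rule>
//	      <DefaultRetention>
//	         <Mode>COMPLIANCE</Mode>
//	         <Days>30</Days>
//	      </DefaultRetention>
//	   </Rule>
//	</ObjectLockConfiguration>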
func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PutBucketObjectLockConfig")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketObjectLockConfigurationAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	config, err := objectlock.ParseObjectLockConfig(r.Body)
	if err != nil {
		apiErr := errorCodes.ToAPIErr(ErrInvalidArgument)
		apiErr.Description = err.Error()
		writeErrorResponse(ctx, w, apiErr, r.URL)
		return
	}

	configData, err := xml.Marshal(config)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Deny object locking configuration settings on existing buckets without object lock enabled.
	if _, _, err = globalBucketMetadataSys.GetObjectLockConfig(bucket); err != nil {
		if _, ok := err.(BucketObjectLockConfigNotFound); ok {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrObjectLockConfigurationNotAllowed), r.URL)
		} else {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		}
		return
	}

	updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, configData)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Call site replication hook.
	//
	// We encode the xml bytes as base64 to ensure there are no encoding
	// errors.
	cfgStr := base64.StdEncoding.EncodeToString(configData)
	logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
		Type:             madmin.SRBucketMetaTypeObjectLockConfig,
		Bucket:           bucket,
		ObjectLockConfig: &cfgStr,
		UpdatedAt:        updatedAt,
	}))

	// Write success response.
	writeSuccessResponseHeadersOnly(w)
}

// GetBucketObjectLockConfigHandler - GET Bucket object lock configuration.
// ----------
// Gets the Object Lock configuration for a bucket. The rule specified in
// the Object Lock configuration will be applied by default to every new
// object placed in the specified bucket.
func (api objectAPIHandlers) GetBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetBucketObjectLockConfig")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	// check if user has permissions to perform this operation
	if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketObjectLockConfigurationAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucket)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	configData, err := xml.Marshal(config)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Write success response.
	writeSuccessResponseXML(w, configData)
}

// PutBucketTaggingHandler - PUT Bucket tagging.
// ----------
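// The request body is a Tagging XML document, for example (illustrative
// values):
//
//	<Tagging>
//	   <TagSet>
//	      <Tag>
//	         <Key>project</Key>
//	         <Value>alpha</Value>
//	      </Tag>
//	   </TagSet>
//	</Tagging>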
func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PutBucketTagging")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	// Check if bucket exists.
	if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketTaggingAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	tags, err := tags.ParseBucketXML(io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
		apiErr.Description = err.Error()
		writeErrorResponse(ctx, w, apiErr, r.URL)
		return
	}

	configData, err := xml.Marshal(tags)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, configData)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Call site replication hook.
	//
	// We encode the xml bytes as base64 to ensure there are no encoding
	// errors.
	cfgStr := base64.StdEncoding.EncodeToString(configData)
	logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
		Type:      madmin.SRBucketMetaTypeTags,
		Bucket:    bucket,
		Tags:      &cfgStr,
		UpdatedAt: updatedAt,
	}))

	// Write success response.
	writeSuccessResponseHeadersOnly(w)
}

// GetBucketTaggingHandler - GET Bucket tagging.
// ----------
func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetBucketTagging")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	// check if user has permissions to perform this operation
	if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketTaggingAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	config, _, err := globalBucketMetadataSys.GetTaggingConfig(bucket)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	configData, err := xml.Marshal(config)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Write success response.
	writeSuccessResponseXML(w, configData)
}

// DeleteBucketTaggingHandler - DELETE Bucket tagging.
// ----------
func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DeleteBucketTagging")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketTaggingAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	updatedAt, err := globalBucketMetadataSys.Delete(ctx, bucket, bucketTaggingConfig)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
		Type:      madmin.SRBucketMetaTypeTags,
		Bucket:    bucket,
		UpdatedAt: updatedAt,
	}))

	// Write success response.
	writeSuccessNoContent(w)
}