2021-04-18 15:41:13 -04:00
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
2015-06-30 17:42:29 -04:00
2016-08-18 19:23:42 -04:00
package cmd
2015-06-30 17:42:29 -04:00
import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/json"
	"encoding/xml"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/textproto"
	"net/url"
	"path"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/google/uuid"
	"github.com/gorilla/mux"

	"github.com/minio/madmin-go"
	"github.com/minio/minio-go/v7/pkg/set"
	"github.com/minio/minio-go/v7/pkg/tags"
	sse "github.com/minio/minio/internal/bucket/encryption"
	objectlock "github.com/minio/minio/internal/bucket/object/lock"
	"github.com/minio/minio/internal/bucket/replication"
	"github.com/minio/minio/internal/config/dns"
	"github.com/minio/minio/internal/crypto"
	"github.com/minio/minio/internal/event"
	"github.com/minio/minio/internal/handlers"
	"github.com/minio/minio/internal/hash"
	xhttp "github.com/minio/minio/internal/http"
	"github.com/minio/minio/internal/kms"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/minio/internal/sync/errgroup"
	"github.com/minio/pkg/bucket/policy"
	iampolicy "github.com/minio/pkg/iam/policy"
)
2019-11-12 17:50:18 -05:00
// Well-known file names of per-bucket metadata configuration documents.
// These names are used as keys when persisting/loading bucket metadata.
const (
	objectLockConfig        = "object-lock.xml" // object-lock (WORM/retention) configuration
	bucketTaggingConfig     = "tagging.xml"     // bucket tagging configuration
	bucketReplicationConfig = "replication.xml" // bucket replication configuration
)
2018-04-05 11:18:42 -04:00
// Check if there are buckets on server without corresponding entry in etcd backend and
// make entries. Here is the general flow
// - Range over all the available buckets
// - Check if a bucket has an entry in etcd backend
// -- If no, make an entry
2019-12-16 23:30:57 -05:00
// -- If yes, check if the entry matches local IP check if we
// need to update the entry then proceed to update
// -- If yes, check if the IP of entry matches local IP.
// This means entry is for this instance.
// -- If IP of the entry doesn't match, this means entry is
// for another instance. Log an error to console.
2019-11-09 12:27:23 -05:00
func initFederatorBackend ( buckets [ ] BucketInfo , objLayer ObjectLayer ) {
if len ( buckets ) == 0 {
2018-04-05 11:18:42 -04:00
return
}
2019-08-13 11:49:26 -04:00
// Get buckets in the DNS
dnsBuckets , err := globalDNSConfig . List ( )
2021-01-28 14:44:48 -05:00
if err != nil && ! IsErrIgnored ( err , dns . ErrNoEntriesFound , dns . ErrNotImplemented , dns . ErrDomainMissing ) {
2020-04-09 12:30:02 -04:00
logger . LogIf ( GlobalContext , err )
2019-08-13 11:49:26 -04:00
return
}
2020-02-03 03:24:20 -05:00
bucketsSet := set . NewStringSet ( )
bucketsToBeUpdated := set . NewStringSet ( )
bucketsInConflict := set . NewStringSet ( )
2021-01-28 14:44:48 -05:00
// This means that domain is updated, we should update
// all bucket entries with new domain name.
domainMissing := err == dns . ErrDomainMissing
2020-09-09 15:20:49 -04:00
if dnsBuckets != nil {
for _ , bucket := range buckets {
bucketsSet . Add ( bucket . Name )
r , ok := dnsBuckets [ bucket . Name ]
if ! ok {
bucketsToBeUpdated . Add ( bucket . Name )
2020-02-03 03:24:20 -05:00
continue
}
2020-09-09 15:20:49 -04:00
if ! globalDomainIPs . Intersection ( set . CreateStringSet ( getHostsSlice ( r ) ... ) ) . IsEmpty ( ) {
2021-01-28 14:44:48 -05:00
if globalDomainIPs . Difference ( set . CreateStringSet ( getHostsSlice ( r ) ... ) ) . IsEmpty ( ) && ! domainMissing {
2020-09-09 15:20:49 -04:00
// No difference in terms of domainIPs and nothing
// has changed so we don't change anything on the etcd.
2021-01-28 14:44:48 -05:00
//
// Additionally also check if domain is updated/missing with more
// entries, if that is the case we should update the
// new domain entries as well.
2020-09-09 15:20:49 -04:00
continue
}
2021-01-28 14:44:48 -05:00
2020-09-09 15:20:49 -04:00
// if domain IPs intersect then it won't be an empty set.
// such an intersection means that bucket exists on etcd.
// but if we do see a difference with local domain IPs with
// hostSlice from etcd then we should update with newer
// domainIPs, we proceed to do that here.
bucketsToBeUpdated . Add ( bucket . Name )
continue
}
2021-01-28 14:44:48 -05:00
2020-09-09 15:20:49 -04:00
// No IPs seem to intersect, this means that bucket exists but has
// different IP addresses perhaps from a different deployment.
// bucket names are globally unique in federation at a given
// path prefix, name collision is not allowed. We simply log
// an error and continue.
bucketsInConflict . Add ( bucket . Name )
2020-02-03 03:24:20 -05:00
}
}
2019-08-13 11:49:26 -04:00
2020-02-03 03:24:20 -05:00
// Add/update buckets that are not registered with the DNS
bucketsToBeUpdatedSlice := bucketsToBeUpdated . ToSlice ( )
2021-02-09 15:08:25 -05:00
g := errgroup . WithNErrs ( len ( bucketsToBeUpdatedSlice ) ) . WithConcurrency ( 50 )
2020-02-03 03:24:20 -05:00
for index := range bucketsToBeUpdatedSlice {
2019-10-14 12:44:51 -04:00
index := index
2018-04-05 11:18:42 -04:00
g . Go ( func ( ) error {
2020-02-03 03:24:20 -05:00
return globalDNSConfig . Put ( bucketsToBeUpdatedSlice [ index ] )
2018-04-05 11:18:42 -04:00
} , index )
}
2021-11-15 12:46:55 -05:00
ctx := GlobalContext
for _ , err := range g . Wait ( ) {
if err != nil {
logger . LogIf ( ctx , err )
return
}
2019-08-13 11:49:26 -04:00
}
2020-02-03 03:24:20 -05:00
for _ , bucket := range bucketsInConflict . ToSlice ( ) {
2021-02-09 15:08:25 -05:00
logger . LogIf ( ctx , fmt . Errorf ( "Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket" , bucket , globalDomainIPs . ToSlice ( ) ) )
2020-02-03 03:24:20 -05:00
}
2021-01-28 14:44:48 -05:00
var wg sync . WaitGroup
2019-08-13 11:49:26 -04:00
// Remove buckets that are in DNS for this server, but aren't local
2020-02-03 03:24:20 -05:00
for bucket , records := range dnsBuckets {
if bucketsSet . Contains ( bucket ) {
continue
}
2019-08-13 11:49:26 -04:00
2020-02-03 03:24:20 -05:00
if globalDomainIPs . Intersection ( set . CreateStringSet ( getHostsSlice ( records ) ... ) ) . IsEmpty ( ) {
2019-08-13 11:49:26 -04:00
// This is not for our server, so we can continue
2020-02-03 03:24:20 -05:00
continue
}
2019-08-13 11:49:26 -04:00
2021-01-28 14:44:48 -05:00
wg . Add ( 1 )
go func ( bucket string ) {
defer wg . Done ( )
// We go to here, so we know the bucket no longer exists,
// but is registered in DNS to this server
if err := globalDNSConfig . Delete ( bucket ) ; err != nil {
logger . LogIf ( GlobalContext , fmt . Errorf ( "Failed to remove DNS entry for %s due to %w" ,
bucket , err ) )
}
} ( bucket )
2018-04-05 11:18:42 -04:00
}
2021-01-28 14:44:48 -05:00
wg . Wait ( )
2018-04-05 11:18:42 -04:00
}
2015-12-27 02:38:38 -05:00
// GetBucketLocationHandler - GET Bucket location.
// -------------------------
// This operation returns bucket location.
2016-04-12 15:45:15 -04:00
func ( api objectAPIHandlers ) GetBucketLocationHandler ( w http . ResponseWriter , r * http . Request ) {
2018-07-20 21:46:32 -04:00
ctx := newContext ( r , w , "GetBucketLocation" )
2018-03-14 15:01:47 -04:00
2021-01-26 16:21:51 -05:00
defer logger . AuditLog ( ctx , w , r , mustGetClaimsFromToken ( r ) )
2018-10-12 15:25:59 -04:00
2016-02-15 20:42:39 -05:00
vars := mux . Vars ( r )
2015-12-27 02:38:38 -05:00
bucket := vars [ "bucket" ]
2016-08-10 21:47:49 -04:00
objectAPI := api . ObjectAPI ( )
if objectAPI == nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrServerNotInitialized ) , r . URL )
2016-08-10 21:47:49 -04:00
return
}
2018-04-24 18:53:30 -04:00
if s3Error := checkRequestAuthType ( ctx , r , policy . GetBucketLocationAction , bucket , "" ) ; s3Error != ErrNone {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( s3Error ) , r . URL )
2016-11-21 16:51:05 -05:00
return
2016-02-04 15:52:25 -05:00
}
2018-03-28 17:14:06 -04:00
getBucketInfo := objectAPI . GetBucketInfo
2019-08-09 20:09:08 -04:00
2018-03-28 17:14:06 -04:00
if _ , err := getBucketInfo ( ctx , bucket ) ; err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2016-01-19 20:49:48 -05:00
return
2015-12-27 02:38:38 -05:00
}
2016-02-15 20:42:39 -05:00
// Generate response.
2016-03-06 15:16:22 -05:00
encodedSuccessResponse := encodeResponse ( LocationResponse { } )
config/main: Re-write config files - add to new config v3
- New config format.
```
{
"version": "3",
"address": ":9000",
"backend": {
"type": "fs",
"disk": "/path"
},
"credential": {
"accessKey": "WLGDGYAQYIGI833EV05A",
"secretKey": "BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF"
},
"region": "us-east-1",
"logger": {
"file": {
"enable": false,
"fileName": "",
"level": "error"
},
"syslog": {
"enable": false,
"address": "",
"level": "debug"
},
"console": {
"enable": true,
"level": "fatal"
}
}
}
```
New command lines in lieu of supporting XL.
Minio initialize filesystem backend.
~~~
$ minio init fs <path>
~~~
Minio initialize XL backend.
~~~
$ minio init xl <url1>...<url16>
~~~
For 'fs' backend it starts the server.
~~~
$ minio server
~~~
For 'xl' backend it waits for servers to join.
~~~
$ minio server
... [PROGRESS BAR] of servers connecting
~~~
Now on other servers execute 'join' and they connect.
~~~
....
minio join <url1> -- from <url2> && minio server
minio join <url1> -- from <url3> && minio server
...
...
minio join <url1> -- from <url16> && minio server
~~~
2016-02-12 18:27:10 -05:00
// Get current region.
2021-11-25 16:06:25 -05:00
region := globalSite . Region
2017-01-18 15:24:34 -05:00
if region != globalMinioDefaultRegion {
2016-03-06 15:16:22 -05:00
encodedSuccessResponse = encodeResponse ( LocationResponse {
config/main: Re-write config files - add to new config v3
- New config format.
```
{
"version": "3",
"address": ":9000",
"backend": {
"type": "fs",
"disk": "/path"
},
"credential": {
"accessKey": "WLGDGYAQYIGI833EV05A",
"secretKey": "BYvgJM101sHngl2uzjXS/OBF/aMxAN06JrJ3qJlF"
},
"region": "us-east-1",
"logger": {
"file": {
"enable": false,
"fileName": "",
"level": "error"
},
"syslog": {
"enable": false,
"address": "",
"level": "debug"
},
"console": {
"enable": true,
"level": "fatal"
}
}
}
```
New command lines in lieu of supporting XL.
Minio initialize filesystem backend.
~~~
$ minio init fs <path>
~~~
Minio initialize XL backend.
~~~
$ minio init xl <url1>...<url16>
~~~
For 'fs' backend it starts the server.
~~~
$ minio server
~~~
For 'xl' backend it waits for servers to join.
~~~
$ minio server
... [PROGRESS BAR] of servers connecting
~~~
Now on other servers execute 'join' and they connect.
~~~
....
minio join <url1> -- from <url2> && minio server
minio join <url1> -- from <url3> && minio server
...
...
minio join <url1> -- from <url16> && minio server
~~~
2016-02-12 18:27:10 -05:00
Location : region ,
2016-02-15 20:42:39 -05:00
} )
}
2017-01-06 03:37:00 -05:00
// Write success response.
writeSuccessResponseXML ( w , encodedSuccessResponse )
2015-12-27 02:38:38 -05:00
}
2015-06-30 23:15:48 -04:00
// ListMultipartUploadsHandler - GET Bucket (List Multipart uploads)
2015-06-30 17:42:29 -04:00
// -------------------------
// This operation lists in-progress multipart uploads. An in-progress
// multipart upload is a multipart upload that has been initiated,
2015-10-16 22:09:35 -04:00
// using the Initiate Multipart Upload request, but has not yet been
// completed or aborted. This operation returns at most 1,000 multipart
// uploads in the response.
2015-06-30 17:42:29 -04:00
//
2016-04-12 15:45:15 -04:00
func ( api objectAPIHandlers ) ListMultipartUploadsHandler ( w http . ResponseWriter , r * http . Request ) {
2018-07-20 21:46:32 -04:00
ctx := newContext ( r , w , "ListMultipartUploads" )
2018-03-14 15:01:47 -04:00
2021-01-26 16:21:51 -05:00
defer logger . AuditLog ( ctx , w , r , mustGetClaimsFromToken ( r ) )
2018-10-12 15:25:59 -04:00
2016-02-15 20:42:39 -05:00
vars := mux . Vars ( r )
2015-10-16 22:09:35 -04:00
bucket := vars [ "bucket" ]
2016-08-10 21:47:49 -04:00
objectAPI := api . ObjectAPI ( )
if objectAPI == nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrServerNotInitialized ) , r . URL )
2016-08-10 21:47:49 -04:00
return
}
2018-04-24 18:53:30 -04:00
if s3Error := checkRequestAuthType ( ctx , r , policy . ListBucketMultipartUploadsAction , bucket , "" ) ; s3Error != ErrNone {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( s3Error ) , r . URL )
2016-02-15 20:42:39 -05:00
return
2016-02-04 15:52:25 -05:00
}
2021-08-08 01:43:01 -04:00
prefix , keyMarker , uploadIDMarker , delimiter , maxUploads , encodingType , errCode := getBucketMultipartResources ( r . Form )
2019-02-12 04:25:52 -05:00
if errCode != ErrNone {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( errCode ) , r . URL )
2018-10-18 10:31:46 -04:00
return
}
2019-02-12 04:25:52 -05:00
objectAPI: Fix object API interface, remove unnecessary structs.
ObjectAPI changes.
```
ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, *probe.Error)
ListMultipartUploads(bucket, objectPrefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, *probe.Error)
ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, *probe.Error)
CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (ObjectInfo, *probe.Error)
```
2016-04-03 04:34:20 -04:00
if maxUploads < 0 {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrInvalidMaxUploads ) , r . URL )
2015-07-16 20:22:45 -04:00
return
}
2019-02-12 04:25:52 -05:00
2016-04-05 15:26:17 -04:00
if keyMarker != "" {
2016-04-29 17:24:10 -04:00
// Marker not common with prefix is not implemented.
2019-12-06 02:16:06 -05:00
if ! HasPrefix ( keyMarker , prefix ) {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrNotImplemented ) , r . URL )
2016-04-29 17:24:10 -04:00
return
2016-04-05 15:26:17 -04:00
}
2015-06-30 17:42:29 -04:00
}
2018-03-14 15:01:47 -04:00
listMultipartsInfo , err := objectAPI . ListMultipartUploads ( ctx , bucket , prefix , keyMarker , uploadIDMarker , delimiter , maxUploads )
2015-09-19 06:20:07 -04:00
if err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2015-08-03 19:17:21 -04:00
return
}
2015-09-19 06:20:07 -04:00
// generate response
2019-02-24 01:14:24 -05:00
response := generateListMultipartUploadsResponse ( bucket , listMultipartsInfo , encodingType )
2016-03-06 15:16:22 -05:00
encodedSuccessResponse := encodeResponse ( response )
2017-01-06 03:37:00 -05:00
2016-01-08 03:40:06 -05:00
// write success response.
2017-01-06 03:37:00 -05:00
writeSuccessResponseXML ( w , encodedSuccessResponse )
2015-06-30 17:42:29 -04:00
}
2016-10-09 12:21:37 -04:00
// ListBucketsHandler - GET Service.
// -----------
// This implementation of the GET operation returns a list of all buckets
// owned by the authenticated sender of the request.
func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListBuckets")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	listBuckets := objectAPI.ListBuckets

	// NOTE: ErrAccessDenied is deliberately tolerated here; in that case the
	// full bucket list is fetched and then filtered per-bucket by IAM below.
	cred, owner, s3Error := checkRequestAuthTypeCredential(ctx, r, policy.ListAllMyBucketsAction, "", "")
	if s3Error != ErrNone && s3Error != ErrAccessDenied {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	// Anonymous users, should be rejected.
	if cred.AccessKey == "" {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
		return
	}

	// If etcd, dns federation configured list buckets from etcd.
	var bucketsInfo []BucketInfo
	if globalDNSConfig != nil && globalBucketFederation {
		dnsBuckets, err := globalDNSConfig.List()
		if err != nil && !IsErrIgnored(err,
			dns.ErrNoEntriesFound,
			dns.ErrDomainMissing) {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
		// Each DNS record set describes one bucket; the first record carries
		// the bucket name and creation time used for the listing.
		for _, dnsRecords := range dnsBuckets {
			bucketsInfo = append(bucketsInfo, BucketInfo{
				Name:    dnsRecords[0].Key,
				Created: dnsRecords[0].CreationDate,
			})
		}
		// DNS map iteration order is random; sort for a stable listing.
		sort.Slice(bucketsInfo, func(i, j int) bool {
			return bucketsInfo[i].Name < bucketsInfo[j].Name
		})
	} else {
		// Invoke the list buckets.
		var err error
		bucketsInfo, err = listBuckets(ctx)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
	}

	if s3Error == ErrAccessDenied {
		// Set prefix value for "s3:prefix" policy conditionals.
		r.Header.Set("prefix", "")
		// Set delimiter value for "s3:delimiter" policy conditionals.
		r.Header.Set("delimiter", SlashSeparator)
		n := 0
		// Use the following trick to filter in place
		// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
		for _, bucketInfo := range bucketsInfo {
			// Keep only buckets this credential may list per IAM policy.
			if globalIAMSys.IsAllowed(iampolicy.Args{
				AccountName:     cred.AccessKey,
				Groups:          cred.Groups,
				Action:          iampolicy.ListBucketAction,
				BucketName:      bucketInfo.Name,
				ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
				IsOwner:         owner,
				ObjectName:      "",
				Claims:          cred.Claims,
			}) {
				bucketsInfo[n] = bucketInfo
				n++
			}
		}
		bucketsInfo = bucketsInfo[:n]
		// No buckets can be filtered return access denied error.
		if len(bucketsInfo) == 0 {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
			return
		}
	}

	// Generate response.
	response := generateListBucketsResponse(bucketsInfo)
	encodedSuccessResponse := encodeResponse(response)

	// Write response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}
2016-03-05 19:43:48 -05:00
// DeleteMultipleObjectsHandler - deletes multiple objects.
2016-04-12 15:45:15 -04:00
func ( api objectAPIHandlers ) DeleteMultipleObjectsHandler ( w http . ResponseWriter , r * http . Request ) {
2018-07-20 21:46:32 -04:00
ctx := newContext ( r , w , "DeleteMultipleObjects" )
2018-03-14 15:01:47 -04:00
2021-01-26 16:21:51 -05:00
defer logger . AuditLog ( ctx , w , r , mustGetClaimsFromToken ( r ) )
2018-11-02 21:40:08 -04:00
2016-03-05 19:43:48 -05:00
vars := mux . Vars ( r )
bucket := vars [ "bucket" ]
2016-08-10 21:47:49 -04:00
objectAPI := api . ObjectAPI ( )
if objectAPI == nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrServerNotInitialized ) , r . URL )
2016-08-10 21:47:49 -04:00
return
}
2020-02-22 22:36:46 -05:00
// Content-MD5 is required and should be set
2016-03-05 19:43:48 -05:00
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
2020-02-22 22:36:46 -05:00
if _ , ok := r . Header [ xhttp . ContentMD5 ] ; ! ok {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrMissingContentMD5 ) , r . URL )
2016-03-05 19:43:48 -05:00
return
}
2020-02-22 22:36:46 -05:00
// Content-Length is required and should be non-zero
2016-03-05 19:43:48 -05:00
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
2020-02-22 22:36:46 -05:00
if r . ContentLength <= 0 {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrMissingContentLength ) , r . URL )
2016-03-05 19:43:48 -05:00
return
}
2020-02-21 00:59:57 -05:00
// The max. XML contains 100000 object names (each at most 1024 bytes long) + XML overhead
const maxBodySize = 2 * 100000 * 1024
2016-03-05 19:43:48 -05:00
// Unmarshal list of keys to be deleted.
2021-11-24 13:01:07 -05:00
deleteObjectsReq := & DeleteObjectsRequest { }
if err := xmlDecoder ( r . Body , deleteObjectsReq , maxBodySize ) ; err != nil {
2019-10-11 21:50:54 -04:00
logger . LogIf ( ctx , err , logger . Application )
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2016-03-05 19:43:48 -05:00
return
}
2018-03-27 19:44:45 -04:00
2022-01-03 04:28:52 -05:00
objects := make ( [ ] ObjectV , len ( deleteObjectsReq . Objects ) )
2021-03-11 16:57:03 -05:00
// Convert object name delete objects if it has `/` in the beginning.
2021-11-24 13:01:07 -05:00
for i := range deleteObjectsReq . Objects {
deleteObjectsReq . Objects [ i ] . ObjectName = trimLeadingSlash ( deleteObjectsReq . Objects [ i ] . ObjectName )
2022-01-03 04:28:52 -05:00
objects [ i ] = deleteObjectsReq . Objects [ i ] . ObjectV
2021-03-11 16:57:03 -05:00
}
2022-01-03 04:28:52 -05:00
// Make sure to update context to print ObjectNames for multi objects.
ctx = updateReqContext ( ctx , objects ... )
2020-11-04 12:13:34 -05:00
// Call checkRequestAuthType to populate ReqInfo.AccessKey before GetBucketInfo()
// Ignore errors here to preserve the S3 error behavior of GetBucketInfo()
checkRequestAuthType ( ctx , r , policy . DeleteObjectAction , bucket , "" )
2020-07-10 11:30:23 -04:00
// Before proceeding validate if bucket exists.
_ , err := objectAPI . GetBucketInfo ( ctx , bucket )
if err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-07-10 11:30:23 -04:00
return
}
2019-05-13 15:25:49 -04:00
deleteObjectsFn := objectAPI . DeleteObjects
2018-07-21 00:21:01 -04:00
if api . CacheAPI ( ) != nil {
2019-05-13 15:25:49 -04:00
deleteObjectsFn = api . CacheAPI ( ) . DeleteObjects
2018-07-21 00:21:01 -04:00
}
2016-09-02 04:59:08 -04:00
2021-10-18 11:38:33 -04:00
// Return MalformedXML as per the S3 spec when the object list is empty or exceeds the maximum delete list size
2021-11-24 13:01:07 -05:00
if len ( deleteObjectsReq . Objects ) == 0 || len ( deleteObjectsReq . Objects ) > maxDeleteList {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrMalformedXML ) , r . URL )
2021-02-13 12:48:25 -05:00
return
}
2022-01-02 12:15:06 -05:00
objectsToDelete := map [ ObjectToDelete ] int { }
2019-11-20 16:18:09 -05:00
getObjectInfoFn := objectAPI . GetObjectInfo
if api . CacheAPI ( ) != nil {
getObjectInfoFn = api . CacheAPI ( ) . GetObjectInfo
}
2021-08-17 10:50:00 -04:00
2020-11-25 14:24:50 -05:00
var (
2021-09-18 16:31:35 -04:00
hasLockEnabled bool
dsc ReplicateDecision
goi ObjectInfo
gerr error
2020-11-25 14:24:50 -05:00
)
2021-11-24 13:01:07 -05:00
replicateDeletes := hasReplicationRules ( ctx , bucket , deleteObjectsReq . Objects )
2020-11-25 14:24:50 -05:00
if rcfg , _ := globalBucketObjectLockSys . Get ( bucket ) ; rcfg . LockEnabled {
hasLockEnabled = true
}
2021-04-19 13:30:42 -04:00
2021-08-17 10:50:00 -04:00
versioned := globalBucketVersioningSys . Enabled ( bucket )
suspended := globalBucketVersioningSys . Suspended ( bucket )
2021-11-24 13:01:07 -05:00
type deleteResult struct {
delInfo DeletedObject
errInfo DeleteError
}
deleteResults := make ( [ ] deleteResult , len ( deleteObjectsReq . Objects ) )
oss := make ( [ ] * objSweeper , len ( deleteObjectsReq . Objects ) )
for index , object := range deleteObjectsReq . Objects {
2020-06-12 23:04:01 -04:00
if apiErrCode := checkRequestAuthType ( ctx , r , policy . DeleteObjectAction , bucket , object . ObjectName ) ; apiErrCode != ErrNone {
if apiErrCode == ErrSignatureDoesNotMatch || apiErrCode == ErrInvalidAccessKeyID {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( apiErrCode ) , r . URL )
2019-04-22 10:54:43 -04:00
return
2017-08-15 15:49:31 -04:00
}
2020-06-12 23:04:01 -04:00
apiErr := errorCodes . ToAPIErr ( apiErrCode )
2021-11-24 13:01:07 -05:00
deleteResults [ index ] . errInfo = DeleteError {
2020-06-12 23:04:01 -04:00
Code : apiErr . Code ,
Message : apiErr . Description ,
Key : object . ObjectName ,
VersionID : object . VersionID ,
}
2018-07-21 00:21:01 -04:00
continue
}
2020-12-11 23:44:08 -05:00
if object . VersionID != "" && object . VersionID != nullVersionID {
if _ , err := uuid . Parse ( object . VersionID ) ; err != nil {
logger . LogIf ( ctx , fmt . Errorf ( "invalid version-id specified %w" , err ) )
apiErr := errorCodes . ToAPIErr ( ErrNoSuchVersion )
2021-11-24 13:01:07 -05:00
deleteResults [ index ] . errInfo = DeleteError {
2020-12-11 23:44:08 -05:00
Code : apiErr . Code ,
Message : apiErr . Description ,
Key : object . ObjectName ,
VersionID : object . VersionID ,
}
continue
}
}
2021-08-17 10:50:00 -04:00
opts := ObjectOptions {
VersionID : object . VersionID ,
Versioned : versioned ,
VersionSuspended : suspended ,
}
if replicateDeletes || object . VersionID != "" && hasLockEnabled || ! globalTierConfigMgr . Empty ( ) {
if ! globalTierConfigMgr . Empty ( ) && object . VersionID == "" && opts . VersionSuspended {
opts . VersionID = nullVersionID
}
goi , gerr = getObjectInfoFn ( ctx , bucket , object . ObjectName , opts )
}
if ! globalTierConfigMgr . Empty ( ) {
oss [ index ] = newObjSweeper ( bucket , object . ObjectName ) . WithVersion ( opts . VersionID ) . WithVersioning ( versioned , suspended )
oss [ index ] . SetTransitionState ( goi . TransitionedObject )
2020-11-25 14:24:50 -05:00
}
2021-08-17 10:50:00 -04:00
2020-11-25 14:24:50 -05:00
if replicateDeletes {
2021-09-18 16:31:35 -04:00
dsc = checkReplicateDelete ( ctx , bucket , ObjectToDelete {
2022-01-03 04:28:52 -05:00
ObjectV : ObjectV {
ObjectName : object . ObjectName ,
VersionID : object . VersionID ,
} ,
2021-09-18 16:31:35 -04:00
} , goi , opts , gerr )
if dsc . ReplicateAny ( ) {
2020-11-25 14:24:50 -05:00
if object . VersionID != "" {
object . VersionPurgeStatus = Pending
2021-09-18 16:31:35 -04:00
object . VersionPurgeStatuses = dsc . PendingStatus ( )
2020-11-25 14:24:50 -05:00
} else {
2021-09-18 16:31:35 -04:00
object . DeleteMarkerReplicationStatus = dsc . PendingStatus ( )
2020-11-25 14:24:50 -05:00
}
2021-09-18 16:31:35 -04:00
object . ReplicateDecisionStr = dsc . String ( )
2020-11-25 14:24:50 -05:00
}
}
2021-08-17 10:50:00 -04:00
if object . VersionID != "" && hasLockEnabled {
if apiErrCode := enforceRetentionBypassForDelete ( ctx , r , bucket , object , goi , gerr ) ; apiErrCode != ErrNone {
apiErr := errorCodes . ToAPIErr ( apiErrCode )
2021-11-24 13:01:07 -05:00
deleteResults [ index ] . errInfo = DeleteError {
2021-08-17 10:50:00 -04:00
Code : apiErr . Code ,
Message : apiErr . Description ,
Key : object . ObjectName ,
VersionID : object . VersionID ,
2020-06-12 23:04:01 -04:00
}
2021-08-17 10:50:00 -04:00
continue
2020-04-06 16:44:16 -04:00
}
2019-11-20 16:18:09 -05:00
}
2020-04-06 16:44:16 -04:00
2019-11-19 20:42:27 -05:00
// Avoid duplicate objects, we use map to filter them out.
2020-06-12 23:04:01 -04:00
if _ , ok := objectsToDelete [ object ] ; ! ok {
objectsToDelete [ object ] = index
2019-11-19 20:42:27 -05:00
}
2019-05-13 15:25:49 -04:00
}
2020-06-12 23:04:01 -04:00
toNames := func ( input map [ ObjectToDelete ] int ) ( output [ ] ObjectToDelete ) {
output = make ( [ ] ObjectToDelete , len ( input ) )
2019-11-20 20:51:10 -05:00
idx := 0
2020-06-12 23:04:01 -04:00
for obj := range input {
output [ idx ] = obj
2019-11-20 20:51:10 -05:00
idx ++
2019-04-22 10:54:43 -04:00
}
2019-05-13 15:25:49 -04:00
return
}
2022-01-06 13:47:49 -05:00
// Disable timeouts and cancellation
ctx = bgContext ( ctx )
2019-11-20 20:51:10 -05:00
deleteList := toNames ( objectsToDelete )
2020-06-12 23:04:01 -04:00
dObjects , errs := deleteObjectsFn ( ctx , bucket , deleteList , ObjectOptions {
2021-08-17 10:50:00 -04:00
Versioned : versioned ,
VersionSuspended : suspended ,
2020-06-12 23:04:01 -04:00
} )
2021-11-24 13:01:07 -05:00
2020-06-12 23:04:01 -04:00
for i := range errs {
2021-03-30 20:15:36 -04:00
// DeleteMarkerVersionID is not used specifically to avoid
// lookup errors, since DeleteMarkerVersionID is only
// created during DeleteMarker creation when client didn't
// specify a versionID.
objToDel := ObjectToDelete {
2022-01-03 04:28:52 -05:00
ObjectV : ObjectV {
ObjectName : dObjects [ i ] . ObjectName ,
VersionID : dObjects [ i ] . VersionID ,
} ,
2021-09-18 16:31:35 -04:00
VersionPurgeStatus : dObjects [ i ] . VersionPurgeStatus ( ) ,
VersionPurgeStatuses : dObjects [ i ] . ReplicationState . VersionPurgeStatusInternal ,
DeleteMarkerReplicationStatus : dObjects [ i ] . ReplicationState . ReplicationStatusInternal ,
ReplicateDecisionStr : dObjects [ i ] . ReplicationState . ReplicateDecisionStr ,
2021-03-30 20:15:36 -04:00
}
dindex := objectsToDelete [ objToDel ]
2020-12-11 15:39:09 -05:00
if errs [ i ] == nil || isErrObjectNotFound ( errs [ i ] ) || isErrVersionNotFound ( errs [ i ] ) {
2020-11-12 15:10:59 -05:00
if replicateDeletes {
2021-09-18 16:31:35 -04:00
dObjects [ i ] . ReplicationState = deleteList [ i ] . ReplicationState ( )
2020-11-12 15:10:59 -05:00
}
2021-11-24 13:01:07 -05:00
deleteResults [ dindex ] . delInfo = dObjects [ i ]
2020-06-12 23:04:01 -04:00
continue
}
2020-11-12 15:10:59 -05:00
apiErr := toAPIError ( ctx , errs [ i ] )
2021-11-24 13:01:07 -05:00
deleteResults [ dindex ] . errInfo = DeleteError {
2020-06-12 23:04:01 -04:00
Code : apiErr . Code ,
Message : apiErr . Description ,
Key : deleteList [ i ] . ObjectName ,
VersionID : deleteList [ i ] . VersionID ,
}
2016-09-02 04:59:08 -04:00
}
2021-11-24 13:01:07 -05:00
// Generate response
2022-01-02 12:15:06 -05:00
deleteErrors := make ( [ ] DeleteError , 0 , len ( deleteObjectsReq . Objects ) )
deletedObjects := make ( [ ] DeletedObject , 0 , len ( deleteObjectsReq . Objects ) )
2021-11-24 13:01:07 -05:00
for _ , deleteResult := range deleteResults {
if deleteResult . errInfo . Code != "" {
deleteErrors = append ( deleteErrors , deleteResult . errInfo )
} else {
deletedObjects = append ( deletedObjects , deleteResult . delInfo )
2016-09-07 14:49:12 -04:00
}
2016-03-05 19:43:48 -05:00
}
2016-09-02 04:59:08 -04:00
2021-11-24 13:01:07 -05:00
response := generateMultiDeleteResponse ( deleteObjectsReq . Quiet , deletedObjects , deleteErrors )
2016-03-05 19:43:48 -05:00
encodedSuccessResponse := encodeResponse ( response )
2017-01-06 03:37:00 -05:00
2016-03-05 19:43:48 -05:00
// Write success response.
2017-01-06 03:37:00 -05:00
writeSuccessResponseXML ( w , encodedSuccessResponse )
2020-11-19 21:43:58 -05:00
for _ , dobj := range deletedObjects {
2021-02-11 01:00:42 -05:00
if dobj . ObjectName == "" {
continue
}
2020-11-12 15:10:59 -05:00
if replicateDeletes {
2021-09-18 16:31:35 -04:00
if dobj . DeleteMarkerReplicationStatus ( ) == replication . Pending || dobj . VersionPurgeStatus ( ) == Pending {
2021-06-01 22:59:11 -04:00
dv := DeletedObjectReplicationInfo {
2020-11-12 15:10:59 -05:00
DeletedObject : dobj ,
Bucket : bucket ,
2021-01-12 01:36:51 -05:00
}
2021-09-18 16:31:35 -04:00
scheduleReplicationDelete ( ctx , dv , objectAPI )
2020-11-12 15:10:59 -05:00
}
2020-11-19 21:43:58 -05:00
}
2020-11-25 14:24:50 -05:00
2021-04-19 13:30:42 -04:00
}
// Notify deleted event for objects.
for _ , dobj := range deletedObjects {
2021-08-17 10:50:00 -04:00
if dobj . ObjectName == "" {
continue
}
2020-10-17 00:22:12 -04:00
eventName := event . ObjectRemovedDelete
2020-06-12 23:04:01 -04:00
objInfo := ObjectInfo {
2021-03-30 20:15:36 -04:00
Name : dobj . ObjectName ,
VersionID : dobj . VersionID ,
DeleteMarker : dobj . DeleteMarker ,
2020-06-12 23:04:01 -04:00
}
2020-10-17 00:22:12 -04:00
2021-03-30 20:15:36 -04:00
if objInfo . DeleteMarker {
2020-10-17 00:22:12 -04:00
objInfo . VersionID = dobj . DeleteMarkerVersionID
eventName = event . ObjectRemovedDeleteMarkerCreated
2020-06-12 23:04:01 -04:00
}
2020-10-17 00:22:12 -04:00
2018-03-15 16:03:41 -04:00
sendEvent ( eventArgs {
2020-10-17 00:22:12 -04:00
EventName : eventName ,
2020-06-12 23:04:01 -04:00
BucketName : bucket ,
Object : objInfo ,
2018-11-02 21:40:08 -04:00
ReqParams : extractReqParams ( r ) ,
RespElements : extractRespElements ( w ) ,
UserAgent : r . UserAgent ( ) ,
2019-03-25 14:45:42 -04:00
Host : handlers . GetSourceIP ( r ) ,
2016-09-29 01:46:19 -04:00
} )
2016-09-02 04:59:08 -04:00
}
2021-08-12 21:57:37 -04:00
// Clean up transitioned objects from remote tier
for _ , os := range oss {
if os == nil { // skip objects that weren't deleted due to invalid versionID etc.
continue
}
logger . LogIf ( ctx , os . Sweep ( ) )
}
2016-03-05 19:43:48 -05:00
}
2015-06-30 23:15:48 -04:00
// PutBucketHandler - PUT Bucket
// ----------
// This implementation of the PUT operation creates a new bucket for authenticated request.
// It validates the optional object-lock header and the location constraint, then creates
// the bucket either through the DNS/federation path (when globalDNSConfig is set) or
// directly on the local object layer, firing a BucketCreated event on success.
func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PutBucket")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	// Object lock can only be enabled at bucket creation time; the header value
	// must be exactly "true" or "false" (case-insensitive) when present.
	objectLockEnabled := false
	if vs, found := r.Header[http.CanonicalHeaderKey("x-amz-bucket-object-lock-enabled")]; found {
		v := strings.ToLower(strings.Join(vs, ""))
		if v != "true" && v != "false" {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
			return
		}
		objectLockEnabled = v == "true"
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.CreateBucketAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	// Parse incoming location constraint.
	location, s3Error := parseLocationConstraint(r)
	if s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	// Validate if location sent by the client is valid, reject
	// requests which do not follow valid region requirements.
	if !isValidLocation(location) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRegion), r.URL)
		return
	}

	opts := BucketOptions{
		Location:    location,
		LockEnabled: objectLockEnabled,
	}

	// Federated deployment: the DNS store is the authority on whether the
	// bucket name is already taken somewhere in the federation.
	if globalDNSConfig != nil {
		sr, err := globalDNSConfig.Get(bucket)
		if err != nil {
			// ErrNotImplemented indicates a DNS backend that doesn't need to check if bucket already
			// exists elsewhere
			if err == dns.ErrNoEntriesFound || err == dns.ErrNotImplemented {
				// Proceed to creating a bucket.
				if err = objectAPI.MakeBucketWithLocation(ctx, bucket, opts); err != nil {
					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
					return
				}
				if err = globalDNSConfig.Put(bucket); err != nil {
					// Roll back the locally created bucket so DNS and the object
					// layer stay consistent; the delete error is intentionally
					// ignored (best-effort cleanup on an already failing path).
					objectAPI.DeleteBucket(context.Background(), bucket, DeleteBucketOptions{Force: false, NoRecreate: true})
					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
					return
				}

				// Load updated bucket metadata into memory.
				globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)

				// Make sure to add Location information here only for bucket
				w.Header().Set(xhttp.Location,
					getObjectLocation(r, globalDomainNames, bucket, ""))

				writeSuccessResponseHeadersOnly(w)

				sendEvent(eventArgs{
					EventName:    event.BucketCreated,
					BucketName:   bucket,
					ReqParams:    extractReqParams(r),
					RespElements: extractRespElements(w),
					UserAgent:    r.UserAgent(),
					Host:         handlers.GetSourceIP(r),
				})

				return
			}
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
		// DNS entry exists: decide whether this deployment owns it by
		// intersecting our domain IPs with the hosts recorded in DNS.
		apiErr := ErrBucketAlreadyExists
		if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(sr)...)).IsEmpty() {
			apiErr = ErrBucketAlreadyOwnedByYou
		}
		// No IPs seem to intersect, this means that bucket exists but has
		// different IP addresses perhaps from a different deployment.
		// bucket names are globally unique in federation at a given
		// path prefix, name collision is not allowed. Return appropriate error.
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErr), r.URL)
		return
	}

	// Proceed to creating a bucket.
	err := objectAPI.MakeBucketWithLocation(ctx, bucket, opts)
	if _, ok := err.(BucketExists); ok {
		// Though bucket exists locally, we send the site-replication
		// hook to ensure all sites have this bucket. If the hook
		// succeeds, the client will still receive a bucket exists
		// message.
		err2 := globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts)
		if err2 != nil {
			// NOTE: the original MakeBucketWithLocation error (not err2) is
			// reported to the client.
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
	}
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Load updated bucket metadata into memory.
	globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)

	// Call site replication hook
	err = globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Make sure to add Location information here only for bucket
	if cp := pathClean(r.URL.Path); cp != "" {
		w.Header().Set(xhttp.Location, cp) // Clean any trailing slashes.
	}

	writeSuccessResponseHeadersOnly(w)

	sendEvent(eventArgs{
		EventName:    event.BucketCreated,
		BucketName:   bucket,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})
}
2015-10-02 02:51:17 -04:00
// PostPolicyBucketHandler - POST policy
// ----------
// This implementation of the POST operation handles object creation with a specified
// signature policy in multipart/form-data. The flow is: parse the multipart form,
// validate the POST policy signature and IAM permission, enforce the decoded policy
// conditions (including content-length-range), optionally wrap the upload stream with
// SSE encryption, store the object, and finally answer according to the
// success_action_redirect / success_action_status form fields.
func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PostPolicyBucket")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	// Reject any SSE request when the backend cannot encrypt.
	if _, ok := crypto.IsRequested(r.Header); !objectAPI.IsEncryptionSupported() && ok {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	bucket := mux.Vars(r)["bucket"]

	// Require Content-Length to be set in the request
	size := r.ContentLength
	if size < 0 {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
		return
	}

	resource, err := getResource(r.URL.Path, r.Host, globalDomainNames)
	if err != nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}

	// Make sure that the URL does not contain object name.
	if bucket != path.Clean(resource[1:]) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
		return
	}

	// Here the parameter is the size of the form data that should
	// be loaded in memory, the remaining being put in temporary files.
	reader, err := r.MultipartReader()
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
		return
	}

	// Read multipart data and save in memory and in the disk if needed
	form, err := reader.ReadForm(maxFormMemory)
	if err != nil {
		logger.LogIf(ctx, err, logger.Application)
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
		return
	}

	// Remove all tmp files created during multipart upload
	defer form.RemoveAll()

	// Extract all form fields
	fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(ctx, form)
	if err != nil {
		logger.LogIf(ctx, err, logger.Application)
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
		return
	}

	// Check if file is provided, error out otherwise.
	if fileBody == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPOSTFileRequired), r.URL)
		return
	}

	// Close multipart file
	defer fileBody.Close()

	formValues.Set("Bucket", bucket)
	if fileName != "" && strings.Contains(formValues.Get("Key"), "$(unknown)") {
		// S3 feature to replace $(unknown) found in Key form field
		// by the filename attribute passed in multipart
		formValues.Set("Key", strings.ReplaceAll(formValues.Get("Key"), "$(unknown)", fileName))
	}
	object := trimLeadingSlash(formValues.Get("Key"))

	successRedirect := formValues.Get("success_action_redirect")
	successStatus := formValues.Get("success_action_status")
	var redirectURL *url.URL
	if successRedirect != "" {
		// Parse up-front so a malformed redirect fails before any upload work.
		redirectURL, err = url.Parse(successRedirect)
		if err != nil {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
			return
		}
	}

	// Verify policy signature.
	cred, errCode := doesPolicySignatureMatch(formValues)
	if errCode != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
		return
	}

	// Once signature is validated, check if the user has
	// explicit permissions for the user.
	if !globalIAMSys.IsAllowed(iampolicy.Args{
		AccountName:     cred.AccessKey,
		Groups:          cred.Groups,
		Action:          iampolicy.PutObjectAction,
		ConditionValues: getConditionValues(r, "", cred.AccessKey, cred.Claims),
		BucketName:      bucket,
		ObjectName:      object,
		IsOwner:         globalActiveCred.AccessKey == cred.AccessKey,
		Claims:          cred.Claims,
	}) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
		return
	}

	policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
	if err != nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
		return
	}

	// Handle policy if it is set.
	if len(policyBytes) > 0 {
		postPolicyForm, err := parsePostPolicyForm(bytes.NewReader(policyBytes))
		if err != nil {
			// Surface the parse error detail in the API error description.
			errAPI := errorCodes.ToAPIErr(ErrPostPolicyConditionInvalidFormat)
			errAPI.Description = fmt.Sprintf("%s '(%s)'", errAPI.Description, err)
			writeErrorResponse(ctx, w, errAPI, r.URL)
			return
		}

		// Make sure formValues adhere to policy restrictions.
		if err = checkPostPolicy(formValues, postPolicyForm); err != nil {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErrWithErr(ErrAccessDenied, err), r.URL)
			return
		}

		// Ensure that the object size is within expected range, also the file size
		// should not exceed the maximum single Put size (5 GiB)
		lengthRange := postPolicyForm.Conditions.ContentLengthRange
		if lengthRange.Valid {
			if fileSize < lengthRange.Min {
				writeErrorResponse(ctx, w, toAPIError(ctx, errDataTooSmall), r.URL)
				return
			}
			if fileSize > lengthRange.Max || isMaxObjectSize(fileSize) {
				writeErrorResponse(ctx, w, toAPIError(ctx, errDataTooLarge), r.URL)
				return
			}
		}
	}

	// Extract metadata to be saved from received Form.
	metadata := make(map[string]string)
	err = extractMetadataFromMime(ctx, textproto.MIMEHeader(formValues), metadata)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Wrap the file stream in a hash.Reader; ETag/content checksums are not
	// pre-supplied here (empty md5/sha256 arguments).
	hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
	rawReader := hashReader
	pReader := NewPutObjReader(rawReader)
	var objectEncryptionKey crypto.ObjectKey

	// Check if bucket encryption is enabled
	sseConfig, _ := globalBucketSSEConfigSys.Get(bucket)
	sseConfig.Apply(r.Header, sse.ApplyOptions{
		AutoEncrypt: globalAutoEncryption,
		Passthrough: globalIsGateway && globalGatewayName == S3BackendGateway,
	})

	// get gateway encryption options
	var opts ObjectOptions
	opts, err = putOpts(ctx, r, bucket, object, metadata)
	if err != nil {
		writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
		return
	}
	if objectAPI.IsEncryptionSupported() {
		// SSE applies to regular objects only; directory markers (trailing
		// slash keys) are never encrypted.
		if _, ok := crypto.IsRequested(formValues); ok && !HasSuffix(object, SlashSeparator) { // handle SSE requests
			if crypto.SSECopy.IsRequested(r.Header) {
				writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL)
				return
			}

			var (
				reader io.Reader
				keyID  string
				key    []byte
				kmsCtx kms.Context
			)
			// Collect the key material appropriate for the requested SSE kind.
			kind, _ := crypto.IsRequested(formValues)
			switch kind {
			case crypto.SSEC:
				key, err = ParseSSECustomerHeader(formValues)
				if err != nil {
					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
					return
				}
			case crypto.S3KMS:
				keyID, kmsCtx, err = crypto.S3KMS.ParseHTTP(formValues)
				if err != nil {
					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
					return
				}
			}
			reader, objectEncryptionKey, err = newEncryptReader(hashReader, kind, keyID, key, bucket, object, metadata, kmsCtx)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}
			info := ObjectInfo{Size: fileSize}
			// do not try to verify encrypted content
			hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}
			pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}
		}
	}

	objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, opts)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// We must not use the http.Header().Set method here because some (broken)
	// clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive).
	// Therefore, we have to set the ETag directly as map entry.
	w.Header()[xhttp.ETag] = []string{`"` + objInfo.ETag + `"`}

	// Set the relevant version ID as part of the response header.
	if objInfo.VersionID != "" {
		w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
	}

	w.Header().Set(xhttp.Location, getObjectLocation(r, globalDomainNames, bucket, object))

	// Notify object created event.
	// Deferred so the event fires after the response below has been written.
	defer sendEvent(eventArgs{
		EventName:    event.ObjectCreatedPost,
		BucketName:   objInfo.Bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})

	if successRedirect != "" {
		// Replace raw query params..
		redirectURL.RawQuery = getRedirectPostRawQuery(objInfo)
		writeRedirectSeeOther(w, redirectURL.String())
		return
	}

	// Decide what http response to send depending on success_action_status parameter
	switch successStatus {
	case "201":
		resp := encodeResponse(PostResponse{
			Bucket:   objInfo.Bucket,
			Key:      objInfo.Name,
			ETag:     `"` + objInfo.ETag + `"`,
			Location: w.Header().Get(xhttp.Location),
		})
		writeResponse(w, http.StatusCreated, resp, mimeXML)
	case "200":
		writeSuccessResponseHeadersOnly(w)
	default:
		writeSuccessNoContent(w)
	}
}
2021-03-02 02:10:33 -05:00
// GetBucketPolicyStatusHandler - Retrieves the policy status
// for an MinIO bucket, indicating whether the bucket is public.
func ( api objectAPIHandlers ) GetBucketPolicyStatusHandler ( w http . ResponseWriter , r * http . Request ) {
ctx := newContext ( r , w , "GetBucketPolicyStatus" )
defer logger . AuditLog ( ctx , w , r , mustGetClaimsFromToken ( r ) )
vars := mux . Vars ( r )
bucket := vars [ "bucket" ]
objectAPI := api . ObjectAPI ( )
if objectAPI == nil {
writeErrorResponseHeadersOnly ( w , errorCodes . ToAPIErr ( ErrServerNotInitialized ) )
return
}
if s3Error := checkRequestAuthType ( ctx , r , policy . GetBucketPolicyStatusAction , bucket , "" ) ; s3Error != ErrNone {
writeErrorResponseHeadersOnly ( w , errorCodes . ToAPIErr ( s3Error ) )
return
}
// Check if bucket exists.
if _ , err := objectAPI . GetBucketInfo ( ctx , bucket ) ; err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2021-03-02 02:10:33 -05:00
return
}
// Check if anonymous (non-owner) has access to list objects.
readable := globalPolicySys . IsAllowed ( policy . Args {
Action : policy . ListBucketAction ,
BucketName : bucket ,
ConditionValues : getConditionValues ( r , "" , "" , nil ) ,
IsOwner : false ,
} )
// Check if anonymous (non-owner) has access to upload objects.
writable := globalPolicySys . IsAllowed ( policy . Args {
Action : policy . PutObjectAction ,
BucketName : bucket ,
ConditionValues : getConditionValues ( r , "" , "" , nil ) ,
IsOwner : false ,
} )
encodedSuccessResponse := encodeResponse ( PolicyStatus {
IsPublic : func ( ) string {
// Silly to have special 'boolean' values yes
// but complying with silly implementation
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html
if readable && writable {
return "TRUE"
}
return "FALSE"
} ( ) ,
} )
writeSuccessResponseXML ( w , encodedSuccessResponse )
}
2015-06-30 23:15:48 -04:00
// HeadBucketHandler - HEAD Bucket
2015-06-30 17:42:29 -04:00
// ----------
// This operation is useful to determine if a bucket exists.
// The operation returns a 200 OK if the bucket exists and you
// have permission to access it. Otherwise, the operation might
// return responses such as 404 Not Found and 403 Forbidden.
2016-04-12 15:45:15 -04:00
func ( api objectAPIHandlers ) HeadBucketHandler ( w http . ResponseWriter , r * http . Request ) {
2018-07-20 21:46:32 -04:00
ctx := newContext ( r , w , "HeadBucket" )
2018-03-14 15:01:47 -04:00
2021-01-26 16:21:51 -05:00
defer logger . AuditLog ( ctx , w , r , mustGetClaimsFromToken ( r ) )
2018-10-12 15:25:59 -04:00
2016-02-15 20:42:39 -05:00
vars := mux . Vars ( r )
2015-06-30 17:42:29 -04:00
bucket := vars [ "bucket" ]
2015-07-02 23:31:22 -04:00
2016-08-10 21:47:49 -04:00
objectAPI := api . ObjectAPI ( )
if objectAPI == nil {
2019-02-12 04:25:52 -05:00
writeErrorResponseHeadersOnly ( w , errorCodes . ToAPIErr ( ErrServerNotInitialized ) )
2016-08-10 21:47:49 -04:00
return
}
2016-11-21 16:51:05 -05:00
2018-04-24 18:53:30 -04:00
if s3Error := checkRequestAuthType ( ctx , r , policy . ListBucketAction , bucket , "" ) ; s3Error != ErrNone {
2019-02-12 04:25:52 -05:00
writeErrorResponseHeadersOnly ( w , errorCodes . ToAPIErr ( s3Error ) )
accessPolicy: Implement Put, Get, Delete access policy.
This patch implements Get,Put,Delete bucket policies
Supporting - http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
Currently supports following actions.
"*": true,
"s3:*": true,
"s3:GetObject": true,
"s3:ListBucket": true,
"s3:PutObject": true,
"s3:CreateBucket": true,
"s3:GetBucketLocation": true,
"s3:DeleteBucket": true,
"s3:DeleteObject": true,
"s3:AbortMultipartUpload": true,
"s3:ListBucketMultipartUploads": true,
"s3:ListMultipartUploadParts": true,
following conditions for "StringEquals" and "StringNotEquals"
"s3:prefix", "s3:max-keys"
2016-02-03 19:46:56 -05:00
return
2016-02-04 15:52:25 -05:00
}
2018-04-24 18:53:30 -04:00
2018-03-28 17:14:06 -04:00
getBucketInfo := objectAPI . GetBucketInfo
2019-08-09 20:09:08 -04:00
2018-03-28 17:14:06 -04:00
if _ , err := getBucketInfo ( ctx , bucket ) ; err != nil {
2019-02-12 04:25:52 -05:00
writeErrorResponseHeadersOnly ( w , toAPIError ( ctx , err ) )
2015-08-03 19:17:21 -04:00
return
}
2017-01-06 03:37:00 -05:00
2021-12-06 05:59:51 -05:00
writeResponse ( w , http . StatusOK , nil , mimeXML )
2015-06-30 17:42:29 -04:00
}
2015-10-16 14:26:01 -04:00
// DeleteBucketHandler - Delete bucket
2016-04-12 15:45:15 -04:00
func ( api objectAPIHandlers ) DeleteBucketHandler ( w http . ResponseWriter , r * http . Request ) {
2018-07-20 21:46:32 -04:00
ctx := newContext ( r , w , "DeleteBucket" )
2018-03-14 15:01:47 -04:00
2021-01-26 16:21:51 -05:00
defer logger . AuditLog ( ctx , w , r , mustGetClaimsFromToken ( r ) )
2018-10-12 15:25:59 -04:00
2018-04-24 18:53:30 -04:00
vars := mux . Vars ( r )
bucket := vars [ "bucket" ]
2020-04-06 20:51:05 -04:00
objectAPI := api . ObjectAPI ( )
if objectAPI == nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrServerNotInitialized ) , r . URL )
2020-04-06 20:51:05 -04:00
return
}
2020-07-08 20:36:56 -04:00
// Verify if the caller has sufficient permissions.
if s3Error := checkRequestAuthType ( ctx , r , policy . DeleteBucketAction , bucket , "" ) ; s3Error != ErrNone {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( s3Error ) , r . URL )
2020-07-08 20:36:56 -04:00
return
}
2020-03-28 00:52:59 -04:00
forceDelete := false
2020-04-06 20:51:05 -04:00
if value := r . Header . Get ( xhttp . MinIOForceDelete ) ; value != "" {
2020-07-08 20:36:56 -04:00
var err error
forceDelete , err = strconv . ParseBool ( value )
if err != nil {
apiErr := errorCodes . ToAPIErr ( ErrInvalidRequest )
apiErr . Description = err . Error ( )
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , apiErr , r . URL )
2020-03-28 00:52:59 -04:00
return
}
2020-07-08 20:36:56 -04:00
// if force delete header is set, we need to evaluate the policy anyways
// regardless of it being true or not.
2020-04-06 20:51:05 -04:00
if s3Error := checkRequestAuthType ( ctx , r , policy . ForceDeleteBucketAction , bucket , "" ) ; s3Error != ErrNone {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( s3Error ) , r . URL )
2020-04-06 20:51:05 -04:00
return
}
2016-08-10 21:47:49 -04:00
2020-07-08 20:36:56 -04:00
if forceDelete {
if rcfg , _ := globalBucketObjectLockSys . Get ( bucket ) ; rcfg . LockEnabled {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrMethodNotAllowed ) , r . URL )
2020-07-08 20:36:56 -04:00
return
}
2020-05-19 16:53:54 -04:00
}
2016-02-04 15:52:25 -05:00
}
2021-05-14 15:40:54 -04:00
if globalDNSConfig != nil {
if err := globalDNSConfig . Delete ( bucket ) ; err != nil {
logger . LogIf ( ctx , fmt . Errorf ( "Unable to delete bucket DNS entry %w, please delete it manually" , err ) )
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2021-05-14 15:40:54 -04:00
return
}
}
2018-03-28 17:14:06 -04:00
deleteBucket := objectAPI . DeleteBucket
2019-08-09 20:09:08 -04:00
2016-07-24 01:51:12 -04:00
// Attempt to delete bucket.
2021-10-06 13:24:40 -04:00
if err := deleteBucket ( ctx , bucket , DeleteBucketOptions { Force : forceDelete } ) ; err != nil {
2021-05-14 15:40:54 -04:00
apiErr := toAPIError ( ctx , err )
if _ , ok := err . ( BucketNotEmpty ) ; ok {
if globalBucketVersioningSys . Enabled ( bucket ) || globalBucketVersioningSys . Suspended ( bucket ) {
apiErr . Description = "The bucket you tried to delete is not empty. You must delete all versions in the bucket."
}
2020-06-12 23:04:01 -04:00
}
2021-05-14 15:40:54 -04:00
if globalDNSConfig != nil {
if err2 := globalDNSConfig . Put ( bucket ) ; err2 != nil {
2021-12-14 17:09:57 -05:00
logger . LogIf ( ctx , fmt . Errorf ( "Unable to restore bucket DNS entry %w, please fix it manually" , err2 ) )
2021-05-14 15:40:54 -04:00
}
}
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , apiErr , r . URL )
2015-10-16 14:26:01 -04:00
return
}
accessPolicy: Implement Put, Get, Delete access policy.
This patch implements Get,Put,Delete bucket policies
Supporting - http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
Currently supports following actions.
"*": true,
"s3:*": true,
"s3:GetObject": true,
"s3:ListBucket": true,
"s3:PutObject": true,
"s3:CreateBucket": true,
"s3:GetBucketLocation": true,
"s3:DeleteBucket": true,
"s3:DeleteObject": true,
"s3:AbortMultipartUpload": true,
"s3:ListBucketMultipartUploads": true,
"s3:ListMultipartUploadParts": true,
following conditions for "StringEquals" and "StringNotEquals"
"s3:prefix", "s3:max-keys"
2016-02-03 19:46:56 -05:00
2020-06-12 23:04:01 -04:00
globalNotificationSys . DeleteBucketMetadata ( ctx , bucket )
2022-02-10 13:16:52 -05:00
globalReplicationPool . deleteResyncMetadata ( ctx , bucket )
2021-10-06 19:36:31 -04:00
// Call site replication hook.
if err := globalSiteReplicationSys . DeleteBucketHook ( ctx , bucket , forceDelete ) ; err != nil {
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
return
}
accessPolicy: Implement Put, Get, Delete access policy.
This patch implements Get,Put,Delete bucket policies
Supporting - http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
Currently supports following actions.
"*": true,
"s3:*": true,
"s3:GetObject": true,
"s3:ListBucket": true,
"s3:PutObject": true,
"s3:CreateBucket": true,
"s3:GetBucketLocation": true,
"s3:DeleteBucket": true,
"s3:DeleteObject": true,
"s3:AbortMultipartUpload": true,
"s3:ListBucketMultipartUploads": true,
"s3:ListMultipartUploadParts": true,
following conditions for "StringEquals" and "StringNotEquals"
"s3:prefix", "s3:max-keys"
2016-02-03 19:46:56 -05:00
// Write success response.
2015-10-16 23:02:37 -04:00
writeSuccessNoContent ( w )
2020-07-20 15:52:49 -04:00
sendEvent ( eventArgs {
EventName : event . BucketRemoved ,
BucketName : bucket ,
ReqParams : extractReqParams ( r ) ,
RespElements : extractRespElements ( w ) ,
UserAgent : r . UserAgent ( ) ,
Host : handlers . GetSourceIP ( r ) ,
} )
2015-10-16 14:26:01 -04:00
}
2019-11-12 17:50:18 -05:00
// PutBucketObjectLockConfigHandler - PUT Bucket object lock configuration.
// ----------
// Places an Object Lock configuration on the specified bucket. The rule
// specified in the Object Lock configuration will be applied by default
// to every new object placed in the specified bucket.
func ( api objectAPIHandlers ) PutBucketObjectLockConfigHandler ( w http . ResponseWriter , r * http . Request ) {
ctx := newContext ( r , w , "PutBucketObjectLockConfig" )
2021-01-26 16:21:51 -05:00
defer logger . AuditLog ( ctx , w , r , mustGetClaimsFromToken ( r ) )
2019-11-12 17:50:18 -05:00
vars := mux . Vars ( r )
bucket := vars [ "bucket" ]
objectAPI := api . ObjectAPI ( )
if objectAPI == nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrServerNotInitialized ) , r . URL )
2019-11-12 17:50:18 -05:00
return
}
2020-07-22 20:39:40 -04:00
if ! globalIsErasure {
writeErrorResponseJSON ( ctx , w , errorCodes . ToAPIErr ( ErrNotImplemented ) , r . URL )
return
}
2019-11-20 16:18:09 -05:00
if s3Error := checkRequestAuthType ( ctx , r , policy . PutBucketObjectLockConfigurationAction , bucket , "" ) ; s3Error != ErrNone {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( s3Error ) , r . URL )
2019-11-20 16:18:09 -05:00
return
}
2020-05-08 16:44:44 -04:00
2020-01-16 18:41:56 -05:00
config , err := objectlock . ParseObjectLockConfig ( r . Body )
2019-11-12 17:50:18 -05:00
if err != nil {
2019-11-13 11:21:41 -05:00
apiErr := errorCodes . ToAPIErr ( ErrMalformedXML )
apiErr . Description = err . Error ( )
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , apiErr , r . URL )
2019-11-12 17:50:18 -05:00
return
}
2020-05-01 12:53:14 -04:00
2020-05-19 16:53:54 -04:00
configData , err := xml . Marshal ( config )
if err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2019-11-12 17:50:18 -05:00
return
}
2020-05-01 12:53:14 -04:00
2020-05-23 13:01:01 -04:00
// Deny object locking configuration settings on existing buckets without object lock enabled.
if _ , err = globalBucketMetadataSys . GetObjectLockConfig ( bucket ) ; err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-05-23 13:01:01 -04:00
return
}
2022-01-31 20:27:43 -05:00
if err = globalBucketMetadataSys . Update ( ctx , bucket , objectLockConfig , configData ) ; err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-05-19 16:53:54 -04:00
return
2019-11-12 17:50:18 -05:00
}
2021-10-06 19:36:31 -04:00
// Call site replication hook.
//
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64 . StdEncoding . EncodeToString ( configData )
if err = globalSiteReplicationSys . BucketMetaHook ( ctx , madmin . SRBucketMeta {
Type : madmin . SRBucketMetaTypeObjectLockConfig ,
Bucket : bucket ,
ObjectLockConfig : & cfgStr ,
} ) ; err != nil {
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
return
}
2019-11-12 17:50:18 -05:00
// Write success response.
writeSuccessResponseHeadersOnly ( w )
}
// GetBucketObjectLockConfigHandler - GET Bucket object lock configuration.
// ----------
// Gets the Object Lock configuration for a bucket. The rule specified in
// the Object Lock configuration will be applied by default to every new
// object placed in the specified bucket.
func ( api objectAPIHandlers ) GetBucketObjectLockConfigHandler ( w http . ResponseWriter , r * http . Request ) {
ctx := newContext ( r , w , "GetBucketObjectLockConfig" )
2021-01-26 16:21:51 -05:00
defer logger . AuditLog ( ctx , w , r , mustGetClaimsFromToken ( r ) )
2019-11-12 17:50:18 -05:00
vars := mux . Vars ( r )
bucket := vars [ "bucket" ]
objectAPI := api . ObjectAPI ( )
if objectAPI == nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrServerNotInitialized ) , r . URL )
2019-11-12 17:50:18 -05:00
return
}
2020-05-19 16:53:54 -04:00
2019-11-20 16:18:09 -05:00
// check if user has permissions to perform this operation
if s3Error := checkRequestAuthType ( ctx , r , policy . GetBucketObjectLockConfigurationAction , bucket , "" ) ; s3Error != ErrNone {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( s3Error ) , r . URL )
2019-11-20 16:18:09 -05:00
return
}
2020-05-01 12:53:14 -04:00
2020-05-20 13:18:15 -04:00
config , err := globalBucketMetadataSys . GetObjectLockConfig ( bucket )
if err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-05-20 13:18:15 -04:00
return
}
configData , err := xml . Marshal ( config )
2019-11-12 17:50:18 -05:00
if err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-05-08 16:44:44 -04:00
return
2019-11-12 17:50:18 -05:00
}
// Write success response.
writeSuccessResponseXML ( w , configData )
}
2020-05-05 17:18:13 -04:00
// PutBucketTaggingHandler - PUT Bucket tagging.
// ----------
func ( api objectAPIHandlers ) PutBucketTaggingHandler ( w http . ResponseWriter , r * http . Request ) {
ctx := newContext ( r , w , "PutBucketTagging" )
2021-01-26 16:21:51 -05:00
defer logger . AuditLog ( ctx , w , r , mustGetClaimsFromToken ( r ) )
2020-05-05 17:18:13 -04:00
vars := mux . Vars ( r )
bucket := vars [ "bucket" ]
objectAPI := api . ObjectAPI ( )
if objectAPI == nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrServerNotInitialized ) , r . URL )
2020-05-05 17:18:13 -04:00
return
}
2021-09-20 20:41:01 -04:00
// Check if bucket exists.
if _ , err := objectAPI . GetBucketInfo ( ctx , bucket ) ; err != nil {
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
return
}
2020-05-05 17:18:13 -04:00
if s3Error := checkRequestAuthType ( ctx , r , policy . PutBucketTaggingAction , bucket , "" ) ; s3Error != ErrNone {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( s3Error ) , r . URL )
2020-05-05 17:18:13 -04:00
return
}
2020-05-08 16:44:44 -04:00
2020-05-05 17:18:13 -04:00
tags , err := tags . ParseBucketXML ( io . LimitReader ( r . Body , r . ContentLength ) )
if err != nil {
apiErr := errorCodes . ToAPIErr ( ErrMalformedXML )
apiErr . Description = err . Error ( )
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , apiErr , r . URL )
2020-05-05 17:18:13 -04:00
return
}
2020-05-08 16:44:44 -04:00
2020-05-19 16:53:54 -04:00
configData , err := xml . Marshal ( tags )
if err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-05-19 16:53:54 -04:00
return
}
2022-01-31 20:27:43 -05:00
if err = globalBucketMetadataSys . Update ( ctx , bucket , bucketTaggingConfig , configData ) ; err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-05-05 17:18:13 -04:00
return
}
2021-10-06 19:36:31 -04:00
// Call site replication hook.
//
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64 . StdEncoding . EncodeToString ( configData )
if err = globalSiteReplicationSys . BucketMetaHook ( ctx , madmin . SRBucketMeta {
Type : madmin . SRBucketMetaTypeTags ,
Bucket : bucket ,
Tags : & cfgStr ,
} ) ; err != nil {
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
return
}
2020-05-05 17:18:13 -04:00
// Write success response.
writeSuccessResponseHeadersOnly ( w )
}
// GetBucketTaggingHandler - GET Bucket tagging.
// ----------
func ( api objectAPIHandlers ) GetBucketTaggingHandler ( w http . ResponseWriter , r * http . Request ) {
ctx := newContext ( r , w , "GetBucketTagging" )
2021-01-26 16:21:51 -05:00
defer logger . AuditLog ( ctx , w , r , mustGetClaimsFromToken ( r ) )
2020-05-05 17:18:13 -04:00
vars := mux . Vars ( r )
bucket := vars [ "bucket" ]
objectAPI := api . ObjectAPI ( )
if objectAPI == nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrServerNotInitialized ) , r . URL )
2020-05-05 17:18:13 -04:00
return
}
2020-05-08 16:44:44 -04:00
2020-05-05 17:18:13 -04:00
// check if user has permissions to perform this operation
if s3Error := checkRequestAuthType ( ctx , r , policy . GetBucketTaggingAction , bucket , "" ) ; s3Error != ErrNone {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( s3Error ) , r . URL )
2020-05-05 17:18:13 -04:00
return
}
2020-05-08 16:44:44 -04:00
2020-05-20 13:18:15 -04:00
config , err := globalBucketMetadataSys . GetTaggingConfig ( bucket )
if err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-05-20 13:18:15 -04:00
return
}
configData , err := xml . Marshal ( config )
2020-05-08 16:44:44 -04:00
if err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-05-05 17:18:13 -04:00
return
}
// Write success response.
writeSuccessResponseXML ( w , configData )
}
// DeleteBucketTaggingHandler - DELETE Bucket tagging.
// ----------
func ( api objectAPIHandlers ) DeleteBucketTaggingHandler ( w http . ResponseWriter , r * http . Request ) {
ctx := newContext ( r , w , "DeleteBucketTagging" )
2021-01-26 16:21:51 -05:00
defer logger . AuditLog ( ctx , w , r , mustGetClaimsFromToken ( r ) )
2020-05-05 17:18:13 -04:00
vars := mux . Vars ( r )
bucket := vars [ "bucket" ]
objectAPI := api . ObjectAPI ( )
if objectAPI == nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrServerNotInitialized ) , r . URL )
2020-05-05 17:18:13 -04:00
return
}
if s3Error := checkRequestAuthType ( ctx , r , policy . PutBucketTaggingAction , bucket , "" ) ; s3Error != ErrNone {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( s3Error ) , r . URL )
2020-05-05 17:18:13 -04:00
return
}
2022-01-31 20:27:43 -05:00
if err := globalBucketMetadataSys . Update ( ctx , bucket , bucketTaggingConfig , nil ) ; err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2021-10-06 19:36:31 -04:00
return
}
if err := globalSiteReplicationSys . BucketMetaHook ( ctx , madmin . SRBucketMeta {
Type : madmin . SRBucketMetaTypeTags ,
Bucket : bucket ,
} ) ; err != nil {
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-05-05 17:18:13 -04:00
return
}
// Write success response.
writeSuccessResponseHeadersOnly ( w )
}
2020-07-21 20:49:56 -04:00
// PutBucketReplicationConfigHandler - PUT Bucket replication configuration.
// ----------
// Add a replication configuration on the specified bucket as specified in https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html
func ( api objectAPIHandlers ) PutBucketReplicationConfigHandler ( w http . ResponseWriter , r * http . Request ) {
ctx := newContext ( r , w , "PutBucketReplicationConfig" )
2021-01-26 16:21:51 -05:00
defer logger . AuditLog ( ctx , w , r , mustGetClaimsFromToken ( r ) )
2020-07-21 20:49:56 -04:00
vars := mux . Vars ( r )
bucket := vars [ "bucket" ]
objectAPI := api . ObjectAPI ( )
if objectAPI == nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrServerNotInitialized ) , r . URL )
2020-07-21 20:49:56 -04:00
return
}
2020-07-22 20:39:40 -04:00
if ! globalIsErasure {
writeErrorResponseJSON ( ctx , w , errorCodes . ToAPIErr ( ErrNotImplemented ) , r . URL )
return
}
2020-07-21 20:49:56 -04:00
if s3Error := checkRequestAuthType ( ctx , r , policy . PutReplicationConfigurationAction , bucket , "" ) ; s3Error != ErrNone {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( s3Error ) , r . URL )
2020-07-21 20:49:56 -04:00
return
}
// Check if bucket exists.
if _ , err := objectAPI . GetBucketInfo ( ctx , bucket ) ; err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-07-21 20:49:56 -04:00
return
}
2021-12-15 13:37:08 -05:00
if globalSiteReplicationSys . isEnabled ( ) {
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrReplicationDenyEditError ) , r . URL )
return
}
2020-07-21 20:49:56 -04:00
if versioned := globalBucketVersioningSys . Enabled ( bucket ) ; ! versioned {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrReplicationNeedsVersioningError ) , r . URL )
2020-07-21 20:49:56 -04:00
return
}
replicationConfig , err := replication . ParseConfig ( io . LimitReader ( r . Body , r . ContentLength ) )
if err != nil {
apiErr := errorCodes . ToAPIErr ( ErrMalformedXML )
apiErr . Description = err . Error ( )
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , apiErr , r . URL )
2020-07-21 20:49:56 -04:00
return
}
2021-09-21 16:03:20 -04:00
sameTarget , apiErr := validateReplicationDestination ( ctx , bucket , replicationConfig )
if apiErr != noError {
writeErrorResponse ( ctx , w , apiErr , r . URL )
2020-07-21 20:49:56 -04:00
return
}
// Validate the received bucket replication config
if err = replicationConfig . Validate ( bucket , sameTarget ) ; err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-07-21 20:49:56 -04:00
return
}
configData , err := xml . Marshal ( replicationConfig )
if err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-07-21 20:49:56 -04:00
return
}
2022-01-31 20:27:43 -05:00
if err = globalBucketMetadataSys . Update ( ctx , bucket , bucketReplicationConfig , configData ) ; err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-07-21 20:49:56 -04:00
return
}
// Write success response.
writeSuccessResponseHeadersOnly ( w )
}
// GetBucketReplicationConfigHandler - GET Bucket replication configuration.
// ----------
// Gets the replication configuration for a bucket.
func ( api objectAPIHandlers ) GetBucketReplicationConfigHandler ( w http . ResponseWriter , r * http . Request ) {
ctx := newContext ( r , w , "GetBucketReplicationConfig" )
2021-01-26 16:21:51 -05:00
defer logger . AuditLog ( ctx , w , r , mustGetClaimsFromToken ( r ) )
2020-07-21 20:49:56 -04:00
vars := mux . Vars ( r )
bucket := vars [ "bucket" ]
objectAPI := api . ObjectAPI ( )
if objectAPI == nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrServerNotInitialized ) , r . URL )
2020-07-21 20:49:56 -04:00
return
}
// check if user has permissions to perform this operation
if s3Error := checkRequestAuthType ( ctx , r , policy . GetReplicationConfigurationAction , bucket , "" ) ; s3Error != ErrNone {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( s3Error ) , r . URL )
2020-07-21 20:49:56 -04:00
return
}
// Check if bucket exists.
if _ , err := objectAPI . GetBucketInfo ( ctx , bucket ) ; err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-07-21 20:49:56 -04:00
return
}
config , err := globalBucketMetadataSys . GetReplicationConfig ( ctx , bucket )
if err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-07-21 20:49:56 -04:00
return
}
configData , err := xml . Marshal ( config )
if err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-07-21 20:49:56 -04:00
return
}
// Write success response.
writeSuccessResponseXML ( w , configData )
}
// DeleteBucketReplicationConfigHandler - DELETE Bucket replication config.
// ----------
func ( api objectAPIHandlers ) DeleteBucketReplicationConfigHandler ( w http . ResponseWriter , r * http . Request ) {
ctx := newContext ( r , w , "DeleteBucketReplicationConfig" )
2021-01-26 16:21:51 -05:00
defer logger . AuditLog ( ctx , w , r , mustGetClaimsFromToken ( r ) )
2020-07-21 20:49:56 -04:00
vars := mux . Vars ( r )
bucket := vars [ "bucket" ]
objectAPI := api . ObjectAPI ( )
if objectAPI == nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrServerNotInitialized ) , r . URL )
2020-07-21 20:49:56 -04:00
return
}
if s3Error := checkRequestAuthType ( ctx , r , policy . PutReplicationConfigurationAction , bucket , "" ) ; s3Error != ErrNone {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( s3Error ) , r . URL )
2020-07-21 20:49:56 -04:00
return
}
// Check if bucket exists.
if _ , err := objectAPI . GetBucketInfo ( ctx , bucket ) ; err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-07-21 20:49:56 -04:00
return
}
2021-12-15 13:37:08 -05:00
if globalSiteReplicationSys . isEnabled ( ) {
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrReplicationDenyEditError ) , r . URL )
return
}
2022-01-31 20:27:43 -05:00
if err := globalBucketMetadataSys . Update ( ctx , bucket , bucketReplicationConfig , nil ) ; err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2020-07-21 20:49:56 -04:00
return
}
// Write success response.
writeSuccessResponseHeadersOnly ( w )
}
2021-04-03 12:03:42 -04:00
// GetBucketReplicationMetricsHandler - GET Bucket replication metrics.
// ----------
// Gets the replication metrics for a bucket.
func ( api objectAPIHandlers ) GetBucketReplicationMetricsHandler ( w http . ResponseWriter , r * http . Request ) {
ctx := newContext ( r , w , "GetBucketReplicationMetrics" )
defer logger . AuditLog ( ctx , w , r , mustGetClaimsFromToken ( r ) )
vars := mux . Vars ( r )
bucket := vars [ "bucket" ]
objectAPI := api . ObjectAPI ( )
if objectAPI == nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrServerNotInitialized ) , r . URL )
2021-04-03 12:03:42 -04:00
return
}
// check if user has permissions to perform this operation
if s3Error := checkRequestAuthType ( ctx , r , policy . GetReplicationConfigurationAction , bucket , "" ) ; s3Error != ErrNone {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( s3Error ) , r . URL )
2021-04-03 12:03:42 -04:00
return
}
// Check if bucket exists.
if _ , err := objectAPI . GetBucketInfo ( ctx , bucket ) ; err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2021-04-03 12:03:42 -04:00
return
}
2021-09-18 16:31:35 -04:00
var usageInfo BucketUsageInfo
dataUsageInfo , err := loadDataUsageFromBackend ( ctx , objectAPI )
if err == nil && ! dataUsageInfo . LastUpdate . IsZero ( ) {
usageInfo = dataUsageInfo . BucketsUsage [ bucket ]
}
2021-04-03 12:03:42 -04:00
2021-11-19 17:46:14 -05:00
w . Header ( ) . Set ( xhttp . ContentType , string ( mimeJSON ) )
enc := json . NewEncoder ( w )
if err = enc . Encode ( getLatestReplicationStats ( bucket , usageInfo ) ) ; err != nil {
2021-09-18 16:31:35 -04:00
writeErrorResponseJSON ( ctx , w , toAdminAPIErr ( ctx , err ) , r . URL )
2021-04-03 12:03:42 -04:00
return
}
}
2021-06-01 22:59:11 -04:00
2022-02-10 13:16:52 -05:00
// ResetBucketReplicationStartHandler - starts a replication reset for all objects in a bucket which
2021-06-01 22:59:11 -04:00
// qualify for replication and re-sync the object(s) to target, provided ExistingObjectReplication is
// enabled for the qualifying rule. This API is a MinIO only extension provided for situations where
2022-02-10 13:16:52 -05:00
// remote target is entirely lost,and previously replicated objects need to be re-synced. If resync is
// already in progress it returns an error
func ( api objectAPIHandlers ) ResetBucketReplicationStartHandler ( w http . ResponseWriter , r * http . Request ) {
ctx := newContext ( r , w , "ResetBucketReplicationStart" )
2021-06-01 22:59:11 -04:00
defer logger . AuditLog ( ctx , w , r , mustGetClaimsFromToken ( r ) )
vars := mux . Vars ( r )
bucket := vars [ "bucket" ]
2021-09-18 16:31:35 -04:00
durationStr := r . URL . Query ( ) . Get ( "older-than" )
arn := r . URL . Query ( ) . Get ( "arn" )
resetID := r . URL . Query ( ) . Get ( "reset-id" )
if resetID == "" {
resetID = mustGetUUID ( )
}
2021-06-01 22:59:11 -04:00
var (
days time . Duration
err error
)
if durationStr != "" {
days , err = time . ParseDuration ( durationStr )
if err != nil {
writeErrorResponse ( ctx , w , toAPIError ( ctx , InvalidArgument {
Bucket : bucket ,
Err : fmt . Errorf ( "invalid query parameter older-than %s for %s : %w" , durationStr , bucket , err ) ,
2021-06-17 23:27:04 -04:00
} ) , r . URL )
2021-06-01 22:59:11 -04:00
}
}
2022-02-10 13:16:52 -05:00
resetBeforeDate := UTCNow ( ) . AddDate ( 0 , 0 , - 1 * int ( days / 24 ) )
2021-06-01 22:59:11 -04:00
objectAPI := api . ObjectAPI ( )
if objectAPI == nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrServerNotInitialized ) , r . URL )
2021-06-01 22:59:11 -04:00
return
}
if s3Error := checkRequestAuthType ( ctx , r , policy . ResetBucketReplicationStateAction , bucket , "" ) ; s3Error != ErrNone {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( s3Error ) , r . URL )
2021-06-01 22:59:11 -04:00
return
}
// Check if bucket exists.
if _ , err := objectAPI . GetBucketInfo ( ctx , bucket ) ; err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2021-06-01 22:59:11 -04:00
return
}
config , err := globalBucketMetadataSys . GetReplicationConfig ( ctx , bucket )
if err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2021-06-01 22:59:11 -04:00
return
}
if ! config . HasActiveRules ( "" , true ) {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrReplicationNoMatchingRuleError ) , r . URL )
2021-06-01 22:59:11 -04:00
return
}
2021-09-18 16:31:35 -04:00
tgtArns := config . FilterTargetArns (
replication . ObjectOpts {
OpType : replication . ResyncReplicationType ,
2022-01-02 12:15:06 -05:00
TargetArn : arn ,
} )
2021-09-18 16:31:35 -04:00
if len ( tgtArns ) == 0 {
writeErrorResponseJSON ( ctx , w , errorCodes . ToAPIErrWithErr ( ErrBadRequest , InvalidArgument {
Bucket : bucket ,
2022-02-10 13:16:52 -05:00
Err : fmt . Errorf ( "Remote target ARN %s missing or ineligible for replication resync" , arn ) ,
2021-09-18 16:31:35 -04:00
} ) , r . URL )
return
}
if len ( tgtArns ) > 1 && arn == "" {
writeErrorResponseJSON ( ctx , w , errorCodes . ToAPIErrWithErr ( ErrBadRequest , InvalidArgument {
Bucket : bucket ,
Err : fmt . Errorf ( "ARN should be specified for replication reset" ) ,
} ) , r . URL )
return
}
var rinfo ResyncTargetsInfo
target := globalBucketTargetSys . GetRemoteBucketTargetByArn ( ctx , bucket , tgtArns [ 0 ] )
2021-06-01 22:59:11 -04:00
target . ResetBeforeDate = UTCNow ( ) . AddDate ( 0 , 0 , - 1 * int ( days / 24 ) )
2021-09-18 16:31:35 -04:00
target . ResetID = resetID
rinfo . Targets = append ( rinfo . Targets , ResyncTarget { Arn : tgtArns [ 0 ] , ResetID : target . ResetID } )
2021-06-01 22:59:11 -04:00
if err = globalBucketTargetSys . SetTarget ( ctx , bucket , & target , true ) ; err != nil {
switch err . ( type ) {
case BucketRemoteConnectionErr :
writeErrorResponseJSON ( ctx , w , errorCodes . ToAPIErrWithErr ( ErrReplicationRemoteConnectionError , err ) , r . URL )
default :
writeErrorResponseJSON ( ctx , w , toAPIError ( ctx , err ) , r . URL )
}
2022-02-10 13:16:52 -05:00
}
if err := startReplicationResync ( ctx , bucket , arn , resetID , resetBeforeDate , objectAPI ) ; err != nil {
writeErrorResponseJSON ( ctx , w , errorCodes . ToAPIErrWithErr ( ErrBadRequest , InvalidArgument {
Bucket : bucket ,
Err : err ,
} ) , r . URL )
2021-06-01 22:59:11 -04:00
return
}
2022-02-10 13:16:52 -05:00
data , err := json . Marshal ( rinfo )
2021-06-01 22:59:11 -04:00
if err != nil {
2022-02-10 13:16:52 -05:00
writeErrorResponseJSON ( ctx , w , toAdminAPIErr ( ctx , err ) , r . URL )
2021-06-01 22:59:11 -04:00
return
}
2022-02-10 13:16:52 -05:00
// Write success response.
writeSuccessResponseJSON ( w , data )
}
// ResetBucketReplicationStatusHandler - returns the status of replication reset.
// This API is a MinIO only extension
func ( api objectAPIHandlers ) ResetBucketReplicationStatusHandler ( w http . ResponseWriter , r * http . Request ) {
ctx := newContext ( r , w , "ResetBucketReplicationStatus" )
defer logger . AuditLog ( ctx , w , r , mustGetClaimsFromToken ( r ) )
vars := mux . Vars ( r )
bucket := vars [ "bucket" ]
arn := r . URL . Query ( ) . Get ( "arn" )
var err error
objectAPI := api . ObjectAPI ( )
if objectAPI == nil {
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( ErrServerNotInitialized ) , r . URL )
return
}
if s3Error := checkRequestAuthType ( ctx , r , policy . ResetBucketReplicationStateAction , bucket , "" ) ; s3Error != ErrNone {
writeErrorResponse ( ctx , w , errorCodes . ToAPIErr ( s3Error ) , r . URL )
return
}
// Check if bucket exists.
if _ , err := objectAPI . GetBucketInfo ( ctx , bucket ) ; err != nil {
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2021-06-01 22:59:11 -04:00
return
}
2022-02-10 13:16:52 -05:00
if _ , err := globalBucketMetadataSys . GetReplicationConfig ( ctx , bucket ) ; err != nil {
2021-06-17 23:27:04 -04:00
writeErrorResponse ( ctx , w , toAPIError ( ctx , err ) , r . URL )
2021-06-01 22:59:11 -04:00
return
}
2022-02-10 13:16:52 -05:00
globalReplicationPool . resyncState . RLock ( )
brs , ok := globalReplicationPool . resyncState . statusMap [ bucket ]
if ! ok {
brs , err = loadBucketResyncMetadata ( ctx , bucket , objectAPI )
if err != nil {
writeErrorResponse ( ctx , w , errorCodes . ToAPIErrWithErr ( ErrBadRequest , InvalidArgument {
Bucket : bucket ,
Err : fmt . Errorf ( "No replication resync status available for %s" , arn ) ,
} ) , r . URL )
}
return
}
var rinfo ResyncTargetsInfo
for tarn , st := range brs . TargetsMap {
if arn != "" && tarn != arn {
continue
}
rinfo . Targets = append ( rinfo . Targets , ResyncTarget {
Arn : tarn ,
ResetID : st . ResyncID ,
StartTime : st . StartTime ,
EndTime : st . EndTime ,
ResyncStatus : st . ResyncStatus . String ( ) ,
ReplicatedSize : st . ReplicatedSize ,
ReplicatedCount : st . ReplicatedCount ,
FailedSize : st . FailedSize ,
FailedCount : st . FailedCount ,
Bucket : st . Bucket ,
Object : st . Object ,
} )
}
globalReplicationPool . resyncState . RUnlock ( )
2021-09-18 16:31:35 -04:00
data , err := json . Marshal ( rinfo )
2021-06-01 22:59:11 -04:00
if err != nil {
writeErrorResponseJSON ( ctx , w , toAdminAPIErr ( ctx , err ) , r . URL )
return
}
// Write success response.
writeSuccessResponseJSON ( w , data )
}