/*
 * Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"encoding/base64"
	"encoding/xml"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"path"
	"path/filepath"
	"strings"

	"github.com/gorilla/mux"

	"github.com/minio/minio-go/pkg/set"
	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/dns"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/handlers"
	"github.com/minio/minio/pkg/hash"
	"github.com/minio/minio/pkg/policy"
	"github.com/minio/minio/pkg/sync/errgroup"
)

// Check if there are buckets on the server without corresponding entries in the
// etcd backend and create the missing entries. Here is the general flow:
// - Range over all the available buckets
// - Check if a bucket has an entry in the etcd backend
// -- If no, make an entry
// -- If yes, check if the IP of the entry matches the local IP. This means the entry is for this instance.
// -- If the IP of the entry doesn't match, the entry is for another instance. Log an error to the console.
func initFederatorBackend(objLayer ObjectLayer) {
	b, err := objLayer.ListBuckets(context.Background())
	if err != nil {
		logger.LogIf(context.Background(), err)
		return
	}

	g := errgroup.WithNErrs(len(b))
	for index := range b {
		index := index // Capture the loop variable for the goroutine below.
		g.Go(func() error {
			r, gerr := globalDNSConfig.Get(b[index].Name)
			if gerr != nil {
				if gerr == dns.ErrNoEntriesFound {
					return globalDNSConfig.Put(b[index].Name)
				}
				return gerr
			}
			if globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
				// There is already an entry for this bucket, with all IP addresses different.
				// This indicates a bucket name collision. Log an error and continue.
				return fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket. Use one of these IP addresses %v to access the bucket", b[index].Name, globalDomainIPs.ToSlice())
			}
			return nil
		}, index)
	}

	for _, err := range g.Wait() {
		if err != nil {
			logger.LogIf(context.Background(), err)
			return
		}
	}
}

// GetBucketLocationHandler - GET Bucket location.
// -------------------------
// This operation returns bucket location.
func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetBucketLocation")

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketLocationAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	getBucketInfo := objectAPI.GetBucketInfo
	if api.CacheAPI() != nil {
		getBucketInfo = api.CacheAPI().GetBucketInfo
	}
	if _, err := getBucketInfo(ctx, bucket); err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	// Generate response.
	encodedSuccessResponse := encodeResponse(LocationResponse{})
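
	// Note: following S3 behavior, an empty LocationConstraint in the response
	// denotes the default region (us-east-1); any other configured region is
	// returned explicitly below.
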
	// Get current region.
	region := globalServerConfig.GetRegion()
	if region != globalMinioDefaultRegion {
		encodedSuccessResponse = encodeResponse(LocationResponse{
			Location: region,
		})
	}

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}

// ListMultipartUploadsHandler - GET Bucket (List Multipart uploads)
// -------------------------
// This operation lists in-progress multipart uploads. An in-progress
// multipart upload is a multipart upload that has been initiated,
// using the Initiate Multipart Upload request, but has not yet been
// completed or aborted. This operation returns at most 1,000 multipart
// uploads in the response.
//
func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListMultipartUploads")

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketMultipartUploadsAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}
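
	// List parameters (prefix, key-marker, upload-id-marker, delimiter and
	// max-uploads) are taken from the request query string, as defined by the
	// S3 ListMultipartUploads API.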
	prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, _ := getBucketMultipartResources(r.URL.Query())
	if maxUploads < 0 {
		writeErrorResponse(w, ErrInvalidMaxUploads, r.URL)
		return
	}
	if keyMarker != "" {
		// Marker not common with prefix is not implemented.
		if !hasPrefix(keyMarker, prefix) {
			writeErrorResponse(w, ErrNotImplemented, r.URL)
			return
		}
	}

	listMultipartsInfo, err := objectAPI.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	// Generate response.
	response := generateListMultipartUploadsResponse(bucket, listMultipartsInfo)
	encodedSuccessResponse := encodeResponse(response)

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}

// ListBucketsHandler - GET Service.
// -----------
// This implementation of the GET operation returns a list of all buckets
// owned by the authenticated sender of the request.
func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListBuckets")

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	listBuckets := objectAPI.ListBuckets
	if api.CacheAPI() != nil {
		listBuckets = api.CacheAPI().ListBuckets
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.ListAllMyBucketsAction, "", ""); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}
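
	// A federated bucket may have more than one DNS record in etcd (one per
	// target host), so the listing below de-duplicates records by bucket name.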

	// If etcd-based DNS federation is configured, list buckets from etcd.
	var bucketsInfo []BucketInfo
	if globalDNSConfig != nil {
		dnsBuckets, err := globalDNSConfig.List()
		if err != nil && err != dns.ErrNoEntriesFound {
			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
			return
		}
		bucketSet := set.NewStringSet()
		for _, dnsRecord := range dnsBuckets {
			// Skip duplicate DNS records for the same bucket.
			if bucketSet.Contains(dnsRecord.Key) {
				continue
			}
			bucketsInfo = append(bucketsInfo, BucketInfo{
				Name:    strings.Trim(dnsRecord.Key, slashSeparator),
				Created: dnsRecord.CreationDate,
			})
			bucketSet.Add(dnsRecord.Key)
		}
	} else {
		// Invoke the list buckets.
		var err error
		bucketsInfo, err = listBuckets(ctx)
		if err != nil {
			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
			return
		}
	}

	// Generate response.
	response := generateListBucketsResponse(bucketsInfo)
	encodedSuccessResponse := encodeResponse(response)

	// Write response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}

// DeleteMultipleObjectsHandler - deletes multiple objects.
func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DeleteMultipleObjects")

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	var s3Error APIErrorCode
	if s3Error = checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, ""); s3Error != ErrNone {
		// In the event access is denied, a 200 response should still be returned
		// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
		if s3Error != ErrAccessDenied {
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
	}

	// Content-Length is required and should be non-zero
	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
	if r.ContentLength <= 0 {
		writeErrorResponse(w, ErrMissingContentLength, r.URL)
		return
	}

	// Content-Md5 is required and should be set
	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
	if _, ok := r.Header["Content-Md5"]; !ok {
		writeErrorResponse(w, ErrMissingContentMD5, r.URL)
		return
	}
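
	// The request body is a Multi-Object Delete XML document, per the S3 API,
	// roughly of the form:
	//
	//   <Delete>
	//     <Quiet>false</Quiet>
	//     <Object><Key>key-1</Key></Object>
	//     <Object><Key>key-2</Key></Object>
	//   </Delete>
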
	// Allocate incoming content length bytes.
	var deleteXMLBytes []byte
	const maxBodySize = 2 * 1000 * 1024 // The max. XML contains 1000 object names (each at most 1024 bytes long) + XML overhead
	if r.ContentLength > maxBodySize {  // Only allocate memory for at most 1000 objects
		deleteXMLBytes = make([]byte, maxBodySize)
	} else {
		deleteXMLBytes = make([]byte, r.ContentLength)
	}

	// Read incoming body XML bytes.
	if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponse(w, ErrInternalError, r.URL)
		return
	}

	// Unmarshal list of keys to be deleted.
	deleteObjects := &DeleteObjectsRequest{}
	if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponse(w, ErrMalformedXML, r.URL)
		return
	}

	// Deny the request if WORM is enabled.
	if globalWORMEnabled {
		// Not required to check whether the given objects exist or not, because
		// DeleteMultipleObject is always successful irrespective of object existence.
		writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
		return
	}

	deleteObject := objectAPI.DeleteObject
	if api.CacheAPI() != nil {
		deleteObject = api.CacheAPI().DeleteObject
	}

	var dErrs = make([]error, len(deleteObjects.Objects))
	for index, object := range deleteObjects.Objects {
		// If the request is denied access, each item
		// should be marked as 'AccessDenied'.
		if s3Error == ErrAccessDenied {
			dErrs[index] = PrefixAccessDenied{
				Bucket: bucket,
				Object: object.ObjectName,
			}
			continue
		}
		dErrs[index] = deleteObject(ctx, bucket, object.ObjectName)
	}

	// Collect deleted objects and errors if any.
	var deletedObjects []ObjectIdentifier
	var deleteErrors []DeleteError
	for index, err := range dErrs {
		object := deleteObjects.Objects[index]
		// Successfully deleted objects are collected separately.
		if err == nil {
			deletedObjects = append(deletedObjects, object)
			continue
		}
		if _, ok := err.(ObjectNotFound); ok {
			// If the object is not found it should be
			// accounted as deleted as per S3 spec.
			deletedObjects = append(deletedObjects, object)
			continue
		}
		// Errors during delete should be collected separately.
		deleteErrors = append(deleteErrors, DeleteError{
			Code:    errorCodeResponse[toAPIErrorCode(err)].Code,
			Message: errorCodeResponse[toAPIErrorCode(err)].Description,
			Key:     object.ObjectName,
		})
	}
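
	// Per the S3 Multi-Object Delete API, quiet mode suppresses the result
	// entries for successfully deleted keys; deleteObjects.Quiet is passed
	// through to the response generator below.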

	// Generate response.
	response := generateMultiDeleteResponse(deleteObjects.Quiet, deletedObjects, deleteErrors)
	encodedSuccessResponse := encodeResponse(response)

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)

	// Get host and port from Request.RemoteAddr failing which
	// fill them with empty strings.
	host, port, err := net.SplitHostPort(handlers.GetSourceIP(r))
	if err != nil {
		host, port = "", ""
	}

	// Notify deleted event for objects.
	for _, dobj := range deletedObjects {
		sendEvent(eventArgs{
			EventName:  event.ObjectRemovedDelete,
			BucketName: bucket,
			Object: ObjectInfo{
				Name: dobj.ObjectName,
			},
			ReqParams: extractReqParams(r),
			UserAgent: r.UserAgent(),
			Host:      host,
			Port:      port,
		})
	}
}

// PutBucketHandler - PUT Bucket
// ----------
// This implementation of the PUT operation creates a new bucket for an authenticated request.
func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PutBucket")

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	if s3Error := checkRequestAuthType(ctx, r, policy.CreateBucketAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}
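
	// A PUT Bucket request may carry an optional CreateBucketConfiguration XML
	// body whose LocationConstraint names the region; an empty body implies the
	// default region.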
	// Parse incoming location constraint.
	location, s3Error := parseLocationConstraint(r)
	if s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	// Validate if location sent by the client is valid, reject
	// requests which do not follow valid region requirements.
	if !isValidLocation(location) {
		writeErrorResponse(w, ErrInvalidRegion, r.URL)
		return
	}
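
	// In federated deployments a successful bucket creation also registers a
	// DNS entry in etcd so other instances can route requests for this bucket;
	// if that registration fails, the freshly created bucket is rolled back.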
	if globalDNSConfig != nil {
		if _, err := globalDNSConfig.Get(bucket); err != nil {
			if err == dns.ErrNoEntriesFound {
				// Proceed to creating a bucket.
				if err = objectAPI.MakeBucketWithLocation(ctx, bucket, location); err != nil {
					writeErrorResponse(w, toAPIErrorCode(err), r.URL)
					return
				}
				if err = globalDNSConfig.Put(bucket); err != nil {
					objectAPI.DeleteBucket(ctx, bucket)
					writeErrorResponse(w, toAPIErrorCode(err), r.URL)
					return
				}
				// Make sure to add Location information here only for bucket
				w.Header().Set("Location", getObjectLocation(r, globalDomainName, bucket, ""))
				writeSuccessResponseHeadersOnly(w)
				return
			}
			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
			return
		}
		writeErrorResponse(w, ErrBucketAlreadyOwnedByYou, r.URL)
		return
	}

	// Proceed to creating a bucket.
	err := objectAPI.MakeBucketWithLocation(ctx, bucket, location)
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	// Make sure to add Location information here only for bucket
	w.Header().Set("Location", path.Clean(r.URL.Path)) // Clean any trailing slashes.
	writeSuccessResponseHeadersOnly(w)
}

// PostPolicyBucketHandler - POST policy
// ----------
// This implementation of the POST operation handles object creation with a specified
// signature policy in multipart/form-data
func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PostPolicyBucket")

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	bucket := mux.Vars(r)["bucket"]

	// Require Content-Length to be set in the request.
	size := r.ContentLength
	if size < 0 {
		writeErrorResponse(w, ErrMissingContentLength, r.URL)
		return
	}

	resource, err := getResource(r.URL.Path, r.Host, globalDomainName)
	if err != nil {
		writeErrorResponse(w, ErrInvalidRequest, r.URL)
		return
	}
	// Make sure that the URL does not contain an object name.
	if bucket != filepath.Clean(resource[1:]) {
		writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
		return
	}

	// Here the parameter is the size of the form data that should
	// be loaded in memory, the remaining being put in temporary files.
	reader, err := r.MultipartReader()
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
		return
	}

	// Read multipart data and save it in memory, and on disk if needed.
	form, err := reader.ReadForm(maxFormMemory)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
		return
	}
	// Remove all tmp files created during the multipart upload.
	defer form.RemoveAll()

	// Extract all form fields.
	fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(ctx, form)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
		return
	}

	// Check if a file is provided, error out otherwise.
	if fileBody == nil {
		writeErrorResponse(w, ErrPOSTFileRequired, r.URL)
		return
	}
	// Close the multipart file.
	defer fileBody.Close()

	formValues.Set("Bucket", bucket)

	if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") {
		// S3 feature to replace ${filename} found in the Key form field
		// by the filename attribute passed in the multipart form.
		formValues.Set("Key", strings.Replace(formValues.Get("Key"), "${filename}", fileName, -1))
	}
	object := formValues.Get("Key")
	successRedirect := formValues.Get("success_action_redirect")
	successStatus := formValues.Get("success_action_status")
	var redirectURL *url.URL
	if successRedirect != "" {
		redirectURL, err = url.Parse(successRedirect)
		if err != nil {
			writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
			return
		}
	}

	// Verify policy signature.
	apiErr := doesPolicySignatureMatch(formValues)
	if apiErr != ErrNone {
		writeErrorResponse(w, apiErr, r.URL)
		return
	}
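
	// The "Policy" form field carries a base64-encoded JSON policy document,
	// roughly of the form:
	//
	//   {
	//     "expiration": "2018-12-01T12:00:00.000Z",
	//     "conditions": [
	//       {"bucket": "mybucket"},
	//       ["starts-with", "$key", "uploads/"],
	//       ["content-length-range", 1, 10485760]
	//     ]
	//   }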

	policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
	if err != nil {
		writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
		return
	}

	postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
	if err != nil {
		writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
		return
	}

	// Make sure formValues adhere to policy restrictions.
	if apiErr = checkPostPolicy(formValues, postPolicyForm); apiErr != ErrNone {
		writeErrorResponse(w, apiErr, r.URL)
		return
	}

	// Ensure that the object size is within expected range, also the file size
	// should not exceed the maximum single Put size (5 GiB)
	lengthRange := postPolicyForm.Conditions.ContentLengthRange
	if lengthRange.Valid {
		if fileSize < lengthRange.Min {
			writeErrorResponse(w, toAPIErrorCode(errDataTooSmall), r.URL)
			return
		}
		if fileSize > lengthRange.Max || isMaxObjectSize(fileSize) {
			writeErrorResponse(w, toAPIErrorCode(errDataTooLarge), r.URL)
			return
		}
	}

	// Extract metadata to be saved from received Form.
	metadata := make(map[string]string)
	err = extractMetadataFromMap(ctx, formValues, metadata)
	if err != nil {
		writeErrorResponse(w, ErrInternalError, r.URL)
		return
	}

	hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}
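
	// If server-side encryption was requested through the form (SSE-C or
	// SSE-S3), the upload is wrapped in an encrypting reader below and the
	// size accounting switches to the encrypted size of the object.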
	if objectAPI.IsEncryptionSupported() {
		if hasServerSideEncryptionHeader(formValues) && !hasSuffix(object, slashSeparator) { // handle SSE-C and SSE-S3 requests
			var reader io.Reader
			var key []byte
			if crypto.SSEC.IsRequested(formValues) {
				key, err = ParseSSECustomerHeader(formValues)
				if err != nil {
					writeErrorResponse(w, toAPIErrorCode(err), r.URL)
					return
				}
			}
			reader, err = newEncryptReader(hashReader, key, bucket, object, metadata, crypto.S3.IsRequested(formValues))
			if err != nil {
				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
				return
			}
			info := ObjectInfo{Size: fileSize}
			hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize) // do not try to verify encrypted content
			if err != nil {
				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
				return
			}
		}
	}

	objInfo, err := objectAPI.PutObject(ctx, bucket, object, hashReader, metadata, ObjectOptions{})
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	location := getObjectLocation(r, globalDomainName, bucket, object)
	w.Header().Set("ETag", `"`+objInfo.ETag+`"`)
	w.Header().Set("Location", location)

	// Get host and port from Request.RemoteAddr.
	host, port, err := net.SplitHostPort(handlers.GetSourceIP(r))
	if err != nil {
		host, port = "", ""
	}

	// Notify object created event.
	defer sendEvent(eventArgs{
		EventName:  event.ObjectCreatedPost,
		BucketName: objInfo.Bucket,
		Object:     objInfo,
		ReqParams:  extractReqParams(r),
		UserAgent:  r.UserAgent(),
		Host:       host,
		Port:       port,
	})

	if successRedirect != "" {
		// Replace raw query params.
		redirectURL.RawQuery = getRedirectPostRawQuery(objInfo)
		writeRedirectSeeOther(w, redirectURL.String())
		return
	}
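
	// Per the S3 POST Object API, success_action_status may be "200", "201" or
	// "204" (the default); only "201" returns an XML document describing the
	// newly created object.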

	// Decide what http response to send depending on success_action_status parameter
	switch successStatus {
	case "201":
		resp := encodeResponse(PostResponse{
			Bucket:   objInfo.Bucket,
			Key:      objInfo.Name,
			ETag:     `"` + objInfo.ETag + `"`,
			Location: location,
		})
		writeResponse(w, http.StatusCreated, resp, "application/xml")
	case "200":
		writeSuccessResponseHeadersOnly(w)
	default:
		writeSuccessNoContent(w)
	}
}

// HeadBucketHandler - HEAD Bucket
// ----------
// This operation is useful to determine if a bucket exists.
// The operation returns a 200 OK if the bucket exists and you
// have permission to access it. Otherwise, the operation might
// return responses such as 404 Not Found and 403 Forbidden.
func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "HeadBucket")

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponseHeadersOnly(w, ErrServerNotInitialized)
		return
	}
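
	// HEAD Bucket is authorized against the s3:ListBucket action, mirroring
	// AWS S3, which requires ListBucket permission for HeadBucket.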
	if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponseHeadersOnly(w, s3Error)
		return
	}

	getBucketInfo := objectAPI.GetBucketInfo
	if api.CacheAPI() != nil {
		getBucketInfo = api.CacheAPI().GetBucketInfo
	}
	if _, err := getBucketInfo(ctx, bucket); err != nil {
		writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
		return
	}

	writeSuccessResponseHeadersOnly(w)
}

// DeleteBucketHandler - Delete bucket
func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DeleteBucket")

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	deleteBucket := objectAPI.DeleteBucket
	if api.CacheAPI() != nil {
		deleteBucket = api.CacheAPI().DeleteBucket
	}

	// Attempt to delete bucket.
	if err := deleteBucket(ctx, bucket); err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}
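
	// After the bucket is gone, clean up its in-memory metadata: notification
	// rules and bucket policies are removed locally, and the deletion is
	// propagated to peer instances.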
	globalNotificationSys.RemoveNotification(bucket)
	globalPolicySys.Remove(bucket)
	globalNotificationSys.DeleteBucket(ctx, bucket)

	if globalDNSConfig != nil {
		if err := globalDNSConfig.Delete(bucket); err != nil {
			// Deleting the DNS entry failed, attempt to create the bucket again.
			objectAPI.MakeBucketWithLocation(ctx, bucket, "")
			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
			return
		}
	}

	// Write success response.
	writeSuccessNoContent(w)
}