2017-05-01 13:59:54 -04:00
/*
 * Minio Cloud Storage, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package cmd
import (
"context"
2017-05-12 01:38:22 -04:00
"encoding/base64"
2017-06-17 19:00:41 -04:00
"encoding/json"
2017-10-13 06:56:16 -04:00
"errors"
2017-05-01 13:59:54 -04:00
"fmt"
"io"
2017-10-20 16:59:12 -04:00
"io/ioutil"
2017-06-28 01:27:05 -04:00
"math"
2017-10-20 16:59:12 -04:00
"os"
2017-05-24 21:39:45 -04:00
"regexp"
2017-05-01 13:59:54 -04:00
"strings"
2017-07-19 22:33:10 -04:00
"time"
2017-05-01 13:59:54 -04:00
"cloud.google.com/go/storage"
2017-10-27 18:07:46 -04:00
"github.com/minio/cli"
2017-05-01 13:59:54 -04:00
minio "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/policy"
2017-10-22 01:30:34 -04:00
"github.com/minio/minio/pkg/hash"
2017-10-27 18:07:46 -04:00
"golang.org/x/oauth2/google"
cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
2017-05-01 13:59:54 -04:00
)
2017-10-13 06:56:16 -04:00
var (
	// errGCSInvalidProjectID - returned by gcsGatewayMain when the supplied
	// project ID fails format validation (see isValidGCSProjectIDFormat).
	errGCSInvalidProjectID = errors.New("GCS project id is either empty or invalid")

	// errGCSProjectIDNotFound - returned by checkGCSProjectID when the project
	// ID is not listed by the cloud resource manager API.
	errGCSProjectIDNotFound = errors.New("unknown project id")
)
2017-05-05 21:49:26 -04:00
const (
	// Path where multipart objects are saved.
	// If we change the backend format we will use a different url path like /multipart/v2
	// but we will not migrate old data.
	gcsMinioMultipartPathV1 = globalMinioSysTmp + "multipart/v1"

	// Multipart meta file.
	gcsMinioMultipartMeta = "gcs.json"

	// gcs.json version number.
	gcsMinioMultipartMetaCurrentVersion = "1"

	// Token prefixed with GCS returned marker to differentiate
	// from user supplied marker.
	gcsTokenPrefix = "{minio}"

	// Maximum component object count to create a composite object.
	// Refer https://cloud.google.com/storage/docs/composite-objects
	gcsMaxComponents = 32

	// Every 24 hours we scan minio.sys.tmp to delete expired multiparts in minio.sys.tmp.
	gcsCleanupInterval = time.Hour * 24

	// The cleanup routine deletes files older than 2 weeks in minio.sys.tmp.
	gcsMultipartExpiry = time.Hour * 24 * 14

	// Project ID key in credentials.json.
	gcsProjectIDKey = "project_id"

	// Name under which this gateway backend is registered on the command line.
	gcsBackend = "gcs"
)
2017-10-27 18:07:46 -04:00
// init registers the GCS gateway backend and its CLI help template with the
// top-level `minio gateway` command.
func init() {
	const gcsGatewayTemplate = `NAME:
  {{.HelpName}} - {{.Usage}}

USAGE:
  {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [PROJECTID]
{{if .VisibleFlags}}
FLAGS:
  {{range .VisibleFlags}}{{.}}
  {{end}}{{end}}
PROJECTID:
  GCS project-id should be provided if GOOGLE_APPLICATION_CREDENTIALS environmental variable is not set.

ENVIRONMENT VARIABLES:
  ACCESS:
     MINIO_ACCESS_KEY: Username or access key of GCS.
     MINIO_SECRET_KEY: Password or secret key of GCS.

  BROWSER:
     MINIO_BROWSER: To disable web browser access, set this value to "off".

  GCS credentials file:
     GOOGLE_APPLICATION_CREDENTIALS: Path to credentials.json

EXAMPLES:
  1. Start minio gateway server for GCS backend.
      $ export GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json
      (Instructions to generate credentials: https://developers.google.com/identity/protocols/application-default-credentials)
      $ export MINIO_ACCESS_KEY=accesskey
      $ export MINIO_SECRET_KEY=secretkey
      $ {{.HelpName}} mygcsprojectid
`

	MustRegisterGatewayCommand(cli.Command{
		Name:               gcsBackend,
		Usage:              "Google Cloud Storage.",
		Action:             gcsGatewayMain,
		CustomHelpTemplate: gcsGatewayTemplate,
		Flags:              append(serverFlags, globalFlags...),
		HideHelpCommand:    true,
	})
}
// Handler for 'minio gateway gcs' command line.
//
// The project ID may come either from the first CLI argument or, when absent,
// from the credentials file pointed to by GOOGLE_APPLICATION_CREDENTIALS
// (resolved later in newGCSGatewayLayer). Exits with the command help when
// neither is available or the supplied ID is malformed.
func gcsGatewayMain(ctx *cli.Context) {
	projectID := ctx.Args().First()
	if projectID == "" && os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" {
		errorIf(errGCSProjectIDNotFound, "project-id should be provided as argument or GOOGLE_APPLICATION_CREDENTIALS should be set with path to credentials.json")
		cli.ShowCommandHelpAndExit(ctx, "gcs", 1)
	}
	if projectID != "" && !isValidGCSProjectIDFormat(projectID) {
		// Reuse the projectID captured above instead of re-reading the CLI
		// args a second time (was: ctx.Args().First()).
		errorIf(errGCSInvalidProjectID, "Unable to start GCS gateway with %s", projectID)
		cli.ShowCommandHelpAndExit(ctx, "gcs", 1)
	}

	startGateway(ctx, &GCSGateway{projectID})
}
// GCSGateway implements Gateway.
type GCSGateway struct {
	projectID string // GCS project ID from the command line; may be empty (see gcsGatewayMain)
}
// Name returns the name of gcs gatewaylayer ("gcs").
func (g *GCSGateway) Name() string {
	return gcsBackend
}
// NewGatewayLayer returns gcs gatewaylayer.
func (g *GCSGateway) NewGatewayLayer() (GatewayLayer, error) {
	// Warn loudly at startup: this gateway is not yet production-ready.
	log.Println(colorYellow("\n *** Warning: Not Ready for Production ***"))
	return newGCSGatewayLayer(g.projectID)
}
2017-06-17 19:00:41 -04:00
// Stored in gcs.json - Contents of this file is not used anywhere. It can be
// used for debugging purposes.
type gcsMultipartMetaV1 struct {
Version string ` json:"version" ` // Version number
Bucket string ` json:"bucket" ` // Bucket name
Object string ` json:"object" ` // Object name
}
// Returns name of the multipart meta object.
func gcsMultipartMetaName ( uploadID string ) string {
2017-06-21 13:27:44 -04:00
return fmt . Sprintf ( "%s/%s/%s" , gcsMinioMultipartPathV1 , uploadID , gcsMinioMultipartMeta )
2017-06-17 19:00:41 -04:00
}
// Returns name of the part object.
2017-07-14 02:20:16 -04:00
func gcsMultipartDataName ( uploadID string , partNumber int , etag string ) string {
return fmt . Sprintf ( "%s/%s/%05d.%s" , gcsMinioMultipartPathV1 , uploadID , partNumber , etag )
2017-06-06 02:13:53 -04:00
}
2017-05-01 13:59:54 -04:00
// Convert Minio errors to minio object layer errors.
func gcsToObjectError ( err error , params ... string ) error {
if err == nil {
return nil
}
e , ok := err . ( * Error )
if ! ok {
// Code should be fixed if this function is called without doing traceError()
// Else handling different situations in this function makes this function complicated.
errorIf ( err , "Expected type *Error" )
return err
}
err = e . e
bucket := ""
object := ""
2017-11-06 13:09:21 -05:00
uploadID := ""
2017-05-01 13:59:54 -04:00
if len ( params ) >= 1 {
bucket = params [ 0 ]
}
if len ( params ) == 2 {
object = params [ 1 ]
}
2017-11-06 13:09:21 -05:00
if len ( params ) == 3 {
uploadID = params [ 2 ]
}
2017-05-01 13:59:54 -04:00
// in some cases just a plain error is being returned
switch err . Error ( ) {
case "storage: bucket doesn't exist" :
err = BucketNotFound {
Bucket : bucket ,
}
e . e = err
return e
case "storage: object doesn't exist" :
2017-11-06 13:09:21 -05:00
if uploadID != "" {
err = InvalidUploadID {
UploadID : uploadID ,
}
} else {
err = ObjectNotFound {
Bucket : bucket ,
Object : object ,
}
2017-05-01 13:59:54 -04:00
}
e . e = err
return e
}
2017-05-05 21:49:26 -04:00
googleAPIErr , ok := err . ( * googleapi . Error )
2017-05-01 13:59:54 -04:00
if ! ok {
// We don't interpret non Minio errors. As minio errors will
// have StatusCode to help to convert to object errors.
2017-06-17 19:00:41 -04:00
e . e = err
return e
}
if len ( googleAPIErr . Errors ) == 0 {
e . e = err
2017-05-01 13:59:54 -04:00
return e
}
2017-05-05 21:49:26 -04:00
reason := googleAPIErr . Errors [ 0 ] . Reason
message := googleAPIErr . Errors [ 0 ] . Message
2017-05-01 13:59:54 -04:00
switch reason {
2017-05-08 17:07:51 -04:00
case "required" :
// Anonymous users does not have storage.xyz access to project 123.
fallthrough
case "keyInvalid" :
fallthrough
2017-05-01 13:59:54 -04:00
case "forbidden" :
err = PrefixAccessDenied {
Bucket : bucket ,
Object : object ,
}
case "invalid" :
err = BucketNameInvalid {
Bucket : bucket ,
}
case "notFound" :
if object != "" {
err = ObjectNotFound {
Bucket : bucket ,
Object : object ,
}
2017-06-23 20:57:25 -04:00
break
2017-05-01 13:59:54 -04:00
}
2017-06-23 20:57:25 -04:00
err = BucketNotFound { Bucket : bucket }
2017-05-01 13:59:54 -04:00
case "conflict" :
if message == "You already own this bucket. Please select another name." {
2017-06-23 20:57:25 -04:00
err = BucketAlreadyOwnedByYou { Bucket : bucket }
break
2017-05-01 13:59:54 -04:00
}
2017-06-23 20:57:25 -04:00
if message == "Sorry, that name is not available. Please try a different one." {
err = BucketAlreadyExists { Bucket : bucket }
break
}
err = BucketNotEmpty { Bucket : bucket }
2017-05-01 13:59:54 -04:00
default :
err = fmt . Errorf ( "Unsupported error reason: %s" , reason )
}
e . e = err
return e
}
2017-05-24 21:39:45 -04:00
// gcsProjectIDRegex defines a valid gcs project id format.
var gcsProjectIDRegex = regexp.MustCompile(`^[a-z][a-z0-9-]{5,29}$`)

// isValidGCSProjectIDFormat - checks if a given project id format is valid or not.
// Project IDs must start with a lowercase letter and can have lowercase ASCII letters,
// digits or hyphens. Project IDs must be between 6 and 30 characters.
// Ref: https://cloud.google.com/resource-manager/reference/rest/v1/projects#Project (projectId section)
func isValidGCSProjectIDFormat(projectID string) bool {
	return gcsProjectIDRegex.MatchString(projectID)
}
2017-06-24 01:10:29 -04:00
// checkGCSProjectID - checks if the project ID does really exist using the
// cloud resource manager API. Returns errGCSProjectIDNotFound if no project
// with that ID is visible to the default credentials.
func checkGCSProjectID(ctx context.Context, projectID string) error {
	// Build a read-only resource manager client from the default credentials.
	httpClient, err := google.DefaultClient(ctx, cloudresourcemanager.CloudPlatformReadOnlyScope)
	if err != nil {
		return err
	}

	baseSvc, err := cloudresourcemanager.New(httpClient)
	if err != nil {
		return err
	}
	projectSvc := cloudresourcemanager.NewProjectsService(baseSvc)

	// Walk the paginated projects list; return nil as soon as the ID matches.
	for pageToken := ""; ; {
		resp, lerr := projectSvc.List().PageToken(pageToken).Context(ctx).Do()
		if lerr != nil {
			return fmt.Errorf("Error getting projects list: %s", lerr.Error())
		}
		for _, p := range resp.Projects {
			if p.ProjectId == projectID {
				return nil
			}
		}
		if resp.NextPageToken == "" {
			break
		}
		pageToken = resp.NextPageToken
	}

	return errGCSProjectIDNotFound
}
2017-05-01 13:59:54 -04:00
// gcsGateway - Implements gateway for Minio and GCS compatible object storage servers.
type gcsGateway struct {
	gatewayUnsupported                 // stubs for object-layer APIs this gateway does not implement
	client             *storage.Client // authenticated GCS client
	anonClient         *minio.Core    // anonymous minio core client against googleStorageEndpoint
	projectID          string         // GCS project ID all bucket operations are scoped to
	ctx                context.Context // background context used for all GCS calls
}
2017-06-09 22:50:51 -04:00
// googleStorageEndpoint - public GCS endpoint used by the anonymous minio.Core client.
const googleStorageEndpoint = "storage.googleapis.com"
2017-05-24 21:39:45 -04:00
2017-10-20 16:59:12 -04:00
// Returns projectID from the GOOGLE_APPLICATION_CREDENTIALS file.
// Reads the JSON credentials file and extracts the "project_id" key;
// an empty string is returned when the key is absent.
func gcsParseProjectID(credsFile string) (projectID string, err error) {
	data, rerr := ioutil.ReadFile(credsFile)
	if rerr != nil {
		return "", rerr
	}
	creds := map[string]string{}
	if jerr := json.Unmarshal(data, &creds); jerr != nil {
		return "", jerr
	}
	return creds[gcsProjectIDKey], nil
}
2017-10-27 18:07:46 -04:00
// newGCSGatewayLayer returns gcs gatewaylayer
func newGCSGatewayLayer ( projectID string ) ( GatewayLayer , error ) {
2017-05-01 13:59:54 -04:00
ctx := context . Background ( )
2017-10-20 16:59:12 -04:00
var err error
if projectID == "" {
// If project ID is not provided on command line, we figure it out
// from the credentials.json file.
projectID , err = gcsParseProjectID ( os . Getenv ( "GOOGLE_APPLICATION_CREDENTIALS" ) )
if err != nil {
return nil , err
}
}
err = checkGCSProjectID ( ctx , projectID )
2017-06-24 01:10:29 -04:00
if err != nil {
return nil , err
}
2017-06-09 22:50:51 -04:00
// Initialize a GCS client.
2017-05-01 13:59:54 -04:00
client , err := storage . NewClient ( ctx )
if err != nil {
return nil , err
}
2017-06-09 22:50:51 -04:00
// Initialize a anonymous client with minio core APIs.
anonClient , err := minio . NewCore ( googleStorageEndpoint , "" , "" , true )
2017-05-05 21:49:26 -04:00
if err != nil {
return nil , err
}
2017-08-03 21:38:27 -04:00
anonClient . SetCustomTransport ( newCustomHTTPTransport ( ) )
2017-05-05 21:49:26 -04:00
2017-07-19 22:33:10 -04:00
gateway := & gcsGateway {
2017-05-01 13:59:54 -04:00
client : client ,
2017-05-08 18:42:48 -04:00
projectID : projectID ,
2017-05-01 13:59:54 -04:00
ctx : ctx ,
2017-05-05 21:49:26 -04:00
anonClient : anonClient ,
2017-07-19 22:33:10 -04:00
}
// Start background process to cleanup old files in minio.sys.tmp
go gateway . CleanupGCSMinioSysTmp ( )
return gateway , nil
}
// Cleanup old files in minio.sys.tmp of the given bucket.
func ( l * gcsGateway ) CleanupGCSMinioSysTmpBucket ( bucket string ) {
2017-09-19 19:08:08 -04:00
it := l . client . Bucket ( bucket ) . Objects ( l . ctx , & storage . Query { Prefix : globalMinioSysTmp , Versions : false } )
2017-07-19 22:33:10 -04:00
for {
attrs , err := it . Next ( )
if err != nil {
if err != iterator . Done {
errorIf ( err , "Object listing error on bucket %s during purging of old files in minio.sys.tmp" , bucket )
}
return
}
if time . Since ( attrs . Updated ) > gcsMultipartExpiry {
// Delete files older than 2 weeks.
err := l . client . Bucket ( bucket ) . Object ( attrs . Name ) . Delete ( l . ctx )
if err != nil {
errorIf ( err , "Unable to delete %s/%s during purging of old files in minio.sys.tmp" , bucket , attrs . Name )
return
}
}
}
}
// Cleanup old files in minio.sys.tmp of all buckets.
func ( l * gcsGateway ) CleanupGCSMinioSysTmp ( ) {
for {
it := l . client . Buckets ( l . ctx , l . projectID )
for {
attrs , err := it . Next ( )
if err != nil {
if err != iterator . Done {
errorIf ( err , "Bucket listing error during purging of old files in minio.sys.tmp" )
}
break
}
l . CleanupGCSMinioSysTmpBucket ( attrs . Name )
}
// Run the cleanup loop every 1 day.
time . Sleep ( gcsCleanupInterval )
}
2017-05-01 13:59:54 -04:00
}
// Shutdown - save any gateway metadata to disk
// if necessary and reload upon next restart.
func (l *gcsGateway) Shutdown() error {
	// No persistent gateway state to flush for GCS.
	return nil
}
// StorageInfo - Not relevant to GCS backend; returns the zero value.
func (l *gcsGateway) StorageInfo() StorageInfo {
	return StorageInfo{}
}
2017-06-17 19:00:41 -04:00
// MakeBucketWithLocation - Create a new container on GCS backend.
2017-05-01 13:59:54 -04:00
func ( l * gcsGateway ) MakeBucketWithLocation ( bucket , location string ) error {
bkt := l . client . Bucket ( bucket )
2017-06-17 19:00:41 -04:00
// we'll default to the us multi-region in case of us-east-1
if location == "us-east-1" {
location = "us"
2017-05-01 13:59:54 -04:00
}
2017-06-17 19:00:41 -04:00
err := bkt . Create ( l . ctx , l . projectID , & storage . BucketAttrs {
Location : location ,
} )
return gcsToObjectError ( traceError ( err ) , bucket )
2017-05-01 13:59:54 -04:00
}
// GetBucketInfo - Get bucket metadata (name and creation time).
func (l *gcsGateway) GetBucketInfo(bucket string) (BucketInfo, error) {
	attrs, err := l.client.Bucket(bucket).Attrs(l.ctx)
	if err != nil {
		return BucketInfo{}, gcsToObjectError(traceError(err), bucket)
	}

	bi := BucketInfo{
		Name:    attrs.Name,
		Created: attrs.Created,
	}
	return bi, nil
}
2017-07-11 12:25:19 -04:00
// ListBuckets lists all buckets under your project-id on GCS.
func ( l * gcsGateway ) ListBuckets ( ) ( buckets [ ] BucketInfo , err error ) {
2017-05-01 13:59:54 -04:00
it := l . client . Buckets ( l . ctx , l . projectID )
2017-07-11 12:25:19 -04:00
// Iterate and capture all the buckets.
2017-05-01 13:59:54 -04:00
for {
2017-07-11 12:25:19 -04:00
attrs , ierr := it . Next ( )
if ierr == iterator . Done {
2017-05-01 13:59:54 -04:00
break
}
2017-07-11 12:25:19 -04:00
if ierr != nil {
return buckets , gcsToObjectError ( traceError ( ierr ) )
2017-05-01 13:59:54 -04:00
}
2017-07-11 12:25:19 -04:00
buckets = append ( buckets , BucketInfo {
2017-05-01 13:59:54 -04:00
Name : attrs . Name ,
Created : attrs . Created ,
} )
}
2017-07-11 12:25:19 -04:00
return buckets , nil
2017-05-01 13:59:54 -04:00
}
2017-06-23 20:57:25 -04:00
// DeleteBucket delete a bucket on GCS.
2017-05-01 13:59:54 -04:00
func ( l * gcsGateway ) DeleteBucket ( bucket string ) error {
2017-06-23 20:57:25 -04:00
itObject := l . client . Bucket ( bucket ) . Objects ( l . ctx , & storage . Query { Delimiter : slashSeparator , Versions : false } )
// We list the bucket and if we find any objects we return BucketNotEmpty error. If we
2017-07-19 22:33:10 -04:00
// find only "minio.sys.tmp/" then we remove it before deleting the bucket.
2017-06-23 20:57:25 -04:00
gcsMinioPathFound := false
nonGCSMinioPathFound := false
for {
objAttrs , err := itObject . Next ( )
if err == iterator . Done {
break
}
if err != nil {
return gcsToObjectError ( traceError ( err ) )
}
2017-09-19 19:08:08 -04:00
if objAttrs . Prefix == globalMinioSysTmp {
2017-06-23 20:57:25 -04:00
gcsMinioPathFound = true
continue
}
nonGCSMinioPathFound = true
break
}
if nonGCSMinioPathFound {
return gcsToObjectError ( traceError ( BucketNotEmpty { } ) )
}
if gcsMinioPathFound {
2017-07-19 22:33:10 -04:00
// Remove minio.sys.tmp before deleting the bucket.
2017-09-19 19:08:08 -04:00
itObject = l . client . Bucket ( bucket ) . Objects ( l . ctx , & storage . Query { Versions : false , Prefix : globalMinioSysTmp } )
2017-06-23 20:57:25 -04:00
for {
objAttrs , err := itObject . Next ( )
if err == iterator . Done {
break
}
if err != nil {
return gcsToObjectError ( traceError ( err ) )
}
err = l . client . Bucket ( bucket ) . Object ( objAttrs . Name ) . Delete ( l . ctx )
if err != nil {
return gcsToObjectError ( traceError ( err ) )
}
}
}
2017-05-01 13:59:54 -04:00
err := l . client . Bucket ( bucket ) . Delete ( l . ctx )
2017-06-17 19:00:41 -04:00
return gcsToObjectError ( traceError ( err ) , bucket )
2017-05-01 13:59:54 -04:00
}
2017-05-10 18:36:49 -04:00
func toGCSPageToken ( name string ) string {
length := uint16 ( len ( name ) )
b := [ ] byte {
0xa ,
byte ( length & 0xFF ) ,
}
length = length >> 7
if length > 0 {
b = append ( b , byte ( length & 0xFF ) )
}
b = append ( b , [ ] byte ( name ) ... )
return base64 . StdEncoding . EncodeToString ( b )
}
2017-06-09 22:48:20 -04:00
// Returns true if marker was returned by GCS, i.e prefixed with
// "{minio}" (gcsTokenPrefix) by minio gcs gateway.
func isGCSMarker(marker string) bool {
	return strings.HasPrefix(marker, gcsTokenPrefix)
}
2017-05-01 13:59:54 -04:00
// ListObjects - lists all blobs in GCS bucket filtered by prefix.
func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
	it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{
		Delimiter: delimiter,
		Prefix:    prefix,
		Versions:  false,
	})

	isTruncated := false
	nextMarker := ""
	prefixes := []string{}

	// To accommodate S3-compatible applications using
	// ListObjectsV1 to use object keys as markers to control the
	// listing of objects, we use the following encoding scheme to
	// distinguish between GCS continuation tokens and application
	// supplied markers.
	//
	// - NextMarker in ListObjectsV1 response is constructed by
	//   prefixing "{minio}" (gcsTokenPrefix) to the GCS continuation
	//   token, e.g, "{minio}CgRvYmoz"
	//
	// - Application supplied markers are used as-is to list
	//   object keys that appear after it in the lexicographical order.

	// If application is using GCS continuation token we should
	// strip the gcsTokenPrefix we added.
	gcsMarker := isGCSMarker(marker)
	if gcsMarker {
		it.PageInfo().Token = strings.TrimPrefix(marker, gcsTokenPrefix)
	}

	it.PageInfo().MaxSize = maxKeys

	objects := []ObjectInfo{}
	for {
		if len(objects) >= maxKeys {
			// check if there is one next object and
			// if that one next object is our hidden
			// metadata folder, then just break
			// otherwise we've truncated the output
			attrs, _ := it.Next()
			if attrs != nil && attrs.Prefix == globalMinioSysTmp {
				break
			}
			isTruncated = true
			break
		}

		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return ListObjectsInfo{}, gcsToObjectError(traceError(err), bucket, prefix)
		}

		// Track the last seen name so NextMarker always points past it.
		nextMarker = toGCSPageToken(attrs.Name)

		if attrs.Prefix == globalMinioSysTmp {
			// We don't return our metadata prefix.
			continue
		}
		if !strings.HasPrefix(prefix, globalMinioSysTmp) {
			// If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries.
			// But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/
			// which will be helpful to observe the "directory structure" for debugging purposes.
			if strings.HasPrefix(attrs.Prefix, globalMinioSysTmp) ||
				strings.HasPrefix(attrs.Name, globalMinioSysTmp) {
				continue
			}
		}
		if attrs.Prefix != "" {
			prefixes = append(prefixes, attrs.Prefix)
			continue
		}
		if !gcsMarker && attrs.Name <= marker {
			// if user supplied a marker don't append
			// objects until we reach marker (and skip it).
			continue
		}

		objects = append(objects, ObjectInfo{
			Name:            attrs.Name,
			Bucket:          attrs.Bucket,
			ModTime:         attrs.Updated,
			Size:            attrs.Size,
			ETag:            toS3ETag(fmt.Sprintf("%d", attrs.CRC32C)),
			UserDefined:     attrs.Metadata,
			ContentType:     attrs.ContentType,
			ContentEncoding: attrs.ContentEncoding,
		})
	}

	return ListObjectsInfo{
		IsTruncated: isTruncated,
		NextMarker:  gcsTokenPrefix + nextMarker,
		Prefixes:    prefixes,
		Objects:     objects,
	}, nil
}
// ListObjectsV2 - lists all blobs in GCS bucket filtered by prefix.
// The GCS SDK page token is used directly as the S3 continuation token.
func (l *gcsGateway) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (ListObjectsV2Info, error) {
	it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{
		Delimiter: delimiter,
		Prefix:    prefix,
		Versions:  false,
	})

	isTruncated := false
	it.PageInfo().MaxSize = maxKeys

	if continuationToken != "" {
		// If client sends continuationToken, set it
		it.PageInfo().Token = continuationToken
	} else {
		// else set the continuationToken to return
		continuationToken = it.PageInfo().Token
		if continuationToken != "" {
			// If GCS SDK sets continuationToken, it means there are more than maxKeys in the current page
			// and the response will be truncated
			isTruncated = true
		}
	}

	prefixes := []string{}
	objects := []ObjectInfo{}
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return ListObjectsV2Info{}, gcsToObjectError(traceError(err), bucket, prefix)
		}

		if attrs.Prefix == globalMinioSysTmp {
			// We don't return our metadata prefix.
			continue
		}
		if !strings.HasPrefix(prefix, globalMinioSysTmp) {
			// If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries.
			// But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/
			// which will be helpful to observe the "directory structure" for debugging purposes.
			if strings.HasPrefix(attrs.Prefix, globalMinioSysTmp) ||
				strings.HasPrefix(attrs.Name, globalMinioSysTmp) {
				continue
			}
		}

		if attrs.Prefix != "" {
			prefixes = append(prefixes, attrs.Prefix)
			continue
		}

		objects = append(objects, fromGCSAttrsToObjectInfo(attrs))
	}

	return ListObjectsV2Info{
		IsTruncated:           isTruncated,
		ContinuationToken:     continuationToken,
		NextContinuationToken: continuationToken,
		Prefixes:              prefixes,
		Objects:               objects,
	}, nil
}
// GetObject - reads an object from GCS. Supports additional
// parameters like offset and length which are synonymous with
// HTTP Range requests.
//
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func (l *gcsGateway) GetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
	// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
	// otherwise gcs will just return object not exist in case of non-existing bucket
	if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
		return gcsToObjectError(traceError(err), bucket)
	}

	r, err := l.client.Bucket(bucket).Object(key).NewRangeReader(l.ctx, startOffset, length)
	if err != nil {
		return gcsToObjectError(traceError(err), bucket, key)
	}
	defer r.Close()

	if _, err = io.Copy(writer, r); err != nil {
		return gcsToObjectError(traceError(err), bucket, key)
	}
	return nil
}
2017-06-23 20:35:45 -04:00
// fromMinioClientListBucketResultToV2Info converts minio ListBucketResult to ListObjectsV2Info
func fromMinioClientListBucketResultToV2Info ( bucket string , result minio . ListBucketResult ) ListObjectsV2Info {
objects := make ( [ ] ObjectInfo , len ( result . Contents ) )
for i , oi := range result . Contents {
objects [ i ] = fromMinioClientObjectInfo ( bucket , oi )
}
prefixes := make ( [ ] string , len ( result . CommonPrefixes ) )
for i , p := range result . CommonPrefixes {
prefixes [ i ] = p . Prefix
}
return ListObjectsV2Info {
2017-07-11 12:25:19 -04:00
IsTruncated : result . IsTruncated ,
Prefixes : prefixes ,
Objects : objects ,
2017-06-23 20:35:45 -04:00
ContinuationToken : result . Marker ,
NextContinuationToken : result . NextMarker ,
}
}
2017-05-31 18:40:33 -04:00
// fromGCSAttrsToObjectInfo converts GCS BucketAttrs to gateway ObjectInfo
func fromGCSAttrsToObjectInfo ( attrs * storage . ObjectAttrs ) ObjectInfo {
// All google cloud storage objects have a CRC32c hash, whereas composite objects may not have a MD5 hash
// Refer https://cloud.google.com/storage/docs/hashes-etags. Use CRC32C for ETag
2017-05-01 13:59:54 -04:00
return ObjectInfo {
Name : attrs . Name ,
Bucket : attrs . Bucket ,
ModTime : attrs . Updated ,
Size : attrs . Size ,
2017-10-26 13:17:07 -04:00
ETag : toS3ETag ( fmt . Sprintf ( "%d" , attrs . CRC32C ) ) ,
2017-05-01 13:59:54 -04:00
UserDefined : attrs . Metadata ,
ContentType : attrs . ContentType ,
ContentEncoding : attrs . ContentEncoding ,
}
}
// GetObjectInfo - reads object info and replies back ObjectInfo
2017-05-05 21:49:26 -04:00
func ( l * gcsGateway ) GetObjectInfo ( bucket string , object string ) ( ObjectInfo , error ) {
2017-05-01 13:59:54 -04:00
// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
// otherwise gcs will just return object not exist in case of non-existing bucket
if _ , err := l . client . Bucket ( bucket ) . Attrs ( l . ctx ) ; err != nil {
return ObjectInfo { } , gcsToObjectError ( traceError ( err ) , bucket )
}
attrs , err := l . client . Bucket ( bucket ) . Object ( object ) . Attrs ( l . ctx )
if err != nil {
return ObjectInfo { } , gcsToObjectError ( traceError ( err ) , bucket , object )
}
2017-07-11 12:25:19 -04:00
return fromGCSAttrsToObjectInfo ( attrs ) , nil
2017-05-01 13:59:54 -04:00
}
// PutObject - Create a new object with the incoming data,
2017-10-22 01:30:34 -04:00
func ( l * gcsGateway ) PutObject ( bucket string , key string , data * hash . Reader , metadata map [ string ] string ) ( ObjectInfo , error ) {
2017-05-01 13:59:54 -04:00
// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
// otherwise gcs will just return object not exist in case of non-existing bucket
if _ , err := l . client . Bucket ( bucket ) . Attrs ( l . ctx ) ; err != nil {
return ObjectInfo { } , gcsToObjectError ( traceError ( err ) , bucket )
}
object := l . client . Bucket ( bucket ) . Object ( key )
w := object . NewWriter ( l . ctx )
w . ContentType = metadata [ "content-type" ]
w . ContentEncoding = metadata [ "content-encoding" ]
2017-07-11 12:25:19 -04:00
w . Metadata = metadata
2017-05-31 18:40:33 -04:00
2017-09-19 15:40:27 -04:00
if _ , err := io . Copy ( w , data ) ; err != nil {
2017-07-11 12:25:19 -04:00
// Close the object writer upon error.
w . Close ( )
2017-05-01 13:59:54 -04:00
return ObjectInfo { } , gcsToObjectError ( traceError ( err ) , bucket , key )
}
2017-10-22 01:30:34 -04:00
2017-07-11 12:25:19 -04:00
// Close the object writer upon success.
w . Close ( )
2017-05-10 19:52:38 -04:00
2017-07-11 12:25:19 -04:00
attrs , err := object . Attrs ( l . ctx )
if err != nil {
return ObjectInfo { } , gcsToObjectError ( traceError ( err ) , bucket , key )
}
2017-05-31 18:40:33 -04:00
return fromGCSAttrsToObjectInfo ( attrs ) , nil
2017-05-01 13:59:54 -04:00
}
// CopyObject - Copies a blob from source container to destination container.
2017-07-11 12:25:19 -04:00
func ( l * gcsGateway ) CopyObject ( srcBucket string , srcObject string , destBucket string , destObject string ,
metadata map [ string ] string ) ( ObjectInfo , error ) {
2017-05-01 13:59:54 -04:00
src := l . client . Bucket ( srcBucket ) . Object ( srcObject )
dst := l . client . Bucket ( destBucket ) . Object ( destObject )
2017-09-26 14:04:42 -04:00
copier := dst . CopierFrom ( src )
copier . ObjectAttrs . Metadata = metadata
attrs , err := copier . Run ( l . ctx )
2017-05-01 13:59:54 -04:00
if err != nil {
return ObjectInfo { } , gcsToObjectError ( traceError ( err ) , destBucket , destObject )
}
2017-05-31 18:40:33 -04:00
return fromGCSAttrsToObjectInfo ( attrs ) , nil
2017-05-01 13:59:54 -04:00
}
// DeleteObject - Deletes a blob in bucket.
func (l *gcsGateway) DeleteObject(bucket string, object string) error {
	if err := l.client.Bucket(bucket).Object(object).Delete(l.ctx); err != nil {
		return gcsToObjectError(traceError(err), bucket, object)
	}
	return nil
}
// NewMultipartUpload - upload object in multiple parts
func ( l * gcsGateway ) NewMultipartUpload ( bucket string , key string , metadata map [ string ] string ) ( uploadID string , err error ) {
// generate new uploadid
uploadID = mustGetUUID ( )
// generate name for part zero
2017-06-17 19:00:41 -04:00
meta := gcsMultipartMetaName ( uploadID )
2017-05-01 13:59:54 -04:00
2017-06-17 19:00:41 -04:00
w := l . client . Bucket ( bucket ) . Object ( meta ) . NewWriter ( l . ctx )
2017-07-11 12:25:19 -04:00
defer w . Close ( )
2017-05-01 13:59:54 -04:00
w . ContentType = metadata [ "content-type" ]
w . ContentEncoding = metadata [ "content-encoding" ]
w . Metadata = metadata
2017-06-17 19:00:41 -04:00
2017-07-11 12:25:19 -04:00
if err = json . NewEncoder ( w ) . Encode ( gcsMultipartMetaV1 {
gcsMinioMultipartMetaCurrentVersion ,
bucket ,
key ,
} ) ; err != nil {
2017-06-17 19:00:41 -04:00
return "" , gcsToObjectError ( traceError ( err ) , bucket , key )
}
2017-05-01 13:59:54 -04:00
return uploadID , nil
}
2017-06-17 19:00:41 -04:00
// ListMultipartUploads - lists all multipart uploads.
func (l *gcsGateway) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
	// No native listing of in-progress uploads; echo the request parameters
	// back with an empty upload list.
	var result ListMultipartsInfo
	result.KeyMarker = keyMarker
	result.UploadIDMarker = uploadIDMarker
	result.MaxUploads = maxUploads
	result.Prefix = prefix
	result.Delimiter = delimiter
	return result, nil
}
2017-07-19 22:33:10 -04:00
// Checks if minio.sys.tmp/multipart/v1/<upload-id>/gcs.json exists, returns
2017-07-11 12:25:19 -04:00
// an object layer compatible error upon any error.
func ( l * gcsGateway ) checkUploadIDExists ( bucket string , key string , uploadID string ) error {
_ , err := l . client . Bucket ( bucket ) . Object ( gcsMultipartMetaName ( uploadID ) ) . Attrs ( l . ctx )
2017-11-06 13:09:21 -05:00
return gcsToObjectError ( traceError ( err ) , bucket , key , uploadID )
2017-07-11 12:25:19 -04:00
}
2017-05-01 13:59:54 -04:00
// PutObjectPart puts a part of object in bucket
2017-10-22 01:30:34 -04:00
func ( l * gcsGateway ) PutObjectPart ( bucket string , key string , uploadID string , partNumber int , data * hash . Reader ) ( PartInfo , error ) {
2017-07-11 12:25:19 -04:00
if err := l . checkUploadIDExists ( bucket , key , uploadID ) ; err != nil {
return PartInfo { } , err
2017-06-17 19:00:41 -04:00
}
2017-10-22 01:30:34 -04:00
etag := data . MD5HexString ( )
2017-09-19 15:40:27 -04:00
if etag == "" {
2017-07-11 12:25:19 -04:00
// Generate random ETag.
2017-10-26 13:17:07 -04:00
etag = genETag ( )
2017-07-11 12:25:19 -04:00
}
2017-07-14 02:20:16 -04:00
object := l . client . Bucket ( bucket ) . Object ( gcsMultipartDataName ( uploadID , partNumber , etag ) )
2017-06-17 19:00:41 -04:00
w := object . NewWriter ( l . ctx )
// Disable "chunked" uploading in GCS client. If enabled, it can cause a corner case
// where it tries to upload 0 bytes in the last chunk and get error from server.
w . ChunkSize = 0
2017-09-19 15:40:27 -04:00
if _ , err := io . Copy ( w , data ) ; err != nil {
2017-07-11 12:25:19 -04:00
// Make sure to close object writer upon error.
w . Close ( )
2017-06-17 19:00:41 -04:00
return PartInfo { } , gcsToObjectError ( traceError ( err ) , bucket , key )
}
2017-07-11 12:25:19 -04:00
// Make sure to close the object writer upon success.
w . Close ( )
2017-06-17 19:00:41 -04:00
return PartInfo {
2017-07-14 02:20:16 -04:00
PartNumber : partNumber ,
2017-06-17 19:00:41 -04:00
ETag : etag ,
2017-07-11 12:25:19 -04:00
LastModified : UTCNow ( ) ,
2017-09-19 15:40:27 -04:00
Size : data . Size ( ) ,
2017-06-17 19:00:41 -04:00
} , nil
2017-07-11 12:25:19 -04:00
2017-06-17 19:00:41 -04:00
}
2017-05-01 13:59:54 -04:00
2017-06-17 19:00:41 -04:00
// ListObjectParts returns all object parts for specified object in specified bucket
func ( l * gcsGateway ) ListObjectParts ( bucket string , key string , uploadID string , partNumberMarker int , maxParts int ) ( ListPartsInfo , error ) {
2017-07-11 12:25:19 -04:00
return ListPartsInfo { } , l . checkUploadIDExists ( bucket , key , uploadID )
2017-05-01 13:59:54 -04:00
}
2017-06-17 19:00:41 -04:00
// Called by AbortMultipartUpload and CompleteMultipartUpload for cleaning up.
func ( l * gcsGateway ) cleanupMultipartUpload ( bucket , key , uploadID string ) error {
2017-06-21 13:27:44 -04:00
prefix := fmt . Sprintf ( "%s/%s/" , gcsMinioMultipartPathV1 , uploadID )
2017-06-17 19:00:41 -04:00
// iterate through all parts and delete them
it := l . client . Bucket ( bucket ) . Objects ( l . ctx , & storage . Query { Prefix : prefix , Versions : false } )
2017-05-11 17:21:56 -04:00
2017-05-01 13:59:54 -04:00
for {
attrs , err := it . Next ( )
if err == iterator . Done {
break
2017-06-17 19:00:41 -04:00
}
if err != nil {
2017-05-11 17:21:56 -04:00
return gcsToObjectError ( traceError ( err ) , bucket , key )
2017-05-01 13:59:54 -04:00
}
2017-06-17 19:00:41 -04:00
object := l . client . Bucket ( bucket ) . Object ( attrs . Name )
// Ignore the error as parallel AbortMultipartUpload might have deleted it.
object . Delete ( l . ctx )
2017-05-01 13:59:54 -04:00
}
return nil
}
2017-06-17 19:00:41 -04:00
// AbortMultipartUpload aborts a ongoing multipart upload
func ( l * gcsGateway ) AbortMultipartUpload ( bucket string , key string , uploadID string ) error {
2017-07-11 12:25:19 -04:00
if err := l . checkUploadIDExists ( bucket , key , uploadID ) ; err != nil {
return err
}
2017-06-17 19:00:41 -04:00
return l . cleanupMultipartUpload ( bucket , key , uploadID )
}
2017-05-01 13:59:54 -04:00
2017-06-17 19:00:41 -04:00
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
2017-07-11 12:25:19 -04:00
// Note that there is a limit (currently 32) to the number of components that can
// be composed in a single operation. There is a limit (currently 1024) to the total
// number of components for a given composite object. This means you can append to
// each object at most 1023 times. There is a per-project rate limit (currently 200)
// to the number of components you can compose per second. This rate counts both the
// components being appended to a composite object as well as the components being
// copied when the composite object of which they are a part is copied.
2017-05-01 13:59:54 -04:00
func ( l * gcsGateway ) CompleteMultipartUpload ( bucket string , key string , uploadID string , uploadedParts [ ] completePart ) ( ObjectInfo , error ) {
2017-06-17 19:00:41 -04:00
meta := gcsMultipartMetaName ( uploadID )
object := l . client . Bucket ( bucket ) . Object ( meta )
2017-05-01 13:59:54 -04:00
2017-06-17 19:00:41 -04:00
partZeroAttrs , err := object . Attrs ( l . ctx )
if err != nil {
2017-11-06 13:09:21 -05:00
return ObjectInfo { } , gcsToObjectError ( traceError ( err ) , bucket , key , uploadID )
2017-06-17 19:00:41 -04:00
}
2017-07-11 12:25:19 -04:00
2017-06-17 19:00:41 -04:00
r , err := object . NewReader ( l . ctx )
2017-05-01 13:59:54 -04:00
if err != nil {
return ObjectInfo { } , gcsToObjectError ( traceError ( err ) , bucket , key )
}
2017-07-11 12:25:19 -04:00
defer r . Close ( )
2017-05-01 13:59:54 -04:00
2017-06-17 19:00:41 -04:00
// Check version compatibility of the meta file before compose()
multipartMeta := gcsMultipartMetaV1 { }
2017-07-11 12:25:19 -04:00
if err = json . NewDecoder ( r ) . Decode ( & multipartMeta ) ; err != nil {
2017-06-17 19:00:41 -04:00
return ObjectInfo { } , gcsToObjectError ( traceError ( err ) , bucket , key )
}
2017-07-11 12:25:19 -04:00
if multipartMeta . Version != gcsMinioMultipartMetaCurrentVersion {
2017-06-17 19:00:41 -04:00
return ObjectInfo { } , gcsToObjectError ( traceError ( errFormatNotSupported ) , bucket , key )
}
2017-07-11 12:25:19 -04:00
// Validate if the gcs.json stores valid entries for the bucket and key.
if multipartMeta . Bucket != bucket || multipartMeta . Object != key {
return ObjectInfo { } , gcsToObjectError ( InvalidUploadID {
UploadID : uploadID ,
} , bucket , key )
2017-05-01 13:59:54 -04:00
}
2017-07-11 12:25:19 -04:00
var parts [ ] * storage . ObjectHandle
2017-11-06 13:09:21 -05:00
partSizes := make ( [ ] int64 , len ( uploadedParts ) )
for i , uploadedPart := range uploadedParts {
2017-07-14 02:20:16 -04:00
parts = append ( parts , l . client . Bucket ( bucket ) . Object ( gcsMultipartDataName ( uploadID ,
uploadedPart . PartNumber , uploadedPart . ETag ) ) )
2017-11-06 13:09:21 -05:00
partAttr , pErr := l . client . Bucket ( bucket ) . Object ( gcsMultipartDataName ( uploadID , uploadedPart . PartNumber , uploadedPart . ETag ) ) . Attrs ( l . ctx )
if pErr != nil {
return ObjectInfo { } , gcsToObjectError ( traceError ( pErr ) , bucket , key , uploadID )
}
partSizes [ i ] = partAttr . Size
}
// Error out if parts except last part sizing < 5MiB.
for i , size := range partSizes [ : len ( partSizes ) - 1 ] {
if size < globalMinPartSize {
return ObjectInfo { } , traceError ( PartTooSmall {
PartNumber : uploadedParts [ i ] . PartNumber ,
PartSize : size ,
PartETag : uploadedParts [ i ] . ETag ,
} )
}
2017-07-14 02:20:16 -04:00
}
// Returns name of the composed object.
gcsMultipartComposeName := func ( uploadID string , composeNumber int ) string {
2017-09-19 19:08:08 -04:00
return fmt . Sprintf ( "%s/tmp/%s/composed-object-%05d" , globalMinioSysTmp , uploadID , composeNumber )
2017-05-01 13:59:54 -04:00
}
2017-07-19 22:33:10 -04:00
composeCount := int ( math . Ceil ( float64 ( len ( parts ) ) / float64 ( gcsMaxComponents ) ) )
2017-06-28 01:27:05 -04:00
if composeCount > 1 {
// Create composes of every 32 parts.
composeParts := make ( [ ] * storage . ObjectHandle , composeCount )
for i := 0 ; i < composeCount ; i ++ {
// Create 'composed-object-N' using next 32 parts.
2017-07-14 02:20:16 -04:00
composeParts [ i ] = l . client . Bucket ( bucket ) . Object ( gcsMultipartComposeName ( uploadID , i ) )
2017-07-19 22:33:10 -04:00
start := i * gcsMaxComponents
end := start + gcsMaxComponents
2017-06-28 01:27:05 -04:00
if end > len ( parts ) {
end = len ( parts )
}
composer := composeParts [ i ] . ComposerFrom ( parts [ start : end ] ... )
composer . ContentType = partZeroAttrs . ContentType
composer . Metadata = partZeroAttrs . Metadata
2017-07-11 12:25:19 -04:00
if _ , err = composer . Run ( l . ctx ) ; err != nil {
2017-06-28 01:27:05 -04:00
return ObjectInfo { } , gcsToObjectError ( traceError ( err ) , bucket , key )
}
}
// As composes are successfully created, final object needs to be created using composes.
parts = composeParts
}
2017-07-11 12:25:19 -04:00
composer := l . client . Bucket ( bucket ) . Object ( key ) . ComposerFrom ( parts ... )
2017-05-01 13:59:54 -04:00
composer . ContentType = partZeroAttrs . ContentType
composer . Metadata = partZeroAttrs . Metadata
attrs , err := composer . Run ( l . ctx )
2017-06-17 19:00:41 -04:00
if err != nil {
return ObjectInfo { } , gcsToObjectError ( traceError ( err ) , bucket , key )
2017-05-01 13:59:54 -04:00
}
2017-06-17 19:00:41 -04:00
if err = l . cleanupMultipartUpload ( bucket , key , uploadID ) ; err != nil {
return ObjectInfo { } , gcsToObjectError ( traceError ( err ) , bucket , key )
}
return fromGCSAttrsToObjectInfo ( attrs ) , nil
2017-05-01 13:59:54 -04:00
}
// SetBucketPolicies - Set policy on bucket
func ( l * gcsGateway ) SetBucketPolicies ( bucket string , policyInfo policy . BucketAccessPolicy ) error {
2017-05-08 20:55:39 -04:00
var policies [ ] BucketAccessPolicy
for prefix , policy := range policy . GetPolicies ( policyInfo . Statements , bucket ) {
policies = append ( policies , BucketAccessPolicy {
Prefix : prefix ,
Policy : policy ,
} )
}
prefix := bucket + "/*" // For all objects inside the bucket.
if len ( policies ) != 1 {
return traceError ( NotImplemented { } )
2017-06-23 20:57:25 -04:00
}
if policies [ 0 ] . Prefix != prefix {
2017-05-08 20:55:39 -04:00
return traceError ( NotImplemented { } )
}
acl := l . client . Bucket ( bucket ) . ACL ( )
if policies [ 0 ] . Policy == policy . BucketPolicyNone {
if err := acl . Delete ( l . ctx , storage . AllUsers ) ; err != nil {
return gcsToObjectError ( traceError ( err ) , bucket )
}
return nil
}
2017-05-12 19:49:14 -04:00
var role storage . ACLRole
2017-05-10 13:02:45 -04:00
switch policies [ 0 ] . Policy {
case policy . BucketPolicyReadOnly :
2017-05-08 20:55:39 -04:00
role = storage . RoleReader
2017-05-10 13:02:45 -04:00
case policy . BucketPolicyWriteOnly :
2017-05-08 20:55:39 -04:00
role = storage . RoleWriter
2017-05-10 13:02:45 -04:00
default :
2017-05-12 19:49:14 -04:00
return traceError ( NotImplemented { } )
2017-05-08 20:55:39 -04:00
}
if err := acl . Set ( l . ctx , storage . AllUsers , role ) ; err != nil {
return gcsToObjectError ( traceError ( err ) , bucket )
}
return nil
2017-05-01 13:59:54 -04:00
}
// GetBucketPolicies - Get policy on bucket
func ( l * gcsGateway ) GetBucketPolicies ( bucket string ) ( policy . BucketAccessPolicy , error ) {
2017-07-11 12:25:19 -04:00
rules , err := l . client . Bucket ( bucket ) . ACL ( ) . List ( l . ctx )
2017-05-08 20:55:39 -04:00
if err != nil {
return policy . BucketAccessPolicy { } , gcsToObjectError ( traceError ( err ) , bucket )
}
policyInfo := policy . BucketAccessPolicy { Version : "2012-10-17" }
for _ , r := range rules {
2017-05-12 19:49:14 -04:00
if r . Entity != storage . AllUsers || r . Role == storage . RoleOwner {
2017-05-08 20:55:39 -04:00
continue
}
switch r . Role {
case storage . RoleReader :
policyInfo . Statements = policy . SetPolicy ( policyInfo . Statements , policy . BucketPolicyReadOnly , bucket , "" )
case storage . RoleWriter :
policyInfo . Statements = policy . SetPolicy ( policyInfo . Statements , policy . BucketPolicyWriteOnly , bucket , "" )
}
}
2017-10-26 21:01:00 -04:00
// Return NoSuchBucketPolicy error, when policy is not set
if len ( policyInfo . Statements ) == 0 {
return policy . BucketAccessPolicy { } , gcsToObjectError ( traceError ( PolicyNotFound { } ) , bucket )
}
2017-05-08 20:55:39 -04:00
return policyInfo , nil
2017-05-01 13:59:54 -04:00
}
// DeleteBucketPolicies - Delete all policies on bucket
func ( l * gcsGateway ) DeleteBucketPolicies ( bucket string ) error {
2017-05-12 19:49:14 -04:00
// This only removes the storage.AllUsers policies
2017-07-11 12:25:19 -04:00
if err := l . client . Bucket ( bucket ) . ACL ( ) . Delete ( l . ctx , storage . AllUsers ) ; err != nil {
2017-05-08 20:55:39 -04:00
return gcsToObjectError ( traceError ( err ) , bucket )
}
return nil
2017-05-01 13:59:54 -04:00
}