diff --git a/cmd/gateway-interface.go b/cmd/gateway-interface.go index 960f16ca2..07ce5ac97 100644 --- a/cmd/gateway-interface.go +++ b/cmd/gateway-interface.go @@ -19,12 +19,10 @@ package cmd import "github.com/minio/madmin-go" -// GatewayMinioSysTmp prefix is used in Azure/GCS gateway for save metadata sent by Initialize Multipart Upload API. +// GatewayMinioSysTmp prefix is used in Azure gateway to save metadata sent by Initialize Multipart Upload API. const ( GatewayMinioSysTmp = "minio.sys.tmp/" AzureBackendGateway = "azure" - GCSBackendGateway = "gcs" - HDFSBackendGateway = "hdfs" NASBackendGateway = "nas" S3BackendGateway = "s3" ) diff --git a/cmd/gateway/gateway.go b/cmd/gateway/gateway.go index f1df8ac59..43c6d2f67 100644 --- a/cmd/gateway/gateway.go +++ b/cmd/gateway/gateway.go @@ -27,12 +27,6 @@ import ( // S3 _ "github.com/minio/minio/cmd/gateway/s3" - - // HDFS - _ "github.com/minio/minio/cmd/gateway/hdfs" - - // GCS (use only if you must, GCS already supports S3 API) - _ "github.com/minio/minio/cmd/gateway/gcs" // gateway functionality is frozen, no new gateways are being implemented // or considered for upstream inclusion at this point in time. if needed // please keep a fork of the project. diff --git a/cmd/gateway/gcs/gateway-gcs.go b/cmd/gateway/gcs/gateway-gcs.go deleted file mode 100644 index 9d2b3e3cc..000000000 --- a/cmd/gateway/gcs/gateway-gcs.go +++ /dev/null @@ -1,1501 +0,0 @@ -/* - * MinIO Object Storage (c) 2021 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package gcs - -import ( - "context" - "encoding/base64" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "math" - "net/http" - "os" - "path" - "regexp" - "strconv" - "strings" - "time" - - "cloud.google.com/go/storage" - humanize "github.com/dustin/go-humanize" - "github.com/minio/cli" - "github.com/minio/madmin-go" - miniogopolicy "github.com/minio/minio-go/v7/pkg/policy" - minio "github.com/minio/minio/cmd" - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/bucket/policy" - "github.com/minio/pkg/bucket/policy/condition" - "github.com/minio/pkg/env" - "google.golang.org/api/googleapi" - "google.golang.org/api/iterator" - "google.golang.org/api/option" -) - -var ( - // Project ID format is not valid. - errGCSInvalidProjectID = fmt.Errorf("GCS project id is either empty or invalid") - - // Project ID not found - errGCSProjectIDNotFound = fmt.Errorf("Unknown project id") - - // Invalid format. - errGCSFormat = fmt.Errorf("Unknown format") -) - -const ( - // Path where multipart objects are saved. - // If we change the backend format we will use a different url path like /multipart/v2 - // but we will not migrate old data. - gcsMinioMultipartPathV1 = minio.GatewayMinioSysTmp + "multipart/v1" - - // Multipart meta file. - gcsMinioMultipartMeta = "gcs.json" - - // gcs.json version number - gcsMinioMultipartMetaCurrentVersion = "1" - - // token prefixed with GCS returned marker to differentiate - // from user supplied marker. - gcsTokenPrefix = "{minio}" - - // Maximum component object count to create a composite object. 
- // Refer https://cloud.google.com/storage/docs/composite-objects - gcsMaxComponents = 32 - - // Every 24 hours we scan minio.sys.tmp to delete expired multiparts in minio.sys.tmp - gcsCleanupInterval = time.Hour * 24 - - // The cleanup routine deletes files older than 2 weeks in minio.sys.tmp - gcsMultipartExpiry = time.Hour * 24 * 14 - - // Project ID key in credentials.json - gcsProjectIDKey = "project_id" -) - -func init() { - const gcsGatewayTemplate = `NAME: - {{.HelpName}} - {{.Usage}} - -USAGE: - {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [PROJECTID] -{{if .VisibleFlags}} -FLAGS: - {{range .VisibleFlags}}{{.}} - {{end}}{{end}} -PROJECTID: - optional GCS project-id expected GOOGLE_APPLICATION_CREDENTIALS env is not set - -GOOGLE_APPLICATION_CREDENTIALS: - path to credentials.json, generated it from here https://developers.google.com/identity/protocols/application-default-credentials - -EXAMPLES: - 1. Start minio gateway server for GCS backend - {{.Prompt}} {{.EnvVarSetCommand}} GOOGLE_APPLICATION_CREDENTIALS{{.AssignmentOperator}}/path/to/credentials.json - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_USER{{.AssignmentOperator}}accesskey - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_PASSWORD{{.AssignmentOperator}}secretkey - {{.Prompt}} {{.HelpName}} mygcsprojectid - - 2. 
Start minio gateway server for GCS backend with edge caching enabled - {{.Prompt}} {{.EnvVarSetCommand}} GOOGLE_APPLICATION_CREDENTIALS{{.AssignmentOperator}}/path/to/credentials.json - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_USER{{.AssignmentOperator}}accesskey - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_PASSWORD{{.AssignmentOperator}}secretkey - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4" - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*;*.png" - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_AFTER{{.AssignmentOperator}}3 - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_LOW{{.AssignmentOperator}}75 - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_HIGH{{.AssignmentOperator}}85 - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90 - {{.Prompt}} {{.HelpName}} mygcsprojectid -` - - minio.RegisterGatewayCommand(cli.Command{ - Name: minio.GCSBackendGateway, - Usage: "Google Cloud Storage", - Action: gcsGatewayMain, - CustomHelpTemplate: gcsGatewayTemplate, - HideHelpCommand: true, - }) -} - -// Handler for 'minio gateway gcs' command line. -func gcsGatewayMain(ctx *cli.Context) { - projectID := ctx.Args().First() - if projectID == "" && os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" { - logger.LogIf(minio.GlobalContext, errGCSProjectIDNotFound, logger.Application) - cli.ShowCommandHelpAndExit(ctx, minio.GCSBackendGateway, 1) - } - if projectID != "" && !isValidGCSProjectIDFormat(projectID) { - reqInfo := (&logger.ReqInfo{}).AppendTags("projectID", ctx.Args().First()) - contxt := logger.SetReqInfo(minio.GlobalContext, reqInfo) - logger.LogIf(contxt, errGCSInvalidProjectID, logger.Application) - cli.ShowCommandHelpAndExit(ctx, minio.GCSBackendGateway, 1) - } - - minio.StartGateway(ctx, &GCS{projectID}) -} - -// GCS implements Azure. 
-type GCS struct { - projectID string -} - -// Name returns the name of gcs ObjectLayer. -func (g *GCS) Name() string { - return minio.GCSBackendGateway -} - -// NewGatewayLayer returns gcs ObjectLayer. -func (g *GCS) NewGatewayLayer(creds madmin.Credentials) (minio.ObjectLayer, error) { - ctx := minio.GlobalContext - - var err error - if g.projectID == "" { - // If project ID is not provided on command line, we figure it out - // from the credentials.json file. - g.projectID, err = gcsParseProjectID(env.Get("GOOGLE_APPLICATION_CREDENTIALS", "")) - if err != nil { - return nil, err - } - } - - metrics := minio.NewMetrics() - - t := &minio.MetricsTransport{ - Transport: minio.NewGatewayHTTPTransport(), - Metrics: metrics, - } - - // Initialize a GCS client. - // Send user-agent in this format for Google to obtain usage insights while participating in the - // Google Cloud Technology Partners (https://cloud.google.com/partners/) - client, err := storage.NewClient(ctx, option.WithUserAgent(fmt.Sprintf("MinIO/%s (GPN:MinIO;)", minio.Version))) - if err != nil { - return nil, err - } - - gcs := &gcsGateway{ - client: client, - projectID: g.projectID, - metrics: metrics, - httpClient: &http.Client{ - Transport: t, - }, - } - - // Start background process to cleanup old files in minio.sys.tmp - go gcs.CleanupGCSMinioSysTmp(ctx) - return gcs, nil -} - -// Stored in gcs.json - Contents of this file is not used anywhere. It can be -// used for debugging purposes. -type gcsMultipartMetaV1 struct { - Version string `json:"version"` // Version number - Bucket string `json:"bucket"` // Bucket name - Object string `json:"object"` // Object name -} - -// Returns name of the multipart meta object. -func gcsMultipartMetaName(uploadID string) string { - return fmt.Sprintf("%s/%s/%s", gcsMinioMultipartPathV1, uploadID, gcsMinioMultipartMeta) -} - -// Returns name of the part object. 
-func gcsMultipartDataName(uploadID string, partNumber int, etag string) string { - return fmt.Sprintf("%s/%s/%05d.%s", gcsMinioMultipartPathV1, uploadID, partNumber, etag) -} - -// Convert MinIO errors to minio object layer errors. -func gcsToObjectError(err error, params ...string) error { - if err == nil { - return nil - } - - bucket := "" - object := "" - uploadID := "" - if len(params) >= 1 { - bucket = params[0] - } - if len(params) == 2 { - object = params[1] - } - if len(params) == 3 { - uploadID = params[2] - } - - // in some cases just a plain error is being returned - switch err.Error() { - case "storage: bucket doesn't exist": - err = minio.BucketNotFound{ - Bucket: bucket, - } - return err - case "storage: object doesn't exist": - if uploadID != "" { - err = minio.InvalidUploadID{ - UploadID: uploadID, - } - } else { - err = minio.ObjectNotFound{ - Bucket: bucket, - Object: object, - } - } - return err - } - - googleAPIErr, ok := err.(*googleapi.Error) - if !ok { - // We don't interpret non MinIO errors. As minio errors will - // have StatusCode to help to convert to object errors. - return err - } - - if len(googleAPIErr.Errors) == 0 { - return err - } - - reason := googleAPIErr.Errors[0].Reason - message := googleAPIErr.Errors[0].Message - - switch reason { - case "required": - // Anonymous users does not have storage.xyz access to project 123. - fallthrough - case "keyInvalid": - fallthrough - case "forbidden": - err = minio.PrefixAccessDenied{ - Bucket: bucket, - Object: object, - } - case "invalid": - err = minio.BucketNameInvalid{ - Bucket: bucket, - } - case "notFound": - if object != "" { - err = minio.ObjectNotFound{ - Bucket: bucket, - Object: object, - } - break - } - err = minio.BucketNotFound{Bucket: bucket} - case "conflict": - if message == "You already own this bucket. Please select another name." { - err = minio.BucketAlreadyOwnedByYou{Bucket: bucket} - break - } - if message == "Sorry, that name is not available. 
Please try a different one." { - err = minio.BucketAlreadyExists{Bucket: bucket} - break - } - err = minio.BucketNotEmpty{Bucket: bucket} - } - - return err -} - -// gcsProjectIDRegex defines a valid gcs project id format -var gcsProjectIDRegex = regexp.MustCompile("^[a-z][a-z0-9-]{5,29}$") - -// isValidGCSProjectIDFormat - checks if a given project id format is valid or not. -// Project IDs must start with a lowercase letter and can have lowercase ASCII letters, -// digits or hyphens. Project IDs must be between 6 and 30 characters. -// Ref: https://cloud.google.com/resource-manager/reference/rest/v1/projects#Project (projectId section) -func isValidGCSProjectIDFormat(projectID string) bool { - // Checking projectID format - return gcsProjectIDRegex.MatchString(projectID) -} - -// gcsGateway - Implements gateway for MinIO and GCS compatible object storage servers. -type gcsGateway struct { - minio.GatewayUnsupported - client *storage.Client - httpClient *http.Client - metrics *minio.BackendMetrics - projectID string -} - -// Returns projectID from the GOOGLE_APPLICATION_CREDENTIALS file. -func gcsParseProjectID(credsFile string) (projectID string, err error) { - contents, err := ioutil.ReadFile(credsFile) - if err != nil { - return projectID, err - } - googleCreds := make(map[string]string) - if err = json.Unmarshal(contents, &googleCreds); err != nil { - return projectID, err - } - return googleCreds[gcsProjectIDKey], err -} - -// GetMetrics returns this gateway's metrics -func (l *gcsGateway) GetMetrics(ctx context.Context) (*minio.BackendMetrics, error) { - return l.metrics, nil -} - -// Cleanup old files in minio.sys.tmp of the given bucket. 
-func (l *gcsGateway) CleanupGCSMinioSysTmpBucket(ctx context.Context, bucket string) { - it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: minio.GatewayMinioSysTmp, Versions: false}) - for { - attrs, err := it.Next() - if err != nil { - if err != iterator.Done { - reqInfo := &logger.ReqInfo{BucketName: bucket} - ctx := logger.SetReqInfo(minio.GlobalContext, reqInfo) - logger.LogIf(ctx, err) - } - return - } - if time.Since(attrs.Updated) > gcsMultipartExpiry { - // Delete files older than 2 weeks. - err := l.client.Bucket(bucket).Object(attrs.Name).Delete(ctx) - if err != nil { - reqInfo := &logger.ReqInfo{BucketName: bucket, ObjectName: attrs.Name} - ctx := logger.SetReqInfo(minio.GlobalContext, reqInfo) - logger.LogIf(ctx, err) - return - } - } - } -} - -// Cleanup old files in minio.sys.tmp of all buckets. -func (l *gcsGateway) CleanupGCSMinioSysTmp(ctx context.Context) { - for { - it := l.client.Buckets(ctx, l.projectID) - for { - attrs, err := it.Next() - if err != nil { - break - } - l.CleanupGCSMinioSysTmpBucket(ctx, attrs.Name) - } - // Run the cleanup loop every 1 day. - time.Sleep(gcsCleanupInterval) - } -} - -// Shutdown - save any gateway metadata to disk -// if necessary and reload upon next restart. -func (l *gcsGateway) Shutdown(ctx context.Context) error { - return nil -} - -// StorageInfo - Not relevant to GCS backend. -func (l *gcsGateway) StorageInfo(ctx context.Context) (si minio.StorageInfo, _ []error) { - si.Backend.Type = madmin.Gateway - si.Backend.GatewayOnline = minio.IsBackendOnline(ctx, "storage.googleapis.com:443") - return si, nil -} - -// MakeBucketWithLocation - Create a new container on GCS backend. 
-func (l *gcsGateway) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.BucketOptions) error { - if opts.LockEnabled || opts.VersioningEnabled { - return minio.NotImplemented{} - } - - bkt := l.client.Bucket(bucket) - - // we'll default to the us multi-region in case of us-east-1 - location := opts.Location - if location == "us-east-1" { - location = "us" - } - - err := bkt.Create(ctx, l.projectID, &storage.BucketAttrs{ - Location: location, - }) - logger.LogIf(ctx, err) - return gcsToObjectError(err, bucket) -} - -// GetBucketInfo - Get bucket metadata.. -func (l *gcsGateway) GetBucketInfo(ctx context.Context, bucket string) (minio.BucketInfo, error) { - attrs, err := l.client.Bucket(bucket).Attrs(ctx) - if err != nil { - logger.LogIf(ctx, err) - return minio.BucketInfo{}, gcsToObjectError(err, bucket) - } - - return minio.BucketInfo{ - Name: attrs.Name, - Created: attrs.Created, - }, nil -} - -// ListBuckets lists all buckets under your project-id on GCS. -func (l *gcsGateway) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) { - it := l.client.Buckets(ctx, l.projectID) - - // Iterate and capture all the buckets. - for { - attrs, ierr := it.Next() - if ierr == iterator.Done { - break - } - - if ierr != nil { - return buckets, gcsToObjectError(ierr) - } - - buckets = append(buckets, minio.BucketInfo{ - Name: attrs.Name, - Created: attrs.Created, - }) - } - - return buckets, nil -} - -// DeleteBucket delete a bucket on GCS. -func (l *gcsGateway) DeleteBucket(ctx context.Context, bucket string, opts minio.DeleteBucketOptions) error { - itObject := l.client.Bucket(bucket).Objects(ctx, &storage.Query{ - Delimiter: minio.SlashSeparator, - Versions: false, - }) - // We list the bucket and if we find any objects we return BucketNotEmpty error. If we - // find only "minio.sys.tmp/" then we remove it before deleting the bucket. 
- gcsMinioPathFound := false - nonGCSMinioPathFound := false - for { - objAttrs, err := itObject.Next() - if err == iterator.Done { - break - } - if err != nil { - logger.LogIf(ctx, err) - return gcsToObjectError(err) - } - if objAttrs.Prefix == minio.GatewayMinioSysTmp { - gcsMinioPathFound = true - continue - } - nonGCSMinioPathFound = true - break - } - if nonGCSMinioPathFound { - logger.LogIf(ctx, minio.BucketNotEmpty{}) - return gcsToObjectError(minio.BucketNotEmpty{}) - } - if gcsMinioPathFound { - // Remove minio.sys.tmp before deleting the bucket. - itObject = l.client.Bucket(bucket).Objects(ctx, &storage.Query{Versions: false, Prefix: minio.GatewayMinioSysTmp}) - for { - objAttrs, err := itObject.Next() - if err == iterator.Done { - break - } - if err != nil { - logger.LogIf(ctx, err) - return gcsToObjectError(err) - } - err = l.client.Bucket(bucket).Object(objAttrs.Name).Delete(ctx) - if err != nil { - logger.LogIf(ctx, err) - return gcsToObjectError(err) - } - } - } - err := l.client.Bucket(bucket).Delete(ctx) - logger.LogIf(ctx, err) - return gcsToObjectError(err, bucket) -} - -func toGCSPageToken(name string) string { - length := uint16(len(name)) - - b := []byte{ - 0xa, - byte(length & 0xFF), - } - - length >>= 7 - if length > 0 { - b = append(b, byte(length&0xFF)) - } - - b = append(b, []byte(name)...) - - return base64.StdEncoding.EncodeToString(b) -} - -// Returns true if marker was returned by GCS, i.e prefixed with -// ##minio by minio gcs minio. 
-func isGCSMarker(marker string) bool { - return strings.HasPrefix(marker, gcsTokenPrefix) -} - -// ListObjects - lists all blobs in GCS bucket filtered by prefix -func (l *gcsGateway) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (minio.ListObjectsInfo, error) { - if maxKeys == 0 { - return minio.ListObjectsInfo{}, nil - } - - it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{ - Delimiter: delimiter, - Prefix: prefix, - Versions: false, - }) - - // To accommodate S3-compatible applications using - // ListObjectsV1 to use object keys as markers to control the - // listing of objects, we use the following encoding scheme to - // distinguish between GCS continuation tokens and application - // supplied markers. - // - // - NextMarker in ListObjectsV1 response is constructed by - // prefixing "{minio}" to the GCS continuation token, - // e.g, "{minio}CgRvYmoz" - // - // - Application supplied markers are transformed to a - // GCS continuation token. - - // If application is using GCS continuation token we should - // strip the gcsTokenPrefix we added. - token := "" - if marker != "" { - if isGCSMarker(marker) { - token = strings.TrimPrefix(marker, gcsTokenPrefix) - } else { - token = toGCSPageToken(marker) - } - } - nextMarker := "" - - var prefixes []string - var objects []minio.ObjectInfo - var nextPageToken string - var err error - - pager := iterator.NewPager(it, maxKeys, token) - for { - gcsObjects := make([]*storage.ObjectAttrs, 0) - nextPageToken, err = pager.NextPage(&gcsObjects) - if err != nil { - logger.LogIf(ctx, err) - return minio.ListObjectsInfo{}, gcsToObjectError(err, bucket, prefix) - } - - for _, attrs := range gcsObjects { - - // Due to minio.GatewayMinioSysTmp keys being skipped, the number of objects + prefixes - // returned may not total maxKeys. This behavior is compatible with the S3 spec which - // allows the response to include less keys than maxKeys. 
- if attrs.Prefix == minio.GatewayMinioSysTmp { - // We don't return our metadata prefix. - continue - } - if !strings.HasPrefix(prefix, minio.GatewayMinioSysTmp) { - // If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries. - // But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/ - // which will be helpful to observe the "directory structure" for debugging purposes. - if strings.HasPrefix(attrs.Prefix, minio.GatewayMinioSysTmp) || - strings.HasPrefix(attrs.Name, minio.GatewayMinioSysTmp) { - continue - } - } - - if attrs.Prefix != "" { - prefixes = append(prefixes, attrs.Prefix) - } else { - objects = append(objects, fromGCSAttrsToObjectInfo(attrs)) - } - - // The NextMarker property should only be set in the response if a delimiter is used - if delimiter != "" { - if attrs.Prefix > nextMarker { - nextMarker = attrs.Prefix - } else if attrs.Name > nextMarker { - nextMarker = attrs.Name - } - } - } - - // Exit the loop if at least one item can be returned from - // the current page or there are no more pages available - if nextPageToken == "" || len(prefixes)+len(objects) > 0 { - break - } - } - - if nextPageToken == "" { - nextMarker = "" - } else if nextMarker != "" { - nextMarker = gcsTokenPrefix + toGCSPageToken(nextMarker) - } - - return minio.ListObjectsInfo{ - IsTruncated: nextPageToken != "", - NextMarker: nextMarker, - Prefixes: prefixes, - Objects: objects, - }, nil -} - -// ListObjectsV2 - lists all blobs in GCS bucket filtered by prefix -func (l *gcsGateway) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (minio.ListObjectsV2Info, error) { - if maxKeys == 0 { - return minio.ListObjectsV2Info{ContinuationToken: continuationToken}, nil - } - - it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{ - Delimiter: delimiter, - Prefix: prefix, - Versions: false, - }) - - token := continuationToken - if token 
== "" && startAfter != "" { - token = toGCSPageToken(startAfter) - } - - var prefixes []string - var objects []minio.ObjectInfo - var nextPageToken string - var err error - - pager := iterator.NewPager(it, maxKeys, token) - for { - gcsObjects := make([]*storage.ObjectAttrs, 0) - nextPageToken, err = pager.NextPage(&gcsObjects) - if err != nil { - logger.LogIf(ctx, err) - return minio.ListObjectsV2Info{}, gcsToObjectError(err, bucket, prefix) - } - - for _, attrs := range gcsObjects { - - // Due to minio.GatewayMinioSysTmp keys being skipped, the number of objects + prefixes - // returned may not total maxKeys. This behavior is compatible with the S3 spec which - // allows the response to include less keys than maxKeys. - if attrs.Prefix == minio.GatewayMinioSysTmp { - // We don't return our metadata prefix. - continue - } - if !strings.HasPrefix(prefix, minio.GatewayMinioSysTmp) { - // If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries. - // But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/ - // which will be helpful to observe the "directory structure" for debugging purposes. 
- if strings.HasPrefix(attrs.Prefix, minio.GatewayMinioSysTmp) || - strings.HasPrefix(attrs.Name, minio.GatewayMinioSysTmp) { - continue - } - } - - if attrs.Prefix != "" { - prefixes = append(prefixes, attrs.Prefix) - } else { - objects = append(objects, fromGCSAttrsToObjectInfo(attrs)) - } - } - - // Exit the loop if at least one item can be returned from - // the current page or there are no more pages available - if nextPageToken == "" || len(prefixes)+len(objects) > 0 { - break - } - } - - return minio.ListObjectsV2Info{ - IsTruncated: nextPageToken != "", - ContinuationToken: continuationToken, - NextContinuationToken: nextPageToken, - Prefixes: prefixes, - Objects: objects, - }, nil -} - -// GetObjectNInfo - returns object info and locked object ReadCloser -func (l *gcsGateway) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) { - var objInfo minio.ObjectInfo - objInfo, err = l.GetObjectInfo(ctx, bucket, object, opts) - if err != nil { - return nil, err - } - - var startOffset, length int64 - startOffset, length, err = rs.GetOffsetLength(objInfo.Size) - if err != nil { - return nil, err - } - - pr, pw := io.Pipe() - go func() { - err := l.getObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, opts) - pw.CloseWithError(err) - }() - // Setup cleanup function to cause the above go-routine to - // exit in case of partial read - pipeCloser := func() { pr.Close() } - return minio.NewGetObjectReaderFromReader(pr, objInfo, opts, pipeCloser) -} - -// GetObject - reads an object from GCS. Supports additional -// parameters like offset and length which are synonymous with -// HTTP Range requests. -// -// startOffset indicates the starting read location of the object. -// length indicates the total length of the object. 
-func (l *gcsGateway) getObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error { - // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, - // otherwise gcs will just return object not exist in case of non-existing bucket - if _, err := l.client.Bucket(bucket).Attrs(ctx); err != nil { - logger.LogIf(ctx, err, logger.Application) - return gcsToObjectError(err, bucket) - } - - // GCS storage decompresses a gzipped object by default and returns the data. - // Refer to https://cloud.google.com/storage/docs/transcoding#decompressive_transcoding - // Need to set `Accept-Encoding` header to `gzip` when issuing a GetObject call, to be able - // to download the object in compressed state. - // Calling ReadCompressed with true accomplishes that. - object := l.client.Bucket(bucket).Object(key).ReadCompressed(true) - - r, err := object.NewRangeReader(ctx, startOffset, length) - if err != nil { - logger.LogIf(ctx, err, logger.Application) - return gcsToObjectError(err, bucket, key) - } - defer r.Close() - - if _, err := io.Copy(writer, r); err != nil { - logger.LogIf(ctx, err) - return gcsToObjectError(err, bucket, key) - } - - return nil -} - -// fromGCSAttrsToObjectInfo converts GCS BucketAttrs to gateway ObjectInfo -func fromGCSAttrsToObjectInfo(attrs *storage.ObjectAttrs) minio.ObjectInfo { - // All google cloud storage objects have a CRC32c hash, whereas composite objects may not have a MD5 hash - // Refer https://cloud.google.com/storage/docs/hashes-etags. 
Use CRC32C for ETag - metadata := make(map[string]string) - var ( - expiry time.Time - e error - ) - for k, v := range attrs.Metadata { - k = http.CanonicalHeaderKey(k) - // Translate the GCS custom metadata prefix - if strings.HasPrefix(k, "X-Goog-Meta-") { - k = strings.Replace(k, "X-Goog-Meta-", "X-Amz-Meta-", 1) - } - if k == "Expires" { - if expiry, e = time.Parse(http.TimeFormat, v); e == nil { - expiry = expiry.UTC() - } - continue - } - metadata[k] = v - } - if attrs.ContentType != "" { - metadata["Content-Type"] = attrs.ContentType - } - if attrs.ContentEncoding != "" { - metadata["Content-Encoding"] = attrs.ContentEncoding - } - if attrs.CacheControl != "" { - metadata["Cache-Control"] = attrs.CacheControl - } - if attrs.ContentDisposition != "" { - metadata["Content-Disposition"] = attrs.ContentDisposition - } - if attrs.ContentLanguage != "" { - metadata["Content-Language"] = attrs.ContentLanguage - } - - etag := hex.EncodeToString(attrs.MD5) - if etag == "" { - etag = minio.ToS3ETag(fmt.Sprintf("%d", attrs.CRC32C)) - } - return minio.ObjectInfo{ - Name: attrs.Name, - Bucket: attrs.Bucket, - ModTime: attrs.Updated, - Size: attrs.Size, - ETag: etag, - UserDefined: metadata, - ContentType: attrs.ContentType, - ContentEncoding: attrs.ContentEncoding, - Expires: expiry, - } -} - -// applyMetadataToGCSAttrs applies metadata to a GCS ObjectAttrs instance -func applyMetadataToGCSAttrs(metadata map[string]string, attrs *storage.ObjectAttrs) { - attrs.Metadata = make(map[string]string) - for k, v := range metadata { - k = http.CanonicalHeaderKey(k) - switch { - case strings.HasPrefix(k, "X-Amz-Meta-"): - // Translate the S3 user-defined metadata prefix - k = strings.Replace(k, "X-Amz-Meta-", "x-goog-meta-", 1) - attrs.Metadata[k] = v - case k == "Content-Type": - attrs.ContentType = v - case k == "Content-Encoding": - attrs.ContentEncoding = v - case k == "Cache-Control": - attrs.CacheControl = v - case k == "Content-Disposition": - attrs.ContentDisposition = v 
- case k == "Content-Language": - attrs.ContentLanguage = v - } - } -} - -// GetObjectInfo - reads object info and replies back ObjectInfo -func (l *gcsGateway) GetObjectInfo(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) { - // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, - // otherwise gcs will just return object not exist in case of non-existing bucket - if _, err := l.client.Bucket(bucket).Attrs(ctx); err != nil { - logger.LogIf(ctx, err, logger.Application) - return minio.ObjectInfo{}, gcsToObjectError(err, bucket) - } - - attrs, err := l.client.Bucket(bucket).Object(object).Attrs(ctx) - if err != nil { - logger.LogIf(ctx, err) - return minio.ObjectInfo{}, gcsToObjectError(err, bucket, object) - } - - return fromGCSAttrsToObjectInfo(attrs), nil -} - -// PutObject - Create a new object with the incoming data, -func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, r *minio.PutObjReader, opts minio.ObjectOptions) (minio.ObjectInfo, error) { - data := r.Reader - - nctx, cancel := context.WithCancel(ctx) - - defer cancel() - - // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, - // otherwise gcs will just return object not exist in case of non-existing bucket - if _, err := l.client.Bucket(bucket).Attrs(nctx); err != nil { - logger.LogIf(ctx, err, logger.Application) - return minio.ObjectInfo{}, gcsToObjectError(err, bucket) - } - - object := l.client.Bucket(bucket).Object(key) - - w := object.NewWriter(nctx) - - // Disable "chunked" uploading in GCS client if the size of the data to be uploaded is below - // the current chunk-size of the writer. This avoids an unnecessary memory allocation. - if data.Size() < int64(w.ChunkSize) { - w.ChunkSize = 0 - } - applyMetadataToGCSAttrs(opts.UserDefined, &w.ObjectAttrs) - - if _, err := io.Copy(w, data); err != nil { - // Close the object writer upon error. 
- logger.LogIf(ctx, err) - return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) - } - - // Close the object writer upon success. - if err := w.Close(); err != nil { - logger.LogIf(ctx, err) - return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) - } - - return fromGCSAttrsToObjectInfo(w.Attrs()), nil -} - -// CopyObject - Copies a blob from source container to destination container. -func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string, - srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions, -) (minio.ObjectInfo, error) { - if srcOpts.CheckPrecondFn != nil && srcOpts.CheckPrecondFn(srcInfo) { - return minio.ObjectInfo{}, minio.PreConditionFailed{} - } - src := l.client.Bucket(srcBucket).Object(srcObject) - dst := l.client.Bucket(destBucket).Object(destObject) - - copier := dst.CopierFrom(src) - applyMetadataToGCSAttrs(srcInfo.UserDefined, &copier.ObjectAttrs) - - attrs, err := copier.Run(ctx) - if err != nil { - logger.LogIf(ctx, err) - return minio.ObjectInfo{}, gcsToObjectError(err, destBucket, destObject) - } - - return fromGCSAttrsToObjectInfo(attrs), nil -} - -// DeleteObject - Deletes a blob in bucket -func (l *gcsGateway) DeleteObject(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) { - err := l.client.Bucket(bucket).Object(object).Delete(ctx) - if err != nil { - logger.LogIf(ctx, err) - return minio.ObjectInfo{}, gcsToObjectError(err, bucket, object) - } - - return minio.ObjectInfo{ - Bucket: bucket, - Name: object, - }, nil -} - -func (l *gcsGateway) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) { - errs := make([]error, len(objects)) - dobjects := make([]minio.DeletedObject, len(objects)) - for idx, object := range objects { - _, errs[idx] = l.DeleteObject(ctx, bucket, object.ObjectName, opts) - if errs[idx] == 
nil { - dobjects[idx] = minio.DeletedObject{ - ObjectName: object.ObjectName, - } - } - } - return dobjects, errs -} - -// NewMultipartUpload - upload object in multiple parts -func (l *gcsGateway) NewMultipartUpload(ctx context.Context, bucket string, key string, o minio.ObjectOptions) (uploadID string, err error) { - // generate new uploadid - uploadID = minio.MustGetUUID() - - // generate name for part zero - meta := gcsMultipartMetaName(uploadID) - - w := l.client.Bucket(bucket).Object(meta).NewWriter(ctx) - defer w.Close() - - applyMetadataToGCSAttrs(o.UserDefined, &w.ObjectAttrs) - - if err = json.NewEncoder(w).Encode(gcsMultipartMetaV1{ - gcsMinioMultipartMetaCurrentVersion, - bucket, - key, - }); err != nil { - logger.LogIf(ctx, err) - return "", gcsToObjectError(err, bucket, key) - } - return uploadID, nil -} - -// ListMultipartUploads - lists the (first) multipart upload for an object -// matched _exactly_ by the prefix -func (l *gcsGateway) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (minio.ListMultipartsInfo, error) { - // List objects under /gcsMinioMultipartPathV1 - it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{ - Prefix: gcsMinioMultipartPathV1, - }) - - var uploads []minio.MultipartInfo - - for { - attrs, err := it.Next() - if err == iterator.Done { - break - } - - if err != nil { - logger.LogIf(ctx, err) - return minio.ListMultipartsInfo{ - KeyMarker: keyMarker, - UploadIDMarker: uploadIDMarker, - MaxUploads: maxUploads, - Prefix: prefix, - Delimiter: delimiter, - }, gcsToObjectError(err) - } - - // Skip entries other than gcs.json - if !strings.HasSuffix(attrs.Name, gcsMinioMultipartMeta) { - continue - } - - // Extract multipart upload information from gcs.json - obj := l.client.Bucket(bucket).Object(attrs.Name) - objReader, rErr := obj.NewReader(ctx) - if rErr != nil { - logger.LogIf(ctx, rErr) - return minio.ListMultipartsInfo{}, 
rErr - } - defer objReader.Close() - - var mpMeta gcsMultipartMetaV1 - dec := json.NewDecoder(objReader) - decErr := dec.Decode(&mpMeta) - if decErr != nil { - logger.LogIf(ctx, decErr) - return minio.ListMultipartsInfo{}, decErr - } - - if prefix == mpMeta.Object { - // Extract uploadId - // E.g minio.sys.tmp/multipart/v1/d063ad89-fdc4-4ea3-a99e-22dba98151f5/gcs.json - components := strings.SplitN(attrs.Name, minio.SlashSeparator, 5) - if len(components) != 5 { - compErr := errors.New("Invalid multipart upload format") - logger.LogIf(ctx, compErr) - return minio.ListMultipartsInfo{}, compErr - } - upload := minio.MultipartInfo{ - Object: mpMeta.Object, - UploadID: components[3], - Initiated: attrs.Created, - } - uploads = append(uploads, upload) - } - } - - return minio.ListMultipartsInfo{ - KeyMarker: keyMarker, - UploadIDMarker: uploadIDMarker, - MaxUploads: maxUploads, - Prefix: prefix, - Delimiter: delimiter, - Uploads: uploads, - NextKeyMarker: "", - NextUploadIDMarker: "", - IsTruncated: false, - }, nil -} - -// Checks if minio.sys.tmp/multipart/v1//gcs.json exists, returns -// an object layer compatible error upon any error. -func (l *gcsGateway) checkUploadIDExists(ctx context.Context, bucket string, key string, uploadID string) error { - _, err := l.client.Bucket(bucket).Object(gcsMultipartMetaName(uploadID)).Attrs(ctx) - logger.LogIf(ctx, err) - return gcsToObjectError(err, bucket, key, uploadID) -} - -// PutObjectPart puts a part of object in bucket -func (l *gcsGateway) PutObjectPart(ctx context.Context, bucket string, key string, uploadID string, partNumber int, r *minio.PutObjReader, opts minio.ObjectOptions) (minio.PartInfo, error) { - data := r.Reader - if err := l.checkUploadIDExists(ctx, bucket, key, uploadID); err != nil { - return minio.PartInfo{}, err - } - etag := data.MD5HexString() - if etag == "" { - // Generate random ETag. 
- etag = minio.GenETag() - } - object := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, partNumber, etag)) - w := object.NewWriter(ctx) - // Disable "chunked" uploading in GCS client. If enabled, it can cause a corner case - // where it tries to upload 0 bytes in the last chunk and get error from server. - w.ChunkSize = 0 - if _, err := io.Copy(w, data); err != nil { - // Make sure to close object writer upon error. - w.Close() - logger.LogIf(ctx, err) - return minio.PartInfo{}, gcsToObjectError(err, bucket, key) - } - // Make sure to close the object writer upon success. - if err := w.Close(); err != nil { - logger.LogIf(ctx, err) - return minio.PartInfo{}, gcsToObjectError(err, bucket, key) - } - return minio.PartInfo{ - PartNumber: partNumber, - ETag: etag, - LastModified: minio.UTCNow(), - Size: data.Size(), - }, nil -} - -// gcsGetPartInfo returns PartInfo of a given object part -func gcsGetPartInfo(ctx context.Context, attrs *storage.ObjectAttrs) (minio.PartInfo, error) { - components := strings.SplitN(attrs.Name, minio.SlashSeparator, 5) - if len(components) != 5 { - logger.LogIf(ctx, errors.New("Invalid multipart upload format")) - return minio.PartInfo{}, errors.New("Invalid multipart upload format") - } - - partComps := strings.SplitN(components[4], ".", 2) - if len(partComps) != 2 { - logger.LogIf(ctx, errors.New("Invalid multipart part format")) - return minio.PartInfo{}, errors.New("Invalid multipart part format") - } - - partNum, pErr := strconv.Atoi(partComps[0]) - if pErr != nil { - logger.LogIf(ctx, pErr) - return minio.PartInfo{}, errors.New("Invalid part number") - } - - return minio.PartInfo{ - PartNumber: partNum, - LastModified: attrs.Updated, - Size: attrs.Size, - ETag: partComps[1], - }, nil -} - -// GetMultipartInfo returns multipart info of the uploadId of the object -func (l *gcsGateway) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts minio.ObjectOptions) (result minio.MultipartInfo, err error) { 
- result.Bucket = bucket - result.Object = object - result.UploadID = uploadID - return result, nil -} - -// ListObjectParts returns all object parts for specified object in specified bucket -func (l *gcsGateway) ListObjectParts(ctx context.Context, bucket string, key string, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (minio.ListPartsInfo, error) { - it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{ - Prefix: path.Join(gcsMinioMultipartPathV1, uploadID), - }) - - var ( - count int - partInfos []minio.PartInfo - ) - - isTruncated := true - for count < maxParts { - attrs, err := it.Next() - if err == iterator.Done { - isTruncated = false - break - } - - if err != nil { - logger.LogIf(ctx, err) - return minio.ListPartsInfo{}, gcsToObjectError(err) - } - - if strings.HasSuffix(attrs.Name, gcsMinioMultipartMeta) { - continue - } - - partInfo, pErr := gcsGetPartInfo(ctx, attrs) - if pErr != nil { - logger.LogIf(ctx, pErr) - return minio.ListPartsInfo{}, pErr - } - - if partInfo.PartNumber <= partNumberMarker { - continue - } - - partInfos = append(partInfos, partInfo) - count++ - } - - nextPartNumberMarker := 0 - if isTruncated { - nextPartNumberMarker = partInfos[maxParts-1].PartNumber - } - - return minio.ListPartsInfo{ - Bucket: bucket, - Object: key, - UploadID: uploadID, - PartNumberMarker: partNumberMarker, - NextPartNumberMarker: nextPartNumberMarker, - MaxParts: maxParts, - Parts: partInfos, - IsTruncated: isTruncated, - }, nil -} - -// Called by AbortMultipartUpload and CompleteMultipartUpload for cleaning up. 
-func (l *gcsGateway) cleanupMultipartUpload(ctx context.Context, bucket, key, uploadID string) error { - prefix := fmt.Sprintf("%s/%s/", gcsMinioMultipartPathV1, uploadID) - - // iterate through all parts and delete them - it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: prefix, Versions: false}) - - for { - attrs, err := it.Next() - if err == iterator.Done { - break - } - if err != nil { - logger.LogIf(ctx, err) - return gcsToObjectError(err, bucket, key) - } - - object := l.client.Bucket(bucket).Object(attrs.Name) - // Ignore the error as parallel AbortMultipartUpload might have deleted it. - object.Delete(ctx) - } - - return nil -} - -// AbortMultipartUpload aborts a ongoing multipart upload -func (l *gcsGateway) AbortMultipartUpload(ctx context.Context, bucket string, key string, uploadID string, opts minio.ObjectOptions) error { - if err := l.checkUploadIDExists(ctx, bucket, key, uploadID); err != nil { - return err - } - return l.cleanupMultipartUpload(ctx, bucket, key, uploadID) -} - -// CompleteMultipartUpload completes ongoing multipart upload and finalizes object -// Note that there is a limit (currently 32) to the number of components that can -// be composed in a single operation. There is a per-project rate limit (currently 200) -// to the number of source objects you can compose per second. 
-func (l *gcsGateway) CompleteMultipartUpload(ctx context.Context, bucket string, key string, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (minio.ObjectInfo, error) { - meta := gcsMultipartMetaName(uploadID) - object := l.client.Bucket(bucket).Object(meta) - - partZeroAttrs, err := object.Attrs(ctx) - if err != nil { - logger.LogIf(ctx, err) - return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key, uploadID) - } - - r, err := object.NewReader(ctx) - if err != nil { - logger.LogIf(ctx, err) - return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) - } - defer r.Close() - - // Check version compatibility of the meta file before compose() - multipartMeta := gcsMultipartMetaV1{} - if err = json.NewDecoder(r).Decode(&multipartMeta); err != nil { - logger.LogIf(ctx, err) - return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) - } - - if multipartMeta.Version != gcsMinioMultipartMetaCurrentVersion { - logger.LogIf(ctx, errGCSFormat) - return minio.ObjectInfo{}, gcsToObjectError(errGCSFormat, bucket, key) - } - - // Validate if the gcs.json stores valid entries for the bucket and key. - if multipartMeta.Bucket != bucket || multipartMeta.Object != key { - return minio.ObjectInfo{}, gcsToObjectError(minio.InvalidUploadID{ - UploadID: uploadID, - }, bucket, key) - } - - var parts []*storage.ObjectHandle - partSizes := make([]int64, len(uploadedParts)) - for i, uploadedPart := range uploadedParts { - parts = append(parts, l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, - uploadedPart.PartNumber, uploadedPart.ETag))) - partAttr, pErr := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, uploadedPart.PartNumber, uploadedPart.ETag)).Attrs(ctx) - if pErr != nil { - logger.LogIf(ctx, pErr) - return minio.ObjectInfo{}, gcsToObjectError(pErr, bucket, key, uploadID) - } - partSizes[i] = partAttr.Size - } - - // Error out if parts except last part sizing < 5MiB. 
- for i, size := range partSizes[:len(partSizes)-1] { - if size < 5*humanize.MiByte { - logger.LogIf(ctx, minio.PartTooSmall{ - PartNumber: uploadedParts[i].PartNumber, - PartSize: size, - PartETag: uploadedParts[i].ETag, - }) - return minio.ObjectInfo{}, minio.PartTooSmall{ - PartNumber: uploadedParts[i].PartNumber, - PartSize: size, - PartETag: uploadedParts[i].ETag, - } - } - } - - // Returns name of the composed object. - gcsMultipartComposeName := func(uploadID string, composeNumber int) string { - return fmt.Sprintf("%s/tmp/%s/composed-object-%05d", minio.GatewayMinioSysTmp, uploadID, composeNumber) - } - - composeCount := int(math.Ceil(float64(len(parts)) / float64(gcsMaxComponents))) - if composeCount > 1 { - // Create composes of every 32 parts. - composeParts := make([]*storage.ObjectHandle, composeCount) - for i := 0; i < composeCount; i++ { - // Create 'composed-object-N' using next 32 parts. - composeParts[i] = l.client.Bucket(bucket).Object(gcsMultipartComposeName(uploadID, i)) - start := i * gcsMaxComponents - end := start + gcsMaxComponents - if end > len(parts) { - end = len(parts) - } - - composer := composeParts[i].ComposerFrom(parts[start:end]...) - composer.ContentType = partZeroAttrs.ContentType - composer.Metadata = partZeroAttrs.Metadata - - if _, err = composer.Run(ctx); err != nil { - logger.LogIf(ctx, err) - return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) - } - } - - // As composes are successfully created, final object needs to be created using composes. - parts = composeParts - } - - composer := l.client.Bucket(bucket).Object(key).ComposerFrom(parts...) 
- composer.ContentType = partZeroAttrs.ContentType - composer.ContentEncoding = partZeroAttrs.ContentEncoding - composer.CacheControl = partZeroAttrs.CacheControl - composer.ContentDisposition = partZeroAttrs.ContentDisposition - composer.ContentLanguage = partZeroAttrs.ContentLanguage - composer.Metadata = partZeroAttrs.Metadata - attrs, err := composer.Run(ctx) - if err != nil { - logger.LogIf(ctx, err) - return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) - } - if err = l.cleanupMultipartUpload(ctx, bucket, key, uploadID); err != nil { - return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) - } - return fromGCSAttrsToObjectInfo(attrs), nil -} - -// SetBucketPolicy - Set policy on bucket -func (l *gcsGateway) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error { - policyInfo, err := minio.PolicyToBucketAccessPolicy(bucketPolicy) - if err != nil { - logger.LogIf(ctx, err) - return gcsToObjectError(err, bucket) - } - - var policies []minio.BucketAccessPolicy - for prefix, policy := range miniogopolicy.GetPolicies(policyInfo.Statements, bucket, "") { - policies = append(policies, minio.BucketAccessPolicy{ - Prefix: prefix, - Policy: policy, - }) - } - - prefix := bucket + "/*" // For all objects inside the bucket. 
- - if len(policies) != 1 { - logger.LogIf(ctx, minio.NotImplemented{}) - return minio.NotImplemented{} - } - if policies[0].Prefix != prefix { - logger.LogIf(ctx, minio.NotImplemented{}) - return minio.NotImplemented{} - } - - acl := l.client.Bucket(bucket).ACL() - if policies[0].Policy == miniogopolicy.BucketPolicyNone { - if err := acl.Delete(ctx, storage.AllUsers); err != nil { - logger.LogIf(ctx, err) - return gcsToObjectError(err, bucket) - } - return nil - } - - var role storage.ACLRole - switch policies[0].Policy { - case miniogopolicy.BucketPolicyReadOnly: - role = storage.RoleReader - case miniogopolicy.BucketPolicyWriteOnly: - role = storage.RoleWriter - default: - logger.LogIf(ctx, minio.NotImplemented{}) - return minio.NotImplemented{} - } - - if err := acl.Set(ctx, storage.AllUsers, role); err != nil { - logger.LogIf(ctx, err) - return gcsToObjectError(err, bucket) - } - - return nil -} - -// GetBucketPolicy - Get policy on bucket -func (l *gcsGateway) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) { - rules, err := l.client.Bucket(bucket).ACL().List(ctx) - if err != nil { - return nil, gcsToObjectError(err, bucket) - } - - var readOnly, writeOnly bool - for _, r := range rules { - if r.Entity != storage.AllUsers || r.Role == storage.RoleOwner { - continue - } - - switch r.Role { - case storage.RoleReader: - readOnly = true - case storage.RoleWriter: - writeOnly = true - } - } - - actionSet := policy.NewActionSet() - if readOnly { - actionSet.Add(policy.GetBucketLocationAction) - actionSet.Add(policy.ListBucketAction) - actionSet.Add(policy.GetObjectAction) - } - if writeOnly { - actionSet.Add(policy.GetBucketLocationAction) - actionSet.Add(policy.ListBucketMultipartUploadsAction) - actionSet.Add(policy.AbortMultipartUploadAction) - actionSet.Add(policy.DeleteObjectAction) - actionSet.Add(policy.ListMultipartUploadPartsAction) - actionSet.Add(policy.PutObjectAction) - } - - // Return NoSuchBucketPolicy error, when policy 
is not set - if len(actionSet) == 0 { - return nil, gcsToObjectError(minio.BucketPolicyNotFound{}, bucket) - } - - return &policy.Policy{ - Version: policy.DefaultVersion, - Statements: []policy.Statement{ - policy.NewStatement( - "", - policy.Allow, - policy.NewPrincipal("*"), - actionSet, - policy.NewResourceSet( - policy.NewResource(bucket, ""), - policy.NewResource(bucket, "*"), - ), - condition.NewFunctions(), - ), - }, - }, nil -} - -// DeleteBucketPolicy - Delete all policies on bucket -func (l *gcsGateway) DeleteBucketPolicy(ctx context.Context, bucket string) error { - // This only removes the storage.AllUsers policies - if err := l.client.Bucket(bucket).ACL().Delete(ctx, storage.AllUsers); err != nil { - return gcsToObjectError(err, bucket) - } - - return nil -} - -// IsCompressionSupported returns whether compression is applicable for this layer. -func (l *gcsGateway) IsCompressionSupported() bool { - return false -} diff --git a/cmd/gateway/gcs/gateway-gcs_test.go b/cmd/gateway/gcs/gateway-gcs_test.go deleted file mode 100644 index a725ead4a..000000000 --- a/cmd/gateway/gcs/gateway-gcs_test.go +++ /dev/null @@ -1,500 +0,0 @@ -/* - * MinIO Object Storage (c) 2021 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package gcs - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "reflect" - "testing" - "time" - - "cloud.google.com/go/storage" - "google.golang.org/api/googleapi" - - miniogo "github.com/minio/minio-go/v7" - minio "github.com/minio/minio/cmd" -) - -func TestToGCSPageToken(t *testing.T) { - testCases := []struct { - Name string - Token string - }{ - { - Name: "A", - Token: "CgFB", - }, - { - Name: "AAAAAAAAAA", - Token: "CgpBQUFBQUFBQUFB", - }, - { - Name: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - Token: "CmRBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFB", - }, - { - Name: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - Token: "CpEDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUE=", - }, - { - Name: 
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - Token: "CpIDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFB", - }, - { - Name: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - Token: "CpMDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQQ==", - }, - { - Name: 
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - Token: "CvQDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUE=", - }, - } - - for i, testCase := range testCases { - if toGCSPageToken(testCase.Name) != testCase.Token { - t.Errorf("Test %d: Expected %s, got %s", i+1, toGCSPageToken(testCase.Name), testCase.Token) - } - } -} - -// TestIsValidGCSProjectIDFormat tests isValidGCSProjectIDFormat -func TestValidGCSProjectIDFormat(t *testing.T) { - testCases := []struct { - ProjectID string - Valid bool - }{ - {"", false}, - {"a", false}, - {"Abc", false}, - {"1bcd", false}, - // 5 chars - {"abcdb", false}, - // 6 chars - {"abcdbz", true}, - // 30 chars - {"project-id-1-project-id-more-1", true}, - // 31 chars - {"project-id-1-project-id-more-11", false}, - {"storage.googleapis.com", false}, - {"http://storage.googleapis.com", false}, - {"http://localhost:9000", false}, - {"project-id-1", true}, - 
{"project-id-1988832", true}, - {"projectid1414", true}, - } - - for i, testCase := range testCases { - valid := isValidGCSProjectIDFormat(testCase.ProjectID) - if valid != testCase.Valid { - t.Errorf("Test %d: Expected %v, got %v", i+1, valid, testCase.Valid) - } - } -} - -// Test for isGCSMarker. -func TestIsGCSMarker(t *testing.T) { - testCases := []struct { - marker string - expected bool - }{ - { - marker: "{minio}gcs123", - expected: true, - }, - { - marker: "{mini_no}tgcs123", - expected: false, - }, - { - marker: "{minioagainnotgcs123", - expected: false, - }, - { - marker: "obj1", - expected: false, - }, - } - - for i, tc := range testCases { - if actual := isGCSMarker(tc.marker); actual != tc.expected { - t.Errorf("Test %d: marker is %s, expected %v but got %v", - i+1, tc.marker, tc.expected, actual) - } - } -} - -// Test for gcsMultipartMetaName. -func TestGCSMultipartMetaName(t *testing.T) { - uploadID := "a" - expected := path.Join(gcsMinioMultipartPathV1, uploadID, gcsMinioMultipartMeta) - got := gcsMultipartMetaName(uploadID) - if expected != got { - t.Errorf("expected: %s, got: %s", expected, got) - } -} - -// Test for gcsMultipartDataName. 
-func TestGCSMultipartDataName(t *testing.T) { - var ( - uploadID = "a" - etag = "b" - partNumber = 1 - ) - expected := path.Join(gcsMinioMultipartPathV1, uploadID, fmt.Sprintf("%05d.%s", partNumber, etag)) - got := gcsMultipartDataName(uploadID, partNumber, etag) - if expected != got { - t.Errorf("expected: %s, got: %s", expected, got) - } -} - -func TestFromMinioClientListBucketResultToV2Info(t *testing.T) { - listBucketResult := miniogo.ListBucketResult{ - IsTruncated: false, - Marker: "testMarker", - NextMarker: "testMarker2", - CommonPrefixes: []miniogo.CommonPrefix{{Prefix: "one"}, {Prefix: "two"}}, - Contents: []miniogo.ObjectInfo{{Key: "testobj", ContentType: ""}}, - } - - listBucketV2Info := minio.ListObjectsV2Info{ - Prefixes: []string{"one", "two"}, - Objects: []minio.ObjectInfo{{Name: "testobj", Bucket: "testbucket", UserDefined: map[string]string{"Content-Type": ""}}}, - IsTruncated: false, - ContinuationToken: "testMarker", - NextContinuationToken: "testMarker2", - } - - if got := minio.FromMinioClientListBucketResultToV2Info("testbucket", listBucketResult); !reflect.DeepEqual(got, listBucketV2Info) { - t.Errorf("fromMinioClientListBucketResultToV2Info() = %v, want %v", got, listBucketV2Info) - } -} - -// Test for gcsParseProjectID -func TestGCSParseProjectID(t *testing.T) { - f, err := ioutil.TempFile("", "TestGCSParseProjectID-*") - if err != nil { - t.Error(err) - return - } - defer os.Remove(f.Name()) - - contents := ` -{ - "type": "service_account", - "project_id": "miniotesting" -} -` - f.WriteString(contents) - f.Close() - projectID, err := gcsParseProjectID(f.Name()) - if err != nil { - t.Fatal(err) - } - if projectID != "miniotesting" { - t.Errorf(`Expected projectID value to be "miniotesting"`) - } - - if _, err = gcsParseProjectID("non-existent"); err == nil { - t.Errorf(`Expected to fail but succeeded reading "non-existent"`) - } - - contents = ` -{ - "type": "service_account", - "project_id": "miniotesting" -},} -` - f, err = 
ioutil.TempFile("", "TestGCSParseProjectID-*") - if err != nil { - t.Error(err) - return - } - defer os.Remove(f.Name()) - f.WriteString(contents) - f.Close() - if _, err := gcsParseProjectID(f.Name()); err == nil { - t.Errorf(`Expected to fail reading corrupted credentials file`) - } -} - -func TestGCSToObjectError(t *testing.T) { - testCases := []struct { - params []string - gcsErr error - expectedErr error - }{ - { - []string{}, nil, nil, - }, - { - []string{}, fmt.Errorf("Not *Error"), fmt.Errorf("Not *Error"), - }, - { - []string{"bucket"}, - fmt.Errorf("storage: bucket doesn't exist"), - minio.BucketNotFound{ - Bucket: "bucket", - }, - }, - { - []string{"bucket", "object"}, - fmt.Errorf("storage: object doesn't exist"), - minio.ObjectNotFound{ - Bucket: "bucket", - Object: "object", - }, - }, - { - []string{"bucket", "object", "uploadID"}, - fmt.Errorf("storage: object doesn't exist"), - minio.InvalidUploadID{ - UploadID: "uploadID", - }, - }, - { - []string{}, - fmt.Errorf("Unknown error"), - fmt.Errorf("Unknown error"), - }, - { - []string{"bucket", "object"}, - &googleapi.Error{ - Message: "No list of errors", - }, - &googleapi.Error{ - Message: "No list of errors", - }, - }, - { - []string{"bucket", "object"}, - &googleapi.Error{ - Errors: []googleapi.ErrorItem{{ - Reason: "conflict", - Message: "You already own this bucket. Please select another name.", - }}, - }, - minio.BucketAlreadyOwnedByYou{ - Bucket: "bucket", - }, - }, - { - []string{"bucket", "object"}, - &googleapi.Error{ - Errors: []googleapi.ErrorItem{{ - Reason: "conflict", - Message: "Sorry, that name is not available. 
Please try a different one.", - }}, - }, - minio.BucketAlreadyExists{ - Bucket: "bucket", - }, - }, - { - []string{"bucket", "object"}, - &googleapi.Error{ - Errors: []googleapi.ErrorItem{{ - Reason: "conflict", - }}, - }, - minio.BucketNotEmpty{Bucket: "bucket"}, - }, - { - []string{"bucket"}, - &googleapi.Error{ - Errors: []googleapi.ErrorItem{{ - Reason: "notFound", - }}, - }, - minio.BucketNotFound{ - Bucket: "bucket", - }, - }, - { - []string{"bucket", "object"}, - &googleapi.Error{ - Errors: []googleapi.ErrorItem{{ - Reason: "notFound", - }}, - }, - minio.ObjectNotFound{ - Bucket: "bucket", - Object: "object", - }, - }, - { - []string{"bucket"}, - &googleapi.Error{ - Errors: []googleapi.ErrorItem{{ - Reason: "invalid", - }}, - }, - minio.BucketNameInvalid{ - Bucket: "bucket", - }, - }, - { - []string{"bucket", "object"}, - &googleapi.Error{ - Errors: []googleapi.ErrorItem{{ - Reason: "forbidden", - }}, - }, - minio.PrefixAccessDenied{ - Bucket: "bucket", - Object: "object", - }, - }, - { - []string{"bucket", "object"}, - &googleapi.Error{ - Errors: []googleapi.ErrorItem{{ - Reason: "keyInvalid", - }}, - }, - minio.PrefixAccessDenied{ - Bucket: "bucket", - Object: "object", - }, - }, - { - []string{"bucket", "object"}, - &googleapi.Error{ - Errors: []googleapi.ErrorItem{{ - Reason: "required", - }}, - }, - minio.PrefixAccessDenied{ - Bucket: "bucket", - Object: "object", - }, - }, - } - - for i, testCase := range testCases { - actualErr := gcsToObjectError(testCase.gcsErr, testCase.params...) 
- if actualErr != nil { - if actualErr.Error() != testCase.expectedErr.Error() { - t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.expectedErr, actualErr) - } - } - } -} - -func TestS3MetaToGCSAttributes(t *testing.T) { - headers := map[string]string{ - "accept-encoding": "gzip", - "content-encoding": "gzip", - "cache-control": "age: 3600", - "content-disposition": "dummy", - "content-type": "application/javascript", - "Content-Language": "en", - "X-Amz-Meta-Hdr": "value", - "X-Amz-Meta-X-Amz-Key": "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=", - "X-Amz-Meta-X-Amz-Matdesc": "{}", - "X-Amz-Meta-X-Amz-Iv": "eWmyryl8kq+EVnnsE7jpOg==", - } - // Only X-Amz-Meta- prefixed entries will be returned in - // Metadata (without the prefix!) - expectedHeaders := map[string]string{ - "x-goog-meta-Hdr": "value", - "x-goog-meta-X-Amz-Key": "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=", - "x-goog-meta-X-Amz-Matdesc": "{}", - "x-goog-meta-X-Amz-Iv": "eWmyryl8kq+EVnnsE7jpOg==", - } - - attrs := storage.ObjectAttrs{} - applyMetadataToGCSAttrs(headers, &attrs) - - if !reflect.DeepEqual(attrs.Metadata, expectedHeaders) { - t.Fatalf("Test failed, expected %#v, got %#v", expectedHeaders, attrs.Metadata) - } - - if attrs.CacheControl != headers["cache-control"] { - t.Fatalf("Test failed with Cache-Control mistmatch, expected %s, got %s", headers["cache-control"], attrs.CacheControl) - } - if attrs.ContentDisposition != headers["content-disposition"] { - t.Fatalf("Test failed with Content-Disposition mistmatch, expected %s, got %s", headers["content-disposition"], attrs.ContentDisposition) - } - if attrs.ContentEncoding != headers["content-encoding"] { - t.Fatalf("Test failed with Content-Encoding mistmatch, expected %s, got %s", headers["content-encoding"], attrs.ContentEncoding) - } - if attrs.ContentLanguage != headers["Content-Language"] { - t.Fatalf("Test failed with Content-Language mistmatch, expected %s, got %s", headers["Content-Language"], attrs.ContentLanguage) - } - if 
attrs.ContentType != headers["content-type"] { - t.Fatalf("Test failed with Content-Type mistmatch, expected %s, got %s", headers["content-type"], attrs.ContentType) - } -} - -func TestGCSAttrsToObjectInfo(t *testing.T) { - metadata := map[string]string{ - "x-goog-meta-Hdr": "value", - "x-goog-meta-x_amz_key": "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=", - "x-goog-meta-x-amz-matdesc": "{}", - "x-goog-meta-X-Amz-Iv": "eWmyryl8kq+EVnnsE7jpOg==", - } - expectedMeta := map[string]string{ - "X-Amz-Meta-Hdr": "value", - "X-Amz-Meta-X_amz_key": "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=", - "X-Amz-Meta-X-Amz-Matdesc": "{}", - "X-Amz-Meta-X-Amz-Iv": "eWmyryl8kq+EVnnsE7jpOg==", - "Cache-Control": "max-age: 3600", - "Content-Disposition": "dummy", - "Content-Encoding": "gzip", - "Content-Language": "en", - "Content-Type": "application/javascript", - } - - attrs := storage.ObjectAttrs{ - Name: "test-obj", - Bucket: "test-bucket", - Updated: time.Now(), - Size: 123, - CRC32C: 45312398, - CacheControl: "max-age: 3600", - ContentDisposition: "dummy", - ContentEncoding: "gzip", - ContentLanguage: "en", - ContentType: "application/javascript", - Metadata: metadata, - } - expectedETag := minio.ToS3ETag(fmt.Sprintf("%d", attrs.CRC32C)) - - objInfo := fromGCSAttrsToObjectInfo(&attrs) - if !reflect.DeepEqual(objInfo.UserDefined, expectedMeta) { - t.Fatalf("Test failed, expected %#v, got %#v", expectedMeta, objInfo.UserDefined) - } - - if objInfo.Name != attrs.Name { - t.Fatalf("Test failed with Name mistmatch, expected %s, got %s", attrs.Name, objInfo.Name) - } - if objInfo.Bucket != attrs.Bucket { - t.Fatalf("Test failed with Bucket mistmatch, expected %s, got %s", attrs.Bucket, objInfo.Bucket) - } - if !objInfo.ModTime.Equal(attrs.Updated) { - t.Fatalf("Test failed with ModTime mistmatch, expected %s, got %s", attrs.Updated, objInfo.ModTime) - } - if objInfo.Size != attrs.Size { - t.Fatalf("Test failed with Size mistmatch, expected %d, got %d", attrs.Size, objInfo.Size) - } - 
if objInfo.ETag != expectedETag { - t.Fatalf("Test failed with ETag mistmatch, expected %s, got %s", expectedETag, objInfo.ETag) - } -} diff --git a/cmd/gateway/hdfs/gateway-hdfs-utils.go b/cmd/gateway/hdfs/gateway-hdfs-utils.go deleted file mode 100644 index 83fab72d0..000000000 --- a/cmd/gateway/hdfs/gateway-hdfs-utils.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - * MinIO Object Storage (c) 2021 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package hdfs - -import ( - "strings" - - "github.com/minio/minio-go/v7/pkg/s3utils" - minio "github.com/minio/minio/cmd" -) - -const ( - // Minio meta bucket. - minioMetaBucket = ".minio.sys" - - // Minio Tmp meta prefix. - minioMetaTmpBucket = minioMetaBucket + "/tmp" - - // Minio reserved bucket name. - minioReservedBucket = "minio" -) - -// Ignores all reserved bucket names or invalid bucket names. -func isReservedOrInvalidBucket(bucketEntry string, strict bool) bool { - bucketEntry = strings.TrimSuffix(bucketEntry, minio.SlashSeparator) - if strict { - if err := s3utils.CheckValidBucketNameStrict(bucketEntry); err != nil { - return true - } - } else { - if err := s3utils.CheckValidBucketName(bucketEntry); err != nil { - return true - } - } - return isMinioMetaBucket(bucketEntry) || isMinioReservedBucket(bucketEntry) -} - -// Returns true if input bucket is a reserved minio meta bucket '.minio.sys'. 
-func isMinioMetaBucket(bucketName string) bool { - return bucketName == minioMetaBucket -} - -// Returns true if input bucket is a reserved minio bucket 'minio'. -func isMinioReservedBucket(bucketName string) bool { - return bucketName == minioReservedBucket -} - -// byBucketName is a collection satisfying sort.Interface. -type byBucketName []minio.BucketInfo - -func (d byBucketName) Len() int { return len(d) } -func (d byBucketName) Swap(i, j int) { d[i], d[j] = d[j], d[i] } -func (d byBucketName) Less(i, j int) bool { return d[i].Name < d[j].Name } diff --git a/cmd/gateway/hdfs/gateway-hdfs.go b/cmd/gateway/hdfs/gateway-hdfs.go deleted file mode 100644 index 1abe3b92a..000000000 --- a/cmd/gateway/hdfs/gateway-hdfs.go +++ /dev/null @@ -1,887 +0,0 @@ -/* - * MinIO Object Storage (c) 2021 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package hdfs - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "net/http" - "os" - "os/user" - "path" - "sort" - "strings" - "sync" - "syscall" - "time" - - "github.com/colinmarc/hdfs/v2" - "github.com/colinmarc/hdfs/v2/hadoopconf" - krb "github.com/jcmturner/gokrb5/v8/client" - "github.com/jcmturner/gokrb5/v8/config" - "github.com/jcmturner/gokrb5/v8/credentials" - "github.com/jcmturner/gokrb5/v8/keytab" - "github.com/minio/cli" - "github.com/minio/madmin-go" - "github.com/minio/minio-go/v7/pkg/s3utils" - minio "github.com/minio/minio/cmd" - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/env" - xnet "github.com/minio/pkg/net" -) - -const ( - hdfsSeparator = minio.SlashSeparator -) - -func init() { - const hdfsGatewayTemplate = `NAME: - {{.HelpName}} - {{.Usage}} - -USAGE: - {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} HDFS-NAMENODE [HDFS-NAMENODE...] -{{if .VisibleFlags}} -FLAGS: - {{range .VisibleFlags}}{{.}} - {{end}}{{end}} -HDFS-NAMENODE: - HDFS namenode URI - -EXAMPLES: - 1. Start minio gateway server for HDFS backend - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_USER{{.AssignmentOperator}}accesskey - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_PASSWORD{{.AssignmentOperator}}secretkey - {{.Prompt}} {{.HelpName}} hdfs://namenode:8200 - - 2. 
Start minio gateway server for HDFS with edge caching enabled - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_USER{{.AssignmentOperator}}accesskey - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_PASSWORD{{.AssignmentOperator}}secretkey - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4" - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*,*.png" - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90 - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_AFTER{{.AssignmentOperator}}3 - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_LOW{{.AssignmentOperator}}75 - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_HIGH{{.AssignmentOperator}}85 - {{.Prompt}} {{.HelpName}} hdfs://namenode:8200 -` - - minio.RegisterGatewayCommand(cli.Command{ - Name: minio.HDFSBackendGateway, - Usage: "Hadoop Distributed File System (HDFS)", - Action: hdfsGatewayMain, - CustomHelpTemplate: hdfsGatewayTemplate, - HideHelpCommand: true, - }) -} - -// Handler for 'minio gateway hdfs' command line. -func hdfsGatewayMain(ctx *cli.Context) { - // Validate gateway arguments. - if ctx.Args().First() == "help" { - cli.ShowCommandHelpAndExit(ctx, minio.HDFSBackendGateway, 1) - } - - minio.StartGateway(ctx, &HDFS{args: ctx.Args()}) -} - -// HDFS implements Gateway. -type HDFS struct { - args []string -} - -// Name implements Gateway interface. 
-func (g *HDFS) Name() string { - return minio.HDFSBackendGateway -} - -func getKerberosClient() (*krb.Client, error) { - cfg, err := config.Load(env.Get("KRB5_CONFIG", "/etc/krb5.conf")) - if err != nil { - return nil, err - } - - u, err := user.Current() - if err != nil { - return nil, err - } - - keytabPath := env.Get("KRB5KEYTAB", "") - if keytabPath != "" { - kt, err := keytab.Load(keytabPath) - if err != nil { - return nil, err - } - - username := env.Get("KRB5USERNAME", "") - realm := env.Get("KRB5REALM", "") - if username == "" || realm == "" { - return nil, errors.New("empty KRB5USERNAME or KRB5REALM") - } - - return krb.NewWithKeytab(username, realm, kt, cfg), nil - } - - // Determine the ccache location from the environment, falling back to the default location. - ccachePath := env.Get("KRB5CCNAME", fmt.Sprintf("/tmp/krb5cc_%s", u.Uid)) - if strings.Contains(ccachePath, ":") { - if strings.HasPrefix(ccachePath, "FILE:") { - ccachePath = strings.TrimPrefix(ccachePath, "FILE:") - } else { - return nil, fmt.Errorf("unable to use kerberos ccache: %s", ccachePath) - } - } - - ccache, err := credentials.LoadCCache(ccachePath) - if err != nil { - return nil, err - } - - return krb.NewFromCCache(ccache, cfg) -} - -// NewGatewayLayer returns hdfs gatewaylayer. -func (g *HDFS) NewGatewayLayer(creds madmin.Credentials) (minio.ObjectLayer, error) { - dialFunc := (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext - - hconfig, err := hadoopconf.LoadFromEnvironment() - if err != nil { - return nil, err - } - - opts := hdfs.ClientOptionsFromConf(hconfig) - opts.NamenodeDialFunc = dialFunc - opts.DatanodeDialFunc = dialFunc - - // Not addresses found, load it from command line. 
- var commonPath string - if len(opts.Addresses) == 0 { - var addresses []string - for _, s := range g.args { - u, err := xnet.ParseURL(s) - if err != nil { - return nil, err - } - if u.Scheme != "hdfs" { - return nil, fmt.Errorf("unsupported scheme %s, only supports hdfs://", u) - } - if commonPath != "" && commonPath != u.Path { - return nil, fmt.Errorf("all namenode paths should be same %s", g.args) - } - if commonPath == "" { - commonPath = u.Path - } - addresses = append(addresses, u.Host) - } - opts.Addresses = addresses - } - - u, err := user.Current() - if err != nil { - return nil, fmt.Errorf("unable to lookup local user: %s", err) - } - - if opts.KerberosClient != nil { - opts.KerberosClient, err = getKerberosClient() - if err != nil { - return nil, fmt.Errorf("unable to initialize kerberos client: %s", err) - } - } else { - opts.User = env.Get("HADOOP_USER_NAME", u.Username) - } - - clnt, err := hdfs.NewClient(opts) - if err != nil { - return nil, fmt.Errorf("unable to initialize hdfsClient: %v", err) - } - - if err = clnt.MkdirAll(minio.PathJoin(commonPath, hdfsSeparator, minioMetaTmpBucket), os.FileMode(0o755)); err != nil { - return nil, err - } - - return &hdfsObjects{clnt: clnt, subPath: commonPath, listPool: minio.NewTreeWalkPool(time.Minute * 30)}, nil -} - -func (n *hdfsObjects) Shutdown(ctx context.Context) error { - return n.clnt.Close() -} - -func (n *hdfsObjects) LocalStorageInfo(ctx context.Context) (si minio.StorageInfo, errs []error) { - return n.StorageInfo(ctx) -} - -func (n *hdfsObjects) StorageInfo(ctx context.Context) (si minio.StorageInfo, errs []error) { - fsInfo, err := n.clnt.StatFs() - if err != nil { - return minio.StorageInfo{}, []error{err} - } - si.Disks = []madmin.Disk{{ - UsedSpace: fsInfo.Used, - }} - si.Backend.Type = madmin.Gateway - si.Backend.GatewayOnline = true - return si, nil -} - -// hdfsObjects implements gateway for Minio and S3 compatible object storage servers. 
-type hdfsObjects struct { - minio.GatewayUnsupported - clnt *hdfs.Client - subPath string - listPool *minio.TreeWalkPool -} - -func hdfsToObjectErr(ctx context.Context, err error, params ...string) error { - if err == nil { - return nil - } - bucket := "" - object := "" - uploadID := "" - switch len(params) { - case 3: - uploadID = params[2] - fallthrough - case 2: - object = params[1] - fallthrough - case 1: - bucket = params[0] - } - - switch { - case os.IsNotExist(err): - if uploadID != "" { - return minio.InvalidUploadID{ - UploadID: uploadID, - } - } - if object != "" { - return minio.ObjectNotFound{Bucket: bucket, Object: object} - } - return minio.BucketNotFound{Bucket: bucket} - case os.IsExist(err): - if object != "" { - return minio.PrefixAccessDenied{Bucket: bucket, Object: object} - } - return minio.BucketAlreadyOwnedByYou{Bucket: bucket} - case errors.Is(err, syscall.ENOTEMPTY): - if object != "" { - return minio.PrefixAccessDenied{Bucket: bucket, Object: object} - } - return minio.BucketNotEmpty{Bucket: bucket} - default: - logger.LogIf(ctx, err) - return err - } -} - -// hdfsIsValidBucketName verifies whether a bucket name is valid. -func hdfsIsValidBucketName(bucket string) bool { - return s3utils.CheckValidBucketNameStrict(bucket) == nil -} - -func (n *hdfsObjects) hdfsPathJoin(args ...string) string { - return minio.PathJoin(append([]string{n.subPath, hdfsSeparator}, args...)...) 
-} - -func (n *hdfsObjects) DeleteBucket(ctx context.Context, bucket string, opts minio.DeleteBucketOptions) error { - if !hdfsIsValidBucketName(bucket) { - return minio.BucketNameInvalid{Bucket: bucket} - } - if opts.Force { - return hdfsToObjectErr(ctx, n.clnt.RemoveAll(n.hdfsPathJoin(bucket)), bucket) - } - return hdfsToObjectErr(ctx, n.clnt.Remove(n.hdfsPathJoin(bucket)), bucket) -} - -func (n *hdfsObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.BucketOptions) error { - if opts.LockEnabled || opts.VersioningEnabled { - return minio.NotImplemented{} - } - - if !hdfsIsValidBucketName(bucket) { - return minio.BucketNameInvalid{Bucket: bucket} - } - return hdfsToObjectErr(ctx, n.clnt.Mkdir(n.hdfsPathJoin(bucket), os.FileMode(0o755)), bucket) -} - -func (n *hdfsObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, err error) { - fi, err := n.clnt.Stat(n.hdfsPathJoin(bucket)) - if err != nil { - return bi, hdfsToObjectErr(ctx, err, bucket) - } - // As hdfs.Stat() doesn't carry anything other than ModTime(), use ModTime() as CreatedTime. - return minio.BucketInfo{ - Name: bucket, - Created: fi.ModTime(), - }, nil -} - -func (n *hdfsObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) { - entries, err := n.clnt.ReadDir(n.hdfsPathJoin()) - if err != nil { - logger.LogIf(ctx, err) - return nil, hdfsToObjectErr(ctx, err) - } - - for _, entry := range entries { - // Ignore all reserved bucket names and invalid bucket names. - if isReservedOrInvalidBucket(entry.Name(), false) { - continue - } - buckets = append(buckets, minio.BucketInfo{ - Name: entry.Name(), - // As hdfs.Stat() doesnt carry CreatedTime, use ModTime() as CreatedTime. - Created: entry.ModTime(), - }) - } - - // Sort bucket infos by bucket name. 
- sort.Sort(byBucketName(buckets)) - return buckets, nil -} - -func (n *hdfsObjects) isLeafDir(bucket, leafPath string) bool { - return n.isObjectDir(context.Background(), bucket, leafPath) -} - -func (n *hdfsObjects) isLeaf(bucket, leafPath string) bool { - return !strings.HasSuffix(leafPath, hdfsSeparator) -} - -func (n *hdfsObjects) listDirFactory() minio.ListDirFunc { - // listDir - lists all the entries at a given prefix and given entry in the prefix. - listDir := func(bucket, prefixDir, prefixEntry string) (emptyDir bool, entries []string, delayIsLeaf bool) { - f, err := n.clnt.Open(n.hdfsPathJoin(bucket, prefixDir)) - if err != nil { - if os.IsNotExist(err) { - err = nil - } - logger.LogIf(minio.GlobalContext, err) - return - } - defer f.Close() - fis, err := f.Readdir(0) - if err != nil { - logger.LogIf(minio.GlobalContext, err) - return - } - if len(fis) == 0 { - return true, nil, false - } - for _, fi := range fis { - if fi.IsDir() { - entries = append(entries, fi.Name()+hdfsSeparator) - } else { - entries = append(entries, fi.Name()) - } - } - entries, delayIsLeaf = minio.FilterListEntries(bucket, prefixDir, entries, prefixEntry, n.isLeaf) - return false, entries, delayIsLeaf - } - - // Return list factory instance. - return listDir -} - -// ListObjects lists all blobs in HDFS bucket filtered by prefix. -func (n *hdfsObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) { - var mutex sync.Mutex - fileInfos := make(map[string]os.FileInfo) - targetPath := n.hdfsPathJoin(bucket, prefix) - - var targetFileInfo os.FileInfo - - if targetFileInfo, err = n.populateDirectoryListing(targetPath, fileInfos); err != nil { - return loi, hdfsToObjectErr(ctx, err, bucket) - } - - // If the user is trying to list a single file, bypass the entire directory-walking code below - // and just return the single file's information. 
- if !targetFileInfo.IsDir() { - return minio.ListObjectsInfo{ - IsTruncated: false, - NextMarker: "", - Objects: []minio.ObjectInfo{ - fileInfoToObjectInfo(bucket, prefix, targetFileInfo), - }, - Prefixes: []string{}, - }, nil - } - - getObjectInfo := func(ctx context.Context, bucket, entry string) (minio.ObjectInfo, error) { - mutex.Lock() - defer mutex.Unlock() - - filePath := path.Clean(n.hdfsPathJoin(bucket, entry)) - fi, ok := fileInfos[filePath] - - // If the file info is not known, this may be a recursive listing and filePath is a - // child of a sub-directory. In this case, obtain that sub-directory's listing. - if !ok { - parentPath := path.Dir(filePath) - - if _, err := n.populateDirectoryListing(parentPath, fileInfos); err != nil { - return minio.ObjectInfo{}, hdfsToObjectErr(ctx, err, bucket) - } - - fi, ok = fileInfos[filePath] - - if !ok { - err = fmt.Errorf("could not get FileInfo for path '%s'", filePath) - return minio.ObjectInfo{}, hdfsToObjectErr(ctx, err, bucket, entry) - } - } - - objectInfo := fileInfoToObjectInfo(bucket, entry, fi) - - delete(fileInfos, filePath) - - return objectInfo, nil - } - - return minio.ListObjects(ctx, n, bucket, prefix, marker, delimiter, maxKeys, n.listPool, n.listDirFactory(), n.isLeaf, n.isLeafDir, getObjectInfo, getObjectInfo) -} - -func fileInfoToObjectInfo(bucket string, entry string, fi os.FileInfo) minio.ObjectInfo { - return minio.ObjectInfo{ - Bucket: bucket, - Name: entry, - ModTime: fi.ModTime(), - Size: fi.Size(), - IsDir: fi.IsDir(), - AccTime: fi.(*hdfs.FileInfo).AccessTime(), - } -} - -// Lists a path's direct, first-level entries and populates them in the `fileInfos` cache which maps -// a path entry to an `os.FileInfo`. It also saves the listed path's `os.FileInfo` in the cache. 
-func (n *hdfsObjects) populateDirectoryListing(filePath string, fileInfos map[string]os.FileInfo) (os.FileInfo, error) { - dirReader, err := n.clnt.Open(filePath) - if err != nil { - return nil, err - } - - dirStat := dirReader.Stat() - key := path.Clean(filePath) - - if !dirStat.IsDir() { - return dirStat, nil - } - - fileInfos[key] = dirStat - infos, err := dirReader.Readdir(0) - if err != nil { - return nil, err - } - - for _, fileInfo := range infos { - filePath := minio.PathJoin(filePath, fileInfo.Name()) - fileInfos[filePath] = fileInfo - } - - return dirStat, nil -} - -// deleteObject deletes a file path if its empty. If it's successfully deleted, -// it will recursively move up the tree, deleting empty parent directories -// until it finds one with files in it. Returns nil for a non-empty directory. -func (n *hdfsObjects) deleteObject(basePath, deletePath string) error { - if basePath == deletePath { - return nil - } - - // Attempt to remove path. - if err := n.clnt.Remove(deletePath); err != nil { - if errors.Is(err, syscall.ENOTEMPTY) { - // Ignore errors if the directory is not empty. The server relies on - // this functionality, and sometimes uses recursion that should not - // error on parent directories. - return nil - } - return err - } - - // Trailing slash is removed when found to ensure - // slashpath.Dir() to work as intended. - deletePath = strings.TrimSuffix(deletePath, hdfsSeparator) - deletePath = path.Dir(deletePath) - - // Delete parent directory. Errors for parent directories shouldn't trickle down. - n.deleteObject(basePath, deletePath) - - return nil -} - -// ListObjectsV2 lists all blobs in HDFS bucket filtered by prefix -func (n *hdfsObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, - fetchOwner bool, startAfter string, -) (loi minio.ListObjectsV2Info, err error) { - // fetchOwner is not supported and unused. 
- marker := continuationToken - if marker == "" { - marker = startAfter - } - resultV1, err := n.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys) - if err != nil { - return loi, err - } - return minio.ListObjectsV2Info{ - Objects: resultV1.Objects, - Prefixes: resultV1.Prefixes, - ContinuationToken: continuationToken, - NextContinuationToken: resultV1.NextMarker, - IsTruncated: resultV1.IsTruncated, - }, nil -} - -func (n *hdfsObjects) DeleteObject(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) { - err := hdfsToObjectErr(ctx, n.deleteObject(n.hdfsPathJoin(bucket), n.hdfsPathJoin(bucket, object)), bucket, object) - return minio.ObjectInfo{ - Bucket: bucket, - Name: object, - }, err -} - -func (n *hdfsObjects) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) { - errs := make([]error, len(objects)) - dobjects := make([]minio.DeletedObject, len(objects)) - for idx, object := range objects { - _, errs[idx] = n.DeleteObject(ctx, bucket, object.ObjectName, opts) - if errs[idx] == nil { - dobjects[idx] = minio.DeletedObject{ - ObjectName: object.ObjectName, - } - } - } - return dobjects, errs -} - -func (n *hdfsObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) { - objInfo, err := n.GetObjectInfo(ctx, bucket, object, opts) - if err != nil { - return nil, err - } - - var startOffset, length int64 - startOffset, length, err = rs.GetOffsetLength(objInfo.Size) - if err != nil { - return nil, err - } - - pr, pw := io.Pipe() - go func() { - nerr := n.getObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, opts) - pw.CloseWithError(nerr) - }() - - // Setup cleanup function to cause the above go-routine to - // exit in case of partial read - pipeCloser := func() { 
pr.Close() } - return minio.NewGetObjectReaderFromReader(pr, objInfo, opts, pipeCloser) -} - -func (n *hdfsObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) { - cpSrcDstSame := minio.IsStringEqual(n.hdfsPathJoin(srcBucket, srcObject), n.hdfsPathJoin(dstBucket, dstObject)) - if cpSrcDstSame { - return n.GetObjectInfo(ctx, srcBucket, srcObject, minio.ObjectOptions{}) - } - - return n.PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, minio.ObjectOptions{ - ServerSideEncryption: dstOpts.ServerSideEncryption, - UserDefined: srcInfo.UserDefined, - }) -} - -func (n *hdfsObjects) getObject(ctx context.Context, bucket, key string, startOffset, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error { - if _, err := n.clnt.Stat(n.hdfsPathJoin(bucket)); err != nil { - return hdfsToObjectErr(ctx, err, bucket) - } - rd, err := n.clnt.Open(n.hdfsPathJoin(bucket, key)) - if err != nil { - return hdfsToObjectErr(ctx, err, bucket, key) - } - defer rd.Close() - _, err = io.Copy(writer, io.NewSectionReader(rd, startOffset, length)) - if err == io.ErrClosedPipe { - // hdfs library doesn't send EOF correctly, so io.Copy attempts - // to write which returns io.ErrClosedPipe - just ignore - // this for now. - err = nil - } - return hdfsToObjectErr(ctx, err, bucket, key) -} - -func (n *hdfsObjects) isObjectDir(ctx context.Context, bucket, object string) bool { - f, err := n.clnt.Open(n.hdfsPathJoin(bucket, object)) - if err != nil { - if os.IsNotExist(err) { - return false - } - logger.LogIf(ctx, err) - return false - } - defer f.Close() - fis, err := f.Readdir(1) - if err != nil && err != io.EOF { - logger.LogIf(ctx, err) - return false - } - // Readdir returns an io.EOF when len(fis) == 0. - return len(fis) == 0 -} - -// GetObjectInfo reads object info and replies back ObjectInfo. 
-func (n *hdfsObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - _, err = n.clnt.Stat(n.hdfsPathJoin(bucket)) - if err != nil { - return objInfo, hdfsToObjectErr(ctx, err, bucket) - } - if strings.HasSuffix(object, hdfsSeparator) && !n.isObjectDir(ctx, bucket, object) { - return objInfo, hdfsToObjectErr(ctx, os.ErrNotExist, bucket, object) - } - - fi, err := n.clnt.Stat(n.hdfsPathJoin(bucket, object)) - if err != nil { - return objInfo, hdfsToObjectErr(ctx, err, bucket, object) - } - return minio.ObjectInfo{ - Bucket: bucket, - Name: object, - ModTime: fi.ModTime(), - Size: fi.Size(), - IsDir: fi.IsDir(), - AccTime: fi.(*hdfs.FileInfo).AccessTime(), - }, nil -} - -func (n *hdfsObjects) PutObject(ctx context.Context, bucket string, object string, r *minio.PutObjReader, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - _, err = n.clnt.Stat(n.hdfsPathJoin(bucket)) - if err != nil { - return objInfo, hdfsToObjectErr(ctx, err, bucket) - } - - name := n.hdfsPathJoin(bucket, object) - - // If its a directory create a prefix { - if strings.HasSuffix(object, hdfsSeparator) && r.Size() == 0 { - if err = n.clnt.MkdirAll(name, os.FileMode(0o755)); err != nil { - n.deleteObject(n.hdfsPathJoin(bucket), name) - return objInfo, hdfsToObjectErr(ctx, err, bucket, object) - } - } else { - tmpname := n.hdfsPathJoin(minioMetaTmpBucket, minio.MustGetUUID()) - var w *hdfs.FileWriter - w, err = n.clnt.Create(tmpname) - if err != nil { - return objInfo, hdfsToObjectErr(ctx, err, bucket, object) - } - defer n.deleteObject(n.hdfsPathJoin(minioMetaTmpBucket), tmpname) - if _, err = io.Copy(w, r); err != nil { - w.Close() - return objInfo, hdfsToObjectErr(ctx, err, bucket, object) - } - dir := path.Dir(name) - if dir != "" { - if err = n.clnt.MkdirAll(dir, os.FileMode(0o755)); err != nil { - w.Close() - n.deleteObject(n.hdfsPathJoin(bucket), dir) - return objInfo, hdfsToObjectErr(ctx, err, 
bucket, object) - } - } - w.Close() - if err = n.clnt.Rename(tmpname, name); err != nil { - return objInfo, hdfsToObjectErr(ctx, err, bucket, object) - } - } - fi, err := n.clnt.Stat(name) - if err != nil { - return objInfo, hdfsToObjectErr(ctx, err, bucket, object) - } - return minio.ObjectInfo{ - Bucket: bucket, - Name: object, - ETag: r.MD5CurrentHexString(), - ModTime: fi.ModTime(), - Size: fi.Size(), - IsDir: fi.IsDir(), - AccTime: fi.(*hdfs.FileInfo).AccessTime(), - }, nil -} - -func (n *hdfsObjects) NewMultipartUpload(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (uploadID string, err error) { - _, err = n.clnt.Stat(n.hdfsPathJoin(bucket)) - if err != nil { - return uploadID, hdfsToObjectErr(ctx, err, bucket) - } - - uploadID = minio.MustGetUUID() - if err = n.clnt.CreateEmptyFile(n.hdfsPathJoin(minioMetaTmpBucket, uploadID)); err != nil { - return uploadID, hdfsToObjectErr(ctx, err, bucket) - } - - return uploadID, nil -} - -func (n *hdfsObjects) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, err error) { - _, err = n.clnt.Stat(n.hdfsPathJoin(bucket)) - if err != nil { - return lmi, hdfsToObjectErr(ctx, err, bucket) - } - - // It's decided not to support List Multipart Uploads, hence returning empty result. 
- return lmi, nil -} - -func (n *hdfsObjects) checkUploadIDExists(ctx context.Context, bucket, object, uploadID string) (err error) { - _, err = n.clnt.Stat(n.hdfsPathJoin(minioMetaTmpBucket, uploadID)) - if err != nil { - return hdfsToObjectErr(ctx, err, bucket, object, uploadID) - } - return nil -} - -// GetMultipartInfo returns multipart info of the uploadId of the object -func (n *hdfsObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts minio.ObjectOptions) (result minio.MultipartInfo, err error) { - _, err = n.clnt.Stat(n.hdfsPathJoin(bucket)) - if err != nil { - return result, hdfsToObjectErr(ctx, err, bucket) - } - - if err = n.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { - return result, err - } - - result.Bucket = bucket - result.Object = object - result.UploadID = uploadID - return result, nil -} - -func (n *hdfsObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (result minio.ListPartsInfo, err error) { - _, err = n.clnt.Stat(n.hdfsPathJoin(bucket)) - if err != nil { - return result, hdfsToObjectErr(ctx, err, bucket) - } - - if err = n.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { - return result, err - } - - // It's decided not to support List parts, hence returning empty result. 
- return result, nil -} - -func (n *hdfsObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, - startOffset int64, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions, -) (minio.PartInfo, error) { - return n.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.PutObjReader, dstOpts) -} - -func (n *hdfsObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *minio.PutObjReader, opts minio.ObjectOptions) (info minio.PartInfo, err error) { - _, err = n.clnt.Stat(n.hdfsPathJoin(bucket)) - if err != nil { - return info, hdfsToObjectErr(ctx, err, bucket) - } - - var w *hdfs.FileWriter - w, err = n.clnt.Append(n.hdfsPathJoin(minioMetaTmpBucket, uploadID)) - if err != nil { - return info, hdfsToObjectErr(ctx, err, bucket, object, uploadID) - } - defer w.Close() - _, err = io.Copy(w, r.Reader) - if err != nil { - return info, hdfsToObjectErr(ctx, err, bucket, object, uploadID) - } - - info.PartNumber = partID - info.ETag = r.MD5CurrentHexString() - info.LastModified = minio.UTCNow() - info.Size = r.Reader.Size() - - return info, nil -} - -func (n *hdfsObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []minio.CompletePart, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - _, err = n.clnt.Stat(n.hdfsPathJoin(bucket)) - if err != nil { - return objInfo, hdfsToObjectErr(ctx, err, bucket) - } - - if err = n.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { - return objInfo, err - } - - name := n.hdfsPathJoin(bucket, object) - dir := path.Dir(name) - if dir != "" { - if err = n.clnt.MkdirAll(dir, os.FileMode(0o755)); err != nil { - return objInfo, hdfsToObjectErr(ctx, err, bucket, object) - } - } - - err = n.clnt.Rename(n.hdfsPathJoin(minioMetaTmpBucket, uploadID), name) - // Object already exists is an error on HDFS - // remove it and then create it again. 
- if os.IsExist(err) { - if err = n.clnt.Remove(name); err != nil { - if dir != "" { - n.deleteObject(n.hdfsPathJoin(bucket), dir) - } - return objInfo, hdfsToObjectErr(ctx, err, bucket, object) - } - if err = n.clnt.Rename(n.hdfsPathJoin(minioMetaTmpBucket, uploadID), name); err != nil { - if dir != "" { - n.deleteObject(n.hdfsPathJoin(bucket), dir) - } - return objInfo, hdfsToObjectErr(ctx, err, bucket, object) - } - } - fi, err := n.clnt.Stat(name) - if err != nil { - return objInfo, hdfsToObjectErr(ctx, err, bucket, object) - } - - // Calculate s3 compatible md5sum for complete multipart. - s3MD5 := minio.ComputeCompleteMultipartMD5(parts) - - return minio.ObjectInfo{ - Bucket: bucket, - Name: object, - ETag: s3MD5, - ModTime: fi.ModTime(), - Size: fi.Size(), - IsDir: fi.IsDir(), - AccTime: fi.(*hdfs.FileInfo).AccessTime(), - }, nil -} - -func (n *hdfsObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts minio.ObjectOptions) (err error) { - _, err = n.clnt.Stat(n.hdfsPathJoin(bucket)) - if err != nil { - return hdfsToObjectErr(ctx, err, bucket) - } - return hdfsToObjectErr(ctx, n.clnt.Remove(n.hdfsPathJoin(minioMetaTmpBucket, uploadID)), bucket, object, uploadID) -} diff --git a/cmd/metrics.go b/cmd/metrics.go index 761d59777..f5f09239f 100644 --- a/cmd/metrics.go +++ b/cmd/metrics.go @@ -191,7 +191,7 @@ func healingMetricsPrometheus(ch chan<- prometheus.Metric) { // collects gateway specific metrics for MinIO instance in Prometheus specific format // and sends to given channel func gatewayMetricsPrometheus(ch chan<- prometheus.Metric) { - if !globalIsGateway || (globalGatewayName != S3BackendGateway && globalGatewayName != AzureBackendGateway && globalGatewayName != GCSBackendGateway) { + if !globalIsGateway || (globalGatewayName != S3BackendGateway && globalGatewayName != AzureBackendGateway) { return } diff --git a/docs/gateway/README.md b/docs/gateway/README.md index 5c2d16435..7395403a4 100644 --- a/docs/gateway/README.md 
+++ b/docs/gateway/README.md @@ -11,5 +11,3 @@ MinIO Gateway adds Amazon S3 compatibility layer to third party NAS and Cloud St - [NAS](https://github.com/minio/minio/blob/master/docs/gateway/nas.md) - [S3](https://github.com/minio/minio/blob/master/docs/gateway/s3.md) - [Microsoft Azure Blob Storage](https://github.com/minio/minio/blob/master/docs/gateway/azure.md) -- [Google Cloud Storage](https://github.com/minio/minio/blob/master/docs/gateway/gcs.md) -- [HDFS](https://github.com/minio/minio/blob/master/docs/gateway/hdfs.md) diff --git a/docs/gateway/gcs.md b/docs/gateway/gcs.md deleted file mode 100644 index 888ba7385..000000000 --- a/docs/gateway/gcs.md +++ /dev/null @@ -1,97 +0,0 @@ -# MinIO GCS Gateway [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) - -MinIO GCS Gateway allows you to access Google Cloud Storage (GCS) with Amazon S3-compatible APIs - -## Support - -Gateway implementations are frozen and are not accepting any new features. Please reports any bugs at . If you are an existing customer please login to for production support. - -## 1. Run MinIO Gateway for GCS - -### 1.1 Create a Service Account key for GCS and get the Credentials File - -1. Navigate to the [API Console Credentials page](https://console.developers.google.com/project/_/apis/credentials). -2. Select a project or create a new project. Note the project ID. -3. Select the **Create credentials** dropdown on the **Credentials** page, and click **Service account key**. -4. Select **New service account** from the **Service account** dropdown. -5. Populate the **Service account name** and **Service account ID**. -6. Click the dropdown for the **Role** and choose **Storage** > **Storage Admin** *(Full control of GCS resources)*. -7. Click the **Create** button to download a credentials file and rename it to `credentials.json`. 
- -**Note:** For alternate ways to set up *Application Default Credentials*, see [Setting Up Authentication for Server to Server Production Applications](https://developers.google.com/identity/protocols/application-default-credentials). - -### 1.2 Run MinIO GCS Gateway Using Docker - -```sh -podman run \ - -p 9000:9000 \ - -p 9001:9001 \ - --name gcs-s3 \ - -v /path/to/credentials.json:/credentials.json \ - -e "GOOGLE_APPLICATION_CREDENTIALS=/credentials.json" \ - -e "MINIO_ROOT_USER=minioaccountname" \ - -e "MINIO_ROOT_PASSWORD=minioaccountkey" \ - quay.io/minio/minio gateway gcs yourprojectid --console-address ":9001" -``` - -### 1.3 Run MinIO GCS Gateway Using the MinIO Binary - -```sh -export GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json -export MINIO_ROOT_USER=minioaccesskey -export MINIO_ROOT_PASSWORD=miniosecretkey -minio gateway gcs yourprojectid -``` - -## 2. Test Using MinIO Console - -MinIO Gateway comes with an embedded web-based object browser that outputs content to . To test that MinIO Gateway is running, open a web browser, navigate to , and ensure that the object browser is displayed. - -| Dashboard | Creating a bucket | -| ------------- | ------------- | -| ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic1.png?raw=true) | ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic2.png?raw=true) | - -## 3. Test Using MinIO Client - -MinIO Client is a command-line tool called `mc` that provides UNIX-like commands for interacting with the server (e.g. ls, cat, cp, mirror, diff, find, etc.). `mc` supports file systems and Amazon S3-compatible cloud storage services (AWS Signature v2 and v4). 
- -### 3.1 Configure the Gateway using MinIO Client - -Use the following command to configure the gateway: - -```sh -mc alias set mygcs http://gateway-ip:9000 minioaccesskey miniosecretkey -``` - -### 3.2 List Containers on GCS - -Use the following command to list the containers on GCS: - -```sh -mc ls mygcs -``` - -A response similar to this one should be displayed: - -``` -[2017-02-22 01:50:43 PST] 0B ferenginar/ -[2017-02-26 21:43:51 PST] 0B my-container/ -[2017-02-26 22:10:11 PST] 0B test-container1/ -``` - -### 3.3 Known limitations - -MinIO Gateway has the following limitations when used with GCS: - -* It only supports read-only and write-only bucket policies at the bucket level; all other variations will return `API Not implemented`. -* The `List Multipart Uploads` and `List Object parts` commands always return empty lists. Therefore, the client must store all of the parts that it has uploaded and use that information when invoking the `_Complete Multipart Upload` command. - -Other limitations: - -* Bucket notification APIs are not supported. - -## 4. Explore Further - -* [`mc` command-line interface](https://docs.min.io/docs/minio-client-quickstart-guide) -* [`aws` command-line interface](https://docs.min.io/docs/aws-cli-with-minio) -* [`minio-go` Go SDK](https://docs.min.io/docs/golang-client-quickstart-guide) diff --git a/docs/gateway/hdfs.md b/docs/gateway/hdfs.md deleted file mode 100644 index d3d32ed9c..000000000 --- a/docs/gateway/hdfs.md +++ /dev/null @@ -1,126 +0,0 @@ -# MinIO HDFS Gateway [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) - -MinIO HDFS gateway adds Amazon S3 API support to Hadoop HDFS filesystem. Applications can use both the S3 and file APIs concurrently without requiring any data migration. Since the gateway is stateless and shared-nothing, you may elastically provision as many MinIO instances as needed to distribute the load. 
- -> NOTE: Intention of this gateway implementation it to make it easy to migrate your existing data on HDFS clusters to MinIO clusters using standard tools like `mc` or `aws-cli`, if the goal is to use HDFS perpetually we recommend that HDFS should be used directly for all write operations. - -## Support - -Gateway implementations are frozen and are not accepting any new features. Please reports any bugs at . If you are an existing customer please login to for production support. - -## Run MinIO Gateway for HDFS Storage - -### Using Binary - -Namenode information is obtained by reading `core-site.xml` automatically from your hadoop environment variables *$HADOOP_HOME* - -``` -export MINIO_ROOT_USER=minio -export MINIO_ROOT_PASSWORD=minio123 -minio gateway hdfs -``` - -You can also override the namenode endpoint as shown below. - -``` -export MINIO_ROOT_USER=minio -export MINIO_ROOT_PASSWORD=minio123 -minio gateway hdfs hdfs://namenode:8200 -``` - -### Using Docker - -Using docker is experimental, most Hadoop environments are not dockerized and may require additional steps in getting this to work properly. You are better off just using the binary in this situation. - -``` -podman run \ - -p 9000:9000 \ - -p 9001:9001 \ - --name hdfs-s3 \ - -e "MINIO_ROOT_USER=minio" \ - -e "MINIO_ROOT_PASSWORD=minio123" \ - quay.io/minio/minio gateway hdfs hdfs://namenode:8200 --console-address ":9001" -``` - -### Setup Kerberos - -MinIO supports two kerberos authentication methods, keytab and ccache. - -To enable kerberos authentication, you need to set `hadoop.security.authentication=kerberos` in the HDFS config file. - -```xml - - hadoop.security.authentication - kerberos - -``` - -MinIO will load `krb5.conf` from environment variable `KRB5_CONFIG` or default location `/etc/krb5.conf`. 
- -```sh -export KRB5_CONFIG=/path/to/krb5.conf -``` - -If you want MinIO to use ccache for authentication, set environment variable `KRB5CCNAME` to the credential cache file path, -or MinIO will use the default location `/tmp/krb5cc_%{uid}`. - -```sh -export KRB5CCNAME=/path/to/krb5cc -``` - -If you prefer to use keytab, with automatically renewal, you need to config three environment variables: - -- `KRB5KEYTAB`: the location of keytab file -- `KRB5USERNAME`: the username -- `KRB5REALM`: the realm - -Please note that the username is not principal name. - -```sh -export KRB5KEYTAB=/path/to/keytab -export KRB5USERNAME=hdfs -export KRB5REALM=REALM.COM -``` - -## Test using MinIO Console - -*MinIO gateway* comes with an embedded web based object browser. Point your web browser to to ensure that your server has started successfully. - -| Dashboard | Creating a bucket | -| ------------- | ------------- | -| ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic1.png?raw=true) | ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic2.png?raw=true) | - -## Test using MinIO Client `mc` - -`mc` provides a modern alternative to UNIX commands such as ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. 
- -### Configure `mc` - -``` -mc alias set myhdfs http://gateway-ip:9000 access_key secret_key -``` - -### List buckets on hdfs - -``` -mc ls myhdfs -[2017-02-22 01:50:43 PST] 0B user/ -[2017-02-26 21:43:51 PST] 0B datasets/ -[2017-02-26 22:10:11 PST] 0B assets/ -``` - -### Known limitations - -Gateway inherits the following limitations of HDFS storage layer: - -- No bucket policy support (HDFS has no such concept) -- No bucket notification APIs are not supported (HDFS has no support for fsnotify) -- No server side encryption support (Intentionally not implemented) -- No server side compression support (Intentionally not implemented) -- Concurrent multipart operations are not supported (HDFS lacks safe locking support, or poorly implemented) - -## Explore Further - -- [`mc` command-line interface](https://docs.minio.io/docs/minio-client-quickstart-guide) -- [`aws` command-line interface](https://docs.minio.io/docs/aws-cli-with-minio) -- [`minio-go` Go SDK](https://docs.minio.io/docs/golang-client-quickstart-guide) diff --git a/go.mod b/go.mod index c389b6cd2..4d445612d 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,6 @@ require ( github.com/buger/jsonparser v1.1.1 github.com/cespare/xxhash/v2 v2.1.2 github.com/cheggaaa/pb v1.0.29 - github.com/colinmarc/hdfs/v2 v2.2.0 github.com/coredns/coredns v1.9.0 github.com/coreos/go-oidc v2.1.0+incompatible github.com/cosnicolaou/pbzip2 v1.0.1 @@ -36,7 +35,6 @@ require ( github.com/gorilla/mux v1.8.0 github.com/hashicorp/golang-lru v0.5.4 github.com/inconshreveable/mousetrap v1.0.0 - github.com/jcmturner/gokrb5/v8 v8.4.2 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.14.4 github.com/klauspost/cpuid/v2 v2.0.11 @@ -149,7 +147,7 @@ require ( github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.0.0 // indirect - github.com/jcmturner/goidentity/v6 v6.0.1 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect 
github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jessevdk/go-flags v1.5.0 // indirect github.com/josharian/intern v1.0.0 // indirect diff --git a/go.sum b/go.sum index 30f393993..4fe7af522 100644 --- a/go.sum +++ b/go.sum @@ -182,8 +182,6 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/colinmarc/hdfs/v2 v2.2.0 h1:4AaIlTq+/sWmeqYhI0dX8bD4YrMQM990tRjm636FkGM= -github.com/colinmarc/hdfs/v2 v2.2.0/go.mod h1:Wss6n3mtaZyRwWaqtSH+6ge01qT0rw9dJJmvoUnIQ/E= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= @@ -391,7 +389,6 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -472,10 +469,7 @@ github.com/gopherjs/gopherjs v0.0.0-20220104163920-15ed2e8cf2bd/go.mod h1:cz9oNY 
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= -github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= -github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= @@ -557,10 +551,8 @@ github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= -github.com/jcmturner/gokrb5/v8 v8.4.1/go.mod h1:T1hnNppQsBtxW0tCHMHTkAt8n/sABdzZgZdoFrZaZNM= github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= -github.com/jcmturner/rpc/v2 v2.0.2/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= @@ -817,7 +809,6 @@ github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.m 
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/getopt v0.0.0-20180729010549-6fdd0a2c7117/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= @@ -1057,7 +1048,6 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=