/*
 * Minio Cloud Storage, (C) 2019 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package hdfs

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"os"
	"os/user"
	"path"
	"sort"
	"strings"
	"syscall"
	"time"

	"github.com/minio/cli"
	krb "github.com/minio/gokrb5/v7/client"
	"github.com/minio/gokrb5/v7/config"
	"github.com/minio/gokrb5/v7/credentials"
	"github.com/minio/hdfs/v3"
	"github.com/minio/hdfs/v3/hadoopconf"
	"github.com/minio/minio-go/v6/pkg/s3utils"
	minio "github.com/minio/minio/cmd"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/env"
	xnet "github.com/minio/minio/pkg/net"
)

const (
	hdfsBackend = "hdfs"

	hdfsSeparator = minio.SlashSeparator
)

func init() {
	const hdfsGatewayTemplate = `NAME:
  {{.HelpName}} - {{.Usage}}

USAGE:
  {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} HDFS-NAMENODE [HDFS-NAMENODE...]
{{if .VisibleFlags}}
FLAGS:
  {{range .VisibleFlags}}{{.}}
  {{end}}{{end}}
HDFS-NAMENODE:
  HDFS namenode URI

EXAMPLES:
  1. Start minio gateway server for HDFS backend
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
     {{.Prompt}} {{.HelpName}} hdfs://namenode:8200

  2. Start minio gateway server for HDFS with edge caching enabled
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4"
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*,*.png"
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_AFTER{{.AssignmentOperator}}3
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_LOW{{.AssignmentOperator}}75
     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_HIGH{{.AssignmentOperator}}85
     {{.Prompt}} {{.HelpName}} hdfs://namenode:8200
`

	minio.RegisterGatewayCommand(cli.Command{
		Name:               hdfsBackend,
		Usage:              "Hadoop Distributed File System (HDFS)",
		Action:             hdfsGatewayMain,
		CustomHelpTemplate: hdfsGatewayTemplate,
		HideHelpCommand:    true,
	})
}

// Handler for 'minio gateway hdfs' command line.
func hdfsGatewayMain(ctx *cli.Context) {
	// Validate gateway arguments.
	if ctx.Args().First() == "help" {
		cli.ShowCommandHelpAndExit(ctx, hdfsBackend, 1)
	}

	minio.StartGateway(ctx, &HDFS{args: ctx.Args()})
}

// HDFS implements Gateway.
type HDFS struct {
	args []string
}

// Name implements Gateway interface.
func (g *HDFS) Name() string {
	return hdfsBackend
}
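
// getKerberosClient builds a Kerberos client from the host's krb5
// configuration and an existing credential cache. The config path is taken
// from KRB5_CONFIG (default /etc/krb5.conf) and the cache from KRB5CCNAME
// (default /tmp/krb5cc_<uid>); only FILE: credential caches are supported.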
func getKerberosClient() (*krb.Client, error) {
	cfg, err := config.Load(env.Get("KRB5_CONFIG", "/etc/krb5.conf"))
	if err != nil {
		return nil, err
	}

	u, err := user.Current()
	if err != nil {
		return nil, err
	}

	// Determine the ccache location from the environment, falling back to the default location.
	ccachePath := env.Get("KRB5CCNAME", fmt.Sprintf("/tmp/krb5cc_%s", u.Uid))
	if strings.Contains(ccachePath, ":") {
		if strings.HasPrefix(ccachePath, "FILE:") {
			ccachePath = strings.TrimPrefix(ccachePath, "FILE:")
		} else {
			return nil, fmt.Errorf("unable to use kerberos ccache: %s", ccachePath)
		}
	}

	ccache, err := credentials.LoadCCache(ccachePath)
	if err != nil {
		return nil, err
	}

	return krb.NewClientFromCCache(ccache, cfg)
}

// NewGatewayLayer returns an hdfs gateway object layer.
func (g *HDFS) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
	dialFunc := (&net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
		DualStack: true,
	}).DialContext

	hconfig, err := hadoopconf.LoadFromEnvironment()
	if err != nil {
		return nil, err
	}

	opts := hdfs.ClientOptionsFromConf(hconfig)
	opts.NamenodeDialFunc = dialFunc
	opts.DatanodeDialFunc = dialFunc

	// No addresses found in the Hadoop configuration, load them from the command line.
	if len(opts.Addresses) == 0 {
		var addresses []string
		for _, s := range g.args {
			u, err := xnet.ParseURL(s)
			if err != nil {
				return nil, err
			}
			addresses = append(addresses, u.Host)
		}
		opts.Addresses = addresses
	}

	u, err := user.Current()
	if err != nil {
		return nil, fmt.Errorf("unable to lookup local user: %s", err)
	}

	if opts.KerberosClient != nil {
		opts.KerberosClient, err = getKerberosClient()
		if err != nil {
			return nil, fmt.Errorf("unable to initialize kerberos client: %s", err)
		}
	} else {
		opts.User = env.Get("HADOOP_USER_NAME", u.Username)
	}

	clnt, err := hdfs.NewClient(opts)
	if err != nil {
		return nil, err
	}

	if err = clnt.MkdirAll(minio.PathJoin(hdfsSeparator, minioMetaTmpBucket), os.FileMode(0755)); err != nil {
		return nil, err
	}

	return &hdfsObjects{clnt: clnt, listPool: minio.NewTreeWalkPool(time.Minute * 30)}, nil
}

// Production - hdfs gateway is production ready.
func (g *HDFS) Production() bool {
	return true
}
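
// Shutdown closes the underlying HDFS client connection.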
func (n *hdfsObjects) Shutdown(ctx context.Context) error {
	return n.clnt.Close()
}
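
// StorageInfo reports used capacity from the namenode via StatFs; on error
// it returns an empty StorageInfo rather than failing the request.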
func (n *hdfsObjects) StorageInfo(ctx context.Context, _ bool) minio.StorageInfo {
	fsInfo, err := n.clnt.StatFs()
	if err != nil {
		return minio.StorageInfo{}
	}
	sinfo := minio.StorageInfo{}
	sinfo.Used = []uint64{fsInfo.Used}
	sinfo.Backend.Type = minio.BackendGateway
	sinfo.Backend.GatewayOnline = true
	return sinfo
}

// hdfsObjects implements gateway for Minio and S3 compatible object storage servers.
type hdfsObjects struct {
	minio.GatewayUnsupported
	clnt     *hdfs.Client
	listPool *minio.TreeWalkPool
}
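
// hdfsToObjectErr maps hdfs/os errors to minio object-layer errors. The
// variadic params are positional: bucket, then object, then uploadID, e.g.
//
//	return hdfsToObjectErr(ctx, err, bucket, object)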
func hdfsToObjectErr(ctx context.Context, err error, params ...string) error {
	if err == nil {
		return nil
	}
	bucket := ""
	object := ""
	uploadID := ""
	switch len(params) {
	case 3:
		uploadID = params[2]
		fallthrough
	case 2:
		object = params[1]
		fallthrough
	case 1:
		bucket = params[0]
	}

	switch {
	case os.IsNotExist(err):
		if uploadID != "" {
			return minio.InvalidUploadID{
				UploadID: uploadID,
			}
		}
		if object != "" {
			return minio.ObjectNotFound{Bucket: bucket, Object: object}
		}
		return minio.BucketNotFound{Bucket: bucket}
	case os.IsExist(err):
		if object != "" {
			return minio.PrefixAccessDenied{Bucket: bucket, Object: object}
		}
		return minio.BucketAlreadyOwnedByYou{Bucket: bucket}
	case errors.Is(err, syscall.ENOTEMPTY):
		if object != "" {
			return minio.PrefixAccessDenied{Bucket: bucket, Object: object}
		}
		return minio.BucketNotEmpty{Bucket: bucket}
	default:
		logger.LogIf(ctx, err)
		return err
	}
}

// hdfsIsValidBucketName verifies whether a bucket name is valid.
func hdfsIsValidBucketName(bucket string) bool {
	return s3utils.CheckValidBucketNameStrict(bucket) == nil
}
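
// DeleteBucket removes the top-level directory backing the bucket. Note that
// forceDelete is ignored here; deleting a non-empty bucket surfaces
// ENOTEMPTY, which hdfsToObjectErr maps to minio.BucketNotEmpty.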
func (n *hdfsObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error {
	if !hdfsIsValidBucketName(bucket) {
		return minio.BucketNameInvalid{Bucket: bucket}
	}
	return hdfsToObjectErr(ctx, n.clnt.Remove(minio.PathJoin(hdfsSeparator, bucket)), bucket)
}
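
// MakeBucketWithLocation creates a bucket as a top-level HDFS directory; the
// location argument is ignored since HDFS has no notion of regions.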
func (n *hdfsObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
	if !hdfsIsValidBucketName(bucket) {
		return minio.BucketNameInvalid{Bucket: bucket}
	}
	return hdfsToObjectErr(ctx, n.clnt.Mkdir(minio.PathJoin(hdfsSeparator, bucket), os.FileMode(0755)), bucket)
}

func (n *hdfsObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, err error) {
	fi, err := n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket))
	if err != nil {
		return bi, hdfsToObjectErr(ctx, err, bucket)
	}
	// As hdfs.Stat() doesn't carry anything other than ModTime(), use ModTime() as CreatedTime.
	return minio.BucketInfo{
		Name:    bucket,
		Created: fi.ModTime(),
	}, nil
}
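
// ListBuckets reads the HDFS root and reports each top-level directory as a
// bucket, skipping reserved and invalid bucket names.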
func (n *hdfsObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) {
	entries, err := n.clnt.ReadDir(hdfsSeparator)
	if err != nil {
		logger.LogIf(ctx, err)
		return nil, hdfsToObjectErr(ctx, err)
	}

	for _, entry := range entries {
		// Ignore all reserved bucket names and invalid bucket names.
		if isReservedOrInvalidBucket(entry.Name(), false) {
			continue
		}
		buckets = append(buckets, minio.BucketInfo{
			Name: entry.Name(),
			// As hdfs.Stat() doesn't carry CreatedTime, use ModTime() as CreatedTime.
			Created: entry.ModTime(),
		})
	}

	// Sort bucket infos by bucket name.
	sort.Sort(byBucketName(buckets))
	return buckets, nil
}
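
// listDirFactory returns the ListDirFunc used by the tree-walk pool. The
// returned listDir reports emptyDir=true for a directory with no entries so
// the walker can treat an empty directory as an object in its own right.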
func (n *hdfsObjects) listDirFactory() minio.ListDirFunc {
	// listDir - lists all the entries at a given prefix and given entry in the prefix.
	listDir := func(bucket, prefixDir, prefixEntry string) (emptyDir bool, entries []string) {
		f, err := n.clnt.Open(minio.PathJoin(hdfsSeparator, bucket, prefixDir))
		if err != nil {
			if os.IsNotExist(err) {
				err = nil
			}
			logger.LogIf(context.Background(), err)
			return
		}
		defer f.Close()
		fis, err := f.Readdir(0)
		if err != nil {
			logger.LogIf(context.Background(), err)
			return
		}
		if len(fis) == 0 {
			return true, nil
		}
		for _, fi := range fis {
			if fi.IsDir() {
				entries = append(entries, fi.Name()+hdfsSeparator)
			} else {
				entries = append(entries, fi.Name())
			}
		}
		return false, minio.FilterMatchingPrefix(entries, prefixEntry)
	}

	// Return list factory instance.
	return listDir
}

// ListObjects lists all blobs in HDFS bucket filtered by prefix.
func (n *hdfsObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
	if _, err := n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket)); err != nil {
		return loi, hdfsToObjectErr(ctx, err, bucket)
	}

	getObjectInfo := func(ctx context.Context, bucket, entry string) (minio.ObjectInfo, error) {
		fi, err := n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket, entry))
		if err != nil {
			return minio.ObjectInfo{}, hdfsToObjectErr(ctx, err, bucket, entry)
		}
		return minio.ObjectInfo{
			Bucket:  bucket,
			Name:    entry,
			ModTime: fi.ModTime(),
			Size:    fi.Size(),
			IsDir:   fi.IsDir(),
			AccTime: fi.(*hdfs.FileInfo).AccessTime(),
		}, nil
	}

	return minio.ListObjects(ctx, n, bucket, prefix, marker, delimiter, maxKeys, n.listPool, n.listDirFactory(), getObjectInfo, getObjectInfo)
}

// deleteObject deletes a file path if it's empty. If it's successfully deleted,
// it will recursively move up the tree, deleting empty parent directories
// until it finds one with files in it. Returns nil for a non-empty directory.
func (n *hdfsObjects) deleteObject(basePath, deletePath string) error {
	if basePath == deletePath {
		return nil
	}

	// Attempt to remove path.
	if err := n.clnt.Remove(deletePath); err != nil {
		if errors.Is(err, syscall.ENOTEMPTY) {
			// Ignore errors if the directory is not empty. The server relies on
			// this functionality, and sometimes uses recursion that should not
			// error on parent directories.
			return nil
		}
		return err
	}

	// Trailing slash is removed when found so that path.Dir() works as intended.
	deletePath = strings.TrimSuffix(deletePath, hdfsSeparator)
	deletePath = path.Dir(deletePath)

	// Delete parent directory. Errors for parent directories shouldn't trickle down.
	n.deleteObject(basePath, deletePath)

	return nil
}

// ListObjectsV2 lists all blobs in HDFS bucket filtered by prefix.
func (n *hdfsObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int,
	fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, err error) {
	// fetchOwner is not supported and unused.
	marker := continuationToken
	if marker == "" {
		marker = startAfter
	}
	resultV1, err := n.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
	if err != nil {
		return loi, err
	}
	return minio.ListObjectsV2Info{
		Objects:               resultV1.Objects,
		Prefixes:              resultV1.Prefixes,
		ContinuationToken:     continuationToken,
		NextContinuationToken: resultV1.NextMarker,
		IsTruncated:           resultV1.IsTruncated,
	}, nil
}

func (n *hdfsObjects) DeleteObject(ctx context.Context, bucket, object string) error {
	return hdfsToObjectErr(ctx, n.deleteObject(minio.PathJoin(hdfsSeparator, bucket), minio.PathJoin(hdfsSeparator, bucket, object)), bucket, object)
}

func (n *hdfsObjects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) {
	errs := make([]error, len(objects))
	for idx, object := range objects {
		errs[idx] = n.DeleteObject(ctx, bucket, object)
	}
	return errs, nil
}
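
// GetObjectNInfo streams the requested range through an io.Pipe: a goroutine
// runs GetObject into the pipe writer while the returned reader consumes it,
// and closing the reader tears the goroutine down on partial reads.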
func (n *hdfsObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) {
	objInfo, err := n.GetObjectInfo(ctx, bucket, object, opts)
	if err != nil {
		return nil, err
	}

	var startOffset, length int64
	startOffset, length, err = rs.GetOffsetLength(objInfo.Size)
	if err != nil {
		return nil, err
	}

	pr, pw := io.Pipe()
	go func() {
		nerr := n.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, opts)
		pw.CloseWithError(nerr)
	}()

	// Setup cleanup function to cause the above go-routine to
	// exit in case of partial read
	pipeCloser := func() { pr.Close() }
	return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser)
}
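
// CopyObject is implemented as a fresh PutObject of the source stream; a
// copy where source and destination match simply returns the existing
// object's info.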
func (n *hdfsObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) {
	cpSrcDstSame := minio.IsStringEqual(minio.PathJoin(hdfsSeparator, srcBucket, srcObject), minio.PathJoin(hdfsSeparator, dstBucket, dstObject))
	if cpSrcDstSame {
		return n.GetObjectInfo(ctx, srcBucket, srcObject, minio.ObjectOptions{})
	}

	return n.PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, minio.ObjectOptions{
		ServerSideEncryption: dstOpts.ServerSideEncryption,
		UserDefined:          srcInfo.UserDefined,
	})
}
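
// GetObject writes length bytes starting at startOffset to writer, using an
// io.SectionReader over the HDFS file reader to honor range requests.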
func (n *hdfsObjects) GetObject(ctx context.Context, bucket, key string, startOffset, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
	if _, err := n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket)); err != nil {
		return hdfsToObjectErr(ctx, err, bucket)
	}
	rd, err := n.clnt.Open(minio.PathJoin(hdfsSeparator, bucket, key))
	if err != nil {
		return hdfsToObjectErr(ctx, err, bucket, key)
	}
	defer rd.Close()
	_, err = io.Copy(writer, io.NewSectionReader(rd, startOffset, length))
	if err == io.ErrClosedPipe {
		// hdfs library doesn't send EOF correctly, so io.Copy attempts
		// to write which returns io.ErrClosedPipe - just ignore
		// this for now.
		err = nil
	}
	return hdfsToObjectErr(ctx, err, bucket, key)
}
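
// isObjectDir reports whether the given object path is an empty directory.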
func (n *hdfsObjects) isObjectDir(ctx context.Context, bucket, object string) bool {
	f, err := n.clnt.Open(minio.PathJoin(hdfsSeparator, bucket, object))
	if err != nil {
		if os.IsNotExist(err) {
			return false
		}
		logger.LogIf(ctx, err)
		return false
	}
	defer f.Close()
	fis, err := f.Readdir(1)
	if err != nil && err != io.EOF {
		logger.LogIf(ctx, err)
		return false
	}
	// Readdir returns io.EOF when len(fis) == 0.
	return len(fis) == 0
}

// GetObjectInfo reads object info and replies back ObjectInfo.
func (n *hdfsObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
	_, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket))
	if err != nil {
		return objInfo, hdfsToObjectErr(ctx, err, bucket)
	}
	if strings.HasSuffix(object, hdfsSeparator) && !n.isObjectDir(ctx, bucket, object) {
		return objInfo, hdfsToObjectErr(ctx, os.ErrNotExist, bucket, object)
	}

	fi, err := n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket, object))
	if err != nil {
		return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
	}
	return minio.ObjectInfo{
		Bucket:  bucket,
		Name:    object,
		ModTime: fi.ModTime(),
		Size:    fi.Size(),
		IsDir:   fi.IsDir(),
		AccTime: fi.(*hdfs.FileInfo).AccessTime(),
	}, nil
}
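
// PutObject writes incoming data to a temporary file under the gateway's
// meta tmp directory and renames it into place, so readers never observe a
// partially written object.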
func (n *hdfsObjects) PutObject(ctx context.Context, bucket string, object string, r *minio.PutObjReader, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
	_, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket))
	if err != nil {
		return objInfo, hdfsToObjectErr(ctx, err, bucket)
	}

	name := minio.PathJoin(hdfsSeparator, bucket, object)

	// If it's a directory, create a prefix.
	if strings.HasSuffix(object, hdfsSeparator) && r.Size() == 0 {
		if err = n.clnt.MkdirAll(name, os.FileMode(0755)); err != nil {
			n.deleteObject(minio.PathJoin(hdfsSeparator, bucket), name)
			return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
		}
	} else {
		tmpname := minio.PathJoin(hdfsSeparator, minioMetaTmpBucket, minio.MustGetUUID())
		var w *hdfs.FileWriter
		w, err = n.clnt.Create(tmpname)
		if err != nil {
			return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
		}
		defer n.deleteObject(minio.PathJoin(hdfsSeparator, minioMetaTmpBucket), tmpname)
		if _, err = io.Copy(w, r); err != nil {
			w.Close()
			return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
		}
		dir := path.Dir(name)
		if dir != "" {
			if err = n.clnt.MkdirAll(dir, os.FileMode(0755)); err != nil {
				w.Close()
				n.deleteObject(minio.PathJoin(hdfsSeparator, bucket), dir)
				return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
			}
		}
		w.Close()
		if err = n.clnt.Rename(tmpname, name); err != nil {
			return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
		}
	}
	fi, err := n.clnt.Stat(name)
	if err != nil {
		return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
	}
	return minio.ObjectInfo{
		Bucket:  bucket,
		Name:    object,
		ETag:    r.MD5CurrentHexString(),
		ModTime: fi.ModTime(),
		Size:    fi.Size(),
		IsDir:   fi.IsDir(),
		AccTime: fi.(*hdfs.FileInfo).AccessTime(),
	}, nil
}
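
// NewMultipartUpload allocates a UUID upload ID backed by an empty file in
// the meta tmp directory; parts are later appended to that file.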
func (n *hdfsObjects) NewMultipartUpload(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (uploadID string, err error) {
	_, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket))
	if err != nil {
		return uploadID, hdfsToObjectErr(ctx, err, bucket)
	}

	uploadID = minio.MustGetUUID()
	if err = n.clnt.CreateEmptyFile(minio.PathJoin(hdfsSeparator, minioMetaTmpBucket, uploadID)); err != nil {
		return uploadID, hdfsToObjectErr(ctx, err, bucket)
	}

	return uploadID, nil
}

func (n *hdfsObjects) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, err error) {
	_, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket))
	if err != nil {
		return lmi, hdfsToObjectErr(ctx, err, bucket)
	}

	// Listing multipart uploads is deliberately unsupported, so return an empty result.
	return lmi, nil
}

func (n *hdfsObjects) checkUploadIDExists(ctx context.Context, bucket, object, uploadID string) (err error) {
	_, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, minioMetaTmpBucket, uploadID))
	if err != nil {
		return hdfsToObjectErr(ctx, err, bucket, object, uploadID)
	}
	return nil
}

func (n *hdfsObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (result minio.ListPartsInfo, err error) {
	_, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket))
	if err != nil {
		return result, hdfsToObjectErr(ctx, err, bucket)
	}

	if err = n.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
		return result, err
	}

	// Listing parts is deliberately unsupported, so return an empty result.
	return result, nil
}

func (n *hdfsObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int,
	startOffset int64, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.PartInfo, error) {
	return n.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.PutObjReader, dstOpts)
}
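
// PutObjectPart appends the part data to the upload's tmp file. Since parts
// are appended in arrival order, callers must upload parts sequentially and
// in order for the assembled object to be correct.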
func (n *hdfsObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *minio.PutObjReader, opts minio.ObjectOptions) (info minio.PartInfo, err error) {
	_, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket))
	if err != nil {
		return info, hdfsToObjectErr(ctx, err, bucket)
	}

	var w *hdfs.FileWriter
	w, err = n.clnt.Append(minio.PathJoin(hdfsSeparator, minioMetaTmpBucket, uploadID))
	if err != nil {
		return info, hdfsToObjectErr(ctx, err, bucket, object, uploadID)
	}
	defer w.Close()
	_, err = io.Copy(w, r.Reader)
	if err != nil {
		return info, hdfsToObjectErr(ctx, err, bucket, object, uploadID)
	}

	info.PartNumber = partID
	info.ETag = r.MD5CurrentHexString()
	info.LastModified = minio.UTCNow()
	info.Size = r.Reader.Size()

	return info, nil
}
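
// CompleteMultipartUpload renames the assembled tmp file to the final object
// path and returns the S3-compatible multipart ETag computed from the parts.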
func (n *hdfsObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []minio.CompletePart, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
	_, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket))
	if err != nil {
		return objInfo, hdfsToObjectErr(ctx, err, bucket)
	}

	if err = n.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
		return objInfo, err
	}

	name := minio.PathJoin(hdfsSeparator, bucket, object)
	dir := path.Dir(name)
	if dir != "" {
		if err = n.clnt.MkdirAll(dir, os.FileMode(0755)); err != nil {
			return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
		}
	}

	err = n.clnt.Rename(minio.PathJoin(hdfsSeparator, minioMetaTmpBucket, uploadID), name)
	// Renaming over an existing object is an error on HDFS,
	// so remove the old object and rename again.
	if os.IsExist(err) {
		if err = n.clnt.Remove(name); err != nil {
			if dir != "" {
				n.deleteObject(minio.PathJoin(hdfsSeparator, bucket), dir)
			}
			return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
		}
		if err = n.clnt.Rename(minio.PathJoin(hdfsSeparator, minioMetaTmpBucket, uploadID), name); err != nil {
			if dir != "" {
				n.deleteObject(minio.PathJoin(hdfsSeparator, bucket), dir)
			}
			return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
		}
	}
	fi, err := n.clnt.Stat(name)
	if err != nil {
		return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
	}

	// Calculate s3 compatible md5sum for complete multipart.
	s3MD5 := minio.ComputeCompleteMultipartMD5(parts)

	return minio.ObjectInfo{
		Bucket:  bucket,
		Name:    object,
		ETag:    s3MD5,
		ModTime: fi.ModTime(),
		Size:    fi.Size(),
		IsDir:   fi.IsDir(),
		AccTime: fi.(*hdfs.FileInfo).AccessTime(),
	}, nil
}

func (n *hdfsObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) (err error) {
	_, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket))
	if err != nil {
		return hdfsToObjectErr(ctx, err, bucket)
	}
	return hdfsToObjectErr(ctx, n.clnt.Remove(minio.PathJoin(hdfsSeparator, minioMetaTmpBucket, uploadID)), bucket, object, uploadID)
}

// IsReady returns whether the layer is ready to take requests.
func (n *hdfsObjects) IsReady(_ context.Context) bool {
	return true
}