fs: Do not return reservedBucket names in ListBuckets() (#3754)

Make sure to skip reserved bucket names in `ListBuckets()`;
the current code didn't skip them properly. Also generalize
this behavior for both XL and FS.
This commit is contained in:
Harshavardhana 2017-02-16 14:52:14 -08:00 committed by GitHub
parent 8816b08aae
commit 50b4e54a75
28 changed files with 85 additions and 104 deletions

View File

@ -150,7 +150,7 @@ func makeAdminPeers(eps []*url.URL) adminPeers {
secretKey: serverCred.SecretKey, secretKey: serverCred.SecretKey,
serverAddr: ep.Host, serverAddr: ep.Host,
secureConn: globalIsSSL, secureConn: globalIsSSL,
serviceEndpoint: path.Join(reservedBucket, adminPath), serviceEndpoint: path.Join(minioReservedBucketPath, adminPath),
serviceName: "Admin", serviceName: "Admin",
} }

View File

@ -141,7 +141,7 @@ func registerAdminRPCRouter(mux *router.Router) error {
if err != nil { if err != nil {
return traceError(err) return traceError(err)
} }
adminRouter := mux.NewRoute().PathPrefix(reservedBucket).Subrouter() adminRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()
adminRouter.Path(adminPath).Handler(adminRPCServer) adminRouter.Path(adminPath).Handler(adminRPCServer)
return nil return nil
} }

View File

@ -111,7 +111,7 @@ func updateCredsOnPeers(creds credential) map[string]error {
secretKey: serverCred.SecretKey, secretKey: serverCred.SecretKey,
serverAddr: peers[ix], serverAddr: peers[ix],
secureConn: globalIsSSL, secureConn: globalIsSSL,
serviceEndpoint: path.Join(reservedBucket, browserPeerPath), serviceEndpoint: path.Join(minioReservedBucketPath, browserPeerPath),
serviceName: "BrowserPeer", serviceName: "BrowserPeer",
}) })

View File

@ -36,7 +36,7 @@ func (s *TestRPCBrowserPeerSuite) SetUpSuite(c *testing.T) {
serverAddr: s.testServer.Server.Listener.Addr().String(), serverAddr: s.testServer.Server.Listener.Addr().String(),
accessKey: s.testServer.AccessKey, accessKey: s.testServer.AccessKey,
secretKey: s.testServer.SecretKey, secretKey: s.testServer.SecretKey,
serviceEndpoint: path.Join(reservedBucket, browserPeerPath), serviceEndpoint: path.Join(minioReservedBucketPath, browserPeerPath),
serviceName: "BrowserPeer", serviceName: "BrowserPeer",
} }
} }

View File

@ -45,7 +45,7 @@ func registerBrowserPeerRPCRouter(mux *router.Router) error {
return traceError(err) return traceError(err)
} }
bpRouter := mux.NewRoute().PathPrefix(reservedBucket).Subrouter() bpRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()
bpRouter.Path(browserPeerPath).Handler(bpRPCServer) bpRouter.Path(browserPeerPath).Handler(bpRPCServer)
return nil return nil
} }

View File

@ -253,19 +253,19 @@ func unmarshalSqsARN(queueARN string) (mSqs arnSQS) {
} }
sqsType := strings.TrimPrefix(queueARN, minioSqs+serverConfig.GetRegion()+":") sqsType := strings.TrimPrefix(queueARN, minioSqs+serverConfig.GetRegion()+":")
switch { switch {
case strings.HasSuffix(sqsType, queueTypeAMQP): case hasSuffix(sqsType, queueTypeAMQP):
mSqs.Type = queueTypeAMQP mSqs.Type = queueTypeAMQP
case strings.HasSuffix(sqsType, queueTypeNATS): case hasSuffix(sqsType, queueTypeNATS):
mSqs.Type = queueTypeNATS mSqs.Type = queueTypeNATS
case strings.HasSuffix(sqsType, queueTypeElastic): case hasSuffix(sqsType, queueTypeElastic):
mSqs.Type = queueTypeElastic mSqs.Type = queueTypeElastic
case strings.HasSuffix(sqsType, queueTypeRedis): case hasSuffix(sqsType, queueTypeRedis):
mSqs.Type = queueTypeRedis mSqs.Type = queueTypeRedis
case strings.HasSuffix(sqsType, queueTypePostgreSQL): case hasSuffix(sqsType, queueTypePostgreSQL):
mSqs.Type = queueTypePostgreSQL mSqs.Type = queueTypePostgreSQL
case strings.HasSuffix(sqsType, queueTypeKafka): case hasSuffix(sqsType, queueTypeKafka):
mSqs.Type = queueTypeKafka mSqs.Type = queueTypeKafka
case strings.HasSuffix(sqsType, queueTypeWebhook): case hasSuffix(sqsType, queueTypeWebhook):
mSqs.Type = queueTypeWebhook mSqs.Type = queueTypeWebhook
} // Add more queues here. } // Add more queues here.
mSqs.AccountID = strings.TrimSuffix(sqsType, ":"+mSqs.Type) mSqs.AccountID = strings.TrimSuffix(sqsType, ":"+mSqs.Type)

View File

@ -86,10 +86,10 @@ func traceError(e error, errs ...error) error {
fn := runtime.FuncForPC(pc) fn := runtime.FuncForPC(pc)
file, line := fn.FileLine(pc) file, line := fn.FileLine(pc)
name := fn.Name() name := fn.Name()
if strings.HasSuffix(name, "ServeHTTP") { if hasSuffix(name, "ServeHTTP") {
break break
} }
if strings.HasSuffix(name, "runtime.") { if hasSuffix(name, "runtime.") {
break break
} }

View File

@ -278,7 +278,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
} }
entry := strings.TrimPrefix(walkResult.entry, retainSlash(bucket)) entry := strings.TrimPrefix(walkResult.entry, retainSlash(bucket))
if strings.HasSuffix(walkResult.entry, slashSeparator) { if hasSuffix(walkResult.entry, slashSeparator) {
uploads = append(uploads, uploadMetadata{ uploads = append(uploads, uploadMetadata{
Object: entry, Object: entry,
}) })
@ -314,7 +314,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
for _, upload := range uploads { for _, upload := range uploads {
var objectName string var objectName string
var uploadID string var uploadID string
if strings.HasSuffix(upload.Object, slashSeparator) { if hasSuffix(upload.Object, slashSeparator) {
// All directory entries are common prefixes. // All directory entries are common prefixes.
uploadID = "" // Upload ids are empty for CommonPrefixes. uploadID = "" // Upload ids are empty for CommonPrefixes.
objectName = upload.Object objectName = upload.Object

View File

@ -19,7 +19,6 @@ package cmd
import ( import (
"crypto/md5" "crypto/md5"
"encoding/hex" "encoding/hex"
"errors"
"fmt" "fmt"
"hash" "hash"
"io" "io"
@ -291,12 +290,11 @@ func (fs fsObjects) ListBuckets() ([]BucketInfo, error) {
return nil, toObjectErr(traceError(errDiskNotFound)) return nil, toObjectErr(traceError(errDiskNotFound))
} }
var invalidBucketNames []string
for _, entry := range entries { for _, entry := range entries {
if entry == minioMetaBucket+"/" || !strings.HasSuffix(entry, slashSeparator) { // Ignore all reserved bucket names and invalid bucket names.
if isReservedOrInvalidBucket(entry) {
continue continue
} }
var fi os.FileInfo var fi os.FileInfo
fi, err = fsStatDir(pathJoin(fs.fsPath, entry)) fi, err = fsStatDir(pathJoin(fs.fsPath, entry))
if err != nil { if err != nil {
@ -310,24 +308,13 @@ func (fs fsObjects) ListBuckets() ([]BucketInfo, error) {
return nil, err return nil, err
} }
if !IsValidBucketName(fi.Name()) {
invalidBucketNames = append(invalidBucketNames, fi.Name())
continue
}
bucketInfos = append(bucketInfos, BucketInfo{ bucketInfos = append(bucketInfos, BucketInfo{
Name: fi.Name(), Name: fi.Name(),
// As os.Stat() doesn't carry other than ModTime(), use ModTime() as CreatedTime. // As os.Stat() doesn't carry CreatedTime, use ModTime() as CreatedTime.
Created: fi.ModTime(), Created: fi.ModTime(),
}) })
} }
// Print a user friendly message if we indeed skipped certain directories which are
// incompatible with S3's bucket name restrictions.
if len(invalidBucketNames) > 0 {
errorIf(errors.New("One or more invalid bucket names found"), "Skipping %s", invalidBucketNames)
}
// Sort bucket infos by bucket name. // Sort bucket infos by bucket name.
sort.Sort(byBucketName(bucketInfos)) sort.Sort(byBucketName(bucketInfos))
@ -780,7 +767,7 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
// Convert entry to ObjectInfo // Convert entry to ObjectInfo
entryToObjectInfo := func(entry string) (objInfo ObjectInfo, err error) { entryToObjectInfo := func(entry string) (objInfo ObjectInfo, err error) {
if strings.HasSuffix(entry, slashSeparator) { if hasSuffix(entry, slashSeparator) {
// Object name needs to be full path. // Object name needs to be full path.
objInfo.Name = entry objInfo.Name = entry
objInfo.IsDir = true objInfo.IsDir = true
@ -804,7 +791,7 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
// bucket argument is unused as we don't need to StatFile // bucket argument is unused as we don't need to StatFile
// to figure if it's a file, just need to check that the // to figure if it's a file, just need to check that the
// object string does not end with "/". // object string does not end with "/".
return !strings.HasSuffix(object, slashSeparator) return !hasSuffix(object, slashSeparator)
} }
listDir := fs.listDirFactory(isLeaf) listDir := fs.listDirFactory(isLeaf)
walkResultCh = startTreeWalk(bucket, prefix, marker, recursive, listDir, isLeaf, endWalkCh) walkResultCh = startTreeWalk(bucket, prefix, marker, recursive, listDir, isLeaf, endWalkCh)

View File

@ -68,7 +68,8 @@ func (h requestSizeLimitHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
// Reserved bucket. // Reserved bucket.
const ( const (
reservedBucket = "/minio" minioReservedBucket = "minio"
minioReservedBucketPath = "/" + minioReservedBucket
) )
// Adds redirect rules for incoming requests. // Adds redirect rules for incoming requests.
@ -86,8 +87,8 @@ func setBrowserRedirectHandler(h http.Handler) http.Handler {
// serves only limited purpose on redirect-handler for // serves only limited purpose on redirect-handler for
// browser requests. // browser requests.
func getRedirectLocation(urlPath string) (rLocation string) { func getRedirectLocation(urlPath string) (rLocation string) {
if urlPath == reservedBucket { if urlPath == minioReservedBucketPath {
rLocation = reservedBucket + "/" rLocation = minioReservedBucketPath + "/"
} }
if contains([]string{ if contains([]string{
"/", "/",
@ -95,7 +96,7 @@ func getRedirectLocation(urlPath string) (rLocation string) {
"/login", "/login",
"/favicon.ico", "/favicon.ico",
}, urlPath) { }, urlPath) {
rLocation = reservedBucket + urlPath rLocation = minioReservedBucketPath + urlPath
} }
return rLocation return rLocation
} }
@ -143,8 +144,8 @@ func setBrowserCacheControlHandler(h http.Handler) http.Handler {
func (h cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (h cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.Method == httpGET && guessIsBrowserReq(r) && globalIsBrowserEnabled { if r.Method == httpGET && guessIsBrowserReq(r) && globalIsBrowserEnabled {
// For all browser requests set appropriate Cache-Control policies // For all browser requests set appropriate Cache-Control policies
if hasPrefix(r.URL.Path, reservedBucket+"/") { if hasPrefix(r.URL.Path, minioReservedBucketPath+"/") {
if hasSuffix(r.URL.Path, ".js") || r.URL.Path == reservedBucket+"/favicon.ico" { if hasSuffix(r.URL.Path, ".js") || r.URL.Path == minioReservedBucketPath+"/favicon.ico" {
// For assets set cache expiry of one year. For each release, the name // For assets set cache expiry of one year. For each release, the name
// of the asset name will change and hence it can not be served from cache. // of the asset name will change and hence it can not be served from cache.
w.Header().Set("Cache-Control", "max-age=31536000") w.Header().Set("Cache-Control", "max-age=31536000")
@ -160,17 +161,17 @@ func (h cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Adds verification for incoming paths. // Adds verification for incoming paths.
type minioPrivateBucketHandler struct { type minioPrivateBucketHandler struct {
handler http.Handler handler http.Handler
reservedBucket string reservedBucketPath string
} }
func setPrivateBucketHandler(h http.Handler) http.Handler { func setPrivateBucketHandler(h http.Handler) http.Handler {
return minioPrivateBucketHandler{handler: h, reservedBucket: reservedBucket} return minioPrivateBucketHandler{h, minioReservedBucketPath}
} }
func (h minioPrivateBucketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (h minioPrivateBucketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// For all non browser requests, reject access to 'reservedBucket'. // For all non browser requests, reject access to 'reservedBucketPath'.
if !guessIsBrowserReq(r) && path.Clean(r.URL.Path) == reservedBucket { if !guessIsBrowserReq(r) && path.Clean(r.URL.Path) == h.reservedBucketPath {
writeErrorResponse(w, ErrAllAccessDisabled, r.URL) writeErrorResponse(w, ErrAllAccessDisabled, r.URL)
return return
} }

View File

@ -29,28 +29,28 @@ func TestRedirectLocation(t *testing.T) {
}{ }{
{ {
// 1. When urlPath is '/minio' // 1. When urlPath is '/minio'
urlPath: reservedBucket, urlPath: minioReservedBucketPath,
location: reservedBucket + "/", location: minioReservedBucketPath + "/",
}, },
{ {
// 2. When urlPath is '/' // 2. When urlPath is '/'
urlPath: "/", urlPath: "/",
location: reservedBucket + "/", location: minioReservedBucketPath + "/",
}, },
{ {
// 3. When urlPath is '/webrpc' // 3. When urlPath is '/webrpc'
urlPath: "/webrpc", urlPath: "/webrpc",
location: reservedBucket + "/webrpc", location: minioReservedBucketPath + "/webrpc",
}, },
{ {
// 4. When urlPath is '/login' // 4. When urlPath is '/login'
urlPath: "/login", urlPath: "/login",
location: reservedBucket + "/login", location: minioReservedBucketPath + "/login",
}, },
{ {
// 5. When urlPath is '/favicon.ico' // 5. When urlPath is '/favicon.ico'
urlPath: "/favicon.ico", urlPath: "/favicon.ico",
location: reservedBucket + "/favicon.ico", location: minioReservedBucketPath + "/favicon.ico",
}, },
{ {
// 6. When urlPath is '/unknown' // 6. When urlPath is '/unknown'

View File

@ -17,7 +17,6 @@
package cmd package cmd
import ( import (
"strings"
"testing" "testing"
"time" "time"
) )
@ -26,10 +25,10 @@ import (
func TestHumanizedDuration(t *testing.T) { func TestHumanizedDuration(t *testing.T) {
duration := time.Duration(90487000000000) duration := time.Duration(90487000000000)
humanDuration := timeDurationToHumanizedDuration(duration) humanDuration := timeDurationToHumanizedDuration(duration)
if !strings.HasSuffix(humanDuration.String(), "seconds") { if !hasSuffix(humanDuration.String(), "seconds") {
t.Fatal("Stringer method for humanized duration should have seconds.", humanDuration.String()) t.Fatal("Stringer method for humanized duration should have seconds.", humanDuration.String())
} }
if strings.HasSuffix(humanDuration.StringShort(), "seconds") { if hasSuffix(humanDuration.StringShort(), "seconds") {
t.Fatal("StringShorter method for humanized duration should not have seconds.", humanDuration.StringShort()) t.Fatal("StringShorter method for humanized duration should not have seconds.", humanDuration.StringShort())
} }
@ -42,9 +41,9 @@ func TestHumanizedDuration(t *testing.T) {
t.Fatalf("Expected %#v, got %#v incorrect conversion of duration to humanized form", t.Fatalf("Expected %#v, got %#v incorrect conversion of duration to humanized form",
expectedHumanSecDuration, humanSecDuration) expectedHumanSecDuration, humanSecDuration)
} }
if strings.HasSuffix(humanSecDuration.String(), "days") || if hasSuffix(humanSecDuration.String(), "days") ||
strings.HasSuffix(humanSecDuration.String(), "hours") || hasSuffix(humanSecDuration.String(), "hours") ||
strings.HasSuffix(humanSecDuration.String(), "minutes") { hasSuffix(humanSecDuration.String(), "minutes") {
t.Fatal("Stringer method for humanized duration should have only seconds.", humanSecDuration.String()) t.Fatal("Stringer method for humanized duration should have only seconds.", humanSecDuration.String())
} }
@ -57,7 +56,7 @@ func TestHumanizedDuration(t *testing.T) {
t.Fatalf("Expected %#v, got %#v incorrect conversion of duration to humanized form", t.Fatalf("Expected %#v, got %#v incorrect conversion of duration to humanized form",
expectedHumanMinDuration, humanMinDuration) expectedHumanMinDuration, humanMinDuration)
} }
if strings.HasSuffix(humanMinDuration.String(), "hours") { if hasSuffix(humanMinDuration.String(), "hours") {
t.Fatal("Stringer method for humanized duration should have only minutes.", humanMinDuration.String()) t.Fatal("Stringer method for humanized duration should have only minutes.", humanMinDuration.String())
} }
@ -70,7 +69,7 @@ func TestHumanizedDuration(t *testing.T) {
t.Fatalf("Expected %#v, got %#v incorrect conversion of duration to humanized form", t.Fatalf("Expected %#v, got %#v incorrect conversion of duration to humanized form",
expectedHumanHourDuration, humanHourDuration) expectedHumanHourDuration, humanHourDuration)
} }
if strings.HasSuffix(humanHourDuration.String(), "days") { if hasSuffix(humanHourDuration.String(), "days") {
t.Fatal("Stringer method for humanized duration should have hours.", humanHourDuration.String()) t.Fatal("Stringer method for humanized duration should have hours.", humanHourDuration.String())
} }
} }

View File

@ -30,7 +30,7 @@ import (
const ( const (
// Lock rpc server endpoint. // Lock rpc server endpoint.
lockRPCPath = "/minio/lock" lockRPCPath = "/lock"
// Lock maintenance interval. // Lock maintenance interval.
lockMaintenanceInterval = 1 * time.Minute // 1 minute. lockMaintenanceInterval = 1 * time.Minute // 1 minute.
@ -122,8 +122,8 @@ func registerStorageLockers(mux *router.Router, lockServers []*lockServer) error
if err := lockRPCServer.RegisterName("Dsync", lockServer); err != nil { if err := lockRPCServer.RegisterName("Dsync", lockServer); err != nil {
return traceError(err) return traceError(err)
} }
lockRouter := mux.PathPrefix(reservedBucket).Subrouter() lockRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter()
lockRouter.Path(path.Join("/lock", lockServer.rpcPath)).Handler(lockRPCServer) lockRouter.Path(path.Join(lockRPCPath, lockServer.rpcPath)).Handler(lockRPCServer)
} }
return nil return nil
} }

View File

@ -20,7 +20,6 @@ import (
"net" "net"
"net/url" "net/url"
"runtime" "runtime"
"strings"
"sync" "sync"
"time" "time"
@ -59,7 +58,7 @@ func isRemoteDisk(disk StorageAPI) bool {
// if size == 0 and object ends with slashSeparator then // if size == 0 and object ends with slashSeparator then
// returns true. // returns true.
func isObjectDir(object string, size int64) bool { func isObjectDir(object string, size int64) bool {
return strings.HasSuffix(object, slashSeparator) && size == 0 return hasSuffix(object, slashSeparator) && size == 0
} }
// Converts just bucket, object metadata into ObjectInfo datatype. // Converts just bucket, object metadata into ObjectInfo datatype.
@ -284,7 +283,7 @@ func cleanupDir(storage StorageAPI, volume, dirPath string) error {
var delFunc func(string) error var delFunc func(string) error
// Function to delete entries recursively. // Function to delete entries recursively.
delFunc = func(entryPath string) error { delFunc = func(entryPath string) error {
if !strings.HasSuffix(entryPath, slashSeparator) { if !hasSuffix(entryPath, slashSeparator) {
// Delete the file entry. // Delete the file entry.
return traceError(storage.DeleteFile(volume, entryPath)) return traceError(storage.DeleteFile(volume, entryPath))
} }

View File

@ -129,7 +129,7 @@ func retainSlash(s string) string {
func pathJoin(elem ...string) string { func pathJoin(elem ...string) string {
trailingSlash := "" trailingSlash := ""
if len(elem) > 0 { if len(elem) > 0 {
if strings.HasSuffix(elem[len(elem)-1], slashSeparator) { if hasSuffix(elem[len(elem)-1], slashSeparator) {
trailingSlash = "/" trailingSlash = "/"
} }
} }
@ -180,6 +180,15 @@ func hasSuffix(s string, suffix string) bool {
return strings.HasSuffix(s, suffix) return strings.HasSuffix(s, suffix)
} }
// isReservedOrInvalidBucket reports whether a listed bucket entry should be
// hidden from callers: either the name is not a valid S3 bucket name, or it
// is one of Minio's internal reserved buckets (metadata or browser assets).
func isReservedOrInvalidBucket(bucketEntry string) bool {
	// Directory listings may carry a trailing slash; strip it before checks.
	name := strings.TrimSuffix(bucketEntry, slashSeparator)
	switch {
	case !IsValidBucketName(name):
		// Incompatible with S3 bucket-name restrictions.
		return true
	case name == minioMetaBucket, name == minioReservedBucket:
		// Internal reserved buckets are never exposed.
		return true
	default:
		return false
	}
}
// byBucketName is a collection satisfying sort.Interface. // byBucketName is a collection satisfying sort.Interface.
type byBucketName []BucketInfo type byBucketName []BucketInfo

View File

@ -24,7 +24,6 @@ import (
slashpath "path" slashpath "path"
"path/filepath" "path/filepath"
"runtime" "runtime"
"strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
"syscall" "syscall"
@ -319,7 +318,7 @@ func listVols(dirPath string) ([]VolInfo, error) {
} }
var volsInfo []VolInfo var volsInfo []VolInfo
for _, entry := range entries { for _, entry := range entries {
if !strings.HasSuffix(entry, slashSeparator) || !isValidVolname(slashpath.Clean(entry)) { if !hasSuffix(entry, slashSeparator) || !isValidVolname(slashpath.Clean(entry)) {
// Skip if entry is neither a directory not a valid volume name. // Skip if entry is neither a directory not a valid volume name.
continue continue
} }
@ -917,8 +916,8 @@ func (s *posix) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err e
} }
} }
srcIsDir := strings.HasSuffix(srcPath, slashSeparator) srcIsDir := hasSuffix(srcPath, slashSeparator)
dstIsDir := strings.HasSuffix(dstPath, slashSeparator) dstIsDir := hasSuffix(dstPath, slashSeparator)
// Either src and dst have to be directories or files, else return error. // Either src and dst have to be directories or files, else return error.
if !(srcIsDir && dstIsDir || !srcIsDir && !dstIsDir) { if !(srcIsDir && dstIsDir || !srcIsDir && !dstIsDir) {
return errFileAccessDenied return errFileAccessDenied

View File

@ -66,7 +66,7 @@ func makeS3Peers(eps []*url.URL) s3Peers {
accessKey: serverCred.AccessKey, accessKey: serverCred.AccessKey,
secretKey: serverCred.SecretKey, secretKey: serverCred.SecretKey,
serverAddr: ep.Host, serverAddr: ep.Host,
serviceEndpoint: path.Join(reservedBucket, s3Path), serviceEndpoint: path.Join(minioReservedBucketPath, s3Path),
secureConn: globalIsSSL, secureConn: globalIsSSL,
serviceName: "S3", serviceName: "S3",
} }

View File

@ -45,7 +45,7 @@ func registerS3PeerRPCRouter(mux *router.Router) error {
return traceError(err) return traceError(err)
} }
s3PeerRouter := mux.NewRoute().PathPrefix(reservedBucket).Subrouter() s3PeerRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()
s3PeerRouter.Path(s3Path).Handler(s3PeerRPCServer) s3PeerRouter.Path(s3Path).Handler(s3PeerRPCServer)
return nil return nil
} }

View File

@ -36,7 +36,7 @@ func (s *TestRPCS3PeerSuite) SetUpSuite(t *testing.T) {
serverAddr: s.testServer.Server.Listener.Addr().String(), serverAddr: s.testServer.Server.Listener.Addr().String(),
accessKey: s.testServer.AccessKey, accessKey: s.testServer.AccessKey,
secretKey: s.testServer.SecretKey, secretKey: s.testServer.SecretKey,
serviceEndpoint: path.Join(reservedBucket, s3Path), serviceEndpoint: path.Join(minioReservedBucketPath, s3Path),
serviceName: "S3", serviceName: "S3",
} }
} }

View File

@ -34,7 +34,7 @@ type networkStorage struct {
} }
const ( const (
storageRPCPath = reservedBucket + "/storage" storageRPCPath = minioReservedBucketPath + "/storage"
) )
// Converts rpc.ServerError to underlying error. This function is // Converts rpc.ServerError to underlying error. This function is

View File

@ -233,7 +233,7 @@ func registerStorageRPCRouters(mux *router.Router, srvCmdConfig serverCmdConfig)
return traceError(err) return traceError(err)
} }
// Add minio storage routes. // Add minio storage routes.
storageRouter := mux.PathPrefix(reservedBucket).Subrouter() storageRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter()
storageRouter.Path(path.Join("/storage", stServer.path)).Handler(storageRPCServer) storageRouter.Path(path.Join("/storage", stServer.path)).Handler(storageRPCServer)
} }
return nil return nil

View File

@ -21,7 +21,6 @@ import (
"io/ioutil" "io/ioutil"
"reflect" "reflect"
"sort" "sort"
"strings"
"testing" "testing"
"time" "time"
) )
@ -187,7 +186,7 @@ func TestTreeWalk(t *testing.T) {
} }
isLeaf := func(volume, prefix string) bool { isLeaf := func(volume, prefix string) bool {
return !strings.HasSuffix(prefix, slashSeparator) return !hasSuffix(prefix, slashSeparator)
} }
listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk) listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk)
// Simple test for prefix based walk. // Simple test for prefix based walk.
@ -225,7 +224,7 @@ func TestTreeWalkTimeout(t *testing.T) {
} }
isLeaf := func(volume, prefix string) bool { isLeaf := func(volume, prefix string) bool {
return !strings.HasSuffix(prefix, slashSeparator) return !hasSuffix(prefix, slashSeparator)
} }
listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk) listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk)
@ -304,7 +303,7 @@ func TestListDir(t *testing.T) {
// create listDir function. // create listDir function.
listDir := listDirFactory(func(volume, prefix string) bool { listDir := listDirFactory(func(volume, prefix string) bool {
return !strings.HasSuffix(prefix, slashSeparator) return !hasSuffix(prefix, slashSeparator)
}, xlTreeWalkIgnoredErrs, disk1, disk2) }, xlTreeWalkIgnoredErrs, disk1, disk2)
// Create file1 in fsDir1 and file2 in fsDir2. // Create file1 in fsDir1 and file2 in fsDir2.
@ -376,7 +375,7 @@ func TestRecursiveTreeWalk(t *testing.T) {
// Simple isLeaf check, returns true if there is no trailing "/" // Simple isLeaf check, returns true if there is no trailing "/"
isLeaf := func(volume, prefix string) bool { isLeaf := func(volume, prefix string) bool {
return !strings.HasSuffix(prefix, slashSeparator) return !hasSuffix(prefix, slashSeparator)
} }
// Create listDir function. // Create listDir function.
@ -486,7 +485,7 @@ func TestSortedness(t *testing.T) {
// Simple isLeaf check, returns true if there is no trailing "/" // Simple isLeaf check, returns true if there is no trailing "/"
isLeaf := func(volume, prefix string) bool { isLeaf := func(volume, prefix string) bool {
return !strings.HasSuffix(prefix, slashSeparator) return !hasSuffix(prefix, slashSeparator)
} }
// Create listDir function. // Create listDir function.
listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk1) listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk1)
@ -563,7 +562,7 @@ func TestTreeWalkIsEnd(t *testing.T) {
} }
isLeaf := func(volume, prefix string) bool { isLeaf := func(volume, prefix string) bool {
return !strings.HasSuffix(prefix, slashSeparator) return !hasSuffix(prefix, slashSeparator)
} }
// Create listDir function. // Create listDir function.
listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk1) listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk1)

View File

@ -161,10 +161,6 @@ func (web *webAPIHandlers) ListBuckets(r *http.Request, args *WebGenericArgs, re
return toJSONError(err) return toJSONError(err)
} }
for _, bucket := range buckets { for _, bucket := range buckets {
if bucket.Name == path.Base(reservedBucket) {
continue
}
reply.Buckets = append(reply.Buckets, WebBucketInfo{ reply.Buckets = append(reply.Buckets, WebBucketInfo{
Name: bucket.Name, Name: bucket.Name,
CreationDate: bucket.Created, CreationDate: bucket.Created,
@ -584,7 +580,7 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
return objectAPI.GetObject(args.BucketName, objectName, 0, info.Size, writer) return objectAPI.GetObject(args.BucketName, objectName, 0, info.Size, writer)
} }
if !strings.HasSuffix(object, "/") { if !hasSuffix(object, slashSeparator) {
// If not a directory, compress the file and write it to response. // If not a directory, compress the file and write it to response.
err := zipit(pathJoin(args.Prefix, object)) err := zipit(pathJoin(args.Prefix, object))
if err != nil { if err != nil {

View File

@ -39,7 +39,7 @@ type indexHandler struct {
} }
func (h indexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (h indexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
r.URL.Path = reservedBucket + "/" r.URL.Path = minioReservedBucketPath + "/"
h.handler.ServeHTTP(w, r) h.handler.ServeHTTP(w, r)
} }
@ -68,7 +68,7 @@ func registerWebRouter(mux *router.Router) error {
codec := json2.NewCodec() codec := json2.NewCodec()
// Minio browser router. // Minio browser router.
webBrowserRouter := mux.NewRoute().PathPrefix(reservedBucket).Subrouter() webBrowserRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()
// Initialize json rpc handlers. // Initialize json rpc handlers.
webRPC := jsonrpc.NewServer() webRPC := jsonrpc.NewServer()
@ -87,13 +87,13 @@ func registerWebRouter(mux *router.Router) error {
webBrowserRouter.Methods("GET").Path("/zip").Queries("token", "{token:.*}").HandlerFunc(web.DownloadZip) webBrowserRouter.Methods("GET").Path("/zip").Queries("token", "{token:.*}").HandlerFunc(web.DownloadZip)
// Add compression for assets. // Add compression for assets.
compressedAssets := handlers.CompressHandler(http.StripPrefix(reservedBucket, http.FileServer(assetFS()))) compressedAssets := handlers.CompressHandler(http.StripPrefix(minioReservedBucketPath, http.FileServer(assetFS())))
// Serve javascript files and favicon from assets. // Serve javascript files and favicon from assets.
webBrowserRouter.Path(fmt.Sprintf("/{assets:[^/]+.js|%s}", specialAssets)).Handler(compressedAssets) webBrowserRouter.Path(fmt.Sprintf("/{assets:[^/]+.js|%s}", specialAssets)).Handler(compressedAssets)
// Serve index.html for rest of the requests. // Serve index.html for rest of the requests.
webBrowserRouter.Path("/{index:.*}").Handler(indexHandler{http.StripPrefix(reservedBucket, http.FileServer(assetFS()))}) webBrowserRouter.Path("/{index:.*}").Handler(indexHandler{http.StripPrefix(minioReservedBucketPath, http.FileServer(assetFS()))})
return nil return nil
} }

View File

@ -165,13 +165,7 @@ func (xl xlObjects) listBuckets() (bucketsInfo []BucketInfo, err error) {
// should take care of this. // should take care of this.
var bucketsInfo []BucketInfo var bucketsInfo []BucketInfo
for _, volInfo := range volsInfo { for _, volInfo := range volsInfo {
// StorageAPI can send volume names which are incompatible if isReservedOrInvalidBucket(volInfo.Name) {
// with buckets, handle it and skip them.
if !IsValidBucketName(volInfo.Name) {
continue
}
// Ignore the volume special bucket.
if volInfo.Name == minioMetaBucket {
continue continue
} }
bucketsInfo = append(bucketsInfo, BucketInfo{ bucketsInfo = append(bucketsInfo, BucketInfo{

View File

@ -114,7 +114,7 @@ func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, ma
} }
entry := walkResult.entry entry := walkResult.entry
var objInfo ObjectInfo var objInfo ObjectInfo
if strings.HasSuffix(entry, slashSeparator) { if hasSuffix(entry, slashSeparator) {
// Object name needs to be full path. // Object name needs to be full path.
objInfo.Bucket = bucket objInfo.Bucket = bucket
objInfo.Name = entry objInfo.Name = entry

View File

@ -16,8 +16,6 @@
package cmd package cmd
import "strings"
// Returns function "listDir" of the type listDirFunc. // Returns function "listDir" of the type listDirFunc.
// isLeaf - is used by listDir function to check if an entry is a leaf or non-leaf entry. // isLeaf - is used by listDir function to check if an entry is a leaf or non-leaf entry.
// disks - used for doing disk.ListDir(). FS passes single disk argument, XL passes a list of disks. // disks - used for doing disk.ListDir(). FS passes single disk argument, XL passes a list of disks.
@ -83,7 +81,7 @@ func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKey
} }
entry := walkResult.entry entry := walkResult.entry
var objInfo ObjectInfo var objInfo ObjectInfo
if strings.HasSuffix(entry, slashSeparator) { if hasSuffix(entry, slashSeparator) {
// Object name needs to be full path. // Object name needs to be full path.
objInfo.Bucket = bucket objInfo.Bucket = bucket
objInfo.Name = entry objInfo.Name = entry

View File

@ -356,7 +356,7 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
entry := strings.TrimPrefix(walkResult.entry, retainSlash(bucket)) entry := strings.TrimPrefix(walkResult.entry, retainSlash(bucket))
// For an entry looking like a directory, store and // For an entry looking like a directory, store and
// continue the loop not need to fetch uploads. // continue the loop not need to fetch uploads.
if strings.HasSuffix(walkResult.entry, slashSeparator) { if hasSuffix(walkResult.entry, slashSeparator) {
uploads = append(uploads, uploadMetadata{ uploads = append(uploads, uploadMetadata{
Object: entry, Object: entry,
}) })
@ -409,7 +409,7 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
for _, upload := range uploads { for _, upload := range uploads {
var objectName string var objectName string
var uploadID string var uploadID string
if strings.HasSuffix(upload.Object, slashSeparator) { if hasSuffix(upload.Object, slashSeparator) {
// All directory entries are common prefixes. // All directory entries are common prefixes.
uploadID = "" // For common prefixes, upload ids are empty. uploadID = "" // For common prefixes, upload ids are empty.
objectName = upload.Object objectName = upload.Object