Use const slashSeparator instead of "/" everywhere (#8028)

Mirror of https://github.com/minio/minio.git (synced 2024-12-24 06:05:55 -05:00)
parent b52b90412b
commit e6d8e272ce
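The change is mechanical: every place that builds object paths, prefixes, delimiters or route patterns now uses the exported SlashSeparator constant instead of a bare "/" literal (or the unexported slashSeparator copies some packages kept). A minimal sketch of the idea follows; the declarations are assumptions for illustration, since the constant's own definition is not part of the hunks shown below.

// Sketch only: names mirror the identifiers used in this diff; the exact
// file holding the real declaration is not shown in these hunks.
package cmd

import "strings"

// SlashSeparator is the canonical separator for object keys and URL paths.
const SlashSeparator = "/"

// hasSuffix is assumed here to be a thin wrapper over strings.HasSuffix
// (MinIO's helper may also normalize case on Windows).
func hasSuffix(s, suffix string) bool {
    return strings.HasSuffix(s, suffix)
}

// isObjectDir appears verbatim in a hunk below: a zero-size key ending in
// SlashSeparator is treated as a directory object.
func isObjectDir(object string, size int64) bool {
    return hasSuffix(object, SlashSeparator) && size == 0
}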
@@ -1594,7 +1594,7 @@ func mustTrace(entry interface{}, trcAll, errOnly bool) bool {
 if !ok {
 return false
 }
-trace := trcAll || !hasPrefix(trcInfo.ReqInfo.Path, minioReservedBucketPath+slashSeparator)
+trace := trcAll || !hasPrefix(trcInfo.ReqInfo.Path, minioReservedBucketPath+SlashSeparator)
 if errOnly {
 return trace && trcInfo.RespInfo.StatusCode >= http.StatusBadRequest
 }
@@ -587,9 +587,9 @@ func (h *healSequence) healItemsFromSourceCh() error {

 var itemType madmin.HealItemType
 switch {
-case path == "/":
+case path == SlashSeparator:
 itemType = madmin.HealItemMetadata
-case !strings.Contains(path, "/"):
+case !strings.Contains(path, SlashSeparator):
 itemType = madmin.HealItemBucket
 default:
 itemType = madmin.HealItemObject
@@ -693,7 +693,7 @@ func (h *healSequence) healDiskFormat() error {
 return errServerNotInitialized
 }

-return h.queueHealTask("/", madmin.HealItemMetadata)
+return h.queueHealTask(SlashSeparator, madmin.HealItemMetadata)
 }

 // healBuckets - check for all buckets heal or just particular bucket.
@@ -36,7 +36,7 @@ func TestListObjectsV2Resources(t *testing.T) {
 "prefix": []string{"photos/"},
 "continuation-token": []string{"token"},
 "start-after": []string{"start-after"},
-"delimiter": []string{"/"},
+"delimiter": []string{SlashSeparator},
 "fetch-owner": []string{"true"},
 "max-keys": []string{"100"},
 "encoding-type": []string{"gzip"},
@@ -44,7 +44,7 @@ func TestListObjectsV2Resources(t *testing.T) {
 prefix: "photos/",
 token: "token",
 startAfter: "start-after",
-delimiter: "/",
+delimiter: SlashSeparator,
 fetchOwner: true,
 maxKeys: 100,
 encodingType: "gzip",
@@ -55,14 +55,14 @@ func TestListObjectsV2Resources(t *testing.T) {
 "prefix": []string{"photos/"},
 "continuation-token": []string{"token"},
 "start-after": []string{"start-after"},
-"delimiter": []string{"/"},
+"delimiter": []string{SlashSeparator},
 "fetch-owner": []string{"true"},
 "encoding-type": []string{"gzip"},
 },
 prefix: "photos/",
 token: "token",
 startAfter: "start-after",
-delimiter: "/",
+delimiter: SlashSeparator,
 fetchOwner: true,
 maxKeys: 1000,
 encodingType: "gzip",
@@ -73,7 +73,7 @@ func TestListObjectsV2Resources(t *testing.T) {
 "prefix": []string{"photos/"},
 "continuation-token": []string{""},
 "start-after": []string{"start-after"},
-"delimiter": []string{"/"},
+"delimiter": []string{SlashSeparator},
 "fetch-owner": []string{"true"},
 "encoding-type": []string{"gzip"},
 },
@@ -130,13 +130,13 @@ func TestListObjectsV1Resources(t *testing.T) {
 values: url.Values{
 "prefix": []string{"photos/"},
 "marker": []string{"test"},
-"delimiter": []string{"/"},
+"delimiter": []string{SlashSeparator},
 "max-keys": []string{"100"},
 "encoding-type": []string{"gzip"},
 },
 prefix: "photos/",
 marker: "test",
-delimiter: "/",
+delimiter: SlashSeparator,
 maxKeys: 100,
 encodingType: "gzip",
 },
@@ -144,12 +144,12 @@ func TestListObjectsV1Resources(t *testing.T) {
 values: url.Values{
 "prefix": []string{"photos/"},
 "marker": []string{"test"},
-"delimiter": []string{"/"},
+"delimiter": []string{SlashSeparator},
 "encoding-type": []string{"gzip"},
 },
 prefix: "photos/",
 marker: "test",
-delimiter: "/",
+delimiter: SlashSeparator,
 maxKeys: 1000,
 encodingType: "gzip",
 },
@@ -293,14 +293,14 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
 }
 u := &url.URL{
 Host: r.Host,
-Path: path.Join(slashSeparator, bucket, object),
+Path: path.Join(SlashSeparator, bucket, object),
 Scheme: proto,
 }
 // If domain is set then we need to use bucket DNS style.
 for _, domain := range domains {
 if strings.Contains(r.Host, domain) {
 u.Host = bucket + "." + r.Host
-u.Path = path.Join(slashSeparator, object)
+u.Path = path.Join(SlashSeparator, object)
 break
 }
 }
@@ -48,7 +48,7 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
 }

 // API Router
-apiRouter := router.PathPrefix("/").Subrouter()
+apiRouter := router.PathPrefix(SlashSeparator).Subrouter()
 var routers []*mux.Router
 for _, domainName := range globalDomainNames {
 routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
@@ -157,7 +157,7 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
 /// Root operation

 // ListBuckets
-apiRouter.Methods(http.MethodGet).Path("/").HandlerFunc(httpTraceAll(api.ListBucketsHandler))
+apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(httpTraceAll(api.ListBucketsHandler))

 // If none of the routes match.
 apiRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(notFoundHandler))
@@ -44,7 +44,7 @@ func TestGetRequestAuthType(t *testing.T) {
 URL: &url.URL{
 Host: "127.0.0.1:9000",
 Scheme: httpScheme,
-Path: "/",
+Path: SlashSeparator,
 },
 Header: http.Header{
 "Authorization": []string{"AWS4-HMAC-SHA256 <cred_string>"},
@@ -62,7 +62,7 @@ func TestGetRequestAuthType(t *testing.T) {
 URL: &url.URL{
 Host: "127.0.0.1:9000",
 Scheme: httpScheme,
-Path: "/",
+Path: SlashSeparator,
 },
 Header: http.Header{
 "Authorization": []string{"Bearer 12313123"},
@@ -77,7 +77,7 @@ func TestGetRequestAuthType(t *testing.T) {
 URL: &url.URL{
 Host: "127.0.0.1:9000",
 Scheme: httpScheme,
-Path: "/",
+Path: SlashSeparator,
 },
 Header: http.Header{
 "Authorization": []string{""},
@@ -92,7 +92,7 @@ func TestGetRequestAuthType(t *testing.T) {
 URL: &url.URL{
 Host: "127.0.0.1:9000",
 Scheme: httpScheme,
-Path: "/",
+Path: SlashSeparator,
 RawQuery: "X-Amz-Credential=EXAMPLEINVALIDEXAMPL%2Fs3%2F20160314%2Fus-east-1",
 },
 },
@@ -105,7 +105,7 @@ func TestGetRequestAuthType(t *testing.T) {
 URL: &url.URL{
 Host: "127.0.0.1:9000",
 Scheme: httpScheme,
-Path: "/",
+Path: SlashSeparator,
 },
 Header: http.Header{
 "Content-Type": []string{"multipart/form-data"},
@@ -650,7 +650,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 return
 }
 if objectAPI.IsEncryptionSupported() {
-if hasServerSideEncryptionHeader(formValues) && !hasSuffix(object, slashSeparator) { // handle SSE-C and SSE-S3 requests
+if hasServerSideEncryptionHeader(formValues) && !hasSuffix(object, SlashSeparator) { // handle SSE-C and SSE-S3 requests
 var reader io.Reader
 var key []byte
 if crypto.SSEC.IsRequested(formValues) {
@@ -69,7 +69,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
 expectedRespStatus: http.StatusForbidden,
 locationResponse: []byte(""),
 errorResponse: APIErrorResponse{
-Resource: "/" + bucketName + "/",
+Resource: SlashSeparator + bucketName + SlashSeparator,
 Code: "InvalidAccessKeyId",
 Message: "The access key ID you provided does not exist in our records.",
 },
@@ -394,7 +394,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
 prefix: "",
 keyMarker: "",
 uploadIDMarker: "",
-delimiter: "/",
+delimiter: SlashSeparator,
 maxUploads: "100",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
@@ -87,7 +87,7 @@ func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
 // Load all custom CA files.
 for _, fi := range fis {
 // Skip all directories.
-if hasSuffix(fi, slashSeparator) {
+if hasSuffix(fi, SlashSeparator) {
 continue
 }
 caCert, err := ioutil.ReadFile(pathJoin(certsCAsDir, fi))
@@ -175,7 +175,7 @@ func TestServerConfigMigrateV2toV33(t *testing.T) {
 }
 defer os.RemoveAll(fsDir)

-configPath := rootPath + "/" + minioConfigFile
+configPath := rootPath + SlashSeparator + minioConfigFile

 // Create a corrupted config file
 if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0644); err != nil {
@@ -238,7 +238,7 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
 defer os.RemoveAll(rootPath)

 globalConfigDir = &ConfigDir{path: rootPath}
-configPath := rootPath + "/" + minioConfigFile
+configPath := rootPath + SlashSeparator + minioConfigFile

 // Create a corrupted config file
 if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0644); err != nil {
@@ -335,7 +335,7 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
 defer os.RemoveAll(rootPath)

 globalConfigDir = &ConfigDir{path: rootPath}
-configPath := rootPath + "/" + minioConfigFile
+configPath := rootPath + SlashSeparator + minioConfigFile

 for i := 3; i <= 17; i++ {
 // Create a corrupted config file
@@ -109,7 +109,7 @@ func parseCacheExcludes(excludes []string) ([]string, error) {
 if len(e) == 0 {
 return nil, uiErrInvalidCacheExcludesValue(nil).Msg("cache exclude path (%s) cannot be empty", e)
 }
-if hasPrefix(e, slashSeparator) {
+if hasPrefix(e, SlashSeparator) {
 return nil, uiErrInvalidCacheExcludesValue(nil).Msg("cache exclude pattern (%s) cannot start with / as prefix", e)
 }
 }
@@ -182,7 +182,7 @@ func (cfs *cacheFSObjects) purgeTrash() {

 // Purge cache entries that were not accessed.
 func (cfs *cacheFSObjects) purge() {
-delimiter := slashSeparator
+delimiter := SlashSeparator
 maxKeys := 1000
 ctx := context.Background()
 for {
@@ -395,7 +395,7 @@ func listDirCacheFactory(isLeaf func(string, string) bool, disks []*cacheFSObjec

 for i := range entries {
 if isLeaf(bucket, entries[i]) {
-entries[i] = strings.TrimSuffix(entries[i], slashSeparator)
+entries[i] = strings.TrimSuffix(entries[i], SlashSeparator)
 }
 }

@@ -432,7 +432,7 @@ func (c cacheObjects) listCacheObjects(ctx context.Context, bucket, prefix, mark
 var nextMarker string

 recursive := true
-if delimiter == slashSeparator {
+if delimiter == SlashSeparator {
 recursive = false
 }
 walkResultCh, endWalkCh := c.listPool.Release(listParams{bucket, recursive, marker, prefix, false})
@@ -460,7 +460,7 @@ func (c cacheObjects) listCacheObjects(ctx context.Context, bucket, prefix, mark

 entry := walkResult.entry
 var objInfo ObjectInfo
-if hasSuffix(entry, slashSeparator) {
+if hasSuffix(entry, SlashSeparator) {
 // Object name needs to be full path.
 objInfo.Bucket = bucket
 objInfo.Name = entry
@@ -502,7 +502,7 @@ func (c cacheObjects) listCacheObjects(ctx context.Context, bucket, prefix, mark
 result = ListObjectsInfo{IsTruncated: !eof}
 for _, objInfo := range objInfos {
 result.NextMarker = objInfo.Name
-if objInfo.IsDir && delimiter == slashSeparator {
+if objInfo.IsDir && delimiter == SlashSeparator {
 result.Prefixes = append(result.Prefixes, objInfo.Name)
 continue
 }
@@ -23,7 +23,7 @@ import (
 // getDiskUsage walks the file tree rooted at root, calling usageFn
 // for each file or directory in the tree, including root.
 func getDiskUsage(ctx context.Context, root string, usageFn usageFunc) error {
-return walk(ctx, root+slashSeparator, usageFn)
+return walk(ctx, root+SlashSeparator, usageFn)
 }

 type usageFunc func(ctx context.Context, entry string) error
@@ -34,7 +34,7 @@ func walk(ctx context.Context, path string, usageFn usageFunc) error {
 return err
 }

-if !hasSuffix(path, slashSeparator) {
+if !hasSuffix(path, SlashSeparator) {
 return nil
 }

@@ -100,7 +100,7 @@ func (endpoint *Endpoint) UpdateIsLocal() error {
 func NewEndpoint(arg string) (ep Endpoint, e error) {
 // isEmptyPath - check whether given path is not empty.
 isEmptyPath := func(path string) bool {
-return path == "" || path == "/" || path == `\`
+return path == "" || path == SlashSeparator || path == `\`
 }

 if isEmptyPath(arg) {
@@ -152,7 +152,7 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
 return ep, fmt.Errorf("empty or root path is not supported in URL endpoint")
 }

-// On windows having a preceding "/" will cause problems, if the
+// On windows having a preceding SlashSeparator will cause problems, if the
 // command line already has C:/<export-folder/ in it. Final resulting
 // path on windows might become C:/C:/ this will cause problems
 // of starting minio server properly in distributed mode on windows.
@@ -96,7 +96,7 @@ func TestNewEndpoint(t *testing.T) {
 {"http://127.0.0.1:8080/path", Endpoint{URL: u3, IsLocal: true, HostName: "127.0.0.1"}, URLEndpointType, nil},
 {"http://192.168.253.200/path", Endpoint{URL: u4, IsLocal: false, HostName: "192.168.253.200"}, URLEndpointType, nil},
 {"", Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
-{"/", Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
+{SlashSeparator, Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
 {`\`, Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
 {"c://foo", Endpoint{}, -1, fmt.Errorf("invalid URL endpoint format")},
 {"ftp://foo", Endpoint{}, -1, fmt.Errorf("invalid URL endpoint format")},
@@ -141,7 +141,7 @@ func (m fsMetaV1) ToObjectInfo(bucket, object string, fi os.FileInfo) ObjectInfo
 m.Meta["content-type"] = mimedb.TypeByExtension(pathutil.Ext(object))
 }

-if hasSuffix(object, slashSeparator) {
+if hasSuffix(object, SlashSeparator) {
 m.Meta["etag"] = emptyETag // For directories etag is d41d8cd98f00b204e9800998ecf8427e
 m.Meta["content-type"] = "application/octet-stream"
 }
@@ -163,7 +163,7 @@ func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, k
 }
 uploads = append(uploads, MultipartInfo{
 Object: object,
-UploadID: strings.TrimSuffix(uploadID, slashSeparator),
+UploadID: strings.TrimSuffix(uploadID, SlashSeparator),
 Initiated: fi.ModTime(),
 })
 }

cmd/fs-v1.go (10 changed lines)
@@ -505,7 +505,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
 return nil, toObjectErr(err, bucket, object)
 }
 // For a directory, we need to send an reader that returns no bytes.
-if hasSuffix(object, slashSeparator) {
+if hasSuffix(object, SlashSeparator) {
 // The lock taken above is released when
 // objReader.Close() is called by the caller.
 return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts.CheckCopyPrecondFn, nsUnlocker)
@@ -596,7 +596,7 @@ func (fs *FSObjects) getObject(ctx context.Context, bucket, object string, offse
 }

 // If its a directory request, we return an empty body.
-if hasSuffix(object, slashSeparator) {
+if hasSuffix(object, SlashSeparator) {
 _, err = writer.Write([]byte(""))
 logger.LogIf(ctx, err)
 return toObjectErr(err, bucket, object)
@@ -690,7 +690,7 @@ func (fs *FSObjects) defaultFsJSON(object string) fsMetaV1 {
 // getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo.
 func (fs *FSObjects) getObjectInfo(ctx context.Context, bucket, object string) (oi ObjectInfo, e error) {
 fsMeta := fsMetaV1{}
-if hasSuffix(object, slashSeparator) {
+if hasSuffix(object, SlashSeparator) {
 fi, err := fsStatDir(ctx, pathJoin(fs.fsPath, bucket, object))
 if err != nil {
 return oi, err
@@ -750,7 +750,7 @@ func (fs *FSObjects) getObjectInfoWithLock(ctx context.Context, bucket, object s
 return oi, err
 }

-if strings.HasSuffix(object, slashSeparator) && !fs.isObjectDir(bucket, object) {
+if strings.HasSuffix(object, SlashSeparator) && !fs.isObjectDir(bucket, object) {
 return oi, errFileNotFound
 }

@@ -784,7 +784,7 @@ func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string, o
 func (fs *FSObjects) parentDirIsObject(ctx context.Context, bucket, parent string) bool {
 var isParentDirObject func(string) bool
 isParentDirObject = func(p string) bool {
-if p == "." || p == "/" {
+if p == "." || p == SlashSeparator {
 return false
 }
 if fsIsFile(ctx, pathJoin(fs.fsPath, bucket, p)) {
@@ -71,7 +71,7 @@ func TestFSParentDirIsObject(t *testing.T) {
 // Should not cause infinite loop.
 {
 parentIsObject: false,
-objectName: "/",
+objectName: SlashSeparator,
 },
 {
 parentIsObject: false,
@@ -214,7 +214,7 @@ func TestFSPutObject(t *testing.T) {
 }

 // With a directory object.
-_, err = obj.PutObject(context.Background(), bucketName+"non-existent", objectName+"/", mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), ObjectOptions{})
+_, err = obj.PutObject(context.Background(), bucketName+"non-existent", objectName+SlashSeparator, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), ObjectOptions{})
 if err == nil {
 t.Fatal("Unexpected should fail here, bucket doesn't exist")
 }
@@ -431,7 +431,7 @@ func checkAzureUploadID(ctx context.Context, uploadID string) (err error) {

 // parses partID from part metadata file name
 func parseAzurePart(metaPartFileName, prefix string) (partID int, err error) {
-partStr := strings.TrimPrefix(metaPartFileName, prefix+"/")
+partStr := strings.TrimPrefix(metaPartFileName, prefix+minio.SlashSeparator)
 if partID, err = strconv.Atoi(partStr); err != nil || partID <= 0 {
 err = fmt.Errorf("invalid part number in block id '%s'", string(partID))
 return
@@ -472,7 +472,7 @@ func (l *gcsGateway) ListBuckets(ctx context.Context) (buckets []minio.BucketInf
 // DeleteBucket delete a bucket on GCS.
 func (l *gcsGateway) DeleteBucket(ctx context.Context, bucket string) error {
 itObject := l.client.Bucket(bucket).Objects(ctx, &storage.Query{
-Delimiter: "/",
+Delimiter: minio.SlashSeparator,
 Versions: false,
 })
 // We list the bucket and if we find any objects we return BucketNotEmpty error. If we
@@ -1040,7 +1040,7 @@ func (l *gcsGateway) ListMultipartUploads(ctx context.Context, bucket string, pr
 if prefix == mpMeta.Object {
 // Extract uploadId
 // E.g minio.sys.tmp/multipart/v1/d063ad89-fdc4-4ea3-a99e-22dba98151f5/gcs.json
-components := strings.SplitN(attrs.Name, "/", 5)
+components := strings.SplitN(attrs.Name, minio.SlashSeparator, 5)
 if len(components) != 5 {
 compErr := errors.New("Invalid multipart upload format")
 logger.LogIf(ctx, compErr)
@@ -1114,7 +1114,7 @@ func (l *gcsGateway) PutObjectPart(ctx context.Context, bucket string, key strin

 // gcsGetPartInfo returns PartInfo of a given object part
 func gcsGetPartInfo(ctx context.Context, attrs *storage.ObjectAttrs) (minio.PartInfo, error) {
-components := strings.SplitN(attrs.Name, "/", 5)
+components := strings.SplitN(attrs.Name, minio.SlashSeparator, 5)
 if len(components) != 5 {
 logger.LogIf(ctx, errors.New("Invalid multipart upload format"))
 return minio.PartInfo{}, errors.New("Invalid multipart upload format")
@@ -36,7 +36,7 @@ const (

 // Ignores all reserved bucket names or invalid bucket names.
 func isReservedOrInvalidBucket(bucketEntry string, strict bool) bool {
-bucketEntry = strings.TrimSuffix(bucketEntry, "/")
+bucketEntry = strings.TrimSuffix(bucketEntry, minio.SlashSeparator)
 if strict {
 if err := s3utils.CheckValidBucketNameStrict(bucketEntry); err != nil {
 return true
@@ -46,7 +46,7 @@ import (
 const (
 hdfsBackend = "hdfs"

-hdfsSeparator = "/"
+hdfsSeparator = minio.SlashSeparator
 )

 func init() {
@@ -41,7 +41,6 @@ const (
 // custom multipart files are stored under the defaultMinioGWPrefix
 defaultMinioGWPrefix = ".minio"
 defaultGWContentFileName = "data"
-slashSeparator = "/"
 )

 // s3EncObjects is a wrapper around s3Objects and implements gateway calls for
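Outside package cmd the gateways previously kept their own slashSeparator copies (as in the const block dropped above); after this change they reference the exported constant through the imported core package, which the diff spells minio.SlashSeparator. A hedged sketch of that pattern follows; the import path and alias are assumptions based on how MinIO gateways typically import the core package, not something shown in these hunks.

// Assumed import alias: gateway packages conventionally import the core
// package as "minio", matching the minio.SlashSeparator references above.
package s3

import (
    "strings"

    minio "github.com/minio/minio/cmd"
)

// trimTrailingSlash is a hypothetical helper showing the replacement pattern
// applied throughout the gateway hunks in this commit.
func trimTrailingSlash(p string) string {
    return strings.TrimSuffix(p, minio.SlashSeparator)
}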
@@ -102,7 +101,7 @@ func (l *s3EncObjects) ListObjectsV2(ctx context.Context, bucket, prefix, contin
 }
 // get objectname and ObjectInfo from the custom metadata file
 if strings.HasSuffix(obj.Name, gwdareMetaJSON) {
-objSlice := strings.Split(obj.Name, slashSeparator+defaultMinioGWPrefix)
+objSlice := strings.Split(obj.Name, minio.SlashSeparator+defaultMinioGWPrefix)
 gwMeta, e := l.getGWMetadata(ctx, bucket, getDareMetaPath(objSlice[0]))
 if e != nil {
 continue
@@ -117,7 +116,7 @@ func (l *s3EncObjects) ListObjectsV2(ctx context.Context, bucket, prefix, contin
 }
 }
 for _, p := range loi.Prefixes {
-objName := strings.TrimSuffix(p, slashSeparator)
+objName := strings.TrimSuffix(p, minio.SlashSeparator)
 gm, err := l.getGWMetadata(ctx, bucket, getDareMetaPath(objName))
 // if prefix is actually a custom multi-part object, append it to objects
 if err == nil {
@@ -165,7 +164,7 @@ func isGWObject(objName string) bool {
 return false
 }

-pfxSlice := strings.Split(objName, slashSeparator)
+pfxSlice := strings.Split(objName, minio.SlashSeparator)
 var i1, i2 int
 for i := len(pfxSlice) - 1; i >= 0; i-- {
 p := pfxSlice[i]
@@ -401,10 +400,10 @@ func (l *s3EncObjects) ListMultipartUploads(ctx context.Context, bucket string,
 if e != nil {
 return
 }
-lmi.KeyMarker = strings.TrimSuffix(lmi.KeyMarker, getGWContentPath("/"))
-lmi.NextKeyMarker = strings.TrimSuffix(lmi.NextKeyMarker, getGWContentPath("/"))
+lmi.KeyMarker = strings.TrimSuffix(lmi.KeyMarker, getGWContentPath(minio.SlashSeparator))
+lmi.NextKeyMarker = strings.TrimSuffix(lmi.NextKeyMarker, getGWContentPath(minio.SlashSeparator))
 for i := range lmi.Uploads {
-lmi.Uploads[i].Object = strings.TrimSuffix(lmi.Uploads[i].Object, getGWContentPath("/"))
+lmi.Uploads[i].Object = strings.TrimSuffix(lmi.Uploads[i].Object, getGWContentPath(minio.SlashSeparator))
 }
 return
 }
@@ -153,7 +153,7 @@ func containsReservedMetadata(header http.Header) bool {
 // Reserved bucket.
 const (
 minioReservedBucket = "minio"
-minioReservedBucketPath = "/" + minioReservedBucket
+minioReservedBucketPath = SlashSeparator + minioReservedBucket
 )

 // Adds redirect rules for incoming requests.
@@ -172,10 +172,10 @@ func setBrowserRedirectHandler(h http.Handler) http.Handler {
 // browser requests.
 func getRedirectLocation(urlPath string) (rLocation string) {
 if urlPath == minioReservedBucketPath {
-rLocation = minioReservedBucketPath + "/"
+rLocation = minioReservedBucketPath + SlashSeparator
 }
 if contains([]string{
-"/",
+SlashSeparator,
 "/webrpc",
 "/login",
 "/favicon.ico",
@@ -229,7 +229,7 @@ func guessIsRPCReq(req *http.Request) bool {
 return false
 }
 return req.Method == http.MethodPost &&
-strings.HasPrefix(req.URL.Path, minioReservedBucketPath+"/")
+strings.HasPrefix(req.URL.Path, minioReservedBucketPath+SlashSeparator)
 }

 func (h redirectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@@ -258,7 +258,7 @@ func setBrowserCacheControlHandler(h http.Handler) http.Handler {
 func (h cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 if r.Method == http.MethodGet && guessIsBrowserReq(r) {
 // For all browser requests set appropriate Cache-Control policies
-if hasPrefix(r.URL.Path, minioReservedBucketPath+"/") {
+if hasPrefix(r.URL.Path, minioReservedBucketPath+SlashSeparator) {
 if hasSuffix(r.URL.Path, ".js") || r.URL.Path == minioReservedBucketPath+"/favicon.ico" {
 // For assets set cache expiry of one year. For each release, the name
 // of the asset name will change and hence it can not be served from cache.
@@ -276,7 +276,7 @@ func (h cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 // Check to allow access to the reserved "bucket" `/minio` for Admin
 // API requests.
 func isAdminReq(r *http.Request) bool {
-return strings.HasPrefix(r.URL.Path, adminAPIPathPrefix+"/")
+return strings.HasPrefix(r.URL.Path, adminAPIPathPrefix+SlashSeparator)
 }

 // Adds verification for incoming paths.
@@ -596,7 +596,7 @@ const (
 // such as ".." and "."
 func hasBadPathComponent(path string) bool {
 path = strings.TrimSpace(path)
-for _, p := range strings.Split(path, slashSeparator) {
+for _, p := range strings.Split(path, SlashSeparator) {
 switch strings.TrimSpace(p) {
 case dotdotComponent:
 return true
@@ -35,12 +35,12 @@ func TestRedirectLocation(t *testing.T) {
 {
 // 1. When urlPath is '/minio'
 urlPath: minioReservedBucketPath,
-location: minioReservedBucketPath + "/",
+location: minioReservedBucketPath + SlashSeparator,
 },
 {
 // 2. When urlPath is '/'
-urlPath: "/",
-location: minioReservedBucketPath + "/",
+urlPath: SlashSeparator,
+location: minioReservedBucketPath + SlashSeparator,
 },
 {
 // 3. When urlPath is '/webrpc'
@@ -95,8 +95,8 @@ func isMetadataReplace(h http.Header) bool {
 // Splits an incoming path into bucket and object components.
 func path2BucketAndObject(path string) (bucket, object string) {
 // Skip the first element if it is '/', split the rest.
-path = strings.TrimPrefix(path, "/")
-pathComponents := strings.SplitN(path, "/", 2)
+path = strings.TrimPrefix(path, SlashSeparator)
+pathComponents := strings.SplitN(path, SlashSeparator, 2)

 // Save the bucket and object extracted from path.
 switch len(pathComponents) {
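For illustration, a self-contained, hypothetical version of the helper touched above; the tail of path2BucketAndObject (after the SplitN call) is not part of this hunk and is assumed here, and the local SlashSeparator constant stands in for the package-level one.

// Hypothetical, runnable sketch of the split logic shown in the hunk above.
package main

import (
    "fmt"
    "strings"
)

const SlashSeparator = "/" // stand-in for the cmd package constant

func path2BucketAndObject(path string) (bucket, object string) {
    // Skip the first element if it is '/', split the rest.
    path = strings.TrimPrefix(path, SlashSeparator)
    pathComponents := strings.SplitN(path, SlashSeparator, 2)
    switch len(pathComponents) {
    case 1:
        bucket = pathComponents[0]
    case 2:
        bucket, object = pathComponents[0], pathComponents[1]
    }
    return bucket, object
}

func main() {
    fmt.Println(path2BucketAndObject("/mybucket/photos/2019/img.jpg")) // mybucket photos/2019/img.jpg
}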
@@ -370,7 +370,7 @@ func getResource(path string, host string, domains []string) (string, error) {
 continue
 }
 bucket := strings.TrimSuffix(host, "."+domain)
-return slashSeparator + pathJoin(bucket, path), nil
+return SlashSeparator + pathJoin(bucket, path), nil
 }
 return path, nil
 }
@@ -86,7 +86,7 @@ func getCurrentIAMFormat() iamFormat {
 }

 func getIAMFormatFilePath() string {
-return iamConfigPrefix + "/" + iamFormatFile
+return iamConfigPrefix + SlashSeparator + iamFormatFile
 }

 func getUserIdentityPath(user string, isSTS bool) string {
@@ -191,7 +191,7 @@ func listIAMConfigItems(objectAPI ObjectLayer, pathPrefix string, dirs bool,
 marker := ""
 for {
 lo, err := objectAPI.ListObjects(context.Background(),
-minioMetaBucket, pathPrefix, marker, "/", 1000)
+minioMetaBucket, pathPrefix, marker, SlashSeparator, 1000)
 if err != nil {
 select {
 case ch <- itemOrErr{Err: err}:
@@ -207,7 +207,7 @@ func listIAMConfigItems(objectAPI ObjectLayer, pathPrefix string, dirs bool,
 }
 for _, itemPrefix := range lister {
 item := strings.TrimPrefix(itemPrefix, pathPrefix)
-item = strings.TrimSuffix(item, "/")
+item = strings.TrimSuffix(item, SlashSeparator)
 select {
 case ch <- itemOrErr{Item: item}:
 case <-doneCh:
@@ -232,12 +232,12 @@ func startLockMaintenance(lkSrv *lockRESTServer) {
 func registerLockRESTHandlers(router *mux.Router) {
 subrouter := router.PathPrefix(lockRESTPath).Subrouter()
 queries := restQueries(lockRESTUID, lockRESTSource, lockRESTResource, lockRESTServerAddr, lockRESTServerEndpoint)
-subrouter.Methods(http.MethodPost).Path("/" + lockRESTMethodLock).HandlerFunc(httpTraceHdrs(globalLockServer.LockHandler)).Queries(queries...)
-subrouter.Methods(http.MethodPost).Path("/" + lockRESTMethodRLock).HandlerFunc(httpTraceHdrs(globalLockServer.RLockHandler)).Queries(queries...)
-subrouter.Methods(http.MethodPost).Path("/" + lockRESTMethodUnlock).HandlerFunc(httpTraceHdrs(globalLockServer.UnlockHandler)).Queries(queries...)
-subrouter.Methods(http.MethodPost).Path("/" + lockRESTMethodRUnlock).HandlerFunc(httpTraceHdrs(globalLockServer.RUnlockHandler)).Queries(queries...)
-subrouter.Methods(http.MethodPost).Path("/" + lockRESTMethodForceUnlock).HandlerFunc(httpTraceHdrs(globalLockServer.ForceUnlockHandler)).Queries(queries...)
-subrouter.Methods(http.MethodPost).Path("/" + lockRESTMethodExpired).HandlerFunc(httpTraceAll(globalLockServer.ExpiredHandler)).Queries(queries...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + lockRESTMethodLock).HandlerFunc(httpTraceHdrs(globalLockServer.LockHandler)).Queries(queries...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + lockRESTMethodRLock).HandlerFunc(httpTraceHdrs(globalLockServer.RLockHandler)).Queries(queries...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + lockRESTMethodUnlock).HandlerFunc(httpTraceHdrs(globalLockServer.UnlockHandler)).Queries(queries...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + lockRESTMethodRUnlock).HandlerFunc(httpTraceHdrs(globalLockServer.RUnlockHandler)).Queries(queries...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + lockRESTMethodForceUnlock).HandlerFunc(httpTraceHdrs(globalLockServer.ForceUnlockHandler)).Queries(queries...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + lockRESTMethodExpired).HandlerFunc(httpTraceAll(globalLockServer.ExpiredHandler)).Queries(queries...)

 router.NotFoundHandler = http.HandlerFunc(httpTraceAll(notFoundHandler))

@@ -56,10 +56,10 @@ func init() {
 }

 // Checks if the object is a directory, this logic uses
-// if size == 0 and object ends with slashSeparator then
+// if size == 0 and object ends with SlashSeparator then
 // returns true.
 func isObjectDir(object string, size int64) bool {
-return hasSuffix(object, slashSeparator) && size == 0
+return hasSuffix(object, SlashSeparator) && size == 0
 }

 // Converts just bucket, object metadata into ObjectInfo datatype.
@@ -110,7 +110,7 @@ func cleanupDir(ctx context.Context, storage StorageAPI, volume, dirPath string)
 var delFunc func(string) error
 // Function to delete entries recursively.
 delFunc = func(entryPath string) error {
-if !hasSuffix(entryPath, slashSeparator) {
+if !hasSuffix(entryPath, SlashSeparator) {
 // Delete the file entry.
 err := storage.DeleteFile(volume, entryPath)
 logger.LogIf(ctx, err)
@@ -157,7 +157,7 @@ func cleanupObjectsBulk(ctx context.Context, storage StorageAPI, volume string,
 var traverse func(string) ([]string, error)
 traverse = func(entryPath string) ([]string, error) {
 var output = make([]string, 0)
-if !hasSuffix(entryPath, slashSeparator) {
+if !hasSuffix(entryPath, SlashSeparator) {
 output = append(output, entryPath)
 return output, nil
 }
@@ -320,7 +320,7 @@ func listObjectsNonSlash(ctx context.Context, obj ObjectLayer, bucket, prefix, m
 }

 func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, delimiter string, maxKeys int, tpool *TreeWalkPool, listDir ListDirFunc, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) (loi ListObjectsInfo, err error) {
-if delimiter != slashSeparator && delimiter != "" {
+if delimiter != SlashSeparator && delimiter != "" {
 return listObjectsNonSlash(ctx, obj, bucket, prefix, marker, delimiter, maxKeys, tpool, listDir, getObjInfo, getObjectInfoDirs...)
 }

@@ -346,7 +346,7 @@ func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, d
 // along // with the prefix. On a flat namespace with 'prefix'
 // as '/' we don't have any entries, since all the keys are
 // of form 'keyName/...'
-if delimiter == slashSeparator && prefix == slashSeparator {
+if delimiter == SlashSeparator && prefix == SlashSeparator {
 return loi, nil
 }

@@ -357,7 +357,7 @@ func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, d

 // Default is recursive, if delimiter is set then list non recursive.
 recursive := true
-if delimiter == slashSeparator {
+if delimiter == SlashSeparator {
 recursive = false
 }

@@ -382,7 +382,7 @@ func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, d

 var objInfo ObjectInfo
 var err error
-if hasSuffix(walkResult.entry, slashSeparator) {
+if hasSuffix(walkResult.entry, SlashSeparator) {
 for _, getObjectInfoDir := range getObjectInfoDirs {
 objInfo, err = getObjectInfoDir(ctx, bucket, walkResult.entry)
 if err == nil {
@@ -429,7 +429,7 @@ func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, d

 result := ListObjectsInfo{}
 for _, objInfo := range objInfos {
-if objInfo.IsDir && delimiter == slashSeparator {
+if objInfo.IsDir && delimiter == SlashSeparator {
 result.Prefixes = append(result.Prefixes, objInfo.Name)
 continue
 }
@@ -203,14 +203,14 @@ func (e ObjectExistsAsDirectory) Error() string {
 type PrefixAccessDenied GenericError

 func (e PrefixAccessDenied) Error() string {
-return "Prefix access is denied: " + e.Bucket + "/" + e.Object
+return "Prefix access is denied: " + e.Bucket + SlashSeparator + e.Object
 }

 // ParentIsObject object access is denied.
 type ParentIsObject GenericError

 func (e ParentIsObject) Error() string {
-return "Parent is object " + e.Bucket + "/" + path.Dir(e.Object)
+return "Parent is object " + e.Bucket + SlashSeparator + path.Dir(e.Object)
 }

 // BucketExists bucket exists.
@@ -259,7 +259,7 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [

 for i, testCase := range testCases {
 for _, d := range disks {
-err = os.Chmod(d+"/"+testCase.bucketName+"/"+testCase.chmodPath, 0)
+err = os.Chmod(d+SlashSeparator+testCase.bucketName+SlashSeparator+testCase.chmodPath, 0)
 if err != nil {
 t.Fatalf("Test %d, Unable to chmod: %v", i+1, err)
 }
@@ -74,7 +74,7 @@ func checkListObjsArgs(ctx context.Context, bucket, prefix, marker, delimiter st
 }
 }
 // Verify if delimiter is anything other than '/', which we do not support.
-if delimiter != "" && delimiter != slashSeparator {
+if delimiter != "" && delimiter != SlashSeparator {
 logger.LogIf(ctx, UnsupportedDelimiter{
 Delimiter: delimiter,
 })
@@ -102,7 +102,7 @@ func checkListMultipartArgs(ctx context.Context, bucket, prefix, keyMarker, uplo
 return err
 }
 if uploadIDMarker != "" {
-if hasSuffix(keyMarker, slashSeparator) {
+if hasSuffix(keyMarker, SlashSeparator) {

 logger.LogIf(ctx, InvalidUploadIDKeyCombination{
 UploadIDMarker: uploadIDMarker,
@@ -196,7 +196,7 @@ func checkPutObjectArgs(ctx context.Context, bucket, object string, obj ObjectLa
 return err
 }
 if len(object) == 0 ||
-(hasSuffix(object, slashSeparator) && size != 0) ||
+(hasSuffix(object, SlashSeparator) && size != 0) ||
 !IsValidObjectPrefix(object) {
 return ObjectNameInvalid{
 Bucket: bucket,
@ -543,20 +543,20 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
|
|||||||
{"test-bucket-list-object", "Asia", "", "", 10, resultCases[24], nil, true},
|
{"test-bucket-list-object", "Asia", "", "", 10, resultCases[24], nil, true},
|
||||||
// Tests with prefix and delimiter (55-57).
|
// Tests with prefix and delimiter (55-57).
|
||||||
// With delimeter the code should not recurse into the sub-directories of prefix Dir.
|
// With delimeter the code should not recurse into the sub-directories of prefix Dir.
|
||||||
{"test-bucket-list-object", "Asia", "", "/", 10, resultCases[25], nil, true},
|
{"test-bucket-list-object", "Asia", "", SlashSeparator, 10, resultCases[25], nil, true},
|
||||||
{"test-bucket-list-object", "new", "", "/", 10, resultCases[26], nil, true},
|
{"test-bucket-list-object", "new", "", SlashSeparator, 10, resultCases[26], nil, true},
|
||||||
{"test-bucket-list-object", "Asia/India/", "", "/", 10, resultCases[27], nil, true},
|
{"test-bucket-list-object", "Asia/India/", "", SlashSeparator, 10, resultCases[27], nil, true},
|
||||||
// Test with marker set as hierarhical value and with delimiter. (58-59)
|
// Test with marker set as hierarhical value and with delimiter. (58-59)
|
||||||
{"test-bucket-list-object", "", "Asia/India/India-summer-photos-1", "/", 10, resultCases[28], nil, true},
|
{"test-bucket-list-object", "", "Asia/India/India-summer-photos-1", SlashSeparator, 10, resultCases[28], nil, true},
|
||||||
{"test-bucket-list-object", "", "Asia/India/Karnataka/Bangalore/Koramangala/pics", "/", 10, resultCases[29], nil, true},
|
{"test-bucket-list-object", "", "Asia/India/Karnataka/Bangalore/Koramangala/pics", SlashSeparator, 10, resultCases[29], nil, true},
|
||||||
// Test with prefix and delimiter set to '/'. (60)
|
// Test with prefix and delimiter set to '/'. (60)
|
||||||
{"test-bucket-list-object", "/", "", "/", 10, resultCases[30], nil, true},
|
{"test-bucket-list-object", SlashSeparator, "", SlashSeparator, 10, resultCases[30], nil, true},
|
||||||
// Test with invalid prefix (61)
|
// Test with invalid prefix (61)
|
||||||
{"test-bucket-list-object", "\\", "", "/", 10, ListObjectsInfo{}, ObjectNameInvalid{Bucket: "test-bucket-list-object", Object: "\\"}, false},
|
{"test-bucket-list-object", "\\", "", SlashSeparator, 10, ListObjectsInfo{}, ObjectNameInvalid{Bucket: "test-bucket-list-object", Object: "\\"}, false},
|
||||||
// Test listing an empty directory in recursive mode (62)
|
// Test listing an empty directory in recursive mode (62)
|
||||||
{"test-bucket-empty-dir", "", "", "", 10, resultCases[31], nil, true},
|
{"test-bucket-empty-dir", "", "", "", 10, resultCases[31], nil, true},
|
||||||
// Test listing an empty directory in a non recursive mode (63)
|
// Test listing an empty directory in a non recursive mode (63)
|
||||||
{"test-bucket-empty-dir", "", "", "/", 10, resultCases[32], nil, true},
|
{"test-bucket-empty-dir", "", "", SlashSeparator, 10, resultCases[32], nil, true},
|
||||||
// Test listing a directory which contains an empty directory (64)
|
// Test listing a directory which contains an empty directory (64)
|
||||||
{"test-bucket-empty-dir", "", "temporary/", "", 10, resultCases[33], nil, true},
|
{"test-bucket-empty-dir", "", "temporary/", "", 10, resultCases[33], nil, true},
|
||||||
}
|
}
|
||||||
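The delimiter cases above depend on S3 listing semantics: when the delimiter is SlashSeparator, keys below the prefix are rolled up into common prefixes instead of being listed recursively. A minimal standalone sketch of that grouping, with hypothetical keys, purely for illustration (this is not MinIO's ListObjects implementation):

package main

import (
	"fmt"
	"strings"
)

// groupByDelimiter mimics S3-style listing: keys under prefix are either
// returned directly or rolled up into a common prefix at the first delimiter.
func groupByDelimiter(keys []string, prefix, delimiter string) (objects, commonPrefixes []string) {
	seen := map[string]bool{}
	for _, k := range keys {
		if !strings.HasPrefix(k, prefix) {
			continue
		}
		rest := strings.TrimPrefix(k, prefix)
		if i := strings.Index(rest, delimiter); delimiter != "" && i >= 0 {
			p := prefix + rest[:i+len(delimiter)]
			if !seen[p] {
				seen[p] = true
				commonPrefixes = append(commonPrefixes, p)
			}
			continue
		}
		objects = append(objects, k)
	}
	return objects, commonPrefixes
}

func main() {
	keys := []string{
		"Asia/India/India-summer-photos-1",
		"Asia/India/Karnataka/Bangalore/Koramangala/pics",
		"newPrefix0",
	}
	objs, prefixes := groupByDelimiter(keys, "Asia/", "/")
	fmt.Println(objs, prefixes) // [] [Asia/India/]
}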
@@ -595,7 +595,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
 t.Errorf("Test %d: %s: Expected object name to be \"%s\", but found \"%s\" instead", i+1, instanceType, testCase.result.Objects[j].Name, result.Objects[j].Name)
 }
 // FIXME: we should always check for ETag
-if result.Objects[j].ETag == "" && !strings.HasSuffix(result.Objects[j].Name, slashSeparator) {
+if result.Objects[j].ETag == "" && !strings.HasSuffix(result.Objects[j].Name, SlashSeparator) {
 t.Errorf("Test %d: %s: Expected ETag to be not empty, but found empty instead (%v)", i+1, instanceType, result.Objects[j].Name)
 }

@@ -680,7 +680,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
 // Expecting the result to contain one MultipartInfo entry and IsTruncated to be false.
 {
 MaxUploads: 2,
-Delimiter: "/",
+Delimiter: SlashSeparator,
 Prefix: "",
 IsTruncated: false,
 Uploads: []MultipartInfo{
@@ -1170,7 +1170,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
 {bucketNames[0], "orange", "", "", "", 2, listMultipartResults[12], nil, true},
 {bucketNames[0], "Asia", "", "", "", 2, listMultipartResults[13], nil, true},
 // setting delimiter (Test number 27).
-{bucketNames[0], "", "", "", "/", 2, listMultipartResults[14], nil, true},
+{bucketNames[0], "", "", "", SlashSeparator, 2, listMultipartResults[14], nil, true},
 //Test case with multiple uploadID listing for given object (Test number 28).
 {bucketNames[1], "", "", "", "", 100, listMultipartResults[15], nil, true},
 // Test case with multiple uploadID listing for given object, but uploadID marker set.
@@ -51,7 +51,7 @@ const (
 // Multipart meta prefix.
 mpartMetaPrefix = "multipart"
 // MinIO Multipart meta prefix.
-minioMetaMultipartBucket = minioMetaBucket + "/" + mpartMetaPrefix
+minioMetaMultipartBucket = minioMetaBucket + SlashSeparator + mpartMetaPrefix
 // MinIO Tmp meta prefix.
 minioMetaTmpBucket = minioMetaBucket + "/tmp"
 // DNS separator (period), used for bucket name validation.
@@ -131,12 +131,12 @@ func IsValidBucketName(bucket string) bool {
 //
 // - Backslash ("\")
 //
-// additionally minio does not support object names with trailing "/".
+// additionally minio does not support object names with trailing SlashSeparator.
 func IsValidObjectName(object string) bool {
 if len(object) == 0 {
 return false
 }
-if hasSuffix(object, slashSeparator) {
+if hasSuffix(object, SlashSeparator) {
 return false
 }
 return IsValidObjectPrefix(object)
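As the comments above state, an object name must be non-empty, must not contain a backslash, and must not end in SlashSeparator. A small illustrative re-statement of just those three rules (not the server's full IsValidObjectName/IsValidObjectPrefix validation):

package main

import (
	"fmt"
	"strings"
)

// validObjectName restates only the rules visible in the hunk above:
// non-empty, no trailing "/", no backslash anywhere in the name.
func validObjectName(object string) bool {
	if len(object) == 0 {
		return false
	}
	if strings.HasSuffix(object, "/") {
		return false
	}
	return !strings.Contains(object, `\`)
}

func main() {
	fmt.Println(validObjectName("photos/2019/jan.jpg")) // true
	fmt.Println(validObjectName("photos/"))             // false: trailing slash
	fmt.Println(validObjectName(`photos\jan.jpg`))      // false: backslash
}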
@@ -168,7 +168,7 @@ func checkObjectNameForLengthAndSlash(bucket, object string) error {
 }
 }
 // Check for slash as prefix in object name
-if hasPrefix(object, slashSeparator) {
+if hasPrefix(object, SlashSeparator) {
 return ObjectNamePrefixAsSlash{
 Bucket: bucket,
 Object: object,
@@ -177,20 +177,20 @@ func checkObjectNameForLengthAndSlash(bucket, object string) error {
 return nil
 }

-// Slash separator.
+// SlashSeparator - slash separator.
-const slashSeparator = "/"
+const SlashSeparator = "/"

 // retainSlash - retains slash from a path.
 func retainSlash(s string) string {
-return strings.TrimSuffix(s, slashSeparator) + slashSeparator
+return strings.TrimSuffix(s, SlashSeparator) + SlashSeparator
 }

-// pathJoin - like path.Join() but retains trailing "/" of the last element
+// pathJoin - like path.Join() but retains trailing SlashSeparator of the last element
 func pathJoin(elem ...string) string {
 trailingSlash := ""
 if len(elem) > 0 {
-if hasSuffix(elem[len(elem)-1], slashSeparator) {
+if hasSuffix(elem[len(elem)-1], SlashSeparator) {
-trailingSlash = "/"
+trailingSlash = SlashSeparator
 }
 }
 return path.Join(elem...) + trailingSlash
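The two helpers changed above are small enough to illustrate in isolation. A self-contained sketch (same logic as the hunk, using the now-exported constant and plain strings.HasSuffix in place of the repo's hasSuffix helper) shows why pathJoin exists: path.Join strips a trailing separator, while pathJoin keeps it so directory-style keys stay distinguishable:

package main

import (
	"fmt"
	"path"
	"strings"
)

const SlashSeparator = "/"

// retainSlash ensures exactly one trailing separator.
func retainSlash(s string) string {
	return strings.TrimSuffix(s, SlashSeparator) + SlashSeparator
}

// pathJoin joins elements like path.Join but keeps a trailing
// separator when the last element had one.
func pathJoin(elem ...string) string {
	trailingSlash := ""
	if len(elem) > 0 && strings.HasSuffix(elem[len(elem)-1], SlashSeparator) {
		trailingSlash = SlashSeparator
	}
	return path.Join(elem...) + trailingSlash
}

func main() {
	fmt.Println(retainSlash("bucket"))          // "bucket/"
	fmt.Println(path.Join("bucket", "prefix/")) // "bucket/prefix"  (trailing slash lost)
	fmt.Println(pathJoin("bucket", "prefix/"))  // "bucket/prefix/" (trailing slash kept)
}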
@@ -292,7 +292,7 @@ func isStringEqual(s1 string, s2 string) bool {

 // Ignores all reserved bucket names or invalid bucket names.
 func isReservedOrInvalidBucket(bucketEntry string, strict bool) bool {
-bucketEntry = strings.TrimSuffix(bucketEntry, slashSeparator)
+bucketEntry = strings.TrimSuffix(bucketEntry, SlashSeparator)
 if strict {
 if err := s3utils.CheckValidBucketNameStrict(bucketEntry); err != nil {
 return true
@@ -1233,7 +1233,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req

 var objectEncryptionKey []byte
 if objectAPI.IsEncryptionSupported() {
-if hasServerSideEncryptionHeader(r.Header) && !hasSuffix(object, slashSeparator) { // handle SSE requests
+if hasServerSideEncryptionHeader(r.Header) && !hasSuffix(object, SlashSeparator) { // handle SSE requests
 reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata)
 if err != nil {
 writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
@@ -482,7 +482,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a

 expectedContent: encodeResponse(getAPIErrorResponse(ctx,
 getAPIError(ErrNoSuchKey),
-"/"+bucketName+"/"+". ./. ./etc", "", "")),
+SlashSeparator+bucketName+SlashSeparator+". ./. ./etc", "", "")),
 expectedRespStatus: http.StatusNotFound,
 },
 // Test case - 9.
@@ -496,7 +496,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a

 expectedContent: encodeResponse(getAPIErrorResponse(ctx,
 getAPIError(ErrInvalidObjectName),
-"/"+bucketName+"/"+". ./../etc", "", "")),
+SlashSeparator+bucketName+SlashSeparator+". ./../etc", "", "")),
 expectedRespStatus: http.StatusBadRequest,
 },
 // Test case - 10.
@@ -1593,7 +1593,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 {
 bucketName: bucketName,
 uploadID: uploadID,
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
 expectedRespStatus: http.StatusOK,
@@ -1604,7 +1604,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 {
 bucketName: bucketName,
 uploadID: uploadID,
-copySourceHeader: url.QueryEscape("/"),
+copySourceHeader: url.QueryEscape(SlashSeparator),
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,

@@ -1617,7 +1617,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 {
 bucketName: bucketName,
 uploadID: uploadID,
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + testObject),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + testObject),
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,

@@ -1629,7 +1629,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 {
 bucketName: bucketName,
 uploadID: uploadID,
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 copySourceRange: "bytes=500-4096",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
@@ -1642,7 +1642,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 {
 bucketName: bucketName,
 uploadID: uploadID,
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 copySourceRange: "bytes=6145-",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
@@ -1655,7 +1655,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 {
 bucketName: bucketName,
 uploadID: uploadID,
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 copySourceRange: "bytes=0-6144",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
@@ -1683,7 +1683,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 {
 bucketName: bucketName,
 uploadID: uploadID,
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + "non-existent-object"),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + "non-existent-object"),
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,

@@ -1697,7 +1697,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 {
 bucketName: "non-existent-destination-bucket",
 uploadID: uploadID,
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,

@@ -1709,7 +1709,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 {
 bucketName: bucketName,
 uploadID: uploadID,
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 accessKey: "Invalid-AccessID",
 secretKey: credentials.SecretKey,

@@ -1721,7 +1721,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 {
 bucketName: bucketName,
 uploadID: "-1",
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,

@@ -1732,7 +1732,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 {
 bucketName: bucketName,
 uploadID: uploadID,
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 invalidPartNumber: true,
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
@@ -1743,7 +1743,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 {
 bucketName: bucketName,
 uploadID: uploadID,
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 maximumPartNumber: true,
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
@@ -1753,7 +1753,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 {
 bucketName: bucketName,
 uploadID: uploadID,
-copySourceHeader: url.QueryEscape("/"+bucketName+"/"+objectName) + "?versionId=null",
+copySourceHeader: url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName) + "?versionId=null",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
 expectedRespStatus: http.StatusOK,
@@ -1762,7 +1762,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 {
 bucketName: bucketName,
 uploadID: uploadID,
-copySourceHeader: url.QueryEscape("/"+bucketName+"/"+objectName) + "?versionId=17",
+copySourceHeader: url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName) + "?versionId=17",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
 expectedRespStatus: http.StatusNotFound,
@@ -1771,7 +1771,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 {
 bucketName: bucketName,
 uploadID: uploadID,
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 copySourceVersionID: "null",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
@@ -1781,7 +1781,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 {
 bucketName: bucketName,
 uploadID: uploadID,
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 copySourceVersionID: "17",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
@@ -1852,7 +1852,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 // Below is how CopyObjectPartHandler is registered.
 // bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(api.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
 // Its necessary to set the "X-Amz-Copy-Source" header for the request to be accepted by the handler.
-nilReq.Header.Set("X-Amz-Copy-Source", url.QueryEscape("/"+nilBucket+"/"+nilObject))
+nilReq.Header.Set("X-Amz-Copy-Source", url.QueryEscape(SlashSeparator+nilBucket+SlashSeparator+nilObject))

 // execute the object layer set to `nil` test.
 // `ExecObjectLayerAPINilTest` manages the operation.
@@ -1947,7 +1947,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: "newObject1",
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
 metadata: map[string]string{
@@ -1961,7 +1961,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: "newObject1",
-copySourceHeader: url.QueryEscape("/"),
+copySourceHeader: url.QueryEscape(SlashSeparator),
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,

@@ -1973,7 +1973,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: objectName,
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,

@@ -1986,7 +1986,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: objectName,
-copySourceHeader: url.QueryEscape(bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(bucketName + SlashSeparator + objectName),
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,

@@ -1999,7 +1999,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: objectName,
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 metadata: map[string]string{
 "Content-Type": "application/json",
 },
@@ -2015,7 +2015,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: "newObject1",
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 metadata: map[string]string{
 "Content-Type": "application/json",
 },
@@ -2032,7 +2032,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: objectName,
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 metadata: map[string]string{
 "Content-Type": "application/json",
 },
@@ -2050,7 +2050,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: objectName,
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + "non-existent-object"),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + "non-existent-object"),
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,

@@ -2064,7 +2064,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: "non-existent-destination-bucket",
 newObjectName: objectName,
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,

@@ -2076,7 +2076,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: objectName,
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 accessKey: "Invalid-AccessID",
 secretKey: credentials.SecretKey,

@@ -2086,7 +2086,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: "newObject1",
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 copyModifiedHeader: "Mon, 02 Jan 2006 15:04:05 GMT",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
@@ -2096,7 +2096,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: "newObject1",
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 copyModifiedHeader: "Mon, 02 Jan 2217 15:04:05 GMT",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
@@ -2106,7 +2106,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: "newObject1",
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 copyModifiedHeader: "Mon, 02 Jan 2217 15:04:05 +00:00",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
@@ -2116,7 +2116,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: "newObject1",
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 copyUnmodifiedHeader: "Mon, 02 Jan 2217 15:04:05 GMT",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
@@ -2126,7 +2126,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: "newObject1",
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 copyUnmodifiedHeader: "Mon, 02 Jan 2007 15:04:05 GMT",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
@@ -2136,7 +2136,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: "newObject1",
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 copyUnmodifiedHeader: "Mon, 02 Jan 2007 15:04:05 +00:00",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
@@ -2146,7 +2146,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: "newObject1",
-copySourceHeader: url.QueryEscape("/"+bucketName+"/"+objectName) + "?versionId=null",
+copySourceHeader: url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName) + "?versionId=null",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
 expectedRespStatus: http.StatusOK,
@@ -2155,7 +2155,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: "newObject1",
-copySourceHeader: url.QueryEscape("/"+bucketName+"/"+objectName) + "?versionId=17",
+copySourceHeader: url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName) + "?versionId=17",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
 expectedRespStatus: http.StatusNotFound,
@@ -2164,7 +2164,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: "newObject1",
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 copySourceVersionID: "null",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
@@ -2174,7 +2174,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 {
 bucketName: bucketName,
 newObjectName: "newObject1",
-copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
 copySourceVersionID: "17",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
@@ -2307,7 +2307,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 // Below is how CopyObjectHandler is registered.
 // bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?")
 // Its necessary to set the "X-Amz-Copy-Source" header for the request to be accepted by the handler.
-nilReq.Header.Set("X-Amz-Copy-Source", url.QueryEscape("/"+nilBucket+"/"+nilObject))
+nilReq.Header.Set("X-Amz-Copy-Source", url.QueryEscape(SlashSeparator+nilBucket+SlashSeparator+nilObject))
 if err != nil {
 t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
 }
@@ -340,7 +340,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
 if err != nil {
 t.Fatalf("%s: <ERROR> %s", instanceType, err)
 }
-result, err = obj.ListObjects(context.Background(), "bucket", "this/is/", "", "/", 10)
+result, err = obj.ListObjects(context.Background(), "bucket", "this/is/", "", SlashSeparator, 10)
 if err != nil {
 t.Fatalf("%s: <ERROR> %s", instanceType, err)
 }
@@ -354,7 +354,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {

 // check delimited results with delimiter without prefix.
 {
-result, err = obj.ListObjects(context.Background(), "bucket", "", "", "/", 1000)
+result, err = obj.ListObjects(context.Background(), "bucket", "", "", SlashSeparator, 1000)
 if err != nil {
 t.Fatalf("%s: <ERROR> %s", instanceType, err)
 }
@@ -808,38 +808,38 @@ func (s *peerRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool {
 func registerPeerRESTHandlers(router *mux.Router) {
 server := &peerRESTServer{}
 subrouter := router.PathPrefix(peerRESTPath).Subrouter()
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodGetLocks).HandlerFunc(httpTraceHdrs(server.GetLocksHandler))
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodGetLocks).HandlerFunc(httpTraceHdrs(server.GetLocksHandler))
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodServerInfo).HandlerFunc(httpTraceHdrs(server.ServerInfoHandler))
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodServerInfo).HandlerFunc(httpTraceHdrs(server.ServerInfoHandler))
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodCPULoadInfo).HandlerFunc(httpTraceHdrs(server.CPULoadInfoHandler))
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodCPULoadInfo).HandlerFunc(httpTraceHdrs(server.CPULoadInfoHandler))
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodMemUsageInfo).HandlerFunc(httpTraceHdrs(server.MemUsageInfoHandler))
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodMemUsageInfo).HandlerFunc(httpTraceHdrs(server.MemUsageInfoHandler))
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodDrivePerfInfo).HandlerFunc(httpTraceHdrs(server.DrivePerfInfoHandler))
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodDrivePerfInfo).HandlerFunc(httpTraceHdrs(server.DrivePerfInfoHandler))
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodDeleteBucket).HandlerFunc(httpTraceHdrs(server.DeleteBucketHandler)).Queries(restQueries(peerRESTBucket)...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodDeleteBucket).HandlerFunc(httpTraceHdrs(server.DeleteBucketHandler)).Queries(restQueries(peerRESTBucket)...)
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodSignalService).HandlerFunc(httpTraceHdrs(server.SignalServiceHandler)).Queries(restQueries(peerRESTSignal)...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodSignalService).HandlerFunc(httpTraceHdrs(server.SignalServiceHandler)).Queries(restQueries(peerRESTSignal)...)

-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBucketPolicyRemove).HandlerFunc(httpTraceAll(server.RemoveBucketPolicyHandler)).Queries(restQueries(peerRESTBucket)...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBucketPolicyRemove).HandlerFunc(httpTraceAll(server.RemoveBucketPolicyHandler)).Queries(restQueries(peerRESTBucket)...)
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBucketPolicySet).HandlerFunc(httpTraceHdrs(server.SetBucketPolicyHandler)).Queries(restQueries(peerRESTBucket)...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBucketPolicySet).HandlerFunc(httpTraceHdrs(server.SetBucketPolicyHandler)).Queries(restQueries(peerRESTBucket)...)

-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodDeletePolicy).HandlerFunc(httpTraceAll(server.LoadPolicyHandler)).Queries(restQueries(peerRESTPolicy)...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodDeletePolicy).HandlerFunc(httpTraceAll(server.LoadPolicyHandler)).Queries(restQueries(peerRESTPolicy)...)
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodLoadPolicy).HandlerFunc(httpTraceAll(server.LoadPolicyHandler)).Queries(restQueries(peerRESTPolicy)...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodLoadPolicy).HandlerFunc(httpTraceAll(server.LoadPolicyHandler)).Queries(restQueries(peerRESTPolicy)...)
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodDeleteUser).HandlerFunc(httpTraceAll(server.LoadUserHandler)).Queries(restQueries(peerRESTUser)...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodDeleteUser).HandlerFunc(httpTraceAll(server.LoadUserHandler)).Queries(restQueries(peerRESTUser)...)
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodLoadUser).HandlerFunc(httpTraceAll(server.LoadUserHandler)).Queries(restQueries(peerRESTUser, peerRESTUserTemp)...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodLoadUser).HandlerFunc(httpTraceAll(server.LoadUserHandler)).Queries(restQueries(peerRESTUser, peerRESTUserTemp)...)
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodLoadUsers).HandlerFunc(httpTraceAll(server.LoadUsersHandler))
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodLoadUsers).HandlerFunc(httpTraceAll(server.LoadUsersHandler))
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodLoadGroup).HandlerFunc(httpTraceAll(server.LoadGroupHandler)).Queries(restQueries(peerRESTGroup)...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodLoadGroup).HandlerFunc(httpTraceAll(server.LoadGroupHandler)).Queries(restQueries(peerRESTGroup)...)

-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodStartProfiling).HandlerFunc(httpTraceAll(server.StartProfilingHandler)).Queries(restQueries(peerRESTProfiler)...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodStartProfiling).HandlerFunc(httpTraceAll(server.StartProfilingHandler)).Queries(restQueries(peerRESTProfiler)...)
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodDownloadProfilingData).HandlerFunc(httpTraceHdrs(server.DownloadProflingDataHandler))
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodDownloadProfilingData).HandlerFunc(httpTraceHdrs(server.DownloadProflingDataHandler))

-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodTargetExists).HandlerFunc(httpTraceHdrs(server.TargetExistsHandler)).Queries(restQueries(peerRESTBucket)...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodTargetExists).HandlerFunc(httpTraceHdrs(server.TargetExistsHandler)).Queries(restQueries(peerRESTBucket)...)
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodSendEvent).HandlerFunc(httpTraceHdrs(server.SendEventHandler)).Queries(restQueries(peerRESTBucket)...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodSendEvent).HandlerFunc(httpTraceHdrs(server.SendEventHandler)).Queries(restQueries(peerRESTBucket)...)
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBucketNotificationPut).HandlerFunc(httpTraceHdrs(server.PutBucketNotificationHandler)).Queries(restQueries(peerRESTBucket)...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBucketNotificationPut).HandlerFunc(httpTraceHdrs(server.PutBucketNotificationHandler)).Queries(restQueries(peerRESTBucket)...)
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBucketNotificationListen).HandlerFunc(httpTraceHdrs(server.ListenBucketNotificationHandler)).Queries(restQueries(peerRESTBucket)...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBucketNotificationListen).HandlerFunc(httpTraceHdrs(server.ListenBucketNotificationHandler)).Queries(restQueries(peerRESTBucket)...)

-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodReloadFormat).HandlerFunc(httpTraceHdrs(server.ReloadFormatHandler)).Queries(restQueries(peerRESTDryRun)...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodReloadFormat).HandlerFunc(httpTraceHdrs(server.ReloadFormatHandler)).Queries(restQueries(peerRESTDryRun)...)
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBucketLifecycleSet).HandlerFunc(httpTraceHdrs(server.SetBucketLifecycleHandler)).Queries(restQueries(peerRESTBucket)...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBucketLifecycleSet).HandlerFunc(httpTraceHdrs(server.SetBucketLifecycleHandler)).Queries(restQueries(peerRESTBucket)...)
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBucketLifecycleRemove).HandlerFunc(httpTraceHdrs(server.RemoveBucketLifecycleHandler)).Queries(restQueries(peerRESTBucket)...)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBucketLifecycleRemove).HandlerFunc(httpTraceHdrs(server.RemoveBucketLifecycleHandler)).Queries(restQueries(peerRESTBucket)...)

-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodTrace).HandlerFunc(server.TraceHandler)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodTrace).HandlerFunc(server.TraceHandler)
-subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBackgroundHealStatus).HandlerFunc(server.BackgroundHealStatusHandler)
+subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBackgroundHealStatus).HandlerFunc(server.BackgroundHealStatusHandler)

 router.NotFoundHandler = http.HandlerFunc(httpTraceAll(notFoundHandler))
 }
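Every peer REST route above follows the same shape: POST, a shared path prefix, and SlashSeparator plus a method name. A minimal gorilla/mux sketch of that registration pattern, with a hypothetical prefix and method name standing in for the real peerRESTPath/peerRESTMethod* values and the tracing wrappers omitted:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

const SlashSeparator = "/"

// Hypothetical stand-ins for peerRESTPath / peerRESTMethod* above.
const (
	peerRESTPath             = "/minio/peer/v1"
	peerRESTMethodServerInfo = "serverinfo"
)

func serverInfoHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintln(w, "ok")
}

func main() {
	router := mux.NewRouter()
	subrouter := router.PathPrefix(peerRESTPath).Subrouter()
	// Each peer method is registered as: POST <peerRESTPath> + SlashSeparator + <method>.
	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodServerInfo).HandlerFunc(serverInfoHandler)
	log.Fatal(http.ListenAndServe(":9000", router))
}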
@@ -86,7 +86,7 @@ func readDirN(dirPath string, count int) (entries []string, err error) {
 }
 // Append to entries if symbolic link exists and is valid.
 if st.IsDir() {
-entries = append(entries, fi.Name()+slashSeparator)
+entries = append(entries, fi.Name()+SlashSeparator)
 } else if st.Mode().IsRegular() {
 entries = append(entries, fi.Name())
 }
@@ -96,8 +96,8 @@ func readDirN(dirPath string, count int) (entries []string, err error) {
 continue
 }
 if fi.Mode().IsDir() {
-// Append "/" instead of "\" so that sorting is achieved as expected.
+// Append SlashSeparator instead of "\" so that sorting is achieved as expected.
-entries = append(entries, fi.Name()+slashSeparator)
+entries = append(entries, fi.Name()+SlashSeparator)
 } else if fi.Mode().IsRegular() {
 entries = append(entries, fi.Name())
 }
@@ -69,7 +69,7 @@ func parseDirents(dirPath string, buf []byte) (entries []string, err error) {

 switch dirent.Type {
 case syscall.DT_DIR:
-entries = append(entries, name+slashSeparator)
+entries = append(entries, name+SlashSeparator)
 case syscall.DT_REG:
 entries = append(entries, name)
 case syscall.DT_LNK, syscall.DT_UNKNOWN:
@@ -89,7 +89,7 @@ func parseDirents(dirPath string, buf []byte) (entries []string, err error) {
 return nil, err
 }
 if fi.IsDir() {
-entries = append(entries, name+slashSeparator)
+entries = append(entries, name+SlashSeparator)
 } else if fi.Mode().IsRegular() {
 entries = append(entries, name)
 }
@@ -92,12 +92,12 @@ func readDirN(dirPath string, count int) (entries []string, err error) {
 return nil, err
 }
 if fi.IsDir() {
-entries = append(entries, name+slashSeparator)
+entries = append(entries, name+SlashSeparator)
 } else if fi.Mode().IsRegular() {
 entries = append(entries, name)
 }
 case data.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0:
-entries = append(entries, name+slashSeparator)
+entries = append(entries, name+SlashSeparator)
 default:
 entries = append(entries, name)
 }
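All of the readDir variants above mark directories by appending SlashSeparator to the entry name, so callers can tell files from directories (and keep the expected sort order) without a second stat. A small sketch of that convention using only the standard library (Go 1.16+ os.ReadDir; not the platform-specific readers in this commit):

package main

import (
	"fmt"
	"os"
	"strings"
)

const SlashSeparator = "/"

// readDirEntries lists a directory, appending SlashSeparator to
// sub-directories, mirroring the convention used in the hunks above.
func readDirEntries(dirPath string) ([]string, error) {
	fis, err := os.ReadDir(dirPath)
	if err != nil {
		return nil, err
	}
	var entries []string
	for _, fi := range fis {
		if fi.IsDir() {
			entries = append(entries, fi.Name()+SlashSeparator)
		} else {
			entries = append(entries, fi.Name())
		}
	}
	return entries, nil
}

func main() {
	entries, err := readDirEntries(".")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	for _, e := range entries {
		if strings.HasSuffix(e, SlashSeparator) {
			fmt.Println("dir: ", e)
		} else {
			fmt.Println("file:", e)
		}
	}
}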
cmd/posix.go
@@ -95,7 +95,7 @@ func checkPathLength(pathName string) error {
 }

 // Check each path segment length is > 255
-for len(pathName) > 0 && pathName != "." && pathName != "/" {
+for len(pathName) > 0 && pathName != "." && pathName != SlashSeparator {
 dir, file := slashpath.Dir(pathName), slashpath.Base(pathName)

 if len(file) > 255 {
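The loop above walks the path from its base name toward the root, rejecting any single segment longer than 255 bytes. The same walk as a standalone sketch (only the per-segment check visible in this hunk is reproduced; the surrounding function performs checks not shown here):

package main

import (
	"errors"
	"fmt"
	slashpath "path"
)

const SlashSeparator = "/"

var errFileNameTooLong = errors.New("file name too long")

// checkSegmentLengths rejects any path segment longer than 255 bytes,
// walking from the base name up to the root, as in the hunk above.
func checkSegmentLengths(pathName string) error {
	for len(pathName) > 0 && pathName != "." && pathName != SlashSeparator {
		dir, file := slashpath.Dir(pathName), slashpath.Base(pathName)
		if len(file) > 255 {
			return errFileNameTooLong
		}
		pathName = dir
	}
	return nil
}

func main() {
	fmt.Println(checkSegmentLengths("/export/bucket/object.txt")) // <nil>
}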
@@ -558,7 +558,7 @@ func listVols(dirPath string) ([]VolInfo, error) {
 }
 var volsInfo []VolInfo
 for _, entry := range entries {
-if !hasSuffix(entry, slashSeparator) || !isValidVolname(slashpath.Clean(entry)) {
+if !hasSuffix(entry, SlashSeparator) || !isValidVolname(slashpath.Clean(entry)) {
 // Skip if entry is neither a directory not a valid volume name.
 continue
 }
@@ -718,7 +718,7 @@ func (s *posix) Walk(volume, dirPath, marker string, recursive bool, leafFile st
 return
 }
 var fi FileInfo
-if hasSuffix(walkResult.entry, slashSeparator) {
+if hasSuffix(walkResult.entry, SlashSeparator) {
 fi = FileInfo{
 Volume: volume,
 Name: walkResult.entry,
@@ -743,7 +743,7 @@ func (s *posix) Walk(volume, dirPath, marker string, recursive bool, leafFile st
 }

 // ListDir - return all the entries at the given directory path.
-// If an entry is a directory it will be returned with a trailing "/".
+// If an entry is a directory it will be returned with a trailing SlashSeparator.
 func (s *posix) ListDir(volume, dirPath string, count int, leafFile string) (entries []string, err error) {
 defer func() {
 if err == errFaultyDisk {
@@ -786,7 +786,7 @@ func (s *posix) ListDir(volume, dirPath string, count int, leafFile string) (ent
 if leafFile != "" {
 for i, entry := range entries {
 if _, serr := os.Stat(pathJoin(dirPath, entry, leafFile)); serr == nil {
-entries[i] = strings.TrimSuffix(entry, slashSeparator)
+entries[i] = strings.TrimSuffix(entry, SlashSeparator)
 }
 }
 }
@@ -1390,7 +1390,7 @@ func deleteFile(basePath, deletePath string) error {

 // Trailing slash is removed when found to ensure
 // slashpath.Dir() to work as intended.
-deletePath = strings.TrimSuffix(deletePath, slashSeparator)
+deletePath = strings.TrimSuffix(deletePath, SlashSeparator)
 deletePath = slashpath.Dir(deletePath)

 // Delete parent directory. Errors for parent directories shouldn't trickle down.
@@ -1430,7 +1430,7 @@ func (s *posix) DeleteFile(volume, path string) (err error) {
 return err
 }

-// Following code is needed so that we retain "/" suffix if any in
+// Following code is needed so that we retain SlashSeparator suffix if any in
 // path argument.
 filePath := pathJoin(volumeDir, path)
 if err = checkPathLength((filePath)); err != nil {
@@ -1492,8 +1492,8 @@ func (s *posix) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err e
 }
 }

-srcIsDir := hasSuffix(srcPath, slashSeparator)
+srcIsDir := hasSuffix(srcPath, SlashSeparator)
-dstIsDir := hasSuffix(dstPath, slashSeparator)
+dstIsDir := hasSuffix(dstPath, SlashSeparator)
 // Either src and dst have to be directories or files, else return error.
 if !(srcIsDir && dstIsDir || !srcIsDir && !dstIsDir) {
 return errFileAccessDenied
@@ -69,7 +69,7 @@ func TestIsValidVolname(t *testing.T) {
 // cases for which test should fail.
 // passing invalid bucket names.
 {"", false},
-{"/", false},
+{SlashSeparator, false},
 {"a", false},
 {"ab", false},
 {"ab/", true},
@@ -319,9 +319,9 @@ func TestPosixReadAll(t *testing.T) {
 // TestPosixNewPosix all the cases handled in posix storage layer initialization.
 func TestPosixNewPosix(t *testing.T) {
 // Temporary dir name.
-tmpDirName := globalTestTmpDir + "/" + "minio-" + nextSuffix()
+tmpDirName := globalTestTmpDir + SlashSeparator + "minio-" + nextSuffix()
 // Temporary file name.
-tmpFileName := globalTestTmpDir + "/" + "minio-" + nextSuffix()
+tmpFileName := globalTestTmpDir + SlashSeparator + "minio-" + nextSuffix()
 f, _ := os.Create(tmpFileName)
 f.Close()
 defer os.Remove(tmpFileName)
@@ -52,9 +52,15 @@ type Client struct {
 newAuthToken func() string
 }

+// URL query separator constants
+const (
+resourceSep = "/"
+querySep = "?"
+)
+
 // CallWithContext - make a REST call with context.
 func (c *Client) CallWithContext(ctx context.Context, method string, values url.Values, body io.Reader, length int64) (reply io.ReadCloser, err error) {
-req, err := http.NewRequest(http.MethodPost, c.url.String()+"/"+method+"?"+values.Encode(), body)
+req, err := http.NewRequest(http.MethodPost, c.url.String()+resourceSep+method+querySep+values.Encode(), body)
 if err != nil {
 return nil, &NetworkError{err}
 }
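With the new constants, the REST call URL is simply base + resourceSep + method + querySep + encoded query values. A tiny sketch of that concatenation, using a hypothetical endpoint and query:

package main

import (
	"fmt"
	"net/url"
)

const (
	resourceSep = "/"
	querySep    = "?"
)

func main() {
	base := "http://peer1:9000/minio/peer/v1" // hypothetical endpoint
	method := "serverinfo"

	values := url.Values{}
	values.Set("bucket", "photos")

	// Mirrors the construction in CallWithContext above.
	target := base + resourceSep + method + querySep + values.Encode()
	fmt.Println(target) // http://peer1:9000/minio/peer/v1/serverinfo?bucket=photos
}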
|
@ -1000,7 +1000,7 @@ func (s *TestSuiteCommon) TestMultipleObjects(c *check) {
|
|||||||
func (s *TestSuiteCommon) TestNotImplemented(c *check) {
|
func (s *TestSuiteCommon) TestNotImplemented(c *check) {
|
||||||
// Generate a random bucket name.
|
// Generate a random bucket name.
|
||||||
bucketName := getRandomBucketName()
|
bucketName := getRandomBucketName()
|
||||||
request, err := newTestSignedRequest("GET", s.endPoint+"/"+bucketName+"/object?policy",
|
request, err := newTestSignedRequest("GET", s.endPoint+SlashSeparator+bucketName+"/object?policy",
|
||||||
0, nil, s.accessKey, s.secretKey, s.signer)
|
0, nil, s.accessKey, s.secretKey, s.signer)
|
||||||
c.Assert(err, nil)
|
c.Assert(err, nil)
|
||||||
|
|
||||||
@ -1111,7 +1111,7 @@ func (s *TestSuiteCommon) TestCopyObject(c *check) {
|
|||||||
request, err = newTestRequest("PUT", getPutObjectURL(s.endPoint, bucketName, objectName2), 0, nil)
|
request, err = newTestRequest("PUT", getPutObjectURL(s.endPoint, bucketName, objectName2), 0, nil)
|
||||||
c.Assert(err, nil)
|
c.Assert(err, nil)
|
||||||
// setting the "X-Amz-Copy-Source" to allow copying the content of previously uploaded object.
|
// setting the "X-Amz-Copy-Source" to allow copying the content of previously uploaded object.
|
||||||
request.Header.Set("X-Amz-Copy-Source", url.QueryEscape("/"+bucketName+"/"+objectName))
|
request.Header.Set("X-Amz-Copy-Source", url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName))
|
||||||
if s.signer == signerV4 {
|
if s.signer == signerV4 {
|
||||||
err = signRequestV4(request, s.accessKey, s.secretKey)
|
err = signRequestV4(request, s.accessKey, s.secretKey)
|
||||||
} else {
|
} else {
|
||||||
@ -1821,7 +1821,7 @@ func (s *TestSuiteCommon) TestPutBucketErrors(c *check) {
|
|||||||
|
|
||||||
// request for ACL.
|
// request for ACL.
|
||||||
// Since MinIO server doesn't support ACL's the request is expected to fail with "NotImplemented" error message.
|
// Since MinIO server doesn't support ACL's the request is expected to fail with "NotImplemented" error message.
|
||||||
request, err = newTestSignedRequest("PUT", s.endPoint+"/"+bucketName+"?acl",
|
request, err = newTestSignedRequest("PUT", s.endPoint+SlashSeparator+bucketName+"?acl",
|
||||||
0, nil, s.accessKey, s.secretKey, s.signer)
|
0, nil, s.accessKey, s.secretKey, s.signer)
|
||||||
c.Assert(err, nil)
|
c.Assert(err, nil)
|
||||||
|
|
||||||
|
@ -209,7 +209,7 @@ func getReqAccessKeyV2(r *http.Request) (auth.Credentials, bool, APIErrorCode) {
|
|||||||
// CanonicalizedProtocolHeaders +
|
// CanonicalizedProtocolHeaders +
|
||||||
// CanonicalizedResource;
|
// CanonicalizedResource;
|
||||||
//
|
//
|
||||||
// CanonicalizedResource = [ "/" + Bucket ] +
|
// CanonicalizedResource = [ SlashSeparator + Bucket ] +
|
||||||
// <HTTP-Request-URI, from the protocol name up to the query string> +
|
// <HTTP-Request-URI, from the protocol name up to the query string> +
|
||||||
// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
|
// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
|
||||||
//
|
//
|
||||||
|
@ -44,7 +44,7 @@ func (c credentialHeader) getScope() string {
|
|||||||
c.scope.region,
|
c.scope.region,
|
||||||
c.scope.service,
|
c.scope.service,
|
||||||
c.scope.request,
|
c.scope.request,
|
||||||
}, "/")
|
}, SlashSeparator)
|
||||||
}
|
}
|
||||||
|
|
||||||
func getReqAccessKeyV4(r *http.Request, region string, stype serviceType) (auth.Credentials, bool, APIErrorCode) {
|
func getReqAccessKeyV4(r *http.Request, region string, stype serviceType) (auth.Credentials, bool, APIErrorCode) {
|
||||||
@ -73,11 +73,11 @@ func parseCredentialHeader(credElement string, region string, stype serviceType)
|
|||||||
if creds[0] != "Credential" {
|
if creds[0] != "Credential" {
|
||||||
return ch, ErrMissingCredTag
|
return ch, ErrMissingCredTag
|
||||||
}
|
}
|
||||||
credElements := strings.Split(strings.TrimSpace(creds[1]), "/")
|
credElements := strings.Split(strings.TrimSpace(creds[1]), SlashSeparator)
|
||||||
if len(credElements) < 5 {
|
if len(credElements) < 5 {
|
||||||
return ch, ErrCredMalformed
|
return ch, ErrCredMalformed
|
||||||
}
|
}
|
||||||
accessKey := strings.Join(credElements[:len(credElements)-4], "/") // The access key may contain one or more `/`
|
accessKey := strings.Join(credElements[:len(credElements)-4], SlashSeparator) // The access key may contain one or more `/`
|
||||||
if !auth.IsAccessKeyValid(accessKey) {
|
if !auth.IsAccessKeyValid(accessKey) {
|
||||||
return ch, ErrInvalidAccessKeyID
|
return ch, ErrInvalidAccessKeyID
|
||||||
}
|
}
|
||||||
|
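The rewrite above preserves the existing parsing rule: the Credential element is split on the separator, the trailing four fields form the scope, and everything before them is rejoined so that access keys containing "/" survive intact. A standalone sketch of that rule, with illustrative function and variable names:

package main

import (
	"fmt"
	"strings"
)

const SlashSeparator = "/"

// splitCredential separates a credential element of the form
// <accessKey>/<date>/<region>/<service>/<request>, where the access key
// itself may contain slashes.
func splitCredential(credElement string) (accessKey string, scope []string, ok bool) {
	parts := strings.Split(credElement, SlashSeparator)
	if len(parts) < 5 {
		return "", nil, false
	}
	accessKey = strings.Join(parts[:len(parts)-4], SlashSeparator)
	return accessKey, parts[len(parts)-4:], true
}

func main() {
	key, scope, _ := splitCredential("AKIA/with/slash/20190101/us-east-1/s3/aws4_request")
	fmt.Println(key, scope) // AKIA/with/slash [20190101 us-east-1 s3 aws4_request]
}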
@ -36,7 +36,7 @@ func joinWithSlash(accessKey, date, region, service, requestVersion string) stri
|
|||||||
date,
|
date,
|
||||||
region,
|
region,
|
||||||
service,
|
service,
|
||||||
requestVersion}, "/")
|
requestVersion}, SlashSeparator)
|
||||||
}
|
}
|
||||||
|
|
||||||
// generate CredentialHeader from its fields.
|
// generate CredentialHeader from its fields.
|
||||||
@ -79,12 +79,12 @@ func validateCredentialfields(t *testing.T, testNum int, expectedCredentials cre
|
|||||||
|
|
||||||
// TestParseCredentialHeader - validates the format validator and extractor for the Credential header in an aws v4 request.
|
// TestParseCredentialHeader - validates the format validator and extractor for the Credential header in an aws v4 request.
|
||||||
// A valid credential should be of the following format.
|
// A valid credential should be of the following format.
|
||||||
// Credential = accessKey + "/"+ scope
|
// Credential = accessKey + SlashSeparator+ scope
|
||||||
// where scope = string.Join([]string{ currTime.Format(yyyymmdd),
|
// where scope = string.Join([]string{ currTime.Format(yyyymmdd),
|
||||||
// globalMinioDefaultRegion,
|
// globalMinioDefaultRegion,
|
||||||
// "s3",
|
// "s3",
|
||||||
// "aws4_request",
|
// "aws4_request",
|
||||||
// },"/")
|
// },SlashSeparator)
|
||||||
func TestParseCredentialHeader(t *testing.T) {
|
func TestParseCredentialHeader(t *testing.T) {
|
||||||
|
|
||||||
sampleTimeStr := UTCNow().Format(yyyymmdd)
|
sampleTimeStr := UTCNow().Format(yyyymmdd)
|
||||||
|
@ -120,7 +120,7 @@ func getScope(t time.Time, region string) string {
|
|||||||
region,
|
region,
|
||||||
string(serviceS3),
|
string(serviceS3),
|
||||||
"aws4_request",
|
"aws4_request",
|
||||||
}, "/")
|
}, SlashSeparator)
|
||||||
return scope
|
return scope
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -248,7 +248,7 @@ func doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s
|
|||||||
query.Set(xhttp.AmzDate, t.Format(iso8601Format))
|
query.Set(xhttp.AmzDate, t.Format(iso8601Format))
|
||||||
query.Set(xhttp.AmzExpires, strconv.Itoa(expireSeconds))
|
query.Set(xhttp.AmzExpires, strconv.Itoa(expireSeconds))
|
||||||
query.Set(xhttp.AmzSignedHeaders, getSignedHeaders(extractedSignedHeaders))
|
query.Set(xhttp.AmzSignedHeaders, getSignedHeaders(extractedSignedHeaders))
|
||||||
query.Set(xhttp.AmzCredential, cred.AccessKey+"/"+pSignValues.Credential.getScope())
|
query.Set(xhttp.AmzCredential, cred.AccessKey+SlashSeparator+pSignValues.Credential.getScope())
|
||||||
|
|
||||||
// Save other headers available in the request parameters.
|
// Save other headers available in the request parameters.
|
||||||
for k, v := range req.URL.Query() {
|
for k, v := range req.URL.Query() {
|
||||||
|
@ -17,7 +17,7 @@
|
|||||||
package cmd
|
package cmd
|
||||||
|
|
||||||
const storageRESTVersion = "v8"
|
const storageRESTVersion = "v8"
|
||||||
const storageRESTPath = minioReservedBucketPath + "/storage/" + storageRESTVersion + "/"
|
const storageRESTPath = minioReservedBucketPath + "/storage/" + storageRESTVersion + SlashSeparator
|
||||||
|
|
||||||
const (
|
const (
|
||||||
storageRESTMethodDiskInfo = "diskinfo"
|
storageRESTMethodDiskInfo = "diskinfo"
|
||||||
|
@ -567,41 +567,41 @@ func registerStorageRESTHandlers(router *mux.Router, endpoints EndpointList) {
|
|||||||
|
|
||||||
subrouter := router.PathPrefix(path.Join(storageRESTPath, endpoint.Path)).Subrouter()
|
subrouter := router.PathPrefix(path.Join(storageRESTPath, endpoint.Path)).Subrouter()
|
||||||
|
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodDiskInfo).HandlerFunc(httpTraceHdrs(server.DiskInfoHandler))
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodDiskInfo).HandlerFunc(httpTraceHdrs(server.DiskInfoHandler))
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodMakeVol).HandlerFunc(httpTraceHdrs(server.MakeVolHandler)).Queries(restQueries(storageRESTVolume)...)
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodMakeVol).HandlerFunc(httpTraceHdrs(server.MakeVolHandler)).Queries(restQueries(storageRESTVolume)...)
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodStatVol).HandlerFunc(httpTraceHdrs(server.StatVolHandler)).Queries(restQueries(storageRESTVolume)...)
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodStatVol).HandlerFunc(httpTraceHdrs(server.StatVolHandler)).Queries(restQueries(storageRESTVolume)...)
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodDeleteVol).HandlerFunc(httpTraceHdrs(server.DeleteVolHandler)).Queries(restQueries(storageRESTVolume)...)
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodDeleteVol).HandlerFunc(httpTraceHdrs(server.DeleteVolHandler)).Queries(restQueries(storageRESTVolume)...)
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodListVols).HandlerFunc(httpTraceHdrs(server.ListVolsHandler))
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodListVols).HandlerFunc(httpTraceHdrs(server.ListVolsHandler))
|
||||||
|
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodAppendFile).HandlerFunc(httpTraceHdrs(server.AppendFileHandler)).
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodAppendFile).HandlerFunc(httpTraceHdrs(server.AppendFileHandler)).
|
||||||
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
|
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodWriteAll).HandlerFunc(httpTraceHdrs(server.WriteAllHandler)).
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodWriteAll).HandlerFunc(httpTraceHdrs(server.WriteAllHandler)).
|
||||||
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
|
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodCreateFile).HandlerFunc(httpTraceHdrs(server.CreateFileHandler)).
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodCreateFile).HandlerFunc(httpTraceHdrs(server.CreateFileHandler)).
|
||||||
Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTLength)...)
|
Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTLength)...)
|
||||||
|
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodStatFile).HandlerFunc(httpTraceHdrs(server.StatFileHandler)).
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodStatFile).HandlerFunc(httpTraceHdrs(server.StatFileHandler)).
|
||||||
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
|
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodReadAll).HandlerFunc(httpTraceHdrs(server.ReadAllHandler)).
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodReadAll).HandlerFunc(httpTraceHdrs(server.ReadAllHandler)).
|
||||||
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
|
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodReadFile).HandlerFunc(httpTraceHdrs(server.ReadFileHandler)).
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodReadFile).HandlerFunc(httpTraceHdrs(server.ReadFileHandler)).
|
||||||
Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTOffset, storageRESTLength, storageRESTBitrotAlgo, storageRESTBitrotHash)...)
|
Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTOffset, storageRESTLength, storageRESTBitrotAlgo, storageRESTBitrotHash)...)
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodReadFileStream).HandlerFunc(httpTraceHdrs(server.ReadFileStreamHandler)).
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodReadFileStream).HandlerFunc(httpTraceHdrs(server.ReadFileStreamHandler)).
|
||||||
Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTOffset, storageRESTLength)...)
|
Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTOffset, storageRESTLength)...)
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodListDir).HandlerFunc(httpTraceHdrs(server.ListDirHandler)).
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodListDir).HandlerFunc(httpTraceHdrs(server.ListDirHandler)).
|
||||||
Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTCount, storageRESTLeafFile)...)
|
Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTCount, storageRESTLeafFile)...)
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodWalk).HandlerFunc(httpTraceHdrs(server.WalkHandler)).
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodWalk).HandlerFunc(httpTraceHdrs(server.WalkHandler)).
|
||||||
Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTMarkerPath, storageRESTRecursive, storageRESTLeafFile)...)
|
Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTMarkerPath, storageRESTRecursive, storageRESTLeafFile)...)
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodDeleteFile).HandlerFunc(httpTraceHdrs(server.DeleteFileHandler)).
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodDeleteFile).HandlerFunc(httpTraceHdrs(server.DeleteFileHandler)).
|
||||||
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
|
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodDeleteFileBulk).HandlerFunc(httpTraceHdrs(server.DeleteFileBulkHandler)).
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodDeleteFileBulk).HandlerFunc(httpTraceHdrs(server.DeleteFileBulkHandler)).
|
||||||
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
|
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
|
||||||
|
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodRenameFile).HandlerFunc(httpTraceHdrs(server.RenameFileHandler)).
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodRenameFile).HandlerFunc(httpTraceHdrs(server.RenameFileHandler)).
|
||||||
Queries(restQueries(storageRESTSrcVolume, storageRESTSrcPath, storageRESTDstVolume, storageRESTDstPath)...)
|
Queries(restQueries(storageRESTSrcVolume, storageRESTSrcPath, storageRESTDstVolume, storageRESTDstPath)...)
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodVerifyFile).HandlerFunc(httpTraceHdrs(server.VerifyFile)).
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodVerifyFile).HandlerFunc(httpTraceHdrs(server.VerifyFile)).
|
||||||
Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTBitrotAlgo, storageRESTLength)...)
|
Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTBitrotAlgo, storageRESTLength)...)
|
||||||
subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodGetInstanceID).HandlerFunc(httpTraceAll(server.GetInstanceID))
|
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodGetInstanceID).HandlerFunc(httpTraceAll(server.GetInstanceID))
|
||||||
}
|
}
|
||||||
|
|
||||||
router.NotFoundHandler = http.HandlerFunc(httpTraceAll(notFoundHandler))
|
router.NotFoundHandler = http.HandlerFunc(httpTraceAll(notFoundHandler))
|
||||||
|
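After this hunk every storage RPC route is registered under the versioned base path joined with the endpoint path, plus SlashSeparator plus the method name. A small sketch of the resulting path composition; the value of minioReservedBucketPath, the endpoint path, and the "diskinfo" method string are assumptions for illustration.

package main

import (
	"fmt"
	"path"
)

const (
	SlashSeparator          = "/"
	minioReservedBucketPath = "/minio" // assumed value of the reserved bucket path
	storageRESTVersion      = "v8"
	storageRESTPath         = minioReservedBucketPath + "/storage/" + storageRESTVersion + SlashSeparator
)

func main() {
	// e.g. a DiskInfo call registered under a single endpoint path.
	endpointPath := "/data1"
	fmt.Println(path.Join(storageRESTPath, endpointPath) + SlashSeparator + "diskinfo")
	// Output: /minio/storage/v8/data1/diskinfo
}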
@ -53,7 +53,7 @@ func registerSTSRouter(router *mux.Router) {
|
|||||||
sts := &stsAPIHandlers{}
|
sts := &stsAPIHandlers{}
|
||||||
|
|
||||||
// STS Router
|
// STS Router
|
||||||
stsRouter := router.NewRoute().PathPrefix("/").Subrouter()
|
stsRouter := router.NewRoute().PathPrefix(SlashSeparator).Subrouter()
|
||||||
|
|
||||||
// Assume roles with no JWT, handles AssumeRole.
|
// Assume roles with no JWT, handles AssumeRole.
|
||||||
stsRouter.Methods(http.MethodPost).MatcherFunc(func(r *http.Request, rm *mux.RouteMatch) bool {
|
stsRouter.Methods(http.MethodPost).MatcherFunc(func(r *http.Request, rm *mux.RouteMatch) bool {
|
||||||
|
@ -707,7 +707,7 @@ func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTi
|
|||||||
globalMinioDefaultRegion,
|
globalMinioDefaultRegion,
|
||||||
string(serviceS3),
|
string(serviceS3),
|
||||||
"aws4_request",
|
"aws4_request",
|
||||||
}, "/")
|
}, SlashSeparator)
|
||||||
|
|
||||||
stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n"
|
stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n"
|
||||||
stringToSign = stringToSign + scope + "\n"
|
stringToSign = stringToSign + scope + "\n"
|
||||||
@ -722,7 +722,7 @@ func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTi
|
|||||||
|
|
||||||
// final Authorization header
|
// final Authorization header
|
||||||
parts := []string{
|
parts := []string{
|
||||||
"AWS4-HMAC-SHA256" + " Credential=" + accessKey + "/" + scope,
|
"AWS4-HMAC-SHA256" + " Credential=" + accessKey + SlashSeparator + scope,
|
||||||
"SignedHeaders=" + signedHeaders,
|
"SignedHeaders=" + signedHeaders,
|
||||||
"Signature=" + signature,
|
"Signature=" + signature,
|
||||||
}
|
}
|
||||||
@ -787,7 +787,7 @@ func assembleStreamingChunks(req *http.Request, body io.ReadSeeker, chunkSize in
|
|||||||
regionStr,
|
regionStr,
|
||||||
string(serviceS3),
|
string(serviceS3),
|
||||||
"aws4_request",
|
"aws4_request",
|
||||||
}, "/")
|
}, SlashSeparator)
|
||||||
|
|
||||||
stringToSign := "AWS4-HMAC-SHA256-PAYLOAD" + "\n"
|
stringToSign := "AWS4-HMAC-SHA256-PAYLOAD" + "\n"
|
||||||
stringToSign = stringToSign + currTime.Format(iso8601Format) + "\n"
|
stringToSign = stringToSign + currTime.Format(iso8601Format) + "\n"
|
||||||
@ -1062,7 +1062,7 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error {
|
|||||||
region,
|
region,
|
||||||
string(serviceS3),
|
string(serviceS3),
|
||||||
"aws4_request",
|
"aws4_request",
|
||||||
}, "/")
|
}, SlashSeparator)
|
||||||
|
|
||||||
stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n"
|
stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n"
|
||||||
stringToSign = stringToSign + scope + "\n"
|
stringToSign = stringToSign + scope + "\n"
|
||||||
@ -1077,7 +1077,7 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error {
|
|||||||
|
|
||||||
// final Authorization header
|
// final Authorization header
|
||||||
parts := []string{
|
parts := []string{
|
||||||
"AWS4-HMAC-SHA256" + " Credential=" + accessKey + "/" + scope,
|
"AWS4-HMAC-SHA256" + " Credential=" + accessKey + SlashSeparator + scope,
|
||||||
"SignedHeaders=" + signedHeaders,
|
"SignedHeaders=" + signedHeaders,
|
||||||
"Signature=" + signature,
|
"Signature=" + signature,
|
||||||
}
|
}
|
||||||
@ -1089,7 +1089,7 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error {
|
|||||||
|
|
||||||
// getCredentialString generates a credential string.
|
// getCredentialString generates a credential string.
|
||||||
func getCredentialString(accessKeyID, location string, t time.Time) string {
|
func getCredentialString(accessKeyID, location string, t time.Time) string {
|
||||||
return accessKeyID + "/" + getScope(t, location)
|
return accessKeyID + SlashSeparator + getScope(t, location)
|
||||||
}
|
}
|
||||||
|
|
||||||
// getMD5HashBase64 returns MD5 hash in base64 encoding of given data.
|
// getMD5HashBase64 returns MD5 hash in base64 encoding of given data.
|
||||||
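getCredentialString above concatenates the access key and the signing scope with the same separator constant. A compact sketch of the full credential string it produces; the yyyymmdd layout value and the sample access key, region, and date are assumptions.

package main

import (
	"fmt"
	"strings"
	"time"
)

const SlashSeparator = "/"

// getCredentialString assembles the V4 credential: access key plus scope.
func getCredentialString(accessKeyID, location string, t time.Time) string {
	scope := strings.Join([]string{
		t.Format("20060102"), // assumed yyyymmdd layout
		location,
		"s3",
		"aws4_request",
	}, SlashSeparator)
	return accessKeyID + SlashSeparator + scope
}

func main() {
	fmt.Println(getCredentialString("AKIAEXAMPLE", "us-east-1",
		time.Date(2019, 8, 1, 0, 0, 0, 0, time.UTC)))
	// AKIAEXAMPLE/20190801/us-east-1/s3/aws4_request
}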
@ -1360,9 +1360,9 @@ func (t *EOFWriter) Write(p []byte) (n int, err error) {
|
|||||||
|
|
||||||
// construct URL for http requests for bucket operations.
|
// construct URL for http requests for bucket operations.
|
||||||
func makeTestTargetURL(endPoint, bucketName, objectName string, queryValues url.Values) string {
|
func makeTestTargetURL(endPoint, bucketName, objectName string, queryValues url.Values) string {
|
||||||
urlStr := endPoint + "/"
|
urlStr := endPoint + SlashSeparator
|
||||||
if bucketName != "" {
|
if bucketName != "" {
|
||||||
urlStr = urlStr + bucketName + "/"
|
urlStr = urlStr + bucketName + SlashSeparator
|
||||||
}
|
}
|
||||||
if objectName != "" {
|
if objectName != "" {
|
||||||
urlStr = urlStr + s3utils.EncodePath(objectName)
|
urlStr = urlStr + s3utils.EncodePath(objectName)
|
||||||
@ -2142,7 +2142,7 @@ func registerAPIFunctions(muxRouter *mux.Router, objLayer ObjectLayer, apiFuncti
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
// API Router.
|
// API Router.
|
||||||
apiRouter := muxRouter.PathPrefix("/").Subrouter()
|
apiRouter := muxRouter.PathPrefix(SlashSeparator).Subrouter()
|
||||||
// Bucket router.
|
// Bucket router.
|
||||||
bucketRouter := apiRouter.PathPrefix("/{bucket}").Subrouter()
|
bucketRouter := apiRouter.PathPrefix("/{bucket}").Subrouter()
|
||||||
|
|
||||||
|
@ -67,10 +67,10 @@ func doTreeWalk(ctx context.Context, bucket, prefixDir, entryPrefixMatch, marker
|
|||||||
var markerBase, markerDir string
|
var markerBase, markerDir string
|
||||||
if marker != "" {
|
if marker != "" {
|
||||||
// Ex: if marker="four/five.txt", markerDir="four/" markerBase="five.txt"
|
// Ex: if marker="four/five.txt", markerDir="four/" markerBase="five.txt"
|
||||||
markerSplit := strings.SplitN(marker, slashSeparator, 2)
|
markerSplit := strings.SplitN(marker, SlashSeparator, 2)
|
||||||
markerDir = markerSplit[0]
|
markerDir = markerSplit[0]
|
||||||
if len(markerSplit) == 2 {
|
if len(markerSplit) == 2 {
|
||||||
markerDir += slashSeparator
|
markerDir += SlashSeparator
|
||||||
markerBase = markerSplit[1]
|
markerBase = markerSplit[1]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
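The marker handling above splits only on the first separator, so a marker such as "four/five.txt" becomes markerDir "four/" and markerBase "five.txt", exactly as the comment in the hunk describes. A standalone sketch of that split:

package main

import (
	"fmt"
	"strings"
)

const SlashSeparator = "/"

// splitMarker mirrors the tree-walk logic: the part before the first
// slash becomes the directory component, the remainder the base name.
func splitMarker(marker string) (markerDir, markerBase string) {
	parts := strings.SplitN(marker, SlashSeparator, 2)
	markerDir = parts[0]
	if len(parts) == 2 {
		markerDir += SlashSeparator
		markerBase = parts[1]
	}
	return markerDir, markerBase
}

func main() {
	fmt.Println(splitMarker("four/five.txt")) // four/ five.txt
}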
@ -95,7 +95,7 @@ func doTreeWalk(ctx context.Context, bucket, prefixDir, entryPrefixMatch, marker
|
|||||||
|
|
||||||
for i, entry := range entries {
|
for i, entry := range entries {
|
||||||
pentry := pathJoin(prefixDir, entry)
|
pentry := pathJoin(prefixDir, entry)
|
||||||
isDir := hasSuffix(pentry, slashSeparator)
|
isDir := hasSuffix(pentry, SlashSeparator)
|
||||||
|
|
||||||
if i == 0 && markerDir == entry {
|
if i == 0 && markerDir == entry {
|
||||||
if !recursive {
|
if !recursive {
|
||||||
@ -165,7 +165,7 @@ func startTreeWalk(ctx context.Context, bucket, prefix, marker string, recursive
|
|||||||
resultCh := make(chan TreeWalkResult, maxObjectList)
|
resultCh := make(chan TreeWalkResult, maxObjectList)
|
||||||
entryPrefixMatch := prefix
|
entryPrefixMatch := prefix
|
||||||
prefixDir := ""
|
prefixDir := ""
|
||||||
lastIndex := strings.LastIndex(prefix, slashSeparator)
|
lastIndex := strings.LastIndex(prefix, SlashSeparator)
|
||||||
if lastIndex != -1 {
|
if lastIndex != -1 {
|
||||||
entryPrefixMatch = prefix[lastIndex+1:]
|
entryPrefixMatch = prefix[lastIndex+1:]
|
||||||
prefixDir = prefix[:lastIndex+1]
|
prefixDir = prefix[:lastIndex+1]
|
||||||
|
@ -72,7 +72,7 @@ EXAMPLES:
|
|||||||
const (
|
const (
|
||||||
minioReleaseTagTimeLayout = "2006-01-02T15-04-05Z"
|
minioReleaseTagTimeLayout = "2006-01-02T15-04-05Z"
|
||||||
minioOSARCH = runtime.GOOS + "-" + runtime.GOARCH
|
minioOSARCH = runtime.GOOS + "-" + runtime.GOARCH
|
||||||
minioReleaseURL = "https://dl.min.io/server/minio/release/" + minioOSARCH + "/"
|
minioReleaseURL = "https://dl.min.io/server/minio/release/" + minioOSARCH + SlashSeparator
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -81,16 +81,16 @@ func request2BucketObjectName(r *http.Request) (bucketName, objectName string) {
|
|||||||
|
|
||||||
// Convert url path into bucket and object name.
|
// Convert url path into bucket and object name.
|
||||||
func urlPath2BucketObjectName(path string) (bucketName, objectName string) {
|
func urlPath2BucketObjectName(path string) (bucketName, objectName string) {
|
||||||
if path == "" || path == slashSeparator {
|
if path == "" || path == SlashSeparator {
|
||||||
return "", ""
|
return "", ""
|
||||||
}
|
}
|
||||||
|
|
||||||
// Trim any preceding slash separator.
|
// Trim any preceding slash separator.
|
||||||
urlPath := strings.TrimPrefix(path, slashSeparator)
|
urlPath := strings.TrimPrefix(path, SlashSeparator)
|
||||||
|
|
||||||
// Split urlpath using slash separator into a given number of
|
// Split urlpath using slash separator into a given number of
|
||||||
// expected tokens.
|
// expected tokens.
|
||||||
tokens := strings.SplitN(urlPath, slashSeparator, 2)
|
tokens := strings.SplitN(urlPath, SlashSeparator, 2)
|
||||||
bucketName = tokens[0]
|
bucketName = tokens[0]
|
||||||
if len(tokens) == 2 {
|
if len(tokens) == 2 {
|
||||||
objectName = tokens[1]
|
objectName = tokens[1]
|
||||||
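With the leading separator trimmed and a two-way split, a request path such as "/mybucket/photos/2019.jpg" maps to bucket "mybucket" and object "photos/2019.jpg". The sketch below restates the function from the hunk in a runnable form; the example path is illustrative.

package main

import (
	"fmt"
	"strings"
)

const SlashSeparator = "/"

// urlPath2BucketObjectName converts a URL path into bucket and object names.
func urlPath2BucketObjectName(path string) (bucket, object string) {
	if path == "" || path == SlashSeparator {
		return "", ""
	}
	tokens := strings.SplitN(strings.TrimPrefix(path, SlashSeparator), SlashSeparator, 2)
	bucket = tokens[0]
	if len(tokens) == 2 {
		object = tokens[1]
	}
	return bucket, object
}

func main() {
	fmt.Println(urlPath2BucketObjectName("/mybucket/photos/2019.jpg")) // mybucket photos/2019.jpg
}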
|
@ -148,7 +148,7 @@ func TestURL2BucketObjectName(t *testing.T) {
|
|||||||
// Test case 2 where url only has separator.
|
// Test case 2 where url only has separator.
|
||||||
{
|
{
|
||||||
u: &url.URL{
|
u: &url.URL{
|
||||||
Path: "/",
|
Path: SlashSeparator,
|
||||||
},
|
},
|
||||||
bucket: "",
|
bucket: "",
|
||||||
object: "",
|
object: "",
|
||||||
|
@ -315,7 +315,7 @@ func (web *webAPIHandlers) ListBuckets(r *http.Request, args *WebGenericArgs, re
|
|||||||
r.Header.Set("prefix", "")
|
r.Header.Set("prefix", "")
|
||||||
|
|
||||||
// Set delimiter value for "s3:delimiter" policy conditionals.
|
// Set delimiter value for "s3:delimiter" policy conditionals.
|
||||||
r.Header.Set("delimiter", slashSeparator)
|
r.Header.Set("delimiter", SlashSeparator)
|
||||||
|
|
||||||
// If etcd, dns federation configured list buckets from etcd.
|
// If etcd, dns federation configured list buckets from etcd.
|
||||||
if globalDNSConfig != nil {
|
if globalDNSConfig != nil {
|
||||||
@ -429,7 +429,7 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r
|
|||||||
nextMarker := ""
|
nextMarker := ""
|
||||||
// Fetch all the objects
|
// Fetch all the objects
|
||||||
for {
|
for {
|
||||||
result, err := core.ListObjects(args.BucketName, args.Prefix, nextMarker, slashSeparator, 1000)
|
result, err := core.ListObjects(args.BucketName, args.Prefix, nextMarker, SlashSeparator, 1000)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return toJSONError(ctx, err, args.BucketName)
|
return toJSONError(ctx, err, args.BucketName)
|
||||||
}
|
}
|
||||||
@ -464,7 +464,7 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r
|
|||||||
r.Header.Set("prefix", args.Prefix)
|
r.Header.Set("prefix", args.Prefix)
|
||||||
|
|
||||||
// Set delimiter value for "s3:delimiter" policy conditionals.
|
// Set delimiter value for "s3:delimiter" policy conditionals.
|
||||||
r.Header.Set("delimiter", slashSeparator)
|
r.Header.Set("delimiter", SlashSeparator)
|
||||||
|
|
||||||
// Check if anonymous (non-owner) has access to download objects.
|
// Check if anonymous (non-owner) has access to download objects.
|
||||||
readable := globalPolicySys.IsAllowed(policy.Args{
|
readable := globalPolicySys.IsAllowed(policy.Args{
|
||||||
@ -480,7 +480,7 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r
|
|||||||
BucketName: args.BucketName,
|
BucketName: args.BucketName,
|
||||||
ConditionValues: getConditionValues(r, "", ""),
|
ConditionValues: getConditionValues(r, "", ""),
|
||||||
IsOwner: false,
|
IsOwner: false,
|
||||||
ObjectName: args.Prefix + "/",
|
ObjectName: args.Prefix + SlashSeparator,
|
||||||
})
|
})
|
||||||
|
|
||||||
reply.Writable = writable
|
reply.Writable = writable
|
||||||
@ -503,7 +503,7 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r
|
|||||||
r.Header.Set("prefix", args.Prefix)
|
r.Header.Set("prefix", args.Prefix)
|
||||||
|
|
||||||
// Set delimiter value for "s3:delimiter" policy conditionals.
|
// Set delimiter value for "s3:delimiter" policy conditionals.
|
||||||
r.Header.Set("delimiter", slashSeparator)
|
r.Header.Set("delimiter", SlashSeparator)
|
||||||
|
|
||||||
readable := globalIAMSys.IsAllowed(iampolicy.Args{
|
readable := globalIAMSys.IsAllowed(iampolicy.Args{
|
||||||
AccountName: claims.Subject,
|
AccountName: claims.Subject,
|
||||||
@ -519,7 +519,7 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r
|
|||||||
BucketName: args.BucketName,
|
BucketName: args.BucketName,
|
||||||
ConditionValues: getConditionValues(r, "", claims.Subject),
|
ConditionValues: getConditionValues(r, "", claims.Subject),
|
||||||
IsOwner: owner,
|
IsOwner: owner,
|
||||||
ObjectName: args.Prefix + "/",
|
ObjectName: args.Prefix + SlashSeparator,
|
||||||
})
|
})
|
||||||
|
|
||||||
reply.Writable = writable
|
reply.Writable = writable
|
||||||
@ -541,7 +541,7 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r
|
|||||||
nextMarker := ""
|
nextMarker := ""
|
||||||
// Fetch all the objects
|
// Fetch all the objects
|
||||||
for {
|
for {
|
||||||
lo, err := listObjects(ctx, args.BucketName, args.Prefix, nextMarker, slashSeparator, 1000)
|
lo, err := listObjects(ctx, args.BucketName, args.Prefix, nextMarker, SlashSeparator, 1000)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return &json2.Error{Message: err.Error()}
|
return &json2.Error{Message: err.Error()}
|
||||||
}
|
}
|
||||||
@ -671,7 +671,7 @@ func (web *webAPIHandlers) RemoveObject(r *http.Request, args *RemoveObjectArgs,
|
|||||||
next:
|
next:
|
||||||
for _, objectName := range args.Objects {
|
for _, objectName := range args.Objects {
|
||||||
// If not a directory, remove the object.
|
// If not a directory, remove the object.
|
||||||
if !hasSuffix(objectName, slashSeparator) && objectName != "" {
|
if !hasSuffix(objectName, SlashSeparator) && objectName != "" {
|
||||||
// Deny if WORM is enabled
|
// Deny if WORM is enabled
|
||||||
if globalWORMEnabled {
|
if globalWORMEnabled {
|
||||||
if _, err = objectAPI.GetObjectInfo(ctx, args.BucketName, objectName, ObjectOptions{}); err == nil {
|
if _, err = objectAPI.GetObjectInfo(ctx, args.BucketName, objectName, ObjectOptions{}); err == nil {
|
||||||
@ -1034,7 +1034,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
if objectAPI.IsEncryptionSupported() {
|
if objectAPI.IsEncryptionSupported() {
|
||||||
if hasServerSideEncryptionHeader(r.Header) && !hasSuffix(object, slashSeparator) { // handle SSE requests
|
if hasServerSideEncryptionHeader(r.Header) && !hasSuffix(object, SlashSeparator) { // handle SSE requests
|
||||||
rawReader := hashReader
|
rawReader := hashReader
|
||||||
var objectEncryptionKey []byte
|
var objectEncryptionKey []byte
|
||||||
reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata)
|
reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata)
|
||||||
@ -1436,7 +1436,7 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if !hasSuffix(object, slashSeparator) {
|
if !hasSuffix(object, SlashSeparator) {
|
||||||
// If not a directory, compress the file and write it to response.
|
// If not a directory, compress the file and write it to response.
|
||||||
err := zipit(pathJoin(args.Prefix, object))
|
err := zipit(pathJoin(args.Prefix, object))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -1873,7 +1873,7 @@ func presignedGet(host, bucket, object string, expiry int64, creds auth.Credenti
|
|||||||
query.Set(xhttp.AmzSignedHeaders, "host")
|
query.Set(xhttp.AmzSignedHeaders, "host")
|
||||||
queryStr := s3utils.QueryEncode(query)
|
queryStr := s3utils.QueryEncode(query)
|
||||||
|
|
||||||
path := "/" + path.Join(bucket, object)
|
path := SlashSeparator + path.Join(bucket, object)
|
||||||
|
|
||||||
// "host" is the only header required to be signed for Presigned URLs.
|
// "host" is the only header required to be signed for Presigned URLs.
|
||||||
extractedSignedHeaders := make(http.Header)
|
extractedSignedHeaders := make(http.Header)
|
||||||
|
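presignedGet above prefixes path.Join(bucket, object) with SlashSeparator because path.Join cleans the joined path but does not add a leading slash when both components are relative. A tiny illustration, with sample bucket and object names assumed:

package main

import (
	"fmt"
	"path"
)

const SlashSeparator = "/"

func main() {
	bucket, object := "mybucket", "photos/2019.jpg"
	// path.Join alone yields "mybucket/photos/2019.jpg"; the request path
	// used for signing needs the leading separator, hence the explicit prefix.
	fmt.Println(SlashSeparator + path.Join(bucket, object))
	// Output: /mybucket/photos/2019.jpg
}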
@ -824,7 +824,7 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler
|
|||||||
|
|
||||||
test := func(token string, sendContentLength bool) int {
|
test := func(token string, sendContentLength bool) int {
|
||||||
rec := httptest.NewRecorder()
|
rec := httptest.NewRecorder()
|
||||||
req, rErr := http.NewRequest("PUT", "/minio/upload/"+bucketName+"/"+objectName, nil)
|
req, rErr := http.NewRequest("PUT", "/minio/upload/"+bucketName+SlashSeparator+objectName, nil)
|
||||||
if rErr != nil {
|
if rErr != nil {
|
||||||
t.Fatalf("Cannot create upload request, %v", rErr)
|
t.Fatalf("Cannot create upload request, %v", rErr)
|
||||||
}
|
}
|
||||||
@ -926,7 +926,7 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl
|
|||||||
|
|
||||||
test := func(token string) (int, []byte) {
|
test := func(token string) (int, []byte) {
|
||||||
rec := httptest.NewRecorder()
|
rec := httptest.NewRecorder()
|
||||||
path := "/minio/download/" + bucketName + "/" + objectName + "?token="
|
path := "/minio/download/" + bucketName + SlashSeparator + objectName + "?token="
|
||||||
if token != "" {
|
if token != "" {
|
||||||
path = path + token
|
path = path + token
|
||||||
}
|
}
|
||||||
|
@ -40,7 +40,7 @@ type indexHandler struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (h indexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
func (h indexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||||
r.URL.Path = minioReservedBucketPath + "/"
|
r.URL.Path = minioReservedBucketPath + SlashSeparator
|
||||||
h.handler.ServeHTTP(w, r)
|
h.handler.ServeHTTP(w, r)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1027,7 +1027,7 @@ func (s *xlSets) listObjectsNonSlash(ctx context.Context, bucket, prefix, marker
|
|||||||
// walked and merged at this layer. Resulting value through the merge process sends
|
// walked and merged at this layer. Resulting value through the merge process sends
|
||||||
// the data in lexically sorted order.
|
// the data in lexically sorted order.
|
||||||
func (s *xlSets) listObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int, heal bool) (loi ListObjectsInfo, err error) {
|
func (s *xlSets) listObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int, heal bool) (loi ListObjectsInfo, err error) {
|
||||||
if delimiter != slashSeparator && delimiter != "" {
|
if delimiter != SlashSeparator && delimiter != "" {
|
||||||
// "heal" option passed can be ignored as the heal-listing does not send non-standard delimiter.
|
// "heal" option passed can be ignored as the heal-listing does not send non-standard delimiter.
|
||||||
return s.listObjectsNonSlash(ctx, bucket, prefix, marker, delimiter, maxKeys)
|
return s.listObjectsNonSlash(ctx, bucket, prefix, marker, delimiter, maxKeys)
|
||||||
}
|
}
|
||||||
@ -1054,7 +1054,7 @@ func (s *xlSets) listObjects(ctx context.Context, bucket, prefix, marker, delimi
|
|||||||
// along // with the prefix. On a flat namespace with 'prefix'
|
// along // with the prefix. On a flat namespace with 'prefix'
|
||||||
// as '/' we don't have any entries, since all the keys are
|
// as '/' we don't have any entries, since all the keys are
|
||||||
// of form 'keyName/...'
|
// of form 'keyName/...'
|
||||||
if delimiter == slashSeparator && prefix == slashSeparator {
|
if delimiter == SlashSeparator && prefix == SlashSeparator {
|
||||||
return loi, nil
|
return loi, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1065,7 +1065,7 @@ func (s *xlSets) listObjects(ctx context.Context, bucket, prefix, marker, delimi
|
|||||||
|
|
||||||
// Default is recursive, if delimiter is set then list non recursive.
|
// Default is recursive, if delimiter is set then list non recursive.
|
||||||
recursive := true
|
recursive := true
|
||||||
if delimiter == slashSeparator {
|
if delimiter == SlashSeparator {
|
||||||
recursive = false
|
recursive = false
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1092,7 +1092,7 @@ func (s *xlSets) listObjects(ctx context.Context, bucket, prefix, marker, delimi
|
|||||||
|
|
||||||
for _, entry := range entries.Files {
|
for _, entry := range entries.Files {
|
||||||
var objInfo ObjectInfo
|
var objInfo ObjectInfo
|
||||||
if hasSuffix(entry.Name, slashSeparator) {
|
if hasSuffix(entry.Name, SlashSeparator) {
|
||||||
if !recursive {
|
if !recursive {
|
||||||
loi.Prefixes = append(loi.Prefixes, entry.Name)
|
loi.Prefixes = append(loi.Prefixes, entry.Name)
|
||||||
continue
|
continue
|
||||||
|
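Throughout the listing hunks, a delimiter equal to SlashSeparator switches the walk to a shallow, directory-style listing: entries ending in the separator are reported as common prefixes instead of objects. The toy sketch below shows that grouping behaviour on plain strings; it is independent of the MinIO types and is only meant to illustrate the delimiter semantics.

package main

import (
	"fmt"
	"strings"
)

const SlashSeparator = "/"

// groupByPrefix emulates delimiter-based listing: keys with a slash after
// the prefix are collapsed into a single common-prefix entry.
func groupByPrefix(keys []string, prefix string) (objects, prefixes []string) {
	seen := map[string]bool{}
	for _, k := range keys {
		if !strings.HasPrefix(k, prefix) {
			continue
		}
		rest := strings.TrimPrefix(k, prefix)
		if i := strings.Index(rest, SlashSeparator); i >= 0 {
			p := prefix + rest[:i+1]
			if !seen[p] {
				seen[p] = true
				prefixes = append(prefixes, p)
			}
			continue
		}
		objects = append(objects, k)
	}
	return objects, prefixes
}

func main() {
	keys := []string{"photos/2019/jan.jpg", "photos/2019/feb.jpg", "photos/readme.txt"}
	fmt.Println(groupByPrefix(keys, "photos/"))
	// [photos/readme.txt] [photos/2019/]
}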
@ -37,7 +37,7 @@ func (xl xlObjects) getLoadBalancedDisks() (disks []StorageAPI) {
|
|||||||
func (xl xlObjects) parentDirIsObject(ctx context.Context, bucket, parent string) bool {
|
func (xl xlObjects) parentDirIsObject(ctx context.Context, bucket, parent string) bool {
|
||||||
var isParentDirObject func(string) bool
|
var isParentDirObject func(string) bool
|
||||||
isParentDirObject = func(p string) bool {
|
isParentDirObject = func(p string) bool {
|
||||||
if p == "." || p == "/" {
|
if p == "." || p == SlashSeparator {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if xl.isObject(bucket, p) {
|
if xl.isObject(bucket, p) {
|
||||||
|
@ -72,7 +72,7 @@ func TestXLParentDirIsObject(t *testing.T) {
|
|||||||
// Should not cause infinite loop.
|
// Should not cause infinite loop.
|
||||||
{
|
{
|
||||||
parentIsObject: false,
|
parentIsObject: false,
|
||||||
objectName: "/",
|
objectName: SlashSeparator,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
parentIsObject: false,
|
parentIsObject: false,
|
||||||
|
@ -697,7 +697,7 @@ func (xl xlObjects) HealObject(ctx context.Context, bucket, object string, dryRu
|
|||||||
healCtx := logger.SetReqInfo(context.Background(), newReqInfo)
|
healCtx := logger.SetReqInfo(context.Background(), newReqInfo)
|
||||||
|
|
||||||
// Healing directories handle it separately.
|
// Healing directories handle it separately.
|
||||||
if hasSuffix(object, slashSeparator) {
|
if hasSuffix(object, SlashSeparator) {
|
||||||
return xl.healObjectDir(healCtx, bucket, object, dryRun)
|
return xl.healObjectDir(healCtx, bucket, object, dryRun)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -63,7 +63,7 @@ func listDirFactory(ctx context.Context, disks ...StorageAPI) ListDirFunc {
|
|||||||
func (xl xlObjects) listObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
|
func (xl xlObjects) listObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
|
||||||
// Default is recursive, if delimiter is set then list non recursive.
|
// Default is recursive, if delimiter is set then list non recursive.
|
||||||
recursive := true
|
recursive := true
|
||||||
if delimiter == slashSeparator {
|
if delimiter == SlashSeparator {
|
||||||
recursive = false
|
recursive = false
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -87,7 +87,7 @@ func (xl xlObjects) listObjects(ctx context.Context, bucket, prefix, marker, del
|
|||||||
}
|
}
|
||||||
entry := walkResult.entry
|
entry := walkResult.entry
|
||||||
var objInfo ObjectInfo
|
var objInfo ObjectInfo
|
||||||
if hasSuffix(entry, slashSeparator) {
|
if hasSuffix(entry, SlashSeparator) {
|
||||||
// Object name needs to be full path.
|
// Object name needs to be full path.
|
||||||
objInfo.Bucket = bucket
|
objInfo.Bucket = bucket
|
||||||
objInfo.Name = entry
|
objInfo.Name = entry
|
||||||
@ -125,7 +125,7 @@ func (xl xlObjects) listObjects(ctx context.Context, bucket, prefix, marker, del
|
|||||||
|
|
||||||
result := ListObjectsInfo{}
|
result := ListObjectsInfo{}
|
||||||
for _, objInfo := range objInfos {
|
for _, objInfo := range objInfos {
|
||||||
if objInfo.IsDir && delimiter == slashSeparator {
|
if objInfo.IsDir && delimiter == SlashSeparator {
|
||||||
result.Prefixes = append(result.Prefixes, objInfo.Name)
|
result.Prefixes = append(result.Prefixes, objInfo.Name)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -165,7 +165,7 @@ func (xl xlObjects) ListObjects(ctx context.Context, bucket, prefix, marker, del
|
|||||||
// since according to s3 spec we stop at the 'delimiter' along
|
// since according to s3 spec we stop at the 'delimiter' along
|
||||||
// with the prefix. On a flat namespace with 'prefix' as '/'
|
// with the prefix. On a flat namespace with 'prefix' as '/'
|
||||||
// we don't have any entries, since all the keys are of form 'keyName/...'
|
// we don't have any entries, since all the keys are of form 'keyName/...'
|
||||||
if delimiter == slashSeparator && prefix == slashSeparator {
|
if delimiter == SlashSeparator && prefix == SlashSeparator {
|
||||||
return loi, nil
|
return loi, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -169,7 +169,7 @@ func (xl xlObjects) ListMultipartUploads(ctx context.Context, bucket, object, ke
|
|||||||
return result, err
|
return result, err
|
||||||
}
|
}
|
||||||
for i := range uploadIDs {
|
for i := range uploadIDs {
|
||||||
uploadIDs[i] = strings.TrimSuffix(uploadIDs[i], slashSeparator)
|
uploadIDs[i] = strings.TrimSuffix(uploadIDs[i], SlashSeparator)
|
||||||
}
|
}
|
||||||
sort.Strings(uploadIDs)
|
sort.Strings(uploadIDs)
|
||||||
for _, uploadID := range uploadIDs {
|
for _, uploadID := range uploadIDs {
|
||||||
|
@ -147,7 +147,7 @@ func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, r
|
|||||||
|
|
||||||
// Handler directory request by returning a reader that
|
// Handler directory request by returning a reader that
|
||||||
// returns no bytes.
|
// returns no bytes.
|
||||||
if hasSuffix(object, slashSeparator) {
|
if hasSuffix(object, SlashSeparator) {
|
||||||
var objInfo ObjectInfo
|
var objInfo ObjectInfo
|
||||||
if objInfo, err = xl.getObjectInfoDir(ctx, bucket, object); err != nil {
|
if objInfo, err = xl.getObjectInfoDir(ctx, bucket, object); err != nil {
|
||||||
nsUnlocker()
|
nsUnlocker()
|
||||||
@ -216,7 +216,7 @@ func (xl xlObjects) getObject(ctx context.Context, bucket, object string, startO
|
|||||||
}
|
}
|
||||||
|
|
||||||
// If its a directory request, we return an empty body.
|
// If its a directory request, we return an empty body.
|
||||||
if hasSuffix(object, slashSeparator) {
|
if hasSuffix(object, SlashSeparator) {
|
||||||
_, err := writer.Write([]byte(""))
|
_, err := writer.Write([]byte(""))
|
||||||
logger.LogIf(ctx, err)
|
logger.LogIf(ctx, err)
|
||||||
return toObjectErr(err, bucket, object)
|
return toObjectErr(err, bucket, object)
|
||||||
@ -379,7 +379,7 @@ func (xl xlObjects) GetObjectInfo(ctx context.Context, bucket, object string, op
|
|||||||
return oi, err
|
return oi, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if hasSuffix(object, slashSeparator) {
|
if hasSuffix(object, SlashSeparator) {
|
||||||
info, err := xl.getObjectInfoDir(ctx, bucket, object)
|
info, err := xl.getObjectInfoDir(ctx, bucket, object)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return oi, toObjectErr(err, bucket, object)
|
return oi, toObjectErr(err, bucket, object)
|
||||||
@ -865,7 +865,7 @@ func (xl xlObjects) deleteObjects(ctx context.Context, bucket string, objects []
|
|||||||
}
|
}
|
||||||
|
|
||||||
for i, object := range objects {
|
for i, object := range objects {
|
||||||
isObjectDirs[i] = hasSuffix(object, slashSeparator)
|
isObjectDirs[i] = hasSuffix(object, SlashSeparator)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, object := range objects {
|
for i, object := range objects {
|
||||||
@ -972,7 +972,7 @@ func (xl xlObjects) DeleteObject(ctx context.Context, bucket, object string) (er
|
|||||||
}
|
}
|
||||||
|
|
||||||
var writeQuorum int
|
var writeQuorum int
|
||||||
var isObjectDir = hasSuffix(object, slashSeparator)
|
var isObjectDir = hasSuffix(object, SlashSeparator)
|
||||||
|
|
||||||
if isObjectDir {
|
if isObjectDir {
|
||||||
_, err = xl.getObjectInfoDir(ctx, bucket, object)
|
_, err = xl.getObjectInfoDir(ctx, bucket, object)
|
||||||
|
@ -180,7 +180,7 @@ func TestXLDeleteObjectsXLSet(t *testing.T) {
|
|||||||
switch statErr.(type) {
|
switch statErr.(type) {
|
||||||
case ObjectNotFound:
|
case ObjectNotFound:
|
||||||
default:
|
default:
|
||||||
t.Fatalf("Object %s is not removed", test.bucket+"/"+test.object)
|
t.Fatalf("Object %s is not removed", test.bucket+SlashSeparator+test.object)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|