Mirror of https://github.com/minio/minio.git

Support for "directory" objects (#10499)

commit 230fc0d186, parent 7f9498f43f
@@ -422,7 +422,7 @@ func TestHealEmptyDirectoryErasure(t *testing.T) {
 	z := obj.(*erasureZones)
 	er := z.zones[0].sets[0]
 	firstDisk := er.getDisks()[0]
-	err = firstDisk.DeleteFile(context.Background(), bucket, object)
+	err = firstDisk.DeleteVol(context.Background(), pathJoin(bucket, encodeDirObject(object)), true)
 	if err != nil {
 		t.Fatalf("Failed to delete a file - %v", err)
 	}
@@ -434,7 +434,7 @@ func TestHealEmptyDirectoryErasure(t *testing.T) {
 	}
 
 	// Check if the empty directory is restored in the first disk
-	_, err = firstDisk.StatVol(context.Background(), pathJoin(bucket, object))
+	_, err = firstDisk.StatVol(context.Background(), pathJoin(bucket, encodeDirObject(object)))
 	if err != nil {
 		t.Fatalf("Expected object to be present but stat failed - %v", err)
 	}
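Both test hunks above reflect the new on-disk naming: an empty "directory" object is no longer stored under its raw key, so the healing test deletes and stats the encoded name instead. A minimal sketch of the path the test now checks, using a local stand-in for encodeDirObject (the real helper is added under cmd/utils.go later in this diff) and path.Join in place of MinIO's pathJoin:

package main

import (
	"fmt"
	"path"
	"strings"
)

// Value copied from the globals hunk in this diff.
const globalDirSuffix = "__XLDIR__"

// encodeDirObject is a local stand-in mirroring the helper added in cmd/utils.go.
func encodeDirObject(object string) string {
	if strings.HasSuffix(object, "/") {
		return strings.TrimSuffix(object, "/") + globalDirSuffix
	}
	return object
}

func main() {
	bucket, object := "bucket", "empty-dir/"
	// The test now stats this encoded on-disk name rather than "bucket/empty-dir/".
	fmt.Println(path.Join(bucket, encodeDirObject(object))) // bucket/empty-dir__XLDIR__
}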
@@ -96,21 +96,14 @@ func (fi FileInfo) IsValid() bool {
 
 // ToObjectInfo - Converts metadata to object info.
 func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo {
-	if HasSuffix(object, SlashSeparator) {
-		return ObjectInfo{
-			Bucket: bucket,
-			Name:   object,
-			IsDir:  true,
-		}
-	}
-
+	object = decodeDirObject(object)
 	versionID := fi.VersionID
 	if globalBucketVersioningSys.Enabled(bucket) && versionID == "" {
 		versionID = nullVersionID
 	}
 
 	objInfo := ObjectInfo{
-		IsDir:     false,
+		IsDir:     HasSuffix(object, SlashSeparator),
 		Bucket:    bucket,
 		Name:      object,
 		VersionID: versionID,
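ToObjectInfo no longer special-cases trailing-slash names up front; it decodes the internal name and derives IsDir from the decoded form. A minimal sketch of that flow, with a trimmed-down ObjectInfo and local stand-ins for decodeDirObject and the separator constants (the real method also fills version, size, and metadata fields):

package main

import (
	"fmt"
	"strings"
)

const (
	slashSeparator  = "/"
	globalDirSuffix = "__XLDIR__"
)

type ObjectInfo struct {
	Bucket string
	Name   string
	IsDir  bool
}

func decodeDirObject(object string) string {
	if strings.HasSuffix(object, globalDirSuffix) {
		return strings.TrimSuffix(object, globalDirSuffix) + slashSeparator
	}
	return object
}

// toObjectInfo mirrors the hunk above: decode first, then derive IsDir from the
// trailing slash instead of returning a synthetic "directory" result early.
func toObjectInfo(bucket, object string) ObjectInfo {
	object = decodeDirObject(object)
	return ObjectInfo{
		Bucket: bucket,
		Name:   object,
		IsDir:  strings.HasSuffix(object, slashSeparator),
	}
}

func main() {
	fmt.Printf("%+v\n", toObjectInfo("bucket", "photos__XLDIR__")) // {Bucket:bucket Name:photos/ IsDir:true}
	fmt.Printf("%+v\n", toObjectInfo("bucket", "photos/cat.png"))  // IsDir:false
}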
@@ -460,6 +460,8 @@ func (z *erasureZones) MakeBucketWithLocation(ctx context.Context, bucket string
 }
 
 func (z *erasureZones) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
+	object = encodeDirObject(object)
+
 	for _, zone := range z.zones {
 		gr, err = zone.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
 		if err != nil {
@@ -477,6 +479,8 @@ func (z *erasureZones) GetObjectNInfo(ctx context.Context, bucket, object string
 }
 
 func (z *erasureZones) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
+	object = encodeDirObject(object)
+
 	for _, zone := range z.zones {
 		if err := zone.GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts); err != nil {
 			if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
@@ -493,6 +497,7 @@ func (z *erasureZones) GetObject(ctx context.Context, bucket, object string, sta
 }
 
 func (z *erasureZones) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
+	object = encodeDirObject(object)
 	for _, zone := range z.zones {
 		objInfo, err = zone.GetObjectInfo(ctx, bucket, object, opts)
 		if err != nil {
@@ -503,6 +508,7 @@ func (z *erasureZones) GetObjectInfo(ctx context.Context, bucket, object string,
 		}
 		return objInfo, nil
 	}
+	object = decodeDirObject(object)
 	if opts.VersionID != "" {
 		return objInfo, VersionNotFound{Bucket: bucket, Object: object, VersionID: opts.VersionID}
 	}
@@ -511,6 +517,8 @@ func (z *erasureZones) GetObjectInfo(ctx context.Context, bucket, object string,
 
 // PutObject - writes an object to least used erasure zone.
 func (z *erasureZones) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (ObjectInfo, error) {
+	object = encodeDirObject(object)
+
 	if z.SingleZone() {
 		return z.zones[0].PutObject(ctx, bucket, object, data, opts)
 	}
@@ -525,6 +533,8 @@ func (z *erasureZones) PutObject(ctx context.Context, bucket string, object stri
 }
 
 func (z *erasureZones) DeleteObject(ctx context.Context, bucket string, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
+	object = encodeDirObject(object)
+
 	if z.SingleZone() {
 		return z.zones[0].DeleteObject(ctx, bucket, object, opts)
 	}
@@ -545,6 +555,8 @@ func (z *erasureZones) DeleteObjects(ctx context.Context, bucket string, objects
 	dobjects := make([]DeletedObject, len(objects))
 	objSets := set.NewStringSet()
 	for i := range derrs {
+		objects[i].ObjectName = encodeDirObject(objects[i].ObjectName)
+
 		derrs[i] = checkDelObjArgs(ctx, bucket, objects[i].ObjectName)
 		objSets.Add(objects[i].ObjectName)
 	}
@@ -576,6 +588,9 @@ func (z *erasureZones) DeleteObjects(ctx context.Context, bucket string, objects
 }
 
 func (z *erasureZones) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
+	srcObject = encodeDirObject(srcObject)
+	dstObject = encodeDirObject(dstObject)
+
 	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
 
 	zoneIdx, err := z.getZoneIdx(ctx, dstBucket, dstObject, dstOpts, srcInfo.Size)
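The hunks above (and the HealObject and *ObjectTags hunks further down) all apply the same pattern: each erasureZones entry point first rewrites a trailing-slash key into its __XLDIR__ form, so every zone and set addresses a directory object by one canonical internal name. A hedged sketch of that boundary normalization; the function below is an illustrative stand-in, not MinIO's API:

package main

import (
	"fmt"
	"strings"
)

const globalDirSuffix = "__XLDIR__"

func encodeDirObject(object string) string {
	if strings.HasSuffix(object, "/") {
		return strings.TrimSuffix(object, "/") + globalDirSuffix
	}
	return object
}

// getObjectInfo stands in for any of the wrapped entry points.
func getObjectInfo(bucket, object string) string {
	object = encodeDirObject(object) // normalize once, at the API boundary
	return fmt.Sprintf("lookup %s/%s in each zone", bucket, object)
}

func main() {
	fmt.Println(getObjectInfo("bucket", "prefix/"))         // lookup bucket/prefix__XLDIR__ in each zone
	fmt.Println(getObjectInfo("bucket", "prefix/file.txt")) // regular keys pass through unchanged
}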
@@ -935,7 +950,16 @@ func lexicallySortedEntryZone(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileI
 				zoneIndex = i
 				continue
 			}
-			if zoneEntries[i][j].Name < lentry.Name {
+			str1 := zoneEntries[i][j].Name
+			str2 := lentry.Name
+			if HasSuffix(str1, globalDirSuffix) {
+				str1 = strings.TrimSuffix(str1, globalDirSuffix) + slashSeparator
+			}
+			if HasSuffix(str2, globalDirSuffix) {
+				str2 = strings.TrimSuffix(str2, globalDirSuffix) + slashSeparator
+			}
+
+			if str1 < str2 {
 				lentry = zoneEntries[i][j]
 				zoneIndex = i
 			}
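When merging per-zone listings, the lexical comparison now normalizes both candidate names back to their slash form before comparing, so a directory object sorts exactly where the equivalent prefix would. A small, self-contained illustration (normalize and less are local helpers, not MinIO functions):

package main

import (
	"fmt"
	"strings"
)

const (
	slashSeparator  = "/"
	globalDirSuffix = "__XLDIR__"
)

// normalize maps "name__XLDIR__" back to "name/" for comparison purposes.
func normalize(name string) string {
	if strings.HasSuffix(name, globalDirSuffix) {
		return strings.TrimSuffix(name, globalDirSuffix) + slashSeparator
	}
	return name
}

func less(a, b string) bool { return normalize(a) < normalize(b) }

func main() {
	// "photos__XLDIR__" is treated as "photos/", so it sorts before "photos0";
	// a plain byte comparison would have put it after, since '_' > '0'.
	fmt.Println(less("photos__XLDIR__", "photos0")) // true
	fmt.Println(less("photos0", "photos__XLDIR__")) // false
}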
@@ -968,6 +992,10 @@ func lexicallySortedEntryZone(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileI
 		}
 	}
 
+	if HasSuffix(lentry.Name, globalDirSuffix) {
+		lentry.Name = strings.TrimSuffix(lentry.Name, globalDirSuffix) + slashSeparator
+	}
+
 	return lentry, lexicallySortedEntryCount, zoneIndex, isTruncated
 }
 
@@ -1013,7 +1041,16 @@ func lexicallySortedEntryZoneVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneE
 				zoneIndex = i
 				continue
 			}
-			if zoneEntries[i][j].Name < lentry.Name {
+			str1 := zoneEntries[i][j].Name
+			str2 := lentry.Name
+			if HasSuffix(str1, globalDirSuffix) {
+				str1 = strings.TrimSuffix(str1, globalDirSuffix) + slashSeparator
+			}
+			if HasSuffix(str2, globalDirSuffix) {
+				str2 = strings.TrimSuffix(str2, globalDirSuffix) + slashSeparator
+			}
+
+			if str1 < str2 {
 				lentry = zoneEntries[i][j]
 				zoneIndex = i
 			}
@@ -1046,6 +1083,10 @@ func lexicallySortedEntryZoneVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneE
 		}
 	}
 
+	if HasSuffix(lentry.Name, globalDirSuffix) {
+		lentry.Name = strings.TrimSuffix(lentry.Name, globalDirSuffix) + slashSeparator
+	}
+
 	return lentry, lexicallySortedEntryCount, zoneIndex, isTruncated
 }
 
@@ -1058,6 +1099,7 @@ func mergeZonesEntriesVersionsCh(zonesEntryChs [][]FileInfoVersionsCh, maxKeys i
 		zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfoVersions, len(entryChs)))
 		zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs)))
 	}
+
 	for {
 		fi, quorumCount, zoneIndex, ok := lexicallySortedEntryZoneVersions(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid)
 		if !ok {
@@ -1089,6 +1131,7 @@ func mergeZonesEntriesCh(zonesEntryChs [][]FileInfoCh, maxKeys int, zonesListTol
 		zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfo, len(entryChs)))
 		zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs)))
 	}
+	var prevEntry string
 	for {
 		fi, quorumCount, zoneIndex, ok := lexicallySortedEntryZone(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid)
 		if !ok {
@@ -1101,12 +1144,17 @@ func mergeZonesEntriesCh(zonesEntryChs [][]FileInfoCh, maxKeys int, zonesListTol
 			continue
 		}
 
+		if HasSuffix(fi.Name, slashSeparator) && fi.Name == prevEntry {
+			continue
+		}
+
 		entries.Files = append(entries.Files, fi)
 		i++
 		if i == maxKeys {
 			entries.IsTruncated = isTruncatedZones(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid)
 			break
 		}
+		prevEntry = fi.Name
 	}
 	return entries
 }
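With directory objects decoded back to "dir/" before they leave the merge loop, the same name can now arrive twice in a row, so mergeZonesEntriesCh tracks the previously emitted entry and skips consecutive duplicates that end in a slash. A standalone sketch of that dedup over a made-up, already-merged stream (the real loop works on FileInfo values and quorum counts):

package main

import (
	"fmt"
	"strings"
)

func main() {
	merged := []string{"a/", "a/", "a/file.txt", "b/"}
	var out []string
	var prevEntry string
	for _, name := range merged {
		if strings.HasSuffix(name, "/") && name == prevEntry {
			continue // skip the duplicate directory entry
		}
		out = append(out, name)
		prevEntry = name
	}
	fmt.Println(out) // [a/ a/file.txt b/]
}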
@@ -1836,6 +1884,8 @@ func (z *erasureZones) HealObjects(ctx context.Context, bucket, prefix string, o
 }
 
 func (z *erasureZones) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
+	object = encodeDirObject(object)
+
 	lk := z.NewNSLock(ctx, bucket, object)
 	if bucket == minioMetaBucket {
 		// For .minio.sys bucket heals we should hold write locks.
@@ -1956,6 +2006,7 @@ func (z *erasureZones) Health(ctx context.Context, opts HealthOptions) HealthRes
 
 	parityDrives := globalStorageClass.GetParityForSC(storageclass.STANDARD)
 	diskCount := z.SetDriveCount()
+
 	if parityDrives == 0 {
 		parityDrives = getDefaultParityBlocks(diskCount)
 	}
@@ -2019,6 +2070,7 @@ func (z *erasureZones) Health(ctx context.Context, opts HealthOptions) HealthRes
 
 // PutObjectTags - replace or add tags to an existing object
 func (z *erasureZones) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error {
+	object = encodeDirObject(object)
 	if z.SingleZone() {
 		return z.zones[0].PutObjectTags(ctx, bucket, object, tags, opts)
 	}
@@ -2048,6 +2100,7 @@ func (z *erasureZones) PutObjectTags(ctx context.Context, bucket, object string,
 
 // DeleteObjectTags - delete object tags from an existing object
 func (z *erasureZones) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error {
+	object = encodeDirObject(object)
 	if z.SingleZone() {
 		return z.zones[0].DeleteObjectTags(ctx, bucket, object, opts)
 	}
@@ -2076,6 +2129,7 @@ func (z *erasureZones) DeleteObjectTags(ctx context.Context, bucket, object stri
 
 // GetObjectTags - get object tags from an existing object
 func (z *erasureZones) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) {
+	object = encodeDirObject(object)
 	if z.SingleZone() {
 		return z.zones[0].GetObjectTags(ctx, bucket, object, opts)
 	}
@@ -63,6 +63,8 @@ const (
 	globalMinioModeErasure = "mode-server-xl"
 	globalMinioModeDistErasure = "mode-server-distributed-xl"
 	globalMinioModeGatewayPrefix = "mode-gateway-"
+	globalDirSuffix = "__XLDIR__"
+	globalDirSuffixWithSlash = globalDirSuffix + slashSeparator
 
 	// Add new global values here.
 )
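For reference, the two new constants and the naming convention they encode, reproduced as a runnable snippet; the last line shows the "__XLDIR__/" form that raw listDir output can contain (see the tree-walk changes below):

package main

import "fmt"

const (
	slashSeparator           = "/"
	globalDirSuffix          = "__XLDIR__"
	globalDirSuffixWithSlash = globalDirSuffix + slashSeparator
)

func main() {
	fmt.Println("client key:       " + "prefix" + slashSeparator)           // prefix/
	fmt.Println("stored as:        " + "prefix" + globalDirSuffix)          // prefix__XLDIR__
	fmt.Println("raw listDir form: " + "prefix" + globalDirSuffixWithSlash) // prefix__XLDIR__/
}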
@@ -22,12 +22,18 @@ import (
 	"fmt"
 	"io"
 	"path"
+	"strings"
 )
 
 // Converts underlying storage error. Convenience function written to
 // handle all cases where we have known types of errors returned by
 // underlying storage layer.
 func toObjectErr(err error, params ...string) error {
+	if len(params) > 1 {
+		if HasSuffix(params[1], globalDirSuffix) {
+			params[1] = strings.TrimSuffix(params[1], globalDirSuffix) + slashSeparator
+		}
+	}
 	switch err {
 	case errVolumeNotFound:
 		if len(params) >= 1 {
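Before wrapping a storage error, toObjectErr now maps the object parameter back from its internal __XLDIR__ form, so client-facing errors name "dir/" rather than the encoded key. A simplified sketch, assuming a placeholder ObjectNotFound type and showing only one branch of the real switch:

package main

import (
	"fmt"
	"strings"
)

const (
	slashSeparator  = "/"
	globalDirSuffix = "__XLDIR__"
)

type ObjectNotFound struct{ Bucket, Object string }

func (e ObjectNotFound) Error() string {
	return fmt.Sprintf("object not found: %s/%s", e.Bucket, e.Object)
}

func toObjectErr(err error, params ...string) error {
	if len(params) > 1 && strings.HasSuffix(params[1], globalDirSuffix) {
		params[1] = strings.TrimSuffix(params[1], globalDirSuffix) + slashSeparator
	}
	// Only the not-found branch is illustrated; the real function switches on
	// many storage-layer errors.
	return ObjectNotFound{Bucket: params[0], Object: params[1]}
}

func main() {
	fmt.Println(toObjectErr(fmt.Errorf("errFileNotFound"), "bucket", "prefix__XLDIR__"))
	// object not found: bucket/prefix/
}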
@@ -131,6 +131,7 @@ func TestServerSuite(t *testing.T) {
 		// Init and run test on ErasureSet backend.
 		{serverType: "ErasureSet", signer: signerV4},
 	}
+	globalCLIContext.StrictS3Compat = true
 	for i, testCase := range testCases {
 		t.Run(fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.serverType), func(t *testing.T) {
 			runAllTests(testCase, &check{t, testCase.serverType})
@@ -262,20 +263,6 @@ func (s *TestSuiteCommon) TestObjectDir(c *check) {
 	// assert the http response status code.
 	c.Assert(response.StatusCode, http.StatusOK)
 
-	request, err = newTestSignedRequest(http.MethodPut, getPutObjectURL(s.endPoint, bucketName, "my-object-directory/"),
-		0, nil, s.accessKey, s.secretKey, s.signer)
-	c.Assert(err, nil)
-
-	helloReader := bytes.NewReader([]byte("Hello, World"))
-	request.ContentLength = helloReader.Size()
-	request.Body = ioutil.NopCloser(helloReader)
-
-	// execute the HTTP request.
-	response, err = s.client.Do(request)
-
-	c.Assert(err, nil)
-	verifyError(c, response, "XMinioInvalidObjectName", "Object name contains unsupported characters.", http.StatusBadRequest)
-
 	request, err = newTestSignedRequest(http.MethodHead, getHeadObjectURL(s.endPoint, bucketName, "my-object-directory/"),
 		0, nil, s.accessKey, s.secretKey, s.signer)
 	c.Assert(err, nil)
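The deleted assertions above were testing that a PUT to a key ending in "/" with a body is rejected with XMinioInvalidObjectName; after this commit such a request creates a directory object instead. A hedged client-side sketch of the request shape using only net/http — the endpoint, bucket, and key are placeholders, and a real call against MinIO would also need AWS Signature V4 signing (the suite's newTestSignedRequest handles that in the tests):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Placeholder endpoint; a running, unauthenticated server is assumed for illustration.
	req, err := http.NewRequest(http.MethodPut,
		"http://127.0.0.1:9000/bucket/my-object-directory/", nil)
	if err != nil {
		panic(err)
	}
	req.ContentLength = 0 // zero-byte directory object

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	// Previously this shape of request returned 400 XMinioInvalidObjectName; after
	// this commit (with valid credentials) the server stores "my-object-directory__XLDIR__".
	fmt.Println("status:", resp.Status)
}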
@@ -57,6 +57,9 @@ func filterMatchingPrefix(entries []string, prefixEntry string) []string {
 // isLeaf should be done in listDir()
 func delayIsLeafCheck(entries []string) bool {
 	for i, entry := range entries {
+		if HasSuffix(entry, globalDirSuffixWithSlash) {
+			return false
+		}
 		if i == len(entries)-1 {
 			break
 		}
@@ -96,7 +99,20 @@ func filterListEntries(bucket, prefixDir string, entries []string, prefixEntry s
 	entries = filterMatchingPrefix(entries, prefixEntry)
 
 	// Listing needs to be sorted.
-	sort.Strings(entries)
+	sort.Slice(entries, func(i, j int) bool {
+		if !HasSuffix(entries[i], globalDirSuffixWithSlash) && !HasSuffix(entries[j], globalDirSuffixWithSlash) {
+			return entries[i] < entries[j]
+		}
+		first := entries[i]
+		second := entries[j]
+		if HasSuffix(first, globalDirSuffixWithSlash) {
+			first = strings.TrimSuffix(first, globalDirSuffixWithSlash) + slashSeparator
+		}
+		if HasSuffix(second, globalDirSuffixWithSlash) {
+			second = strings.TrimSuffix(second, globalDirSuffixWithSlash) + slashSeparator
+		}
+		return first < second
+	})
 
 	// Can isLeaf() check be delayed till when it has to be sent down the
 	// TreeWalkResult channel?
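The comparator above keeps directory objects adjacent to the prefixes they represent by sorting "name__XLDIR__/" as if it were "name/". A runnable illustration over made-up entries; without the normalization, the underscore suffix would push the directory object after keys such as "photos0/":

package main

import (
	"fmt"
	"sort"
	"strings"
)

const (
	slashSeparator           = "/"
	globalDirSuffix          = "__XLDIR__"
	globalDirSuffixWithSlash = globalDirSuffix + slashSeparator
)

func main() {
	entries := []string{"photos0/", "photos__XLDIR__/", "photos1/", "readme.txt"}
	sort.Slice(entries, func(i, j int) bool {
		first, second := entries[i], entries[j]
		if strings.HasSuffix(first, globalDirSuffixWithSlash) {
			first = strings.TrimSuffix(first, globalDirSuffixWithSlash) + slashSeparator
		}
		if strings.HasSuffix(second, globalDirSuffixWithSlash) {
			second = strings.TrimSuffix(second, globalDirSuffixWithSlash) + slashSeparator
		}
		return first < second
	})
	fmt.Println(entries) // [photos__XLDIR__/ photos0/ photos1/ readme.txt]
}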
@@ -114,8 +130,23 @@ func filterListEntries(bucket, prefixDir string, entries []string, prefixEntry s
 
 	// Sort again after removing trailing "/" for objects as the previous sort
 	// does not hold good anymore.
-	sort.Strings(entries)
+	sort.Slice(entries, func(i, j int) bool {
+		if !HasSuffix(entries[i], globalDirSuffix) && !HasSuffix(entries[j], globalDirSuffix) {
+			return entries[i] < entries[j]
+		}
+		first := entries[i]
+		second := entries[j]
+		if HasSuffix(first, globalDirSuffix) {
+			first = strings.TrimSuffix(first, globalDirSuffix) + slashSeparator
+		}
+		if HasSuffix(second, globalDirSuffix) {
+			second = strings.TrimSuffix(second, globalDirSuffix) + slashSeparator
+		}
+		if first == second {
+			return HasSuffix(entries[i], globalDirSuffix)
+		}
+		return first < second
+	})
 	return entries, false
 }
 
@@ -169,10 +200,10 @@ func doTreeWalk(ctx context.Context, bucket, prefixDir, entryPrefixMatch, marker
 			entry = strings.TrimSuffix(entry, slashSeparator)
 			}
 		} else {
-			leaf = !strings.HasSuffix(entry, slashSeparator)
+			leaf = !HasSuffix(entry, slashSeparator)
 		}
 
-		if strings.HasSuffix(entry, slashSeparator) {
+		if HasSuffix(entry, slashSeparator) {
 			leafDir = isLeafDir(bucket, pathJoin(prefixDir, entry))
 		}
 
cmd/utils.go (17 additions)
@@ -737,3 +737,20 @@ func (t *timedValue) Invalidate() {
 	t.value = nil
 	t.mu.Unlock()
 }
+
+// On MinIO a directory object is stored as a regular object with "__XLDIR__" suffix.
+// For ex. "prefix/" is stored as "prefix__XLDIR__"
+func encodeDirObject(object string) string {
+	if HasSuffix(object, slashSeparator) {
+		return strings.TrimSuffix(object, slashSeparator) + globalDirSuffix
+	}
+	return object
+}
+
+// Reverse process of encodeDirObject()
+func decodeDirObject(object string) string {
+	if HasSuffix(object, globalDirSuffix) {
+		return strings.TrimSuffix(object, globalDirSuffix) + slashSeparator
+	}
+	return object
+}
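The two helpers, exercised end to end. This sketch substitutes strings.HasSuffix for MinIO's HasSuffix wrapper and hard-codes the separator constant; for the inputs shown the behavior is the same:

package main

import (
	"fmt"
	"strings"
)

const (
	slashSeparator  = "/"
	globalDirSuffix = "__XLDIR__"
)

func encodeDirObject(object string) string {
	if strings.HasSuffix(object, slashSeparator) {
		return strings.TrimSuffix(object, slashSeparator) + globalDirSuffix
	}
	return object
}

func decodeDirObject(object string) string {
	if strings.HasSuffix(object, globalDirSuffix) {
		return strings.TrimSuffix(object, globalDirSuffix) + slashSeparator
	}
	return object
}

func main() {
	for _, key := range []string{"prefix/", "prefix/obj.txt", "a/b/c/"} {
		enc := encodeDirObject(key)
		fmt.Printf("%s -> %s -> %s\n", key, enc, decodeDirObject(enc))
	}
	// prefix/ -> prefix__XLDIR__ -> prefix/
	// prefix/obj.txt -> prefix/obj.txt -> prefix/obj.txt
	// a/b/c/ -> a/b/c__XLDIR__ -> a/b/c/
}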