make sure listParts returns parts that are valid (#20390)

Harshavardhana 2024-09-06 02:42:21 -07:00 committed by GitHub
parent 241be9709c
commit b6b7cddc9c
8 changed files with 433 additions and 106 deletions


@@ -216,6 +216,7 @@ func (fi FileInfo) ReplicationInfoEquals(ofi FileInfo) bool {
}
// objectPartIndex - returns the index of matching object part number.
// Returns -1 if the part cannot be found.
func objectPartIndex(parts []ObjectPartInfo, partNumber int) int {
for i, part := range parts {
if partNumber == part.Number {
@@ -225,6 +226,17 @@ func objectPartIndex(parts []ObjectPartInfo, partNumber int) int {
return -1
}
// objectPartIndexNums returns the index of the specified part number.
// Returns -1 if the part cannot be found.
func objectPartIndexNums(parts []int, partNumber int) int {
for i, part := range parts {
if part != 0 && partNumber == part {
return i
}
}
return -1
}
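
For illustration, a minimal, self-contained sketch of how this helper can be used to resume a listing after a part-number marker; the part numbers and marker below are hypothetical, not taken from the change itself:

package main

import "fmt"

// Mirror of the helper added above: returns the index of partNumber in
// parts, or -1 when it is not present.
func objectPartIndexNums(parts []int, partNumber int) int {
	for i, part := range parts {
		if part != 0 && partNumber == part {
			return i
		}
	}
	return -1
}

func main() {
	partNums := []int{1, 3, 4} // hypothetical part numbers found in quorum
	marker := 3                // client-supplied part-number-marker

	// Resume the listing strictly after the marker.
	if idx := objectPartIndexNums(partNums, marker); idx != -1 {
		partNums = partNums[idx+1:]
	}
	fmt.Println(partNums) // prints [4]
}
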
// AddObjectPart - add a new object part in order.
func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize, actualSize int64, modTime time.Time, idx []byte, checksums map[string]string) {
partInfo := ObjectPartInfo{


@@ -795,6 +795,61 @@ func (er erasureObjects) GetMultipartInfo(ctx context.Context, bucket, object, u
return result, nil
}
func (er erasureObjects) listParts(ctx context.Context, onlineDisks []StorageAPI, partPath string, readQuorum int) ([]int, error) {
g := errgroup.WithNErrs(len(onlineDisks))
objectParts := make([][]string, len(onlineDisks))
// List uploaded parts from drives.
for index := range onlineDisks {
index := index
g.Go(func() (err error) {
if onlineDisks[index] == nil {
return errDiskNotFound
}
objectParts[index], err = onlineDisks[index].ListDir(ctx, minioMetaMultipartBucket, minioMetaMultipartBucket, partPath, -1)
return err
}, index)
}
if err := reduceReadQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, readQuorum); err != nil {
return nil, err
}
partQuorumMap := make(map[int]int)
for _, driveParts := range objectParts {
partsWithMetaCount := make(map[int]int, len(driveParts))
// part files can be either part.N or part.N.meta
for _, partPath := range driveParts {
var partNum int
if _, err := fmt.Sscanf(partPath, "part.%d", &partNum); err == nil {
partsWithMetaCount[partNum]++
continue
}
if _, err := fmt.Sscanf(partPath, "part.%d.meta", &partNum); err == nil {
partsWithMetaCount[partNum]++
}
}
// Include only part.N.meta files with corresponding part.N
for partNum, cnt := range partsWithMetaCount {
if cnt < 2 {
continue
}
partQuorumMap[partNum]++
}
}
var partNums []int
for partNum, count := range partQuorumMap {
if count < readQuorum {
continue
}
partNums = append(partNums, partNum)
}
sort.Ints(partNums)
return partNums, nil
}
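
As a rough sketch of the counting idea in listParts above (hypothetical per-drive listings, not the actual erasure layout): a part counts on a drive only when both part.N and part.N.meta are present, and it is returned only when at least readQuorum drives agree:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Hypothetical ListDir output from three drives; two drives lost part.2.
	perDrive := [][]string{
		{"part.1", "part.1.meta", "part.2", "part.2.meta"},
		{"part.1", "part.1.meta", "part.2.meta"},
		{"part.1", "part.1.meta", "part.2.meta"},
	}
	readQuorum := 2

	partQuorumMap := make(map[int]int)
	for _, driveParts := range perDrive {
		partsWithMetaCount := make(map[int]int)
		for _, name := range driveParts {
			var partNum int
			// "part.%d" matches both part.N and part.N.meta, so a complete
			// part contributes a count of 2 on a drive.
			if _, err := fmt.Sscanf(name, "part.%d", &partNum); err == nil {
				partsWithMetaCount[partNum]++
			}
		}
		for partNum, cnt := range partsWithMetaCount {
			if cnt >= 2 {
				partQuorumMap[partNum]++
			}
		}
	}

	var partNums []int
	for partNum, count := range partQuorumMap {
		if count >= readQuorum {
			partNums = append(partNums, partNum)
		}
	}
	sort.Ints(partNums)
	fmt.Println(partNums) // prints [1]: part 2 no longer reaches read quorum
}
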
// ListObjectParts - lists all previously uploaded parts for a given
// object and uploadID. Takes additional input of part-number-marker
// to indicate where the listing should begin from.
@@ -821,6 +876,14 @@ func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, up
}
uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)
if partNumberMarker < 0 {
partNumberMarker = 0
}
// Limit output to maxPartsList.
if maxParts > maxPartsList {
maxParts = maxPartsList
}
// Populate the result stub.
result.Bucket = bucket
@@ -831,126 +894,77 @@ func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, up
result.UserDefined = cloneMSS(fi.Metadata)
result.ChecksumAlgorithm = fi.Metadata[hash.MinIOMultipartChecksum]
if partNumberMarker < 0 {
partNumberMarker = 0
}
// Limit output to maxPartsList.
if maxParts > maxPartsList-partNumberMarker {
maxParts = maxPartsList - partNumberMarker
}
if maxParts == 0 {
return result, nil
}
onlineDisks := er.getDisks()
readQuorum := fi.ReadQuorum(er.defaultRQuorum())
// Read Part info for all parts
partPath := pathJoin(uploadIDPath, fi.DataDir) + "/"
req := ReadMultipleReq{
Bucket: minioMetaMultipartBucket,
Prefix: partPath,
MaxSize: 1 << 20, // Each part should realistically not be > 1MiB.
MaxResults: maxParts + 1,
MetadataOnly: true,
}
partPath := pathJoin(uploadIDPath, fi.DataDir) + SlashSeparator
start := partNumberMarker + 1
end := start + maxParts
// Parts are 1 based, so index 0 is part one, etc.
for i := start; i <= end; i++ {
req.Files = append(req.Files, fmt.Sprintf("part.%d.meta", i))
}
var disk StorageAPI
disks := er.getOnlineLocalDisks()
if len(disks) == 0 {
// using er.getOnlineLocalDisks() has one side-effect where
// on a pooled setup all disks are remote, add a fallback
disks = er.getOnlineDisks()
}
for _, disk = range disks {
if disk == nil {
continue
}
if !disk.IsOnline() {
continue
}
break
}
g := errgroup.WithNErrs(len(req.Files)).WithConcurrency(32)
partsInfo := make([]*ObjectPartInfo, len(req.Files))
for i, file := range req.Files {
file := file
partN := i + start
i := i
g.Go(func() error {
buf, err := disk.ReadAll(ctx, minioMetaMultipartBucket, pathJoin(partPath, file))
// List parts in quorum
partNums, err := er.listParts(ctx, onlineDisks, partPath, readQuorum)
if err != nil {
return err
// This means that fi.DataDir is not yet populated, so we
// return an empty response.
if errors.Is(err, errFileNotFound) {
return result, nil
}
return result, toObjectErr(err, bucket, object, uploadID)
}
pinfo := &ObjectPartInfo{}
_, err = pinfo.UnmarshalMsg(buf)
if len(partNums) == 0 {
return result, nil
}
start := objectPartIndexNums(partNums, partNumberMarker)
if start != -1 {
partNums = partNums[start+1:]
}
result.Parts = make([]PartInfo, 0, len(partNums))
partMetaPaths := make([]string, len(partNums))
for i, part := range partNums {
partMetaPaths[i] = pathJoin(partPath, fmt.Sprintf("part.%d.meta", part))
}
// Read parts in quorum
objParts, err := readParts(ctx, onlineDisks, minioMetaMultipartBucket, partMetaPaths,
partNums, readQuorum)
if err != nil {
return err
return result, toObjectErr(err, bucket, object, uploadID)
}
if partN != pinfo.Number {
return fmt.Errorf("part.%d.meta has incorrect corresponding part number: expected %d, got %d", partN, partN, pinfo.Number)
}
partsInfo[i] = pinfo
return nil
}, i)
}
g.Wait()
for _, part := range partsInfo {
if part != nil && part.Number != 0 && !part.ModTime.IsZero() {
fi.AddObjectPart(part.Number, part.ETag, part.Size, part.ActualSize, part.ModTime, part.Index, part.Checksums)
}
}
// Only parts with higher part numbers will be listed.
parts := fi.Parts
result.Parts = make([]PartInfo, 0, len(parts))
for _, part := range parts {
count := maxParts
for _, objPart := range objParts {
result.Parts = append(result.Parts, PartInfo{
PartNumber: part.Number,
ETag: part.ETag,
LastModified: part.ModTime,
ActualSize: part.ActualSize,
Size: part.Size,
ChecksumCRC32: part.Checksums["CRC32"],
ChecksumCRC32C: part.Checksums["CRC32C"],
ChecksumSHA1: part.Checksums["SHA1"],
ChecksumSHA256: part.Checksums["SHA256"],
PartNumber: objPart.Number,
LastModified: objPart.ModTime,
ETag: objPart.ETag,
Size: objPart.Size,
ActualSize: objPart.ActualSize,
ChecksumCRC32: objPart.Checksums["CRC32"],
ChecksumCRC32C: objPart.Checksums["CRC32C"],
ChecksumSHA1: objPart.Checksums["SHA1"],
ChecksumSHA256: objPart.Checksums["SHA256"],
})
if len(result.Parts) >= maxParts {
count--
if count == 0 {
break
}
}
// If listed entries are more than maxParts, we set IsTruncated as true.
if len(parts) > len(result.Parts) {
if len(objParts) > len(result.Parts) {
result.IsTruncated = true
// Make sure to fill next part number marker if IsTruncated is
// true for subsequent listing.
nextPartNumberMarker := result.Parts[len(result.Parts)-1].PartNumber
result.NextPartNumberMarker = nextPartNumberMarker
// Make sure to fill next part number marker if IsTruncated is true for subsequent listing.
result.NextPartNumberMarker = result.Parts[len(result.Parts)-1].PartNumber
}
return result, nil
}
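
A small sketch of the truncation behaviour above, using hypothetical part numbers: at most maxParts entries are returned, and NextPartNumberMarker is set to the last returned part so a follow-up call can resume after it (this mirrors the MaxParts=2 expectation in the new test below):

package main

import "fmt"

func main() {
	partNums := []int{1, 3, 4} // hypothetical parts available in quorum
	maxParts := 2

	var listed []int
	count := maxParts
	for _, n := range partNums {
		listed = append(listed, n)
		count--
		if count == 0 {
			break
		}
	}

	isTruncated := len(partNums) > len(listed)
	nextMarker := 0
	if isTruncated {
		nextMarker = listed[len(listed)-1]
	}
	fmt.Println(listed, isTruncated, nextMarker) // prints [1 3] true 3
}
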
func readParts(ctx context.Context, disks []StorageAPI, bucket string, partMetaPaths []string, partNumbers []int, readQuorum int) ([]*ObjectPartInfo, error) {
func readParts(ctx context.Context, disks []StorageAPI, bucket string, partMetaPaths []string, partNumbers []int, readQuorum int) ([]ObjectPartInfo, error) {
g := errgroup.WithNErrs(len(disks))
objectPartInfos := make([][]*ObjectPartInfo, len(disks))
@@ -970,7 +984,7 @@ func readParts(ctx context.Context, disks []StorageAPI, bucket string, partMetaP
return nil, err
}
partInfosInQuorum := make([]*ObjectPartInfo, len(partMetaPaths))
partInfosInQuorum := make([]ObjectPartInfo, len(partMetaPaths))
partMetaQuorumMap := make(map[string]int, len(partNumbers))
for pidx := range partMetaPaths {
var pinfos []*ObjectPartInfo
@@ -1016,22 +1030,22 @@ func readParts(ctx context.Context, disks []StorageAPI, bucket string, partMetaP
}
if pinfo != nil && pinfo.ETag != "" && partMetaQuorumMap[maxETag] >= readQuorum {
partInfosInQuorum[pidx] = pinfo
partInfosInQuorum[pidx] = *pinfo
continue
}
if partMetaQuorumMap[maxPartMeta] == len(disks) {
if pinfo != nil && pinfo.Error != "" {
partInfosInQuorum[pidx] = &ObjectPartInfo{Error: pinfo.Error}
partInfosInQuorum[pidx] = ObjectPartInfo{Error: pinfo.Error}
} else {
partInfosInQuorum[pidx] = &ObjectPartInfo{
partInfosInQuorum[pidx] = ObjectPartInfo{
Error: InvalidPart{
PartNumber: partNumbers[pidx],
}.Error(),
}
}
} else {
partInfosInQuorum[pidx] = &ObjectPartInfo{Error: errErasureReadQuorum.Error()}
partInfosInQuorum[pidx] = ObjectPartInfo{Error: errErasureReadQuorum.Error()}
}
}
return partInfosInQuorum, nil
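
Hypothetical sketch of the ETag-quorum vote used in readParts: for each part, the ETag decoded from each drive's part.N.meta is tallied, and the part is accepted only when some ETag reaches readQuorum; otherwise an error entry is recorded, as in the hunk above. The ETag values and drive count below are made up for illustration:

package main

import "fmt"

func main() {
	// Hypothetical ETags decoded from part.5.meta on five drives
	// (an empty string stands for an unreadable or missing meta file).
	etags := []string{"abc", "abc", "abc", "def", ""}
	readQuorum := 3

	votes := make(map[string]int)
	maxETag, maxCount := "", 0
	for _, etag := range etags {
		if etag == "" {
			continue
		}
		votes[etag]++
		if votes[etag] > maxCount {
			maxETag, maxCount = etag, votes[etag]
		}
	}

	if maxCount >= readQuorum {
		fmt.Printf("part.5: ETag %q agreed by %d drives\n", maxETag, maxCount)
	} else {
		fmt.Println("part.5: read quorum not met, reported as an error entry")
	}
}
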


@@ -1202,6 +1202,292 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
}
}
// Wrapper for calling TestListObjectPartsStale tests for both Erasure multiple disks and single node setup.
func TestListObjectPartsStale(t *testing.T) {
ExecObjectLayerDiskAlteredTest(t, testListObjectPartsStale)
}
// testListObjectPartsStale - Tests validate listing of object parts when parts are stale
func testListObjectPartsStale(obj ObjectLayer, instanceType string, disks []string, t *testing.T) {
bucketNames := []string{"minio-bucket", "minio-2-bucket"}
objectNames := []string{"minio-object-1.txt"}
uploadIDs := []string{}
globalStorageClass.Update(storageclass.Config{
RRS: storageclass.StorageClass{
Parity: 2,
},
Standard: storageclass.StorageClass{
Parity: 4,
},
})
// bucketnames[0].
// objectNames[0].
// uploadIds [0].
// Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucket(context.Background(), bucketNames[0], MakeBucketOptions{})
if err != nil {
// Failed to create new bucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}
opts := ObjectOptions{}
// Initiate Multipart Upload on the above created bucket.
res, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], opts)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}
z := obj.(*erasureServerPools)
er := z.serverPools[0].sets[0]
uploadIDs = append(uploadIDs, res.UploadID)
// Create multipart parts.
// Need parts to be uploaded before MultipartLists can be called and tested.
createPartCases := []struct {
bucketName string
objName string
uploadID string
PartID int
inputReaderData string
inputMd5 string
inputDataSize int64
expectedMd5 string
}{
// Case 1-4.
// Creating sequence of parts for same uploadID.
// Used to ensure that the ListMultipartResult produces one output for the four parts uploaded below for the given upload ID.
{bucketNames[0], objectNames[0], uploadIDs[0], 1, "abcd", "e2fc714c4727ee9395f324cd2e7f331f", int64(len("abcd")), "e2fc714c4727ee9395f324cd2e7f331f"},
{bucketNames[0], objectNames[0], uploadIDs[0], 2, "efgh", "1f7690ebdd9b4caf8fab49ca1757bf27", int64(len("efgh")), "1f7690ebdd9b4caf8fab49ca1757bf27"},
{bucketNames[0], objectNames[0], uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd")), "09a0877d04abf8759f99adec02baf579"},
{bucketNames[0], objectNames[0], uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd")), "e132e96a5ddad6da8b07bba6f6131fef"},
}
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetPutObjReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.inputDataSize, testCase.inputMd5, sha256sum), opts)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
}
erasureDisks := er.getDisks()
uploadIDPath := er.getUploadIDDir(bucketNames[0], objectNames[0], uploadIDs[0])
dataDirs, err := erasureDisks[0].ListDir(context.Background(), minioMetaMultipartBucket, minioMetaMultipartBucket, uploadIDPath, -1)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
var dataDir string
for _, folder := range dataDirs {
if strings.HasSuffix(folder, SlashSeparator) {
dataDir = folder
break
}
}
toDel := (len(erasureDisks) / 2) + 1
for _, disk := range erasureDisks[:toDel] {
disk.DeleteBulk(context.Background(), minioMetaMultipartBucket, []string{pathJoin(uploadIDPath, dataDir, "part.2")}...)
}
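Assuming the usual 16-drive test layout used by ExecObjectLayerDiskAlteredTest together with the standard parity of 4 configured above, the read quorum is 16 - 4 = 12; deleting part.2 from (16/2)+1 = 9 drives leaves it intact on only 7 drives, below quorum, so the expected listings below omit part number 2.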
partInfos := []ListPartsInfo{
// partinfos - 0.
{
Bucket: bucketNames[0],
Object: objectNames[0],
MaxParts: 10,
UploadID: uploadIDs[0],
Parts: []PartInfo{
{
PartNumber: 1,
Size: 4,
ETag: "e2fc714c4727ee9395f324cd2e7f331f",
},
{
PartNumber: 3,
Size: 4,
ETag: "09a0877d04abf8759f99adec02baf579",
},
{
PartNumber: 4,
Size: 4,
ETag: "e132e96a5ddad6da8b07bba6f6131fef",
},
},
},
// partinfos - 1.
{
Bucket: bucketNames[0],
Object: objectNames[0],
MaxParts: 3,
UploadID: uploadIDs[0],
Parts: []PartInfo{
{
PartNumber: 1,
Size: 4,
ETag: "e2fc714c4727ee9395f324cd2e7f331f",
},
{
PartNumber: 3,
Size: 4,
ETag: "09a0877d04abf8759f99adec02baf579",
},
{
PartNumber: 4,
Size: 4,
ETag: "e132e96a5ddad6da8b07bba6f6131fef",
},
},
},
// partinfos - 2.
{
Bucket: bucketNames[0],
Object: objectNames[0],
MaxParts: 2,
NextPartNumberMarker: 3,
IsTruncated: true,
UploadID: uploadIDs[0],
Parts: []PartInfo{
{
PartNumber: 1,
Size: 4,
ETag: "e2fc714c4727ee9395f324cd2e7f331f",
},
{
PartNumber: 3,
Size: 4,
ETag: "09a0877d04abf8759f99adec02baf579",
},
},
},
// partinfos - 3.
{
Bucket: bucketNames[0],
Object: objectNames[0],
MaxParts: 2,
IsTruncated: false,
UploadID: uploadIDs[0],
PartNumberMarker: 3,
Parts: []PartInfo{
{
PartNumber: 4,
Size: 4,
ETag: "e132e96a5ddad6da8b07bba6f6131fef",
},
},
},
}
// Collection of non-exhaustive ListObjectParts test cases, valid errors
// and success responses.
testCases := []struct {
bucket string
object string
uploadID string
partNumberMarker int
maxParts int
// Expected output of ListPartsInfo.
expectedResult ListPartsInfo
expectedErr error
// Flag indicating whether the test is expected to pass or not.
shouldPass bool
}{
// Test cases with invalid bucket names (Test number 1-4).
{".test", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: ".test"}, false},
{"Test", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: "Test"}, false},
{"---", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: "---"}, false},
{"ad", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: "ad"}, false},
// Test cases for listing uploadID with single part.
// Valid bucket names, but they do not exist (Test number 5-7).
{"volatile-bucket-1", "test1", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
{"volatile-bucket-2", "test1", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
{"volatile-bucket-3", "test1", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
// Test case for Asserting for invalid objectName (Test number 8).
{bucketNames[0], "", "", 0, 0, ListPartsInfo{}, ObjectNameInvalid{Bucket: bucketNames[0]}, false},
// Asserting for Invalid UploadID (Test number 9).
{bucketNames[0], objectNames[0], "abc", 0, 0, ListPartsInfo{}, InvalidUploadID{UploadID: "abc"}, false},
// Test case for uploadID with multiple parts (Test number 10).
{bucketNames[0], objectNames[0], uploadIDs[0], 0, 10, partInfos[0], nil, true},
// Test case with maxParts set to less than number of parts (Test number 11).
{bucketNames[0], objectNames[0], uploadIDs[0], 0, 3, partInfos[1], nil, true},
// Test case with maxParts set to 2, expecting a truncated result (Test number 12).
{bucketNames[0], objectNames[0], uploadIDs[0], 0, 2, partInfos[2], nil, true},
// Test case with partNumberMarker set (Test number 13).
{bucketNames[0], objectNames[0], uploadIDs[0], 3, 2, partInfos[3], nil, true},
}
for i, testCase := range testCases {
actualResult, actualErr := obj.ListObjectParts(context.Background(), testCase.bucket, testCase.object, testCase.uploadID, testCase.partNumberMarker, testCase.maxParts, ObjectOptions{})
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr.Error())
}
if actualErr == nil && !testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, instanceType, testCase.expectedErr.Error())
}
// Failed as expected, but does it fail for the expected reason.
if actualErr != nil && !testCase.shouldPass {
if !strings.Contains(actualErr.Error(), testCase.expectedErr.Error()) {
t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, instanceType, testCase.expectedErr, actualErr)
}
}
// Passes as expected, but asserting the results.
if actualErr == nil && testCase.shouldPass {
expectedResult := testCase.expectedResult
// Asserting the MaxParts.
if actualResult.MaxParts != expectedResult.MaxParts {
t.Errorf("Test %d: %s: Expected the MaxParts to be %d, but instead found it to be %d", i+1, instanceType, expectedResult.MaxParts, actualResult.MaxParts)
}
// Asserting Object Name.
if actualResult.Object != expectedResult.Object {
t.Errorf("Test %d: %s: Expected Object name to be \"%s\", but instead found it to be \"%s\"", i+1, instanceType, expectedResult.Object, actualResult.Object)
}
// Asserting UploadID.
if actualResult.UploadID != expectedResult.UploadID {
t.Errorf("Test %d: %s: Expected UploadID to be \"%s\", but instead found it to be \"%s\"", i+1, instanceType, expectedResult.UploadID, actualResult.UploadID)
}
// Asserting NextPartNumberMarker.
if actualResult.NextPartNumberMarker != expectedResult.NextPartNumberMarker {
t.Errorf("Test %d: %s: Expected NextPartNumberMarker to be \"%d\", but instead found it to be \"%d\"", i+1, instanceType, expectedResult.NextPartNumberMarker, actualResult.NextPartNumberMarker)
}
// Asserting PartNumberMarker.
if actualResult.PartNumberMarker != expectedResult.PartNumberMarker {
t.Errorf("Test %d: %s: Expected PartNumberMarker to be \"%d\", but instead found it to be \"%d\"", i+1, instanceType, expectedResult.PartNumberMarker, actualResult.PartNumberMarker)
}
// Asserting the BucketName.
if actualResult.Bucket != expectedResult.Bucket {
t.Errorf("Test %d: %s: Expected Bucket to be \"%s\", but instead found it to be \"%s\"", i+1, instanceType, expectedResult.Bucket, actualResult.Bucket)
}
// Asserting IsTruncated.
if actualResult.IsTruncated != testCase.expectedResult.IsTruncated {
t.Errorf("Test %d: %s: Expected IsTruncated to be \"%v\", but found it to \"%v\"", i+1, instanceType, expectedResult.IsTruncated, actualResult.IsTruncated)
}
// Asserting the number of Parts.
if len(expectedResult.Parts) != len(actualResult.Parts) {
t.Errorf("Test %d: %s: Expected the result to contain info of %d Parts, but found %d instead", i+1, instanceType, len(expectedResult.Parts), len(actualResult.Parts))
} else {
// Iterating over the partInfos and asserting the fields.
for j, actualMetaData := range actualResult.Parts {
// Asserting the PartNumber in the PartInfo.
if actualMetaData.PartNumber != expectedResult.Parts[j].PartNumber {
t.Errorf("Test %d: %s: Part %d: Expected PartNumber to be \"%d\", but instead found \"%d\"", i+1, instanceType, j+1, expectedResult.Parts[j].PartNumber, actualMetaData.PartNumber)
}
// Asserting the Size in the PartInfo.
if actualMetaData.Size != expectedResult.Parts[j].Size {
t.Errorf("Test %d: %s: Part %d: Expected Part Size to be \"%d\", but instead found \"%d\"", i+1, instanceType, j+1, expectedResult.Parts[j].Size, actualMetaData.Size)
}
// Asserting the ETag in the PartInfo.
if actualMetaData.ETag != expectedResult.Parts[j].ETag {
t.Errorf("Test %d: %s: Part %d: Expected Etag to be \"%s\", but instead found \"%s\"", i+1, instanceType, j+1, expectedResult.Parts[j].ETag, actualMetaData.ETag)
}
}
}
}
}
}
// Wrapper for calling TestListObjectPartsDiskNotFound tests for both Erasure multiple disks and single node setup.
func TestListObjectPartsDiskNotFound(t *testing.T) {
ExecObjectLayerDiskAlteredTest(t, testListObjectPartsDiskNotFound)


@@ -23,6 +23,9 @@ catch() {
pkill minio
pkill mc
rm -rf /tmp/xl/
if [ $# -ne 0 ]; then
exit $#
fi
}
catch
@@ -86,11 +89,11 @@ echo "=== myminio2"
versionId="$(./mc ls --json --versions myminio1/testbucket/dir/ | tail -n1 | jq -r .versionId)"
aws configure set aws_access_key_id minioadmin --profile minioadmin
aws configure set aws_secret_access_key minioadmin --profile minioadmin
aws configure set default.region us-east-1 --profile minioadmin
export AWS_ACCESS_KEY_ID=minioadmin
export AWS_SECRET_ACCESS_KEY=minioadmin
export AWS_REGION=us-east-1
aws s3api --endpoint-url http://localhost:9001 --profile minioadmin delete-object --bucket testbucket --key dir/file --version-id "$versionId"
aws s3api --endpoint-url http://localhost:9001 delete-object --bucket testbucket --key dir/file --version-id "$versionId"
./mc ls -r --versions myminio1/testbucket >/tmp/myminio1.txt
./mc ls -r --versions myminio2/testbucket >/tmp/myminio2.txt
@@ -127,7 +130,7 @@ versionId="$(./mc ls --json --versions myminio1/foobucket/dir/ | jq -r .versionI
kill ${pid2} && wait ${pid2} || true
aws s3api --endpoint-url http://localhost:9001 --profile minioadmin delete-object --bucket foobucket --key dir/file --version-id "$versionId"
aws s3api --endpoint-url http://localhost:9001 delete-object --bucket foobucket --key dir/file --version-id "$versionId"
out="$(./mc ls myminio1/foobucket/dir/)"
if [ "$out" != "" ]; then


@@ -24,6 +24,9 @@ catch() {
rm -rf /tmp/multisitea
rm -rf /tmp/multisiteb
rm -rf /tmp/data
if [ $# -ne 0 ]; then
exit $#
fi
}
catch


@@ -26,6 +26,9 @@ catch() {
rm -rf /tmp/multisitea
rm -rf /tmp/multisiteb
rm -rf /tmp/multisitec
if [ $# -ne 0 ]; then
exit $#
fi
}
catch


@@ -24,6 +24,9 @@ catch() {
rm -rf /tmp/multisitec
rm -rf /tmp/multisited
rm -rf /tmp/data
if [ $# -ne 0 ]; then
exit $#
fi
}
catch


@@ -20,6 +20,9 @@ catch() {
pkill minio
pkill -9 minio
rm -rf /tmp/multisitea
if [ $# -ne 0 ]; then
exit $#
fi
}
catch