diff --git a/.github/workflows/go-fips.yml b/.github/workflows/go-fips.yml index da540cdff..0e3532a49 100644 --- a/.github/workflows/go-fips.yml +++ b/.github/workflows/go-fips.yml @@ -20,7 +20,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - go-version: [1.17.11b7, 1.18.3b7] + go-version: [1.18.5b7] os: [ubuntu-latest] steps: - uses: actions/checkout@v2 diff --git a/cmd/erasure-object.go b/cmd/erasure-object.go index e9ee3cddb..34381cc54 100644 --- a/cmd/erasure-object.go +++ b/cmd/erasure-object.go @@ -448,15 +448,26 @@ func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object st } defer NSUpdated(bucket, object) - if opts.VersionID != "" { - er.deleteObjectVersion(ctx, bucket, object, 1, FileInfo{ - VersionID: opts.VersionID, - }, false) - } else { - er.deleteObjectVersion(ctx, bucket, object, 1, FileInfo{ - VersionID: m.VersionID, - }, false) + fi := FileInfo{ + VersionID: m.VersionID, } + if opts.VersionID != "" { + fi.VersionID = opts.VersionID + } + + disks := er.getDisks() + g := errgroup.WithNErrs(len(disks)) + for index := range disks { + index := index + g.Go(func() error { + if disks[index] == nil { + return errDiskNotFound + } + return disks[index].DeleteVersion(ctx, bucket, object, fi, false) + }, index) + } + + g.Wait() } return m, err } @@ -1172,8 +1183,16 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st return fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended), nil } -func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object string, writeQuorum int, fi FileInfo, forceDelMarker bool) error { +func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object string, fi FileInfo, forceDelMarker bool) error { disks := er.getDisks() + // Assume (N/2 + 1) quorum for Delete() + // this is a theoretical assumption such that + // for delete's we do not need to honor storage + // class for objects that have reduced quorum + // due to storage class - this only needs to be honored + // for Read() requests alone that we already do. + writeQuorum := len(disks)/2 + 1 + g := errgroup.WithNErrs(len(disks)) for index := range disks { index := index @@ -1423,69 +1442,49 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string } } - // Acquire a write lock before deleting the object. - lk := er.NewNSLock(bucket, object) - lkctx, err := lk.GetLock(ctx, globalDeleteOperationTimeout) - if err != nil { - return ObjectInfo{}, err - } - ctx = lkctx.Context() - defer lk.Unlock(lkctx.Cancel) - - versionFound := true objInfo = ObjectInfo{VersionID: opts.VersionID} // version id needed in Delete API response. 
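> Note on the hunks above: deleteIfDangling now fans DeleteVersion out to every disk, and deleteObjectVersion stops taking a caller-supplied writeQuorum, assuming a simple majority of len(disks)/2 + 1 instead. Below is a minimal, self-contained sketch of that majority-quorum fan-out. It uses only the standard library rather than MinIO's internal errgroup helper, and the names diskAPI, okDisk, and deleteVersionAll are hypothetical, introduced here purely for illustration.

```go
// Sketch only, not part of the patch: issue a delete on every disk
// concurrently and treat it as successful when a simple majority
// (N/2 + 1) of disks respond without error.
package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// diskAPI is a hypothetical stand-in for the storage-layer interface.
type diskAPI interface {
	DeleteVersion(ctx context.Context, bucket, object string) error
}

var errDiskNotFound = errors.New("disk not found")

// deleteVersionAll mirrors the len(disks)/2+1 write-quorum assumption
// the patch makes inside deleteObjectVersion.
func deleteVersionAll(ctx context.Context, disks []diskAPI, bucket, object string) error {
	writeQuorum := len(disks)/2 + 1

	errs := make([]error, len(disks))
	var wg sync.WaitGroup
	for i := range disks {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if disks[i] == nil {
				errs[i] = errDiskNotFound
				return
			}
			errs[i] = disks[i].DeleteVersion(ctx, bucket, object)
		}(i)
	}
	wg.Wait()

	// Count successes against the quorum.
	success := 0
	for _, err := range errs {
		if err == nil {
			success++
		}
	}
	if success < writeQuorum {
		return fmt.Errorf("delete quorum not met: %d/%d", success, writeQuorum)
	}
	return nil
}

// okDisk is a trivial fake disk used only to exercise the sketch.
type okDisk struct{}

func (okDisk) DeleteVersion(ctx context.Context, bucket, object string) error { return nil }

func main() {
	disks := []diskAPI{okDisk{}, okDisk{}, nil, okDisk{}}
	err := deleteVersionAll(context.Background(), disks, "bucket", "object")
	fmt.Println("delete result:", err) // nil: 3 of 4 disks succeeded, quorum is 3
}
```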
- goi, writeQuorum, gerr := er.getObjectInfoAndQuorum(ctx, bucket, object, opts) - if gerr != nil && goi.Name == "" { - switch gerr.(type) { - case InsufficientReadQuorum: - return objInfo, InsufficientWriteQuorum{} - } - // For delete marker replication, versionID being replicated will not exist on disk - if opts.DeleteMarker { - versionFound = false - } else { - return objInfo, gerr - } - } - - if opts.Expiration.Expire { - action := evalActionFromLifecycle(ctx, *lc, rcfg, goi, false) - var isErr bool - switch action { - case lifecycle.NoneAction: - isErr = true - case lifecycle.TransitionAction, lifecycle.TransitionVersionAction: - isErr = true - } - if isErr { - if goi.VersionID != "" { - return goi, VersionNotFound{ - Bucket: bucket, - Object: object, - VersionID: goi.VersionID, - } - } - return goi, ObjectNotFound{ - Bucket: bucket, - Object: object, - } - } - } - defer NSUpdated(bucket, object) storageDisks := er.getDisks() - var markDelete bool - // Determine whether to mark object deleted for replication - if goi.VersionID != "" { - markDelete = true + if opts.Expiration.Expire { + goi, _, err := er.getObjectInfoAndQuorum(ctx, bucket, object, opts) + if err == nil { + action := evalActionFromLifecycle(ctx, *lc, rcfg, goi, false) + var isErr bool + switch action { + case lifecycle.NoneAction: + isErr = true + case lifecycle.TransitionAction, lifecycle.TransitionVersionAction: + isErr = true + } + if isErr { + if goi.VersionID != "" { + return goi, VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: goi.VersionID, + } + } + return goi, ObjectNotFound{ + Bucket: bucket, + Object: object, + } + } + } } - // Default deleteMarker to true if object is under versioning - deleteMarker := opts.Versioned + versionFound := !(opts.DeleteMarker && opts.VersionID != "") - if opts.VersionID != "" { + // Determine whether to mark object deleted for replication + markDelete := !opts.DeleteMarker && opts.VersionID != "" + + // Default deleteMarker to true if object is under versioning + // versioning suspended means we add `null` version as + // delete marker, if its not decided already. + deleteMarker := (opts.Versioned || opts.VersionSuspended) && opts.VersionID == "" || (opts.DeleteMarker && opts.VersionID != "") + + if markDelete { // case where replica version needs to be deleted on target cluster if versionFound && opts.DeleteMarkerReplicationStatus() == replication.Replica { markDelete = false @@ -1496,22 +1495,6 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string if opts.VersionPurgeStatus() == Complete { markDelete = false } - - // Version is found but we do not wish to create more delete markers - // now, since VersionPurgeStatus() is already set, we can let the - // lower layers decide this. This fixes a regression that was introduced - // in PR #14555 where !VersionPurgeStatus.Empty() is automatically - // considered as Delete marker true to avoid listing such objects by - // regular ListObjects() calls. However for delete replication this - // ends up being a problem because "upon" a successful delete this - // ends up creating a new delete marker that is spurious and unnecessary. 
- if versionFound { - if !goi.VersionPurgeStatus.Empty() { - deleteMarker = false - } else if !goi.DeleteMarker { // implies a versioned delete of object - deleteMarker = false - } - } } modTime := opts.MTime @@ -1519,38 +1502,31 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string modTime = UTCNow() } fvID := mustGetUUID() - if markDelete { - if opts.Versioned || opts.VersionSuspended { - if !deleteMarker { - // versioning suspended means we add `null` version as - // delete marker, if its not decided already. - deleteMarker = opts.VersionSuspended && opts.VersionID == "" - } - fi := FileInfo{ - Name: object, - Deleted: deleteMarker, - MarkDeleted: markDelete, - ModTime: modTime, - ReplicationState: opts.DeleteReplication, - TransitionStatus: opts.Transition.Status, - ExpireRestored: opts.Transition.ExpireRestored, - } - fi.SetTierFreeVersionID(fvID) - if opts.Versioned { - fi.VersionID = mustGetUUID() - if opts.VersionID != "" { - fi.VersionID = opts.VersionID - } - } - // versioning suspended means we add `null` version as - // delete marker. Add delete marker, since we don't have - // any version specified explicitly. Or if a particular - // version id needs to be replicated. - if err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, fi, opts.DeleteMarker); err != nil { - return objInfo, toObjectErr(err, bucket, object) - } - return fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended), nil + if markDelete && (opts.Versioned || opts.VersionSuspended) { + fi := FileInfo{ + Name: object, + Deleted: deleteMarker, + MarkDeleted: markDelete, + ModTime: modTime, + ReplicationState: opts.DeleteReplication, + TransitionStatus: opts.Transition.Status, + ExpireRestored: opts.Transition.ExpireRestored, } + fi.SetTierFreeVersionID(fvID) + if opts.Versioned { + fi.VersionID = mustGetUUID() + if opts.VersionID != "" { + fi.VersionID = opts.VersionID + } + } + // versioning suspended means we add `null` version as + // delete marker. Add delete marker, since we don't have + // any version specified explicitly. Or if a particular + // version id needs to be replicated. + if err = er.deleteObjectVersion(ctx, bucket, object, fi, opts.DeleteMarker); err != nil { + return objInfo, toObjectErr(err, bucket, object) + } + return fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended), nil } // Delete the object version on all disks. 
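> For reference, the consolidated delete-marker decision in the hunk above reduces to a single Boolean. The sketch below restates that expression as a standalone helper so the cases are easier to scan; shouldWriteDeleteMarker and its parameter names are hypothetical, and the inputs correspond to fields of ObjectOptions in the patch.

```go
// Sketch only: the deleteMarker expression from the patch, extracted
// into a pure helper for readability.
package main

import "fmt"

// shouldWriteDeleteMarker reports whether a DELETE request should produce
// a delete marker: either an unversioned delete on a versioned (or
// version-suspended) bucket, or the replication of an incoming delete
// marker that carries an explicit version id.
func shouldWriteDeleteMarker(versioned, versionSuspended, replicatingDeleteMarker bool, versionID string) bool {
	return (versioned || versionSuspended) && versionID == "" ||
		(replicatingDeleteMarker && versionID != "")
}

func main() {
	// Plain delete on a versioned bucket: a new delete marker is written.
	fmt.Println(shouldWriteDeleteMarker(true, false, false, "")) // true
	// Deleting one specific version: no delete marker.
	fmt.Println(shouldWriteDeleteMarker(true, false, false, "v1")) // false
	// Replicating a delete marker with its version id: marker is written.
	fmt.Println(shouldWriteDeleteMarker(true, false, true, "v1")) // true
}
```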
@@ -1565,7 +1541,7 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string ExpireRestored: opts.Transition.ExpireRestored, } dfi.SetTierFreeVersionID(fvID) - if err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, dfi, opts.DeleteMarker); err != nil { + if err = er.deleteObjectVersion(ctx, bucket, object, dfi, opts.DeleteMarker); err != nil { return objInfo, toObjectErr(err, bucket, object) } @@ -1577,13 +1553,7 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string break } - return ObjectInfo{ - Bucket: bucket, - Name: object, - VersionID: opts.VersionID, - VersionPurgeStatusInternal: opts.DeleteReplication.VersionPurgeStatusInternal, - ReplicationStatusInternal: opts.DeleteReplication.ReplicationStatusInternal, - }, nil + return dfi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended), nil } // Send the successful but partial upload/delete, however ignore @@ -1853,14 +1823,8 @@ func (er erasureObjects) TransitionObject(ctx context.Context, bucket, object st eventName := event.ObjectTransitionComplete storageDisks := er.getDisks() - // we now know the number of blocks this object needs for data and parity. - // writeQuorum is dataBlocks + 1 - writeQuorum := fi.Erasure.DataBlocks - if fi.Erasure.DataBlocks == fi.Erasure.ParityBlocks { - writeQuorum++ - } - if err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, fi, false); err != nil { + if err = er.deleteObjectVersion(ctx, bucket, object, fi, false); err != nil { eventName = event.ObjectTransitionFailed } diff --git a/cmd/erasure-server-pool.go b/cmd/erasure-server-pool.go index 1755df18a..e53a6be72 100644 --- a/cmd/erasure-server-pool.go +++ b/cmd/erasure-server-pool.go @@ -380,19 +380,15 @@ func (z *erasureServerPools) getServerPoolsAvailableSpace(ctx context.Context, b return serverPools } -// poolObjInfo represents the state of an object per pool -type poolObjInfo struct { - PoolIndex int - ObjInfo ObjectInfo - Err error +// PoolObjInfo represents the state of current object version per pool +type PoolObjInfo struct { + Index int + ObjInfo ObjectInfo + Err error } -func (z *erasureServerPools) getPoolIdxExistingWithOpts(ctx context.Context, bucket, object string, opts ObjectOptions) (idx int, err error) { - if z.SinglePool() { - return 0, nil - } - - poolObjInfos := make([]poolObjInfo, len(z.serverPools)) +func (z *erasureServerPools) getPoolInfoExistingWithOpts(ctx context.Context, bucket, object string, opts ObjectOptions) (PoolObjInfo, error) { + poolObjInfos := make([]PoolObjInfo, len(z.serverPools)) poolOpts := make([]ObjectOptions, len(z.serverPools)) for i := range z.serverPools { poolOpts[i] = opts @@ -404,8 +400,8 @@ func (z *erasureServerPools) getPoolIdxExistingWithOpts(ctx context.Context, buc go func(i int, pool *erasureSets, opts ObjectOptions) { defer wg.Done() // remember the pool index, we may sort the slice original index might be lost. - pinfo := poolObjInfo{ - PoolIndex: i, + pinfo := PoolObjInfo{ + Index: i, } // do not remove this check as it can lead to inconsistencies // for all callers of bucket replication. @@ -429,19 +425,19 @@ func (z *erasureServerPools) getPoolIdxExistingWithOpts(ctx context.Context, buc for _, pinfo := range poolObjInfos { // skip all objects from suspended pools if asked by the // caller. 
- if z.IsSuspended(pinfo.PoolIndex) && opts.SkipDecommissioned { + if z.IsSuspended(pinfo.Index) && opts.SkipDecommissioned { continue } if pinfo.Err != nil && !isErrObjectNotFound(pinfo.Err) { - return -1, pinfo.Err + return pinfo, pinfo.Err } if isErrObjectNotFound(pinfo.Err) { // No object exists or its a delete marker, // check objInfo to confirm. if pinfo.ObjInfo.DeleteMarker && pinfo.ObjInfo.Name != "" { - return pinfo.PoolIndex, nil + return pinfo, nil } // objInfo is not valid, truly the object doesn't @@ -449,10 +445,18 @@ func (z *erasureServerPools) getPoolIdxExistingWithOpts(ctx context.Context, buc continue } - return pinfo.PoolIndex, nil + return pinfo, nil } - return -1, toObjectErr(errFileNotFound, bucket, object) + return PoolObjInfo{}, toObjectErr(errFileNotFound, bucket, object) +} + +func (z *erasureServerPools) getPoolIdxExistingWithOpts(ctx context.Context, bucket, object string, opts ObjectOptions) (idx int, err error) { + pinfo, err := z.getPoolInfoExistingWithOpts(ctx, bucket, object, opts) + if err != nil { + return -1, err + } + return pinfo.Index, nil } // getPoolIdxExistingNoLock returns the (first) found object pool index containing an object. @@ -999,16 +1003,36 @@ func (z *erasureServerPools) DeleteObject(ctx context.Context, bucket string, ob } object = encodeDirObject(object) - if z.SinglePool() { - return z.serverPools[0].DeleteObject(ctx, bucket, object, opts) - } - idx, err := z.getPoolIdxExistingWithOpts(ctx, bucket, object, opts) + // Acquire a write lock before deleting the object. + lk := z.NewNSLock(bucket, object) + lkctx, err := lk.GetLock(ctx, globalDeleteOperationTimeout) if err != nil { + return ObjectInfo{}, err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx.Cancel) + + gopts := opts + gopts.NoLock = true + pinfo, err := z.getPoolInfoExistingWithOpts(ctx, bucket, object, gopts) + if err != nil { + switch err.(type) { + case InsufficientReadQuorum: + return objInfo, InsufficientWriteQuorum{} + } return objInfo, err } - return z.serverPools[idx].DeleteObject(ctx, bucket, object, opts) + // Delete marker already present we are not going to create new delete markers. + if pinfo.ObjInfo.DeleteMarker && opts.VersionID == "" { + pinfo.ObjInfo.Name = decodeDirObject(object) + return pinfo.ObjInfo, nil + } + + objInfo, err = z.serverPools[pinfo.Index].DeleteObject(ctx, bucket, object, opts) + objInfo.Name = decodeDirObject(object) + return objInfo, err } func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) { @@ -1034,14 +1058,6 @@ func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, o ctx = lkctx.Context() defer multiDeleteLock.Unlock(lkctx.Cancel) - if z.SinglePool() { - deleteObjects, dErrs := z.serverPools[0].DeleteObjects(ctx, bucket, objects, opts) - for i := range deleteObjects { - deleteObjects[i].ObjectName = decodeDirObject(deleteObjects[i].ObjectName) - } - return deleteObjects, dErrs - } - // Fetch location of up to 10 objects concurrently. 
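> The pool-level DeleteObject above now takes the namespace lock, resolves which pool holds the latest version, and returns early when that version is already a delete marker and no VersionID was requested, so repeated DELETE calls do not stack delete markers. A small sketch of that guard follows; objectInfo and suppressDuplicateMarker are hypothetical names standing in for the types used in the patch.

```go
// Sketch only: the duplicate-delete-marker guard added in the
// pool-level DeleteObject.
package main

import "fmt"

// objectInfo is a hypothetical stand-in for the fields the guard needs.
type objectInfo struct {
	Name         string
	VersionID    string
	DeleteMarker bool
}

// suppressDuplicateMarker returns the existing delete marker (and true)
// when no new marker should be created for this request.
func suppressDuplicateMarker(latest objectInfo, requestVersionID string) (objectInfo, bool) {
	if latest.DeleteMarker && requestVersionID == "" {
		return latest, true
	}
	return objectInfo{}, false
}

func main() {
	latest := objectInfo{Name: "marker", VersionID: "a1b2", DeleteMarker: true}
	if oi, done := suppressDuplicateMarker(latest, ""); done {
		// A repeated DELETE returns the marker that already exists.
		fmt.Printf("reusing delete marker %s@%s\n", oi.Name, oi.VersionID)
	}
}
```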
poolObjIdxMap := map[int][]ObjectToDelete{} origIndexMap := map[int][]int{} @@ -1060,46 +1076,66 @@ func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, o j := j obj := obj eg.Go(func() error { - idx, err := z.getPoolIdxExistingWithOpts(ctx, bucket, obj.ObjectName, ObjectOptions{ + pinfo, err := z.getPoolInfoExistingWithOpts(ctx, bucket, obj.ObjectName, ObjectOptions{ NoLock: true, }) if err != nil { derrs[j] = err + dobjects[j] = DeletedObject{ + ObjectName: obj.ObjectName, + } return nil } + + // Delete marker already present we are not going to create new delete markers. + if pinfo.ObjInfo.DeleteMarker && obj.VersionID == "" { + dobjects[j] = DeletedObject{ + DeleteMarker: pinfo.ObjInfo.DeleteMarker, + DeleteMarkerVersionID: pinfo.ObjInfo.VersionID, + DeleteMarkerMTime: DeleteMarkerMTime{pinfo.ObjInfo.ModTime}, + ObjectName: pinfo.ObjInfo.Name, + } + return nil + } + + idx := pinfo.Index + mu.Lock() + defer mu.Unlock() + poolObjIdxMap[idx] = append(poolObjIdxMap[idx], obj) origIndexMap[idx] = append(origIndexMap[idx], j) - mu.Unlock() return nil }, j) } eg.Wait() // wait to check all the pools. - // Delete concurrently in all server pools. - var wg sync.WaitGroup - wg.Add(len(z.serverPools)) - for idx, pool := range z.serverPools { - go func(idx int, pool *erasureSets) { - defer wg.Done() - objs := poolObjIdxMap[idx] - if len(objs) > 0 { - orgIndexes := origIndexMap[idx] - deletedObjects, errs := pool.DeleteObjects(ctx, bucket, objs, opts) - mu.Lock() - for i, derr := range errs { - if derr != nil { - derrs[orgIndexes[i]] = derr + if len(poolObjIdxMap) > 0 { + // Delete concurrently in all server pools. + var wg sync.WaitGroup + wg.Add(len(z.serverPools)) + for idx, pool := range z.serverPools { + go func(idx int, pool *erasureSets) { + defer wg.Done() + objs := poolObjIdxMap[idx] + if len(objs) > 0 { + orgIndexes := origIndexMap[idx] + deletedObjects, errs := pool.DeleteObjects(ctx, bucket, objs, opts) + mu.Lock() + for i, derr := range errs { + if derr != nil { + derrs[orgIndexes[i]] = derr + } + deletedObjects[i].ObjectName = decodeDirObject(deletedObjects[i].ObjectName) + dobjects[orgIndexes[i]] = deletedObjects[i] } - deletedObjects[i].ObjectName = decodeDirObject(deletedObjects[i].ObjectName) - dobjects[orgIndexes[i]] = deletedObjects[i] + mu.Unlock() } - mu.Unlock() - } - }(idx, pool) + }(idx, pool) + } + wg.Wait() } - wg.Wait() return dobjects, derrs } diff --git a/cmd/erasure-single-drive.go b/cmd/erasure-single-drive.go index 4df8e8dcc..6539ac042 100644 --- a/cmd/erasure-single-drive.go +++ b/cmd/erasure-single-drive.go @@ -1223,7 +1223,7 @@ func (es *erasureSingle) putObject(ctx context.Context, bucket string, object st return fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended), nil } -func (es *erasureSingle) deleteObjectVersion(ctx context.Context, bucket, object string, writeQuorum int, fi FileInfo, forceDelMarker bool) error { +func (es *erasureSingle) deleteObjectVersion(ctx context.Context, bucket, object string, fi FileInfo, forceDelMarker bool) error { return es.disk.DeleteVersion(ctx, bucket, object, fi, forceDelMarker) } @@ -1447,7 +1447,7 @@ func (es *erasureSingle) DeleteObject(ctx context.Context, bucket, object string versionFound := true objInfo = ObjectInfo{VersionID: opts.VersionID} // version id needed in Delete API response. 
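> DeleteObjects above groups each object under the pool that owns it (poolObjIdxMap and origIndexMap), deletes per pool concurrently, and scatters results back into the caller's original positions. The following sketch shows that batching pattern in isolation; poolOf and results are hypothetical names, and the pool lookup that getPoolInfoExistingWithOpts performs in the patch is stubbed out as a fixed map.

```go
// Sketch only: group work by pool, process each pool's batch
// concurrently, then write results back to their original slots.
package main

import (
	"fmt"
	"sync"
)

func main() {
	objects := []string{"a", "b", "c", "d"}
	// poolOf stands in for the per-object pool lookup.
	poolOf := map[string]int{"a": 0, "b": 1, "c": 0, "d": 1}

	poolObjIdxMap := map[int][]string{} // pool index -> objects in that pool
	origIndexMap := map[int][]int{}     // pool index -> original positions
	for j, obj := range objects {
		idx := poolOf[obj]
		poolObjIdxMap[idx] = append(poolObjIdxMap[idx], obj)
		origIndexMap[idx] = append(origIndexMap[idx], j)
	}

	results := make([]string, len(objects))
	var mu sync.Mutex
	var wg sync.WaitGroup
	for idx, objs := range poolObjIdxMap {
		wg.Add(1)
		go func(idx int, objs []string) {
			defer wg.Done()
			for i, obj := range objs {
				// A real implementation would call the pool's batch
				// delete here; we only record which pool handled it.
				mu.Lock()
				results[origIndexMap[idx][i]] = fmt.Sprintf("%s deleted by pool %d", obj, idx)
				mu.Unlock()
			}
		}(idx, objs)
	}
	wg.Wait()

	for _, r := range results {
		fmt.Println(r)
	}
}
```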
- goi, writeQuorum, gerr := es.getObjectInfoAndQuorum(ctx, bucket, object, opts) + goi, _, gerr := es.getObjectInfoAndQuorum(ctx, bucket, object, opts) if gerr != nil && goi.Name == "" { switch gerr.(type) { case InsufficientReadQuorum: @@ -1461,6 +1461,11 @@ func (es *erasureSingle) DeleteObject(ctx context.Context, bucket, object string } } + // Do not create new delete markers. + if goi.DeleteMarker && opts.VersionID == "" { + return goi, nil + } + if opts.Expiration.Expire { action := evalActionFromLifecycle(ctx, *lc, rcfg, goi, false) var isErr bool @@ -1557,7 +1562,7 @@ func (es *erasureSingle) DeleteObject(ctx context.Context, bucket, object string // delete marker. Add delete marker, since we don't have // any version specified explicitly. Or if a particular // version id needs to be replicated. - if err = es.deleteObjectVersion(ctx, bucket, object, writeQuorum, fi, opts.DeleteMarker); err != nil { + if err = es.deleteObjectVersion(ctx, bucket, object, fi, opts.DeleteMarker); err != nil { return objInfo, toObjectErr(err, bucket, object) } return fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended), nil @@ -1576,7 +1581,7 @@ func (es *erasureSingle) DeleteObject(ctx context.Context, bucket, object string ExpireRestored: opts.Transition.ExpireRestored, } dfi.SetTierFreeVersionID(fvID) - if err = es.deleteObjectVersion(ctx, bucket, object, writeQuorum, dfi, opts.DeleteMarker); err != nil { + if err = es.deleteObjectVersion(ctx, bucket, object, dfi, opts.DeleteMarker); err != nil { return objInfo, toObjectErr(err, bucket, object) } @@ -1813,14 +1818,7 @@ func (es *erasureSingle) TransitionObject(ctx context.Context, bucket, object st fi.TransitionVersionID = string(rv) eventName := event.ObjectTransitionComplete - // we now know the number of blocks this object needs for data and parity. - // writeQuorum is dataBlocks + 1 - writeQuorum := fi.Erasure.DataBlocks - if fi.Erasure.DataBlocks == fi.Erasure.ParityBlocks { - writeQuorum++ - } - - if err = es.deleteObjectVersion(ctx, bucket, object, writeQuorum, fi, false); err != nil { + if err = es.deleteObjectVersion(ctx, bucket, object, fi, false); err != nil { eventName = event.ObjectTransitionFailed } diff --git a/docs/bucket/replication/setup_2site_existing_replication.sh b/docs/bucket/replication/setup_2site_existing_replication.sh index 0336975aa..12897b5e1 100755 --- a/docs/bucket/replication/setup_2site_existing_replication.sh +++ b/docs/bucket/replication/setup_2site_existing_replication.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +set -x + trap 'catch $LINENO' ERR # shellcheck disable=SC2120 @@ -37,8 +39,10 @@ unset MINIO_KMS_KES_KEY_FILE unset MINIO_KMS_KES_ENDPOINT unset MINIO_KMS_KES_KEY_NAME -wget -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc \ - && chmod +x mc +if [ ! -f ./mc ]; then + wget --quiet -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc && \ + chmod +x mc +fi minio server --address 127.0.0.1:9001 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \ "http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_1.log 2>&1 & @@ -66,6 +70,9 @@ done ./mc mirror /tmp/data sitea/bucket/ ./mc version enable sitea/bucket +./mc cp /tmp/data/file_1.txt sitea/bucket/marker +./mc rm sitea/bucket/marker + ./mc mb siteb/bucket/ ./mc version enable siteb/bucket/ @@ -83,11 +90,49 @@ sleep 1 sleep 10s ## sleep for 10s idea is that we give 100ms per object. 
count=$(./mc replicate resync status sitea/bucket --remote-bucket "${remote_arn}" --json | jq .resyncInfo.target[].replicationCount) -./mc ls --versions sitea/bucket -./mc ls --versions siteb/bucket -if [ $count -ne 10 ]; then - echo "resync not complete after 100s unexpected failure" + +./mc ls -r --versions sitea/bucket > /tmp/sitea.txt +./mc ls -r --versions siteb/bucket > /tmp/siteb.txt + +out=$(diff -qpruN /tmp/sitea.txt /tmp/siteb.txt) +ret=$? +if [ $ret -ne 0 ]; then + echo "BUG: expected no missing entries after replication: $out" + exit 1 +fi + +if [ $count -ne 12 ]; then + echo "resync not complete after 10s unexpected failure" ./mc diff sitea/bucket siteb/bucket + exit 1 +fi + +./mc cp /tmp/data/file_1.txt sitea/bucket/marker_new +./mc rm sitea/bucket/marker_new + +sleep 12s ## sleep for 12s idea is that we give 100ms per object. + +./mc ls -r --versions sitea/bucket > /tmp/sitea.txt +./mc ls -r --versions siteb/bucket > /tmp/siteb.txt + +out=$(diff -qpruN /tmp/sitea.txt /tmp/siteb.txt) +ret=$? +if [ $ret -ne 0 ]; then + echo "BUG: expected no 'diff' after replication: $out" + exit 1 +fi + +./mc rm -r --force --versions sitea/bucket/marker +sleep 14s ## sleep for 14s idea is that we give 100ms per object. + +./mc ls -r --versions sitea/bucket > /tmp/sitea.txt +./mc ls -r --versions siteb/bucket > /tmp/siteb.txt + +out=$(diff -qpruN /tmp/sitea.txt /tmp/siteb.txt) +ret=$? +if [ $ret -ne 0 ]; then + echo "BUG: expected no 'diff' after replication: $out" + exit 1 fi catch diff --git a/docs/bucket/replication/setup_3site_replication.sh b/docs/bucket/replication/setup_3site_replication.sh index 7fe7d3faa..04c4bf6ac 100755 --- a/docs/bucket/replication/setup_3site_replication.sh +++ b/docs/bucket/replication/setup_3site_replication.sh @@ -1,5 +1,9 @@ #!/usr/bin/env bash +if [ -n "$TEST_DEBUG" ]; then + set -x +fi + trap 'catch $LINENO' ERR # shellcheck disable=SC2120