fix: site replication healing of missing buckets (#15298)

fixes a regression from #15186

- Adding tests to cover healing of buckets.
- Also dereference quota in SiteReplicationStatus only when non-nil
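For context on the quota change: when a peer has no quota configuration for a bucket, the corresponding *madmin.BucketQuota pointer can be nil, and dereferencing it unconditionally panics. A minimal, self-contained sketch of the guard, using a stand-in type rather than the server code itself:

package main

import "fmt"

// BucketQuota is a stand-in for madmin.BucketQuota in this sketch.
type BucketQuota struct {
	Quota uint64
	Type  string
}

func main() {
	// Simulate a peer that has the bucket but no quota config: the pointer is nil.
	quotaCfgs := []*BucketQuota{nil}
	hasBucket := true

	// Before the fix the value was dereferenced unconditionally, which panics here:
	//   quotaCfgSet := hasBucket && *quotaCfgs[0] != BucketQuota{}

	// After the fix the nil check short-circuits before the dereference.
	quotaCfgSet := hasBucket && quotaCfgs[0] != nil && *quotaCfgs[0] != (BucketQuota{})
	fmt.Println("quota config set:", quotaCfgSet) // prints: quota config set: false
}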
Poorna 2022-07-14 14:27:47 -07:00 committed by GitHub
parent 6c265534a4
commit 7e32a17742
4 changed files with 32 additions and 13 deletions

View File

@@ -2814,7 +2814,7 @@ func (c *SiteReplicationSys) siteReplicationStatus(ctx context.Context, objAPI O
 			if bi, ok := sris[dIdx].Buckets[s.Bucket]; ok {
 				hasBucket = !bi.CreatedAt.Equal(timeSentinel)
 			}
-			quotaCfgSet := hasBucket && *quotaCfgs[i] != madmin.BucketQuota{}
+			quotaCfgSet := hasBucket && quotaCfgs[i] != nil && *quotaCfgs[i] != madmin.BucketQuota{}
 			ss := madmin.SRBucketStatsSummary{
 				DeploymentID: s.DeploymentID,
 				HasBucket:    hasBucket,
@@ -3556,20 +3556,13 @@ type srStatusInfo struct {
 }
 
 func (c *SiteReplicationSys) healBuckets(ctx context.Context, objAPI ObjectLayer) error {
-	buckets, err := c.listBuckets(ctx)
+	info, err := c.siteReplicationStatus(ctx, objAPI, madmin.SRStatusOptions{
+		Buckets: true,
+	})
 	if err != nil {
 		return err
 	}
-	for _, bi := range buckets {
-		bucket := bi.Name
-		info, err := c.siteReplicationStatus(ctx, objAPI, madmin.SRStatusOptions{
-			Entity:      madmin.SRBucketEntity,
-			EntityValue: bucket,
-		})
-		if err != nil {
-			logger.LogIf(ctx, err)
-			continue
-		}
+	for bucket := range info.BucketStats {
 		c.healCreateMissingBucket(ctx, objAPI, bucket, info)
 		c.healVersioningMetadata(ctx, objAPI, bucket, info)
 		c.healOLockConfigMetadata(ctx, objAPI, bucket, info)
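A rough sketch of the resulting healBuckets control flow, using stand-in names (the real code uses siteReplicationStatus, srStatusInfo.BucketStats and the heal* helpers shown above): the bucket set is now derived from one consolidated status call covering all sites rather than from a per-bucket status lookup over a local bucket listing, so a bucket missing on one site can still be visited for healing, and a status failure returns once instead of being logged per bucket.

package main

import (
	"context"
	"fmt"
)

// statusInfo is a stand-in for srStatusInfo: bucket name -> sites that know about it.
type statusInfo struct {
	BucketStats map[string][]string
}

type replSys struct{}

// bucketStatus stands in for siteReplicationStatus with Buckets: true,
// i.e. one call that reports every bucket known to any site.
func (r *replSys) bucketStatus(ctx context.Context) (statusInfo, error) {
	return statusInfo{BucketStats: map[string][]string{
		"newbucket":  {"site1", "site2"},
		"newbucket2": {"site2"}, // created while site1 was down
	}}, nil
}

func (r *replSys) healBuckets(ctx context.Context) error {
	info, err := r.bucketStatus(ctx)
	if err != nil {
		return err // single failure path instead of per-bucket log-and-continue
	}
	for bucket := range info.BucketStats {
		// The server calls healCreateMissingBucket, healVersioningMetadata and
		// healOLockConfigMetadata here; the sketch only reports the bucket.
		fmt.Println("healing:", bucket)
	}
	return nil
}

func main() {
	var r replSys
	_ = r.healBuckets(context.Background())
}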

View File

@@ -257,6 +257,9 @@ fi
 kill -9 ${site1_pid}
 # Update tag on minio2/newbucket when minio1 is down
 ./mc tag set minio2/newbucket "key=val2"
+
+# create a new bucket on minio2. This should replicate to minio1 after it comes online.
+./mc mb minio2/newbucket2
 # Restart minio1 instance
 minio server --config-dir /tmp/minio-ldap --address ":9001" /tmp/minio-ldap-idp1/{1...4} >/tmp/minio1_1.log 2>&1 &
 sleep 10
@@ -267,4 +270,10 @@ if [ "${val}" != "val2" ]; then
 exit_1;
 fi
+# Test if bucket created when minio1 is down healed
+diff -q <(./mc ls minio1 | awk '{print $3}') <(./mc ls minio2 | awk '{print $3}') 1>/dev/null
+if [ $? -ne 0 ]; then
+echo "expected bucket to have replicated, exiting..."
+exit_1;
+fi
 
 cleanup

View File

@@ -317,7 +317,8 @@ fi
 kill -9 ${site1_pid}
 # Update tag on minio2/newbucket when minio1 is down
 ./mc tag set minio2/newbucket "key=val2"
-
+# create a new bucket on minio2. This should replicate to minio1 after it comes online.
+./mc mb minio2/newbucket2
 # Restart minio1 instance
 minio server --config-dir /tmp/minio-internal --address ":9001" /tmp/minio-internal-idp1/{1...4} >/tmp/minio1_1.log 2>&1 &
 sleep 15
@@ -328,3 +329,9 @@ if [ "${val}" != "val2" ]; then
 echo "expected bucket tag to have replicated, exiting..."
 exit_1;
 fi
+# Test if bucket created when minio1 is down healed
+diff -q <(./mc ls minio1 | awk '{print $3}') <(./mc ls minio2 | awk '{print $3}') 1>/dev/null
+if [ $? -ne 0 ]; then
+echo "expected bucket to have replicated, exiting..."
+exit_1;
+fi

View File

@@ -242,6 +242,9 @@ fi
 kill -9 ${site1_pid}
 # Update tag on minio2/newbucket when minio1 is down
 ./mc tag set minio2/newbucket "key=val2"
+
+# create a new bucket on minio2. This should replicate to minio1 after it comes online.
+./mc mb minio2/newbucket2
 # Restart minio1 instance
 minio server --address ":9001" --console-address ":10000" /tmp/minio1/{1...4} >/tmp/minio1_1.log 2>&1 &
 sleep 10
@@ -251,3 +254,10 @@ if [ "${val}" != "val2" ]; then
 echo "expected bucket tag to have replicated, exiting..."
 exit_1;
 fi
+
+# Test if bucket created when minio1 is down healed
+diff -q <(./mc ls minio1 | awk '{print $3}') <(./mc ls minio2 | awk '{print $3}') 1>/dev/null
+if [ $? -ne 0 ]; then
+echo "expected bucket to have replicated, exiting..."
+exit_1;
+fi