Mirror of https://github.com/minio/minio.git (synced 2025-04-22 03:24:38 -04:00)
fix: site replication healing of missing buckets (#15298)
Fixes a regression from #15186.

- Adding tests to cover healing of buckets.
- Also dereference quota in SiteReplicationStatus only when non-nil.
Parent: 6c265534a4
Commit: 7e32a17742
@@ -2814,7 +2814,7 @@ func (c *SiteReplicationSys) siteReplicationStatus(ctx context.Context, objAPI O
 			if bi, ok := sris[dIdx].Buckets[s.Bucket]; ok {
 				hasBucket = !bi.CreatedAt.Equal(timeSentinel)
 			}
-			quotaCfgSet := hasBucket && *quotaCfgs[i] != madmin.BucketQuota{}
+			quotaCfgSet := hasBucket && quotaCfgs[i] != nil && *quotaCfgs[i] != madmin.BucketQuota{}
 			ss := madmin.SRBucketStatsSummary{
 				DeploymentID: s.DeploymentID,
 				HasBucket:    hasBucket,
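Note on the one-line change above: the old expression dereferenced quotaCfgs[i] unconditionally, which panics when no quota config exists for the bucket. A minimal self-contained sketch (stand-in struct and made-up data, not MinIO's actual types) shows how Go's short-circuiting && makes the added guard work — once `quotaCfgs[i] != nil` is false, the dereference on the right is never evaluated:

package main

import "fmt"

// BucketQuota is a stand-in for madmin.BucketQuota in this sketch.
type BucketQuota struct {
	Quota uint64
	Type  string
}

func main() {
	hasBucket := true
	// Index 0 simulates a bucket whose quota config was never set,
	// which is exactly the case that used to panic.
	quotaCfgs := []*BucketQuota{nil, {Quota: 1024, Type: "hard"}}

	for i := range quotaCfgs {
		// Short-circuit: when quotaCfgs[i] is nil, the dereference
		// is skipped instead of panicking.
		quotaCfgSet := hasBucket && quotaCfgs[i] != nil && *quotaCfgs[i] != BucketQuota{}
		fmt.Printf("entry %d: quota configured = %v\n", i, quotaCfgSet)
	}
}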
@@ -3556,20 +3556,13 @@ type srStatusInfo struct {
 }
 
 func (c *SiteReplicationSys) healBuckets(ctx context.Context, objAPI ObjectLayer) error {
-	buckets, err := c.listBuckets(ctx)
+	info, err := c.siteReplicationStatus(ctx, objAPI, madmin.SRStatusOptions{
+		Buckets: true,
+	})
 	if err != nil {
 		return err
 	}
-	for _, bi := range buckets {
-		bucket := bi.Name
-		info, err := c.siteReplicationStatus(ctx, objAPI, madmin.SRStatusOptions{
-			Entity:      madmin.SRBucketEntity,
-			EntityValue: bucket,
-		})
-		if err != nil {
-			logger.LogIf(ctx, err)
-			continue
-		}
+	for bucket := range info.BucketStats {
 		c.healCreateMissingBucket(ctx, objAPI, bucket, info)
 		c.healVersioningMetadata(ctx, objAPI, bucket, info)
 		c.healOLockConfigMetadata(ctx, objAPI, bucket, info)
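Why this hunk fixes the healing regression (a hedged reading of the diff): the old loop was seeded from c.listBuckets, i.e. from buckets that already exist locally, so a bucket created on a peer while this site was down never entered the loop and was never created here. The new loop is seeded from the cross-site replication status, whose BucketStats map is keyed by every bucket known to any site. A minimal sketch with simplified, hypothetical types:

package main

import "fmt"

// srBucketStatus is a simplified stand-in for the per-site bucket
// summary; only the field this sketch needs is kept.
type srBucketStatus struct {
	HasBucket bool
}

// healBuckets iterates the union of buckets known to any site, so a
// bucket missing locally is still visited and can be created.
func healBuckets(bucketStats map[string][]srBucketStatus) {
	for bucket, perSite := range bucketStats {
		for _, s := range perSite {
			if !s.HasBucket {
				fmt.Printf("healing: creating missing bucket %q\n", bucket)
				break
			}
		}
	}
}

func main() {
	// "newbucket2" was created while one site was down, so it exists
	// on only one of the two sites -- the scenario the new tests cover.
	healBuckets(map[string][]srBucketStatus{
		"newbucket":  {{HasBucket: true}, {HasBucket: true}},
		"newbucket2": {{HasBucket: false}, {HasBucket: true}},
	})
}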
@@ -257,6 +257,9 @@ fi
 kill -9 ${site1_pid}
 # Update tag on minio2/newbucket when minio1 is down
 ./mc tag set minio2/newbucket "key=val2"
+# create a new bucket on minio2. This should replicate to minio1 after it comes online.
+./mc mb minio2/newbucket2
+
 # Restart minio1 instance
 minio server --config-dir /tmp/minio-ldap --address ":9001" /tmp/minio-ldap-idp1/{1...4} >/tmp/minio1_1.log 2>&1 &
 sleep 10
@@ -267,4 +270,10 @@ if [ "${val}" != "val2" ]; then
 	exit_1;
 fi
 
+# Test if bucket created when minio1 is down healed
+diff -q <(./mc ls minio1 | awk '{print $3}') <(./mc ls minio2 | awk '{print $3}') 1>/dev/null
+if [ $? -ne 0 ]; then
+	echo "expected bucket to have replicated, exiting..."
+	exit_1;
+fi
 cleanup
@@ -317,7 +317,8 @@ fi
 kill -9 ${site1_pid}
 # Update tag on minio2/newbucket when minio1 is down
 ./mc tag set minio2/newbucket "key=val2"
-
+# create a new bucket on minio2. This should replicate to minio1 after it comes online.
+./mc mb minio2/newbucket2
 # Restart minio1 instance
 minio server --config-dir /tmp/minio-internal --address ":9001" /tmp/minio-internal-idp1/{1...4} >/tmp/minio1_1.log 2>&1 &
 sleep 15
@@ -328,3 +329,9 @@ if [ "${val}" != "val2" ]; then
 	echo "expected bucket tag to have replicated, exiting..."
 	exit_1;
 fi
+# Test if bucket created when minio1 is down healed
+diff -q <(./mc ls minio1 | awk '{print $3}') <(./mc ls minio2 | awk '{print $3}') 1>/dev/null
+if [ $? -ne 0 ]; then
+	echo "expected bucket to have replicated, exiting..."
+	exit_1;
+fi
@@ -242,6 +242,9 @@ fi
 kill -9 ${site1_pid}
 # Update tag on minio2/newbucket when minio1 is down
 ./mc tag set minio2/newbucket "key=val2"
+# create a new bucket on minio2. This should replicate to minio1 after it comes online.
+./mc mb minio2/newbucket2
+
 # Restart minio1 instance
 minio server --address ":9001" --console-address ":10000" /tmp/minio1/{1...4} >/tmp/minio1_1.log 2>&1 &
 sleep 10
@@ -251,3 +254,10 @@ if [ "${val}" != "val2" ]; then
 	echo "expected bucket tag to have replicated, exiting..."
 	exit_1;
 fi
+
+# Test if bucket created when minio1 is down healed
+diff -q <(./mc ls minio1 | awk '{print $3}') <(./mc ls minio2 | awk '{print $3}') 1>/dev/null
+if [ $? -ne 0 ]; then
+	echo "expected bucket to have replicated, exiting..."
+	exit_1;
+fi
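Each script's new assertion compares the bucket listings of the two sites after minio1 restarts, via `diff -q` over two process substitutions. As an illustration of what that check asserts (a hypothetical helper, not part of the repo), the same comparison in Go:

package main

import "fmt"

// bucketsMatch reports whether two sites expose the same bucket names
// in the same order, which is what the scripts' diff over `mc ls`
// output asserts after healing.
func bucketsMatch(site1, site2 []string) bool {
	if len(site1) != len(site2) {
		return false
	}
	for i := range site1 {
		if site1[i] != site2[i] {
			return false
		}
	}
	return true
}

func main() {
	site1 := []string{"newbucket", "newbucket2"} // after healing
	site2 := []string{"newbucket", "newbucket2"}
	if !bucketsMatch(site1, site2) {
		fmt.Println("expected bucket to have replicated, exiting...")
		return
	}
	fmt.Println("bucket listings match: healing succeeded")
}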