mirror of https://github.com/minio/minio.git

Replicate Expiry ILM configs while site replication (#18130)

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>

parent 41091d9472
commit 58306a9d34
@@ -52,7 +52,8 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
        return
    }

    status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites)
    opts := getSRAddOptions(r)
    status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites, opts)
    if err != nil {
        logger.LogIf(ctx, err)
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)

@@ -68,6 +69,12 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
    writeSuccessResponseJSON(w, body)
}

func getSRAddOptions(r *http.Request) (opts madmin.SRAddOptions) {
    q := r.Form
    opts.ReplicateILMExpiry = q.Get("replicateILMExpiry") == "true"
    return
}

// SRPeerJoin - PUT /minio/admin/v3/site-replication/join
//
// used internally to tell current cluster to enable SR with

@@ -192,7 +199,7 @@ func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http.
    }
}

// SRPeerReplicateBucketItem - PUT /minio/admin/v3/site-replication/bucket-meta
// SRPeerReplicateBucketItem - PUT /minio/admin/v3/site-replication/peer/bucket-meta
func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

@@ -253,6 +260,8 @@ func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *ht
        err = globalSiteReplicationSys.PeerBucketObjectLockConfigHandler(ctx, item.Bucket, item.ObjectLockConfig, item.UpdatedAt)
    case madmin.SRBucketMetaTypeSSEConfig:
        err = globalSiteReplicationSys.PeerBucketSSEConfigHandler(ctx, item.Bucket, item.SSEConfig, item.UpdatedAt)
    case madmin.SRBucketMetaLCConfig:
        err = globalSiteReplicationSys.PeerBucketLCConfigHandler(ctx, item.Bucket, item.ExpiryLCConfig, item.UpdatedAt)
    }
    if err != nil {
        logger.LogIf(ctx, err)

@@ -334,6 +343,7 @@ func (a adminAPIHandlers) SiteReplicationStatus(w http.ResponseWriter, r *http.R
        opts.Users = true
        opts.Policies = true
        opts.Groups = true
        opts.ILMExpiryRules = true
    }
    info, err := globalSiteReplicationSys.SiteReplicationStatus(ctx, objectAPI, opts)
    if err != nil {

@@ -383,7 +393,9 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
    status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site)

    opts := getSREditOptions(r)
    status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site, opts)
    if err != nil {
        logger.LogIf(ctx, err)
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)

@@ -398,6 +410,13 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req
    writeSuccessResponseJSON(w, body)
}

func getSREditOptions(r *http.Request) (opts madmin.SREditOptions) {
    q := r.Form
    opts.DisableILMExpiryReplication = q.Get("disableILMExpiryReplication") == "true"
    opts.EnableILMExpiryReplication = q.Get("enableILMExpiryReplication") == "true"
    return
}

// SRPeerEdit - PUT /minio/admin/v3/site-replication/peer/edit
//
// used internally to tell current cluster to update endpoint for peer

@@ -422,12 +441,37 @@ func (a adminAPIHandlers) SRPeerEdit(w http.ResponseWriter, r *http.Request) {
    }
}

// SRStateEdit - PUT /minio/admin/v3/site-replication/state/edit
//
// used internally to tell current cluster to update site replication state
func (a adminAPIHandlers) SRStateEdit(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

    objectAPI, _ := validateAdminReq(ctx, w, r, policy.SiteReplicationOperationAction)
    if objectAPI == nil {
        return
    }

    var state madmin.SRStateEditReq
    if err := parseJSONBody(ctx, r.Body, &state, ""); err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
    if err := globalSiteReplicationSys.PeerStateEditReq(ctx, state); err != nil {
        logger.LogIf(ctx, err)
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
}

func getSRStatusOptions(r *http.Request) (opts madmin.SRStatusOptions) {
    q := r.Form
    opts.Buckets = q.Get("buckets") == "true"
    opts.Policies = q.Get("policies") == "true"
    opts.Groups = q.Get("groups") == "true"
    opts.Users = q.Get("users") == "true"
    opts.ILMExpiryRules = q.Get("ilm-expiry-rules") == "true"
    opts.PeerState = q.Get("peer-state") == "true"
    opts.Entity = madmin.GetSREntityType(q.Get("entity"))
    opts.EntityValue = q.Get("entityvalue")
    opts.ShowDeleted = q.Get("showDeleted") == "true"
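The new site-replication options arrive as plain query-string flags and are read off r.Form. Below is a minimal, self-contained sketch of that parsing pattern; the standalone type, function name and request path are illustrative stand-ins, not the actual cmd package or madmin API.

package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
)

// srAddOptions is a hypothetical stand-in for madmin.SRAddOptions, holding only
// the flag introduced by this commit.
type srAddOptions struct {
    ReplicateILMExpiry bool
}

// parseSRAddOptions mirrors the query-string parsing done by getSRAddOptions:
// the flag counts as set only when the value is exactly "true".
func parseSRAddOptions(r *http.Request) (opts srAddOptions) {
    q := r.Form
    opts.ReplicateILMExpiry = q.Get("replicateILMExpiry") == "true"
    return
}

func main() {
    // The path here is illustrative; only the query parameter matters.
    req := httptest.NewRequest(http.MethodPut,
        "/minio/admin/v3/site-replication/add?replicateILMExpiry=true", nil)
    _ = req.ParseForm() // populate req.Form from the URL query
    fmt.Println(parseSRAddOptions(req).ReplicateILMExpiry) // true
}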
@@ -376,6 +376,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
    adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/edit").HandlerFunc(adminMiddleware(adminAPI.SRPeerEdit))
    adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/peer/remove").HandlerFunc(adminMiddleware(adminAPI.SRPeerRemove))
    adminRouter.Methods(http.MethodPut).Path(adminVersion+"/site-replication/resync/op").HandlerFunc(adminMiddleware(adminAPI.SiteReplicationResyncOp)).Queries("operation", "{operation:.*}")
    adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/state/edit").HandlerFunc(adminMiddleware(adminAPI.SRStateEdit))

    if globalIsDistErasure {
        // Top locks
@@ -22,6 +22,7 @@ import (
    "io"
    "net/http"
    "strconv"
    "time"

    "github.com/minio/minio/internal/bucket/lifecycle"
    xhttp "github.com/minio/minio/internal/http"

@@ -86,6 +87,41 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
        return
    }

    // Create a map of updated set of rules in request
    updatedRules := make(map[string]lifecycle.Rule, len(bucketLifecycle.Rules))
    for _, rule := range bucketLifecycle.Rules {
        updatedRules[rule.ID] = rule
    }

    // Get list of rules for the bucket from disk
    meta, err := globalBucketMetadataSys.GetConfigFromDisk(ctx, bucket)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
    expiryRuleRemoved := false
    if len(meta.LifecycleConfigXML) > 0 {
        var lcCfg lifecycle.Lifecycle
        if err := xml.Unmarshal(meta.LifecycleConfigXML, &lcCfg); err != nil {
            writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
            return
        }
        for _, rl := range lcCfg.Rules {
            updRule, ok := updatedRules[rl.ID]
            // original rule had expiry that is no longer in the new config,
            // or rule is present but missing expiration flags
            if (!rl.Expiration.IsNull() || !rl.NoncurrentVersionExpiration.IsNull()) &&
                (!ok || (updRule.Expiration.IsNull() && updRule.NoncurrentVersionExpiration.IsNull())) {
                expiryRuleRemoved = true
            }
        }
    }

    if bucketLifecycle.HasExpiry() || expiryRuleRemoved {
        currtime := time.Now()
        bucketLifecycle.ExpiryUpdatedAt = &currtime
    }

    configData, err := xml.Marshal(bucketLifecycle)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)

@@ -142,6 +178,8 @@ func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
    // explicitly set ExpiryUpdatedAt to nil as it's meant for internal consumption only
    config.ExpiryUpdatedAt = nil

    configData, err := xml.Marshal(config)
    if err != nil {
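PutBucketLifecycleHandler now diffs the incoming rules against the configuration on disk to decide whether an expiry rule was removed, which in turn decides whether ExpiryUpdatedAt gets stamped. A standalone sketch of that comparison, using a hypothetical trimmed-down rule type in place of lifecycle.Rule:

package main

import "fmt"

// rule is a hypothetical, minimal stand-in for lifecycle.Rule: only the bits
// needed to show the "was an expiry rule removed?" check from the handler.
type rule struct {
    ID        string
    HasExpiry bool // stands in for !Expiration.IsNull() || !NoncurrentVersionExpiration.IsNull()
}

// expiryRuleRemoved reports whether any rule that carried expiry in the old
// config is either gone from, or stripped of expiry in, the new config.
func expiryRuleRemoved(existing, updated []rule) bool {
    updatedByID := make(map[string]rule, len(updated))
    for _, r := range updated {
        updatedByID[r.ID] = r
    }
    for _, r := range existing {
        upd, ok := updatedByID[r.ID]
        if r.HasExpiry && (!ok || !upd.HasExpiry) {
            return true
        }
    }
    return false
}

func main() {
    onDisk := []rule{{ID: "a", HasExpiry: true}, {ID: "b", HasExpiry: false}}
    incoming := []rule{{ID: "b", HasExpiry: false}} // rule "a", which had expiry, was dropped
    fmt.Println(expiryRuleRemoved(onDisk, incoming)) // true
}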
@@ -19,6 +19,7 @@ package cmd

import (
    "context"
    "encoding/xml"
    "errors"
    "fmt"
    "sync"

@@ -177,6 +178,40 @@ func (sys *BucketMetadataSys) save(ctx context.Context, meta BucketMetadata) err
// Delete deletes the bucket metadata for the specified bucket.
// must be used by all callers instead of using Update() with nil configData.
func (sys *BucketMetadataSys) Delete(ctx context.Context, bucket string, configFile string) (updatedAt time.Time, err error) {
    if configFile == bucketLifecycleConfig {
        // Get bucket config from current site
        meta, e := globalBucketMetadataSys.GetConfigFromDisk(ctx, bucket)
        if e != nil && !errors.Is(e, errConfigNotFound) {
            return updatedAt, e
        }
        var expiryRuleRemoved bool
        if len(meta.LifecycleConfigXML) > 0 {
            var lcCfg lifecycle.Lifecycle
            if err := xml.Unmarshal(meta.LifecycleConfigXML, &lcCfg); err != nil {
                return updatedAt, err
            }
            // if even a single expiry rule is found, set the flag
            for _, rl := range lcCfg.Rules {
                if !rl.Expiration.IsNull() || !rl.NoncurrentVersionExpiration.IsNull() {
                    expiryRuleRemoved = true
                    break
                }
            }
        }

        // Form empty ILM details with `ExpiryUpdatedAt` field and save
        var cfgData []byte
        if expiryRuleRemoved {
            var lcCfg lifecycle.Lifecycle
            currtime := time.Now()
            lcCfg.ExpiryUpdatedAt = &currtime
            cfgData, err = xml.Marshal(lcCfg)
            if err != nil {
                return updatedAt, err
            }
        }
        return sys.updateAndParse(ctx, bucket, configFile, cfgData, false)
    }
    return sys.updateAndParse(ctx, bucket, configFile, nil, false)
}

@@ -267,7 +302,10 @@ func (sys *BucketMetadataSys) GetLifecycleConfig(bucket string) (*lifecycle.Life
        }
        return nil, time.Time{}, err
    }
    if meta.lifecycleConfig == nil {
    // there could be just the `ExpiryUpdatedAt` field populated as part
    // of the last delete-all. Treat this situation as no lifecycle
    // configuration being available
    if meta.lifecycleConfig == nil || len(meta.lifecycleConfig.Rules) == 0 {
        return nil, time.Time{}, BucketLifecycleNotFound{Bucket: bucket}
    }
    return meta.lifecycleConfig, meta.LifecycleConfigUpdatedAt, nil
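When the last expiry-bearing lifecycle rule is deleted, Delete persists an otherwise empty configuration that carries only the ExpiryUpdatedAt marker, so peers can compare timestamps and converge on the deletion. A hedged sketch of what that persisted document looks like, using a hypothetical mirror struct rather than the real lifecycle.Lifecycle:

package main

import (
    "encoding/xml"
    "fmt"
    "time"
)

// emptyLifecycle is a hypothetical stand-in for lifecycle.Lifecycle, kept only
// to show the rule-less document written when all expiry rules are removed.
type emptyLifecycle struct {
    XMLName         xml.Name   `xml:"LifecycleConfiguration"`
    Rules           []string   `xml:"Rule"`
    ExpiryUpdatedAt *time.Time `xml:"ExpiryUpdatedAt,omitempty"`
}

func main() {
    now := time.Now().UTC()
    cfg := emptyLifecycle{ExpiryUpdatedAt: &now}

    out, err := xml.Marshal(cfg)
    if err != nil {
        panic(err)
    }
    // Prints something like:
    // <LifecycleConfiguration><ExpiryUpdatedAt>2023-11-09T10:00:00Z</ExpiryUpdatedAt></LifecycleConfiguration>
    fmt.Println(string(out))
}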
File diff suppressed because it is too large
@@ -0,0 +1,272 @@
#!/usr/bin/env bash

set -x

trap 'catch $LINENO' ERR

# shellcheck disable=SC2120
catch() {
	if [ $# -ne 0 ]; then
		echo "error on line $1"
		for site in sitea siteb sitec sited; do
			echo "$site server logs ========="
			cat "/tmp/${site}_1.log"
			echo "==========================="
			cat "/tmp/${site}_2.log"
		done
	fi

	echo "Cleaning up instances of MinIO"
	pkill minio
	pkill -9 minio
	rm -rf /tmp/multisitea
	rm -rf /tmp/multisiteb
	rm -rf /tmp/multisitec
	rm -rf /tmp/multisited
	rm -rf /tmp/data
}

catch

set -e
export MINIO_CI_CD=1
export MINIO_BROWSER=off
export MINIO_ROOT_USER="minio"
export MINIO_ROOT_PASSWORD="minio123"
export MINIO_KMS_AUTO_ENCRYPTION=off
export MINIO_PROMETHEUS_AUTH_TYPE=public
export MINIO_KMS_SECRET_KEY=my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw=
unset MINIO_KMS_KES_CERT_FILE
unset MINIO_KMS_KES_KEY_FILE
unset MINIO_KMS_KES_ENDPOINT
unset MINIO_KMS_KES_KEY_NAME

if [ ! -f ./mc ]; then
	wget --quiet -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
		chmod +x mc
fi

minio server --address 127.0.0.1:9001 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
	"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_1.log 2>&1 &
minio server --address 127.0.0.1:9002 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
	"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_2.log 2>&1 &

minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
	"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_1.log 2>&1 &
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
	"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &

minio server --address 127.0.0.1:9005 "http://127.0.0.1:9005/tmp/multisitec/data/disterasure/xl{1...4}" \
	"http://127.0.0.1:9006/tmp/multisitec/data/disterasure/xl{5...8}" >/tmp/sitec_1.log 2>&1 &
minio server --address 127.0.0.1:9006 "http://127.0.0.1:9005/tmp/multisitec/data/disterasure/xl{1...4}" \
	"http://127.0.0.1:9006/tmp/multisitec/data/disterasure/xl{5...8}" >/tmp/sitec_2.log 2>&1 &

minio server --address 127.0.0.1:9007 "http://127.0.0.1:9007/tmp/multisited/data/disterasure/xl{1...4}" \
	"http://127.0.0.1:9008/tmp/multisited/data/disterasure/xl{5...8}" >/tmp/sited_1.log 2>&1 &
minio server --address 127.0.0.1:9008 "http://127.0.0.1:9007/tmp/multisited/data/disterasure/xl{1...4}" \
	"http://127.0.0.1:9008/tmp/multisited/data/disterasure/xl{5...8}" >/tmp/sited_2.log 2>&1 &

# Wait to make sure all MinIO instances are up
sleep 20s

export MC_HOST_sitea=http://minio:minio123@127.0.0.1:9001
export MC_HOST_siteb=http://minio:minio123@127.0.0.1:9004
export MC_HOST_sitec=http://minio:minio123@127.0.0.1:9006
export MC_HOST_sited=http://minio:minio123@127.0.0.1:9008

./mc mb sitea/bucket
./mc mb sitec/bucket

## Setup site replication
./mc admin replicate add sitea siteb --replicate-ilm-expiry

## Add warm tier
./mc ilm tier add minio sitea WARM-TIER --endpoint http://localhost:9006 --access-key minio --secret-key minio123 --bucket bucket

## Add ILM rules
./mc ilm add sitea/bucket --transition-days 0 --transition-tier WARM-TIER --transition-days 0 --noncurrent-expire-days 2 --expire-days 3 --prefix "myprefix" --tags "tag1=val1&tag2=val2"
./mc ilm rule list sitea/bucket

## Check ilm expiry flag
./mc admin replicate info sitea --json
flag1=$(./mc admin replicate info sitea --json | jq '.sites[0]."replicate-ilm-expiry"')
flag2=$(./mc admin replicate info sitea --json | jq '.sites[1]."replicate-ilm-expiry"')
if [ "$flag1" != "true" ]; then
	echo "BUG: Expected ILM expiry replication not set for 'sitea'"
	exit 1
fi
if [ "$flag2" != "true" ]; then
	echo "BUG: Expected ILM expiry replication not set for 'siteb'"
	exit 1
fi

## Check if ILM expiry rules replicated
sleep 20
./mc ilm rule list siteb/bucket
count=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules | length')
if [ $count -ne 1 ]; then
	echo "BUG: ILM expiry rules not replicated to 'siteb'"
	exit 1
fi

## Check replication of rules prefix and tags
prefix=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[0].Filter.And.Prefix' | sed 's/"//g')
tagName1=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[0].Filter.And.Tags[0].Key' | sed 's/"//g')
tagVal1=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[0].Filter.And.Tags[0].Value' | sed 's/"//g')
tagName2=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[0].Filter.And.Tags[1].Key' | sed 's/"//g')
tagVal2=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[0].Filter.And.Tags[1].Value' | sed 's/"//g')
if [ "${prefix}" != "myprefix" ]; then
	echo "BUG: ILM expiry rules prefix not replicated to 'siteb'"
	exit 1
fi
if [ "${tagName1}" != "tag1" ] || [ "${tagVal1}" != "val1" ] || [ "${tagName2}" != "tag2" ] || [ "${tagVal2}" != "val2" ]; then
	echo "BUG: ILM expiry rules tags not replicated to 'siteb'"
	exit 1
fi

## Check edit of ILM expiry rule and its replication
id=$(./mc ilm rule list sitea/bucket --json | jq '.config.Rules[] | select(.Expiration.Days==3) | .ID' | sed 's/"//g')
./mc ilm edit --id "${id}" --expire-days "100" sitea/bucket
sleep 30
count1=$(./mc ilm rule list sitea/bucket --json | jq '.config.Rules[0].Expiration.Days')
count2=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[0].Expiration.Days')
if [ $count1 -ne 100 ]; then
	echo "BUG: Expiration days not changed on 'sitea'"
	exit 1
fi
if [ $count2 -ne 100 ]; then
	echo "BUG: Modified ILM expiry rule not replicated to 'siteb'"
	exit 1
fi

## Check disabling of ILM expiry rules replication
./mc admin replicate update sitea --disable-ilm-expiry-replication
flag=$(./mc admin replicate info sitea --json | jq '.sites[] | select (.name=="sitea") | ."replicate-ilm-expiry"')
if [ "$flag" != "false" ]; then
	echo "BUG: ILM expiry replication not disabled for 'sitea'"
	exit 1
fi
flag=$(./mc admin replicate info siteb --json | jq '.sites[] | select (.name=="sitea") | ."replicate-ilm-expiry"')
if [ "$flag" != "false" ]; then
	echo "BUG: ILM expiry replication not disabled for 'siteb'"
	exit 1
fi

## Perform individual updates of rules to sites
./mc ilm edit --id "${id}" --expire-days "999" sitea/bucket
sleep 1
./mc ilm edit --id "${id}" --expire-days "888" siteb/bucket # when ilm expiry re-enabled, this should win

## Check re-enabling of ILM expiry rules replication
./mc admin replicate update sitea --enable-ilm-expiry-replication
flag=$(./mc admin replicate info sitea --json | jq '.sites[] | select (.name=="sitea") | ."replicate-ilm-expiry"')
if [ "$flag" != "true" ]; then
	echo "BUG: ILM expiry replication not enabled for 'sitea'"
	exit 1
fi
flag=$(./mc admin replicate info siteb --json | jq '.sites[] | select (.name=="sitea") | ."replicate-ilm-expiry"')
if [ "$flag" != "true" ]; then
	echo "BUG: ILM expiry replication not enabled for 'siteb'"
	exit 1
fi

## Check if latest updated rules get replicated to all sites post re-enable of ILM expiry rules replication
sleep 30
count1=$(./mc ilm rule list sitea/bucket --json | jq '.config.Rules[0].Expiration.Days')
count2=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[0].Expiration.Days')
if [ $count1 -ne 888 ]; then
	echo "BUG: Latest expiration days not updated on 'sitea'"
	exit 1
fi
if [ $count2 -ne 888 ]; then
	echo "BUG: Latest expiration days not updated on 'siteb'"
	exit 1
fi

## Check replication of edit of prefix, tags and status of ILM Expiry Rules
./mc ilm rule edit --id "${id}" --prefix "newprefix" --tags "ntag1=nval1&ntag2=nval2" --disable sitea/bucket
sleep 30
nprefix=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[0].Filter.And.Prefix' | sed 's/"//g')
ntagName1=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[0].Filter.And.Tags[0].Key' | sed 's/"//g')
ntagVal1=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[0].Filter.And.Tags[0].Value' | sed 's/"//g')
ntagName2=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[0].Filter.And.Tags[1].Key' | sed 's/"//g')
ntagVal2=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[0].Filter.And.Tags[1].Value' | sed 's/"//g')
st=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[0].Status' | sed 's/"//g')
if [ "${nprefix}" != "newprefix" ]; then
	echo "BUG: ILM expiry rules prefix not replicated to 'siteb'"
	exit 1
fi
if [ "${ntagName1}" != "ntag1" ] || [ "${ntagVal1}" != "nval1" ] || [ "${ntagName2}" != "ntag2" ] || [ "${ntagVal2}" != "nval2" ]; then
	echo "BUG: ILM expiry rules tags not replicated to 'siteb'"
	exit 1
fi
if [ "${st}" != "Disabled" ]; then
	echo "BUG: ILM expiry rules status not replicated to 'siteb'"
	exit 1
fi

## Check replication of deleted ILM expiry rules
./mc ilm rule remove --id "${id}" sitea/bucket
sleep 30
# should error as rule doesn't exist
error=$(./mc ilm rule list siteb/bucket --json | jq '.error.cause.message' | sed 's/"//g')
if [ "$error" != "The lifecycle configuration does not exist" ]; then
	echo "BUG: Removed ILM expiry rule not replicated to 'siteb'"
	exit 1
fi

## Check addition of new replication site to existing site replication setup
# Add rules again as previous tests removed all
./mc ilm add sitea/bucket --transition-days 0 --transition-tier WARM-TIER --transition-days 0 --noncurrent-expire-days 2 --expire-days 3 --prefix "myprefix" --tags "tag1=val1&tag2=val2"
./mc admin replicate add sitea siteb sited
sleep 30
# Check site replication info and status for new site
sitesCount=$(./mc admin replicate info sited --json | jq '.sites | length')
if [ ${sitesCount} -ne 3 ]; then
	echo "BUG: New site 'sited' not appearing in site replication info"
	exit 1
fi
flag3=$(./mc admin replicate info sited --json | jq '.sites[2]."replicate-ilm-expiry"')
if [ "${flag3}" != "true" ]; then
	echo "BUG: ILM expiry replication not enabled for 'sited'"
	exit 1
fi
rulesCount=$(./mc ilm rule list sited/bucket --json | jq '.config.Rules | length')
if [ ${rulesCount} -ne 1 ]; then
	echo "BUG: ILM expiry rules not replicated to 'sited'"
	exit 1
fi
prefix=$(./mc ilm rule list sited/bucket --json | jq '.config.Rules[0].Filter.And.Prefix' | sed 's/"//g')
tagName1=$(./mc ilm rule list sited/bucket --json | jq '.config.Rules[0].Filter.And.Tags[0].Key' | sed 's/"//g')
tagVal1=$(./mc ilm rule list sited/bucket --json | jq '.config.Rules[0].Filter.And.Tags[0].Value' | sed 's/"//g')
tagName2=$(./mc ilm rule list sited/bucket --json | jq '.config.Rules[0].Filter.And.Tags[1].Key' | sed 's/"//g')
tagVal2=$(./mc ilm rule list sited/bucket --json | jq '.config.Rules[0].Filter.And.Tags[1].Value' | sed 's/"//g')
if [ "${prefix}" != "myprefix" ]; then
	echo "BUG: ILM expiry rules prefix not replicated to 'sited'"
	exit 1
fi
if [ "${tagName1}" != "tag1" ] || [ "${tagVal1}" != "val1" ] || [ "${tagName2}" != "tag2" ] || [ "${tagVal2}" != "val2" ]; then
	echo "BUG: ILM expiry rules tags not replicated to 'sited'"
	exit 1
fi

## Check replication of deleted ILM expiry rules when target has transition part as well
## Only the expiry part of rules should get removed as part of replication of removal from
## other site
id=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[] | select(.Expiration.Days==3) | .ID' | sed 's/"//g')
# Remove rule from siteb
./mc ilm rule remove --id "${id}" siteb/bucket
sleep 30 # allow to replicate
# sitea should still contain the transition portion of rule
transitionRuleDays=$(./mc ilm rule list sitea/bucket --json | jq '.config.Rules[0].Transition.Days')
expirationRuleDet=$(./mc ilm rule list sitea/bucket --json | jq '.config.Rules[0].Expiration')
if [ ${transitionRuleDays} -ne 0 ]; then
	echo "BUG: Transition rules not retained as part of replication of deleted ILM expiry rules on 'sitea'"
	exit 1
fi
if [ "${expirationRuleDet}" != "null" ]; then
	echo "BUG: removed ILM expiry rule not replicated to 'sitea'"
	exit 1
fi

catch
go.mod
@@ -247,3 +247,9 @@ require (
	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
)

replace github.com/minio/madmin-go/v3 v3.0.29 => github.com/shtripat/madmin-go/v3 v3.0.0-20231106151808-5082883cc33c

replace github.com/minio/mc v0.0.0-20231030184332-9f2fb2b6a9f8 => github.com/shtripat/mc v0.0.0-20231109083216-6c94adcab7f8

replace github.com/minio/console v0.41.0 => github.com/shtripat/minio-console v0.0.0-20231107130354-bf0c7604ae99
go.sum
@@ -470,8 +470,6 @@ github.com/minio/cli v1.24.2 h1:J+fCUh9mhPLjN3Lj/YhklXvxj8mnyE/D6FpFduXJ2jg=
github.com/minio/cli v1.24.2/go.mod h1:bYxnK0uS629N3Bq+AOZZ+6lwF77Sodk4+UL9vNuXhOY=
github.com/minio/colorjson v1.0.6 h1:m7TUvpvt0u7FBmVIEQNIa0T4NBQlxrcMBp4wJKsg2Ik=
github.com/minio/colorjson v1.0.6/go.mod h1:LUXwS5ZGNb6Eh9f+t+3uJiowD3XsIWtsvTriUBeqgYs=
github.com/minio/console v0.41.0 h1:NjvBij5Hg4GLkO/iAUfZ4imATA/rKNtgVhnn3sEuKDo=
github.com/minio/console v0.41.0/go.mod h1:LTDngEa3Z/s9+2oUb3eBtaVsS/vQFuWTH9d8Z2Pe1mo=
github.com/minio/csvparser v1.0.0 h1:xJEHcYK8ZAjeW4hNV9Zu30u+/2o4UyPnYgyjWp8b7ZU=
github.com/minio/csvparser v1.0.0/go.mod h1:lKXskSLzPgC5WQyzP7maKH7Sl1cqvANXo9YCto8zbtM=
github.com/minio/dnscache v0.1.1 h1:AMYLqomzskpORiUA1ciN9k7bZT1oB3YZN4cEIi88W5o=

@@ -484,10 +482,6 @@ github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA
github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/minio/kes-go v0.2.0 h1:HA33arq9s3MErbsj3PAXFVfFo4U4yw7lTKQ5kWFrpCA=
github.com/minio/kes-go v0.2.0/go.mod h1:VorHLaIYis9/MxAHAtXN4d8PUMNKhIxTIlvFt0hBOEo=
github.com/minio/madmin-go/v3 v3.0.29 h1:3bNLArtxIFud5wyb5/DnF5DGLBvcSJyzCA44EclX1Ow=
github.com/minio/madmin-go/v3 v3.0.29/go.mod h1:4QN2NftLSV7MdlT50dkrenOMmNVHluxTvlqJou3hte8=
github.com/minio/mc v0.0.0-20231030184332-9f2fb2b6a9f8 h1:3WUMQABG8FytpYHRtLHjrnztcUB09hlIrh7rQI9H+tY=
github.com/minio/mc v0.0.0-20231030184332-9f2fb2b6a9f8/go.mod h1:SoPU55ntH5d6IEq6jRBn6e/7SpwI/eSNdBDWmH7nwHk=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v6 v6.0.46/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg=

@@ -661,6 +655,12 @@ github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFt
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
github.com/shtripat/madmin-go/v3 v3.0.0-20231106151808-5082883cc33c h1:QvvwLkuqdj5muc3jgHvZzeSEYG+xZjWc5OuZgxLW53I=
github.com/shtripat/madmin-go/v3 v3.0.0-20231106151808-5082883cc33c/go.mod h1:4QN2NftLSV7MdlT50dkrenOMmNVHluxTvlqJou3hte8=
github.com/shtripat/mc v0.0.0-20231109083216-6c94adcab7f8 h1:K9T44eOsoeruwfBAACXi25YcLu3mN+2MXGdA753IOTE=
github.com/shtripat/mc v0.0.0-20231109083216-6c94adcab7f8/go.mod h1:F6gQ1/r7HLWnB8zy2kuck8voYNcBtFu6QfN4SS9uZ6w=
github.com/shtripat/minio-console v0.0.0-20231107130354-bf0c7604ae99 h1:4iAjs0cyV9XpgEGzsQu9y70h4KsCkvHSif2YeCe35z4=
github.com/shtripat/minio-console v0.0.0-20231107130354-bf0c7604ae99/go.mod h1:Dw108EQHoZeERWn/LoZYZCds8/GKoVzOucqCit0fvyY=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
@@ -97,8 +97,9 @@ func (a Action) Delete() bool {

// Lifecycle - Configuration for bucket lifecycle.
type Lifecycle struct {
	XMLName xml.Name `xml:"LifecycleConfiguration"`
	Rules   []Rule   `xml:"Rule"`
	XMLName         xml.Name   `xml:"LifecycleConfiguration"`
	Rules           []Rule     `xml:"Rule"`
	ExpiryUpdatedAt *time.Time `xml:"ExpiryUpdatedAt,omitempty"`
}

// HasTransition returns 'true' if lifecycle document has Transition enabled.

@@ -111,6 +112,16 @@ func (lc Lifecycle) HasTransition() bool {
	return false
}

// HasExpiry returns 'true' if lifecycle document has Expiry enabled.
func (lc Lifecycle) HasExpiry() bool {
	for _, rule := range lc.Rules {
		if !rule.Expiration.IsNull() || !rule.NoncurrentVersionExpiration.IsNull() {
			return true
		}
	}
	return false
}

// UnmarshalXML - decodes XML data.
func (lc *Lifecycle) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
	switch start.Name.Local {

@@ -137,6 +148,12 @@ func (lc *Lifecycle) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err e
				return err
			}
			lc.Rules = append(lc.Rules, r)
		case "ExpiryUpdatedAt":
			var t time.Time
			if err = d.DecodeElement(&t, &start); err != nil {
				return err
			}
			lc.ExpiryUpdatedAt = &t
		default:
			return xml.UnmarshalError(fmt.Sprintf("expected element type <Rule> but have <%s>", se.Name.Local))
		}
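The struct tag makes ExpiryUpdatedAt optional on the wire, and the new UnmarshalXML case accepts it alongside <Rule> elements. A small round-trip sketch with a hypothetical mirror type (the real Lifecycle needs the explicit case above because it has a hand-written UnmarshalXML):

package main

import (
    "encoding/xml"
    "fmt"
    "time"
)

// miniLifecycle is a hypothetical mirror of lifecycle.Lifecycle that relies on
// struct tags only; it exists just to show how the new element round-trips.
type miniLifecycle struct {
    XMLName         xml.Name   `xml:"LifecycleConfiguration"`
    Rules           []string   `xml:"Rule"`
    ExpiryUpdatedAt *time.Time `xml:"ExpiryUpdatedAt,omitempty"`
}

func main() {
    doc := `<LifecycleConfiguration>
  <Rule>expire-logs</Rule>
  <ExpiryUpdatedAt>2023-11-09T10:00:00Z</ExpiryUpdatedAt>
</LifecycleConfiguration>`

    var lc miniLifecycle
    if err := xml.Unmarshal([]byte(doc), &lc); err != nil {
        panic(err)
    }
    fmt.Println(lc.ExpiryUpdatedAt.UTC()) // 2023-11-09 10:00:00 +0000 UTC

    // Round-trip: with the pointer set to nil, omitempty drops the element again.
    lc.ExpiryUpdatedAt = nil
    out, _ := xml.Marshal(lc)
    fmt.Println(string(out)) // no <ExpiryUpdatedAt> element in the output
}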
@@ -163,3 +163,16 @@ func (r Rule) Validate() error {
	}
	return nil
}

// CloneNonTransition - returns a clone of the rule containing only its non-transition fields
func (r Rule) CloneNonTransition() Rule {
	return Rule{
		XMLName:                     r.XMLName,
		ID:                          r.ID,
		Status:                      r.Status,
		Filter:                      r.Filter,
		Prefix:                      r.Prefix,
		Expiration:                  r.Expiration,
		NoncurrentVersionExpiration: r.NoncurrentVersionExpiration,
	}
}
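CloneNonTransition copies a rule's identity, filter and expiry fields while leaving the transition fields zeroed, presumably so transition details, which depend on site-local tiers, stay out of what is replicated. A simplified sketch of that idea with hypothetical types (not the actual lifecycle.Rule):

package main

import "fmt"

// ilmRule is a hypothetical, simplified stand-in for lifecycle.Rule with just
// enough fields to show what a non-transition clone keeps and drops.
type ilmRule struct {
    ID             string
    Status         string
    TransitionDays int // transition detail: tied to a site-local tier
    ExpireDays     int // expiry detail: the part that travels between sites
}

// cloneNonTransition mirrors Rule.CloneNonTransition: copy identity, status and
// expiry fields, leave transition fields at their zero values.
func cloneNonTransition(r ilmRule) ilmRule {
    return ilmRule{
        ID:         r.ID,
        Status:     r.Status,
        ExpireDays: r.ExpireDays,
    }
}

func main() {
    local := ilmRule{ID: "rule-1", Status: "Enabled", TransitionDays: 1, ExpireDays: 3}
    replicated := cloneNonTransition(local)
    fmt.Printf("%+v\n", replicated) // {ID:rule-1 Status:Enabled TransitionDays:0 ExpireDays:3}
}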