Mirror of https://github.com/minio/minio.git (synced 2024-12-24 06:05:55 -05:00)
support site replication to replicate IAM users, groups (#14128)
- Site replication did not replicate users and groups when an empty site was added.
- Add site replication for groups and users when they are disabled and enabled.
- Add support for replicating bucket quota config.
This commit is contained in:
parent 0012ca8ca5
commit 9d588319dd
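Before the diffs, a minimal, self-contained sketch of the replication-hook pattern these changes extend, under simplified assumptions: SRBucketMeta, Peer, and BucketMetaHook below are stand-ins with the same shape as the madmin/SiteReplicationSys names in the diff, not MinIO's actual definitions. The admin handler applies a change locally first, then the hook replays the same payload on every peer site; a nil Quota payload signals "delete the quota config on peers".

package main

import (
	"context"
	"encoding/json"
	"fmt"
)

// SRBucketMeta is a simplified stand-in for madmin.SRBucketMeta:
// a type tag plus the raw config payload to replay on each peer.
type SRBucketMeta struct {
	Type   string
	Bucket string
	Quota  json.RawMessage // nil means "delete the quota config on peers"
}

// Peer is a hypothetical stand-in for a remote site's admin client.
type Peer struct{ Name string }

func (p Peer) ReplicateBucketMeta(ctx context.Context, m SRBucketMeta) error {
	// A real peer would apply this via its SRPeerReplicateBucketItem handler.
	fmt.Printf("peer %s: apply %s for bucket %q (delete=%v)\n",
		p.Name, m.Type, m.Bucket, m.Quota == nil)
	return nil
}

// BucketMetaHook fans the change out to all peers, mirroring what the diff
// does after the local PutBucketQuotaConfigHandler update succeeds.
func BucketMetaHook(ctx context.Context, peers []Peer, m SRBucketMeta) error {
	for _, p := range peers {
		if err := p.ReplicateBucketMeta(ctx, m); err != nil {
			return fmt.Errorf("replicating to %s: %w", p.Name, err)
		}
	}
	return nil
}

func main() {
	quota, err := json.Marshal(map[string]interface{}{"quota": 1 << 30, "quotatype": "hard"})
	if err != nil {
		panic(err)
	}
	peers := []Peer{{Name: "minio2"}, {Name: "minio3"}}
	_ = BucketMetaHook(context.Background(), peers, SRBucketMeta{
		Type:   "quota-config",
		Bucket: "photos",
		Quota:  quota,
	})
}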
@@ -65,7 +65,8 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
 		return
 	}
 
-	if _, err = parseBucketQuota(bucket, data); err != nil {
+	quotaConfig, err := parseBucketQuota(bucket, data)
+	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
@@ -75,6 +76,21 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
 		return
 	}
 
+	bucketMeta := madmin.SRBucketMeta{
+		Type:   madmin.SRBucketMetaTypeQuotaConfig,
+		Bucket: bucket,
+		Quota:  data,
+	}
+	if quotaConfig.Quota == 0 {
+		bucketMeta.Quota = nil
+	}
+
+	// Call site replication hook.
+	if err = globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta); err != nil {
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
+		return
+	}
+
 	// Write success response.
 	writeSuccessResponseHeadersOnly(w)
 }
@@ -45,8 +45,7 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
 	}
 
 	var sites []madmin.PeerSite
-	err := parseJSONBody(ctx, r.Body, &sites, cred.SecretKey)
-	if err != nil {
+	if err := parseJSONBody(ctx, r.Body, &sites, cred.SecretKey); err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
@@ -226,6 +225,20 @@ func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *ht
 			err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, bktPolicy)
 		}
 	}
+	case madmin.SRBucketMetaTypeQuotaConfig:
+		if item.Quota == nil {
+			err = globalSiteReplicationSys.PeerBucketQuotaConfigHandler(ctx, item.Bucket, nil)
+		} else {
+			quotaConfig, err := parseBucketQuota(item.Bucket, item.Quota)
+			if err != nil {
+				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+				return
+			}
+			if err = globalSiteReplicationSys.PeerBucketQuotaConfigHandler(ctx, item.Bucket, quotaConfig); err != nil {
+				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
+				return
+			}
+		}
 	case madmin.SRBucketMetaTypeTags:
 		err = globalSiteReplicationSys.PeerBucketTaggingHandler(ctx, item.Bucket, item.Tags)
 	case madmin.SRBucketMetaTypeObjectLockConfig:
@@ -330,6 +330,20 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
+
+	if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
+		Type: madmin.SRIAMItemGroupInfo,
+		GroupInfo: &madmin.SRGroupInfo{
+			UpdateReq: madmin.GroupAddRemove{
+				Group:    group,
+				Status:   madmin.GroupStatus(status),
+				IsRemove: false,
+			},
+		},
+	}); err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
 }
 
 // SetUserStatus - PUT /minio/admin/v3/set-user-status?accessKey=<access_key>&status=[enabled|disabled]
@@ -347,7 +361,7 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
 	accessKey := vars["accessKey"]
 	status := vars["status"]
 
-	// This API is not allowed to lookup accessKey user status
+	// This API is not allowed to lookup master access key user status
 	if accessKey == globalActiveCred.AccessKey {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
 		return
@@ -357,6 +371,20 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
+
+	if err := globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
+		Type: madmin.SRIAMItemIAMUser,
+		IAMUser: &madmin.SRIAMUser{
+			AccessKey:   accessKey,
+			IsDeleteReq: false,
+			UserReq: &madmin.AddOrUpdateUserReq{
+				Status: madmin.AccountStatus(status),
+			},
+		},
+	}); err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
 }
 
 // AddUser - PUT /minio/admin/v3/add-user?accessKey=<access_key>
@@ -214,7 +214,7 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint) (disksInfo []madmin.
 		}
 	}
 	di.Metrics = &madmin.DiskMetrics{
-		APILatencies: make(map[string]string),
+		APILatencies: make(map[string]interface{}),
 		APICalls:     make(map[string]uint64),
 	}
 	for k, v := range info.Metrics.APILatencies {
@@ -1486,7 +1486,8 @@ func (store *IAMStoreSys) SetUserStatus(ctx context.Context, accessKey string, s
 		AccessKey: accessKey,
 		SecretKey: cred.SecretKey,
 		Status: func() string {
-			if status == madmin.AccountEnabled {
+			switch string(status) {
+			case string(madmin.AccountEnabled), string(auth.AccountOn):
 				return auth.AccountOn
 			}
 			return auth.AccountOff
@@ -1555,6 +1556,10 @@ func (store *IAMStoreSys) UpdateServiceAccount(ctx context.Context, accessKey st
 	switch opts.status {
 	// The caller did not ask to update status account, do nothing
 	case "":
+	case string(madmin.AccountEnabled):
+		cr.Status = auth.AccountOn
+	case string(madmin.AccountDisabled):
+		cr.Status = auth.AccountOff
 	// Update account status
 	case auth.AccountOn, auth.AccountOff:
 		cr.Status = opts.status
@@ -1633,7 +1638,8 @@ func (store *IAMStoreSys) AddUser(ctx context.Context, accessKey string, ureq ma
 		AccessKey: accessKey,
 		SecretKey: ureq.SecretKey,
 		Status: func() string {
-			if ureq.Status == madmin.AccountEnabled {
+			switch string(ureq.Status) {
+			case string(madmin.AccountEnabled), string(auth.AccountOn):
 				return auth.AccountOn
 			}
 			return auth.AccountOff
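The IAMStoreSys hunks above swap an equality test for a switch so that replicated status updates normalize correctly: "enabled" is the admin-API form (madmin.AccountEnabled) while "on" is the stored-credential form (auth.AccountOn), and a peer may forward either. A standalone sketch of that normalization, with string literals standing in for the real constants:

package main

import "fmt"

// normalizeStatus mirrors the switch statements in the hunks above:
// both the admin-API value ("enabled") and the stored-credential value
// ("on") map to the stored "on" form; anything else becomes "off".
func normalizeStatus(status string) string {
	switch status {
	case "enabled", "on":
		return "on"
	}
	return "off"
}

func main() {
	for _, s := range []string{"enabled", "on", "disabled", ""} {
		fmt.Printf("%q -> %q\n", s, normalizeStatus(s))
	}
}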
@@ -275,57 +275,43 @@ type PeerSiteInfo struct {
 }
 
 // getSiteStatuses gathers more info on the sites being added
-func (c *SiteReplicationSys) getSiteStatuses(ctx context.Context, sites []madmin.PeerSite) (psi []PeerSiteInfo, err SRError) {
+func (c *SiteReplicationSys) getSiteStatuses(ctx context.Context, sites ...madmin.PeerSite) (psi []PeerSiteInfo, err error) {
+	psi = make([]PeerSiteInfo, 0, len(sites))
 	for _, v := range sites {
 		admClient, err := getAdminClient(v.Endpoint, v.AccessKey, v.SecretKey)
 		if err != nil {
 			return psi, errSRPeerResp(fmt.Errorf("unable to create admin client for %s: %w", v.Name, err))
 		}
 
 		info, err := admClient.ServerInfo(ctx)
 		if err != nil {
 			return psi, errSRPeerResp(fmt.Errorf("unable to fetch server info for %s: %w", v.Name, err))
 		}
 
-		deploymentID := info.DeploymentID
-		pi := PeerSiteInfo{
-			PeerSite:     v,
-			DeploymentID: deploymentID,
-			Empty:        true,
-		}
-
-		if deploymentID == globalDeploymentID {
-			objAPI := newObjectLayerFn()
-			if objAPI == nil {
-				return psi, errSRObjectLayerNotReady
-			}
-			res, err := objAPI.ListBuckets(ctx)
-			if err != nil {
-				return psi, errSRBackendIssue(err)
-			}
-			if len(res) > 0 {
-				pi.Empty = false
-			}
-			pi.self = true
-		} else {
 		s3Client, err := getS3Client(v)
 		if err != nil {
 			return psi, errSRPeerResp(fmt.Errorf("unable to create s3 client for %s: %w", v.Name, err))
 		}
 
		buckets, err := s3Client.ListBuckets(ctx)
 		if err != nil {
 			return psi, errSRPeerResp(fmt.Errorf("unable to list buckets for %s: %v", v.Name, err))
 		}
-			pi.Empty = len(buckets) == 0
-		}
-		psi = append(psi, pi)
+
+		psi = append(psi, PeerSiteInfo{
+			PeerSite:     v,
+			DeploymentID: info.DeploymentID,
+			Empty:        len(buckets) == 0,
+			self:         info.DeploymentID == globalDeploymentID,
+		})
 	}
 	return
 }
 
 // AddPeerClusters - add cluster sites for replication configuration.
 func (c *SiteReplicationSys) AddPeerClusters(ctx context.Context, psites []madmin.PeerSite) (madmin.ReplicateAddStatus, error) {
-	sites, serr := c.getSiteStatuses(ctx, psites)
-	if serr.Cause != nil {
+	sites, serr := c.getSiteStatuses(ctx, psites...)
+	if serr != nil {
 		return madmin.ReplicateAddStatus{}, serr
 	}
 	var (
@@ -372,9 +358,18 @@ func (c *SiteReplicationSys) AddPeerClusters(ctx context.Context, psites []madmi
 			return madmin.ReplicateAddStatus{}, errSRInvalidRequest(fmt.Errorf("all existing replicated sites must be specified - missing %s", strings.Join(diffSlc, " ")))
 		}
 	}
 
+	// validate that all clusters are using the same IDP settings.
+	pass, err := c.validateIDPSettings(ctx, sites)
+	if err != nil {
+		return madmin.ReplicateAddStatus{}, err
+	}
+	if !pass {
+		return madmin.ReplicateAddStatus{}, errSRInvalidRequest(errors.New("all cluster sites must have the same IAM/IDP settings"))
+	}
+
 	// For this `add` API, either all clusters must be empty or the local
 	// cluster must be the only one having some buckets.
 	if localHasBuckets && nonLocalPeerWithBuckets != "" {
 		return madmin.ReplicateAddStatus{}, errSRInvalidRequest(errors.New("only one cluster may have data when configuring site replication"))
 	}
@@ -383,16 +378,6 @@ func (c *SiteReplicationSys) AddPeerClusters(ctx context.Context, psites []madmi
 		return madmin.ReplicateAddStatus{}, errSRInvalidRequest(fmt.Errorf("please send your request to the cluster containing data/buckets: %s", nonLocalPeerWithBuckets))
 	}
 
-	// validate that all clusters are using the same (LDAP based)
-	// external IDP.
-	pass, err := c.validateIDPSettings(ctx, sites)
-	if err != nil {
-		return madmin.ReplicateAddStatus{}, err
-	}
-	if !pass {
-		return madmin.ReplicateAddStatus{}, errSRInvalidRequest(errors.New("all cluster sites must have the same (LDAP) IDP settings"))
-	}
-
 	// FIXME: Ideally, we also need to check if there are any global IAM
 	// policies and any (LDAP user created) service accounts on the other
 	// peer clusters, and if so, reject the cluster replicate add request.
@@ -506,7 +491,7 @@ func (c *SiteReplicationSys) AddPeerClusters(ctx context.Context, psites []madmi
 		Status: madmin.ReplicateAddStatusSuccess,
 	}
 
-	if err := c.syncLocalToPeers(ctx); err != nil {
+	if err := c.syncToAllPeers(ctx); err != nil {
 		result.InitialSyncErrorMessage = err.Error()
 	}
@@ -928,6 +913,8 @@ func (c *SiteReplicationSys) PeerBucketConfigureReplHandler(ctx context.Context,
 		return err
 	}
 
+	c.RLock()
+	defer c.RUnlock()
 	errMap := make(map[string]error, len(c.state.Peers))
 	for d, peer := range c.state.Peers {
 		if d == globalDeploymentID {
@@ -1041,7 +1028,14 @@ func (c *SiteReplicationSys) PeerIAMUserChangeHandler(ctx context.Context, chang
 		if change.UserReq == nil {
 			return errSRInvalidRequest(errInvalidArgument)
 		}
-		err = globalIAMSys.CreateUser(ctx, change.AccessKey, *change.UserReq)
+		userReq := *change.UserReq
+		if userReq.Status != "" && userReq.SecretKey == "" {
+			// Status is set without secretKey updates means we are
+			// only changing the account status.
+			err = globalIAMSys.SetUserStatus(ctx, change.AccessKey, userReq.Status)
+		} else {
+			err = globalIAMSys.CreateUser(ctx, change.AccessKey, userReq)
+		}
 	}
 	if err != nil {
 		return wrapSRErr(err)
@@ -1058,9 +1052,13 @@ func (c *SiteReplicationSys) PeerGroupInfoChangeHandler(ctx context.Context, cha
 	var err error
 	if updReq.IsRemove {
 		err = globalIAMSys.RemoveUsersFromGroup(ctx, updReq.Group, updReq.Members)
 	} else {
+		if updReq.Status != "" && len(updReq.Members) == 0 {
+			err = globalIAMSys.SetGroupStatus(ctx, updReq.Group, updReq.Status == madmin.GroupEnabled)
+		} else {
 			err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members)
+		}
 	}
 	if err != nil {
 		return wrapSRErr(err)
 	}
@@ -1297,6 +1295,30 @@ func (c *SiteReplicationSys) PeerBucketSSEConfigHandler(ctx context.Context, buc
 	return nil
 }
 
+// PeerBucketQuotaConfigHandler - copies/deletes policy to local cluster.
+func (c *SiteReplicationSys) PeerBucketQuotaConfigHandler(ctx context.Context, bucket string, quota *madmin.BucketQuota) error {
+	if quota != nil {
+		quotaData, err := json.Marshal(quota)
+		if err != nil {
+			return wrapSRErr(err)
+		}
+
+		if err = globalBucketMetadataSys.Update(bucket, bucketQuotaConfigFile, quotaData); err != nil {
+			return wrapSRErr(err)
+		}
+
+		return nil
+	}
+
+	// Delete the bucket policy
+	err := globalBucketMetadataSys.Update(bucket, bucketQuotaConfigFile, nil)
+	if err != nil {
+		return wrapSRErr(err)
+	}
+
+	return nil
+}
+
 // getAdminClient - NOTE: ensure to take at least a read lock on SiteReplicationSys
 // before calling this.
 func (c *SiteReplicationSys) getAdminClient(ctx context.Context, deploymentID string) (*madmin.AdminClient, error) {
@@ -1321,20 +1343,26 @@ func (c *SiteReplicationSys) getPeerCreds() (*auth.Credentials, error) {
 	return &creds, nil
 }
 
-// syncLocalToPeers is used when initially configuring site replication, to
-// copy existing buckets, their settings, service accounts and policies to all
-// new peers.
-func (c *SiteReplicationSys) syncLocalToPeers(ctx context.Context) error {
+// listBuckets returns a consistent common view of latest unique buckets across
+// sites, this is used for replication.
+func (c *SiteReplicationSys) listBuckets(ctx context.Context) ([]BucketInfo, error) {
 	// If local has buckets, enable versioning on them, create them on peers
 	// and setup replication rules.
 	objAPI := newObjectLayerFn()
 	if objAPI == nil {
-		return errSRObjectLayerNotReady
+		return nil, errSRObjectLayerNotReady
 	}
-	buckets, err := objAPI.ListBuckets(ctx)
+	return objAPI.ListBuckets(ctx)
+}
+
+// syncToAllPeers is used for syncing local data to all remote peers, it is
+// called once during initial "AddPeerClusters" request.
+func (c *SiteReplicationSys) syncToAllPeers(ctx context.Context) error {
+	buckets, err := c.listBuckets(ctx)
 	if err != nil {
-		return errSRBackendIssue(err)
+		return err
 	}
 
 	for _, bucketInfo := range buckets {
 		bucket := bucketInfo.Name
@@ -1453,8 +1481,34 @@ func (c *SiteReplicationSys) syncLocalToPeers(ctx context.Context) error {
 				return errSRBucketMetaError(err)
 			}
 		}
 
+		quotaConfig, err := globalBucketMetadataSys.GetQuotaConfig(bucket)
+		found = true
+		if _, ok := err.(BucketQuotaConfigNotFound); ok {
+			found = false
+		} else if err != nil {
+			return errSRBackendIssue(err)
+		}
+		if found {
+			quotaConfigJSON, err := json.Marshal(quotaConfig)
+			if err != nil {
+				return wrapSRErr(err)
+			}
+			err = c.BucketMetaHook(ctx, madmin.SRBucketMeta{
+				Type:   madmin.SRBucketMetaTypeQuotaConfig,
+				Bucket: bucket,
+				Quota:  quotaConfigJSON,
+			})
+			if err != nil {
+				return errSRBucketMetaError(err)
+			}
+		}
 	}
 
+	// Order matters from now on how the information is
+	// synced to remote sites.
+
+	// Policies should be synced first.
 	{
 		// Replicate IAM policies on local to all peers.
 		allPolicies, err := globalIAMSys.ListPolicies(ctx, "")
@@ -1478,20 +1532,131 @@ func (c *SiteReplicationSys) syncLocalToPeers(ctx context.Context) error {
 		}
 	}
 
+	// Next should be userAccounts those are local users, OIDC and LDAP will not
+	// may not have any local users.
+	{
+		userAccounts := make(map[string]auth.Credentials)
+		globalIAMSys.store.rlock()
+		err := globalIAMSys.store.loadUsers(ctx, regUser, userAccounts)
+		globalIAMSys.store.runlock()
+		if err != nil {
+			return errSRBackendIssue(err)
+		}
+
+		for _, acc := range userAccounts {
+			if err := c.IAMChangeHook(ctx, madmin.SRIAMItem{
+				Type: madmin.SRIAMItemIAMUser,
+				IAMUser: &madmin.SRIAMUser{
+					AccessKey:   acc.AccessKey,
+					IsDeleteReq: false,
+					UserReq: &madmin.AddOrUpdateUserReq{
+						SecretKey: acc.SecretKey,
+						Status:    madmin.AccountStatus(acc.Status),
+					},
+				},
+			}); err != nil {
+				return errSRIAMError(err)
+			}
+		}
+	}
+
+	// Next should be Groups for some of these users, LDAP might have some Group
+	// DNs here
+	{
+		groups := make(map[string]GroupInfo)
+
+		globalIAMSys.store.rlock()
+		err := globalIAMSys.store.loadGroups(ctx, groups)
+		globalIAMSys.store.runlock()
+		if err != nil {
+			return errSRBackendIssue(err)
+		}
+
+		for gname, group := range groups {
+			if err := c.IAMChangeHook(ctx, madmin.SRIAMItem{
+				Type: madmin.SRIAMItemGroupInfo,
+				GroupInfo: &madmin.SRGroupInfo{
+					UpdateReq: madmin.GroupAddRemove{
+						Group:    gname,
+						Members:  group.Members,
+						Status:   madmin.GroupStatus(group.Status),
+						IsRemove: false,
+					},
+				},
+			}); err != nil {
+				return errSRIAMError(err)
+			}
+		}
+	}
+
+	// Service accounts are the static accounts that should be synced with
+	// valid claims.
+	{
+		serviceAccounts := make(map[string]auth.Credentials)
+		globalIAMSys.store.rlock()
+		err := globalIAMSys.store.loadUsers(ctx, svcUser, serviceAccounts)
+		globalIAMSys.store.runlock()
+		if err != nil {
+			return errSRBackendIssue(err)
+		}
+
+		for user, acc := range serviceAccounts {
+			if user == siteReplicatorSvcAcc {
+				// skip the site replicate svc account as it is
+				// already replicated.
+				continue
+			}
+
+			claims, err := globalIAMSys.GetClaimsForSvcAcc(ctx, acc.AccessKey)
+			if err != nil {
+				return errSRBackendIssue(err)
+			}
+
+			_, policy, err := globalIAMSys.GetServiceAccount(ctx, acc.AccessKey)
+			if err != nil {
+				return errSRBackendIssue(err)
+			}
+
+			var policyJSON []byte
+			if policy != nil {
+				policyJSON, err = json.Marshal(policy)
+				if err != nil {
+					return wrapSRErr(err)
+				}
+			}
+
+			err = c.IAMChangeHook(ctx, madmin.SRIAMItem{
+				Type: madmin.SRIAMItemSvcAcc,
+				SvcAccChange: &madmin.SRSvcAccChange{
+					Create: &madmin.SRSvcAccCreate{
+						Parent:        acc.ParentUser,
+						AccessKey:     user,
+						SecretKey:     acc.SecretKey,
+						Groups:        acc.Groups,
+						Claims:        claims,
+						SessionPolicy: json.RawMessage(policyJSON),
+						Status:        acc.Status,
+					},
+				},
+			})
+			if err != nil {
+				return errSRIAMError(err)
+			}
+		}
+	}
+
+	// Followed by policy mapping for the userAccounts we previously synced.
 	{
 		// Replicate policy mappings on local to all peers.
 		userPolicyMap := make(map[string]MappedPolicy)
 		groupPolicyMap := make(map[string]MappedPolicy)
 		globalIAMSys.store.rlock()
-		errU := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, false, userPolicyMap)
-		errG := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, true, groupPolicyMap)
+		errU := globalIAMSys.store.loadMappedPolicies(ctx, regUser, false, userPolicyMap)
+		errG := globalIAMSys.store.loadMappedPolicies(ctx, regUser, true, groupPolicyMap)
 		globalIAMSys.store.runlock()
 		if errU != nil {
 			return errSRBackendIssue(errU)
 		}
-		if errG != nil {
-			return errSRBackendIssue(errG)
-		}
 
 		for user, mp := range userPolicyMap {
 			err := c.IAMChangeHook(ctx, madmin.SRIAMItem{
@@ -1507,6 +1672,10 @@ func (c *SiteReplicationSys) syncLocalToPeers(ctx context.Context) error {
 		}
 	}
 
+	if errG != nil {
+		return errSRBackendIssue(errG)
+	}
+
 	for group, mp := range groupPolicyMap {
 		err := c.IAMChangeHook(ctx, madmin.SRIAMItem{
 			Type: madmin.SRIAMItemPolicyMapping,
@@ -1522,54 +1691,44 @@ func (c *SiteReplicationSys) syncLocalToPeers(ctx context.Context) error {
 		}
 	}
 
+	// and finally followed by policy mappings for for STS users.
 	{
-		// Check for service accounts and replicate them. Only LDAP user
-		// owned service accounts are supported for this operation.
-		serviceAccounts := make(map[string]auth.Credentials)
+		// Replicate policy mappings on local to all peers.
+		userPolicyMap := make(map[string]MappedPolicy)
+		groupPolicyMap := make(map[string]MappedPolicy)
 		globalIAMSys.store.rlock()
-		err := globalIAMSys.store.loadUsers(ctx, svcUser, serviceAccounts)
+		errU := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, false, userPolicyMap)
+		errG := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, true, groupPolicyMap)
 		globalIAMSys.store.runlock()
-		if err != nil {
-			return errSRBackendIssue(err)
+		if errU != nil {
+			return errSRBackendIssue(errU)
 		}
-		for user, acc := range serviceAccounts {
-			if user == siteReplicatorSvcAcc {
-				// skip the site replicate svc account as it is
-				// already replicated.
-				continue
-			}
-			claims, err := globalIAMSys.GetClaimsForSvcAcc(ctx, acc.AccessKey)
-			if err != nil {
-				return errSRBackendIssue(err)
-			}
-			if claims != nil {
-				if _, isLDAPAccount := claims[ldapUserN]; !isLDAPAccount {
-					continue
-				}
-			}
-			_, policy, err := globalIAMSys.GetServiceAccount(ctx, acc.AccessKey)
-			if err != nil {
-				return errSRBackendIssue(err)
-			}
-			var policyJSON []byte
-			if policy != nil {
-				policyJSON, err = json.Marshal(policy)
-				if err != nil {
-					return wrapSRErr(err)
-				}
-			}
-			err = c.IAMChangeHook(ctx, madmin.SRIAMItem{
-				Type: madmin.SRIAMItemSvcAcc,
-				SvcAccChange: &madmin.SRSvcAccChange{
-					Create: &madmin.SRSvcAccCreate{
-						Parent:        acc.ParentUser,
-						AccessKey:     user,
-						SecretKey:     acc.SecretKey,
-						Groups:        acc.Groups,
-						Claims:        claims,
-						SessionPolicy: json.RawMessage(policyJSON),
-						Status:        acc.Status,
+
+		for user, mp := range userPolicyMap {
+			err := c.IAMChangeHook(ctx, madmin.SRIAMItem{
+				Type: madmin.SRIAMItemPolicyMapping,
+				PolicyMapping: &madmin.SRPolicyMapping{
+					UserOrGroup: user,
+					IsGroup:     false,
+					Policy:      mp.Policies,
+				},
+			})
+			if err != nil {
+				return errSRIAMError(err)
+			}
+		}
+
+		if errG != nil {
+			return errSRBackendIssue(errG)
+		}
+
+		for group, mp := range groupPolicyMap {
+			err := c.IAMChangeHook(ctx, madmin.SRIAMItem{
+				Type: madmin.SRIAMItemPolicyMapping,
+				PolicyMapping: &madmin.SRPolicyMapping{
+					UserOrGroup: group,
+					IsGroup:     true,
+					Policy:      mp.Policies,
+				},
+			})
 			if err != nil {
@@ -1701,7 +1860,10 @@ func getAdminClient(endpoint, accessKey, secretKey string) (*madmin.AdminClient,
 }
 
 func getS3Client(pc madmin.PeerSite) (*minioClient.Client, error) {
-	ep, _ := url.Parse(pc.Endpoint)
+	ep, err := url.Parse(pc.Endpoint)
+	if err != nil {
+		return nil, err
+	}
 	return minioClient.New(ep.Host, &minioClient.Options{
 		Creds:  credentials.NewStaticV4(pc.AccessKey, pc.SecretKey, ""),
 		Secure: ep.Scheme == "https",
@@ -42,23 +42,40 @@ export MC_HOST_minio1=http://minio:minio123@localhost:9001
 export MC_HOST_minio2=http://minio:minio123@localhost:9002
 export MC_HOST_minio3=http://minio:minio123@localhost:9003
 
-./mc admin replicate add minio1 minio2 minio3
+./mc admin replicate add minio1 minio2
 
 ./mc admin user add minio1 foobar foo12345
 
+## add foobar-g group with foobar
+./mc admin group add minio2 foobar-g foobar
+
 ./mc admin policy set minio1 consoleAdmin user=foobar
 sleep 5
 
 ./mc admin user info minio2 foobar
 ./mc admin user info minio3 foobar
 
+./mc admin group info minio1 foobar-g
+
 ./mc admin policy add minio1 rw ./docs/site-replication/rw.json
 
 sleep 5
 ./mc admin policy info minio2 rw >/dev/null 2>&1
 
+./mc admin replicate status minio1
+
+## Add a new empty site
+./mc admin replicate add minio1 minio2 minio3
+
+sleep 10
+
+./mc admin policy info minio3 rw >/dev/null 2>&1
+
 ./mc admin policy remove minio3 rw
 
+./mc admin replicate status minio3
+
 sleep 10
 
 ./mc admin policy info minio1 rw
 if [ $? -eq 0 ]; then
 	echo "expecting the command to fail, exiting.."
@@ -71,21 +88,33 @@ if [ $? -eq 0 ]; then
 	exit_1;
 fi
 
+./mc admin policy info minio3 rw
+if [ $? -eq 0 ]; then
+	echo "expecting the command to fail, exiting.."
+	exit_1;
+fi
+
 ./mc admin user info minio1 foobar
 if [ $? -ne 0 ]; then
-	echo "policy mapping missing, exiting.."
+	echo "policy mapping missing on 'minio1', exiting.."
 	exit_1;
 fi
 
 ./mc admin user info minio2 foobar
 if [ $? -ne 0 ]; then
-	echo "policy mapping missing, exiting.."
+	echo "policy mapping missing on 'minio2', exiting.."
 	exit_1;
 fi
 
 ./mc admin user info minio3 foobar
 if [ $? -ne 0 ]; then
-	echo "policy mapping missing, exiting.."
+	echo "policy mapping missing on 'minio3', exiting.."
 	exit_1;
 fi
 
+./mc admin group info minio3 foobar-g
+if [ $? -ne 0 ]; then
+	echo "group mapping missing on 'minio3', exiting.."
+	exit_1;
+fi
go.mod (2 lines changed)
@@ -49,7 +49,7 @@ require (
 	github.com/minio/csvparser v1.0.0
 	github.com/minio/highwayhash v1.0.2
 	github.com/minio/kes v0.14.0
-	github.com/minio/madmin-go v1.2.4
+	github.com/minio/madmin-go v1.2.6
 	github.com/minio/minio-go/v7 v7.0.20
 	github.com/minio/parquet-go v1.1.0
 	github.com/minio/pkg v1.1.14
go.sum (4 lines changed)
@@ -1092,8 +1092,8 @@ github.com/minio/kes v0.14.0/go.mod h1:OUensXz2BpgMfiogslKxv7Anyx/wj+6bFC6qA7BQc
 github.com/minio/madmin-go v1.0.12/go.mod h1:BK+z4XRx7Y1v8SFWXsuLNqQqnq5BO/axJ8IDJfgyvfs=
 github.com/minio/madmin-go v1.1.15/go.mod h1:Iu0OnrMWNBYx1lqJTW+BFjBMx0Hi0wjw8VmqhiOs2Jo=
 github.com/minio/madmin-go v1.1.23/go.mod h1:wv8zCroSCnpjjQdmgsdJEkFH2oD4w9J40OZqbhxjiJ4=
-github.com/minio/madmin-go v1.2.4 h1:o+0X6ENO/AtkRxCbD4FXT6YCRIH7HgDGb+WzfJFDzMQ=
-github.com/minio/madmin-go v1.2.4/go.mod h1:/rOfQv4ohkXJ+7EaSnhg9IJEX7cobX08zkSLfh8G3Ks=
+github.com/minio/madmin-go v1.2.6 h1:k4q5I+6nV/r7QBFZtvlPWMJh8uOo5AbEMx/39VXP7kg=
+github.com/minio/madmin-go v1.2.6/go.mod h1:/rOfQv4ohkXJ+7EaSnhg9IJEX7cobX08zkSLfh8G3Ks=
 github.com/minio/mc v0.0.0-20211207230606-23a05f5a17f2 h1:xocb1RGyrDJ8PxkNn0NSbaBlfdU6J/Ag9QK62pb7nR8=
 github.com/minio/mc v0.0.0-20211207230606-23a05f5a17f2/go.mod h1:siI9jWTzj1KsNXgz6NOL/S7OTaAUM0OMi+zEkF08gnA=
 github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=