mirror of
https://github.com/minio/minio.git
synced 2024-12-24 06:05:55 -05:00
Move IAM periodic ops to a single go routine (#18026)
This helps reduce disk operations, as these periodic routines no longer run concurrently. Also adds a periodic operation to purge expired STS credentials: since we do not scan the on-disk STS credentials (and instead only load them on demand), a separate routine is needed to purge expired credentials from storage. Currently this runs about a quarter as often as the IAM refresh. Also fixes a bug where, with etcd, STS accounts could get loaded into the iamUsersMap instead of the iamSTSAccountsMap.
This commit is contained in:
parent
cbc0ef459b
commit
7a7068ee47
@ -32,6 +32,7 @@ import (
|
||||
"github.com/minio/minio-go/v7/pkg/set"
|
||||
"github.com/minio/minio/internal/config"
|
||||
"github.com/minio/minio/internal/kms"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
)
|
||||
|
||||
// IAMObjectStore implements IAMStorageAPI
|
||||
@ -383,6 +384,32 @@ func (iamOS *IAMObjectStore) listAllIAMConfigItems(ctx context.Context) (map[str
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// PurgeExpiredSTS - purge expired STS credentials from object store.
|
||||
func (iamOS *IAMObjectStore) PurgeExpiredSTS(ctx context.Context) error {
|
||||
if iamOS.objAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
bootstrapTraceMsg("purging expired STS credentials")
|
||||
// Scan STS users on disk and purge expired ones. We do not need to hold a
|
||||
// lock with store.lock() here.
|
||||
for item := range listIAMConfigItems(ctx, iamOS.objAPI, iamConfigPrefix+SlashSeparator+stsListKey) {
|
||||
if item.Err != nil {
|
||||
return item.Err
|
||||
}
|
||||
userName := path.Dir(item.Item)
|
||||
// loadUser() will delete expired user during the load - we do not need
|
||||
// to keep the loaded user around in memory, so we reinitialize the map
|
||||
// each time.
|
||||
m := map[string]UserIdentity{}
|
||||
if err := iamOS.loadUser(ctx, userName, stsUser, m); err != nil && err != errNoSuchUser {
|
||||
logger.LogIf(GlobalContext, fmt.Errorf("unable to load user during STS purge: %w (%s)", err, item.Item))
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Assumes cache is locked by caller.
|
||||
func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iamCache) error {
|
||||
if iamOS.objAPI == nil {
|
||||
|
@ -481,6 +481,16 @@ func setDefaultCannedPolicies(policies map[string]PolicyDoc) {
|
||||
}
|
||||
}
|
||||
|
||||
// PurgeExpiredSTS - purges expired STS credentials.
|
||||
func (store *IAMStoreSys) PurgeExpiredSTS(ctx context.Context) error {
|
||||
iamOS, ok := store.IAMStorageAPI.(*IAMObjectStore)
|
||||
if !ok {
|
||||
// No purging is done for non-object storage.
|
||||
return nil
|
||||
}
|
||||
return iamOS.PurgeExpiredSTS(ctx)
|
||||
}
|
||||
|
||||
// LoadIAMCache reads all IAM items and populates a new iamCache object and
|
||||
// replaces the in-memory cache object.
|
||||
func (store *IAMStoreSys) LoadIAMCache(ctx context.Context) error {
|
||||
@ -536,13 +546,13 @@ func (store *IAMStoreSys) LoadIAMCache(ctx context.Context) error {
|
||||
|
||||
bootstrapTraceMsg("loading STS users")
|
||||
// load STS temp users
|
||||
if err := store.loadUsers(ctx, stsUser, newCache.iamUsersMap); err != nil {
|
||||
if err := store.loadUsers(ctx, stsUser, newCache.iamSTSAccountsMap); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bootstrapTraceMsg("loading STS policy mapping")
|
||||
// load STS policy mappings
|
||||
if err := store.loadMappedPolicies(ctx, stsUser, false, newCache.iamUserPolicyMap); err != nil {
|
||||
if err := store.loadMappedPolicies(ctx, stsUser, false, newCache.iamSTSPolicyMap); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
151
cmd/iam.go
151
cmd/iam.go
@ -319,44 +319,7 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc
|
||||
|
||||
refreshInterval := sys.iamRefreshInterval
|
||||
|
||||
// Set up polling for expired accounts and credentials purging.
|
||||
switch {
|
||||
case sys.OpenIDConfig.ProviderEnabled():
|
||||
go func() {
|
||||
timer := time.NewTimer(refreshInterval)
|
||||
defer timer.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-timer.C:
|
||||
sys.purgeExpiredCredentialsForExternalSSO(ctx)
|
||||
|
||||
timer.Reset(refreshInterval)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
case sys.LDAPConfig.Enabled():
|
||||
go func() {
|
||||
timer := time.NewTimer(refreshInterval)
|
||||
defer timer.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-timer.C:
|
||||
sys.purgeExpiredCredentialsForLDAP(ctx)
|
||||
sys.updateGroupMembershipsForLDAP(ctx)
|
||||
|
||||
timer.Reset(refreshInterval)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Start watching changes to storage.
|
||||
go sys.watch(ctx)
|
||||
go sys.periodicRoutines(ctx, refreshInterval)
|
||||
|
||||
// Load RoleARNs
|
||||
sys.rolesMap = make(map[arn.ARN]string)
|
||||
@ -377,6 +340,79 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc
|
||||
bootstrapTraceMsg("finishing IAM loading")
|
||||
}
|
||||
|
||||
// periodicRoutines runs the recurring IAM maintenance operations from a
// single goroutine so they never execute concurrently with each other.
// baseInterval is the IAM refresh interval; each sleep adds a random jitter
// of up to 20% of it, presumably so cluster nodes do not refresh in
// lock-step (TODO confirm intent with callers).
func (sys *IAMSys) periodicRoutines(ctx context.Context, baseInterval time.Duration) {
	// Watch for IAM config changes for iamStorageWatcher.
	watcher, isWatcher := sys.store.IAMStorageAPI.(iamStorageWatcher)
	if isWatcher {
		// Event consumption runs in its own goroutine; it exits when the
		// watcher closes its channel (e.g. on ctx cancellation).
		go func() {
			ch := watcher.watch(ctx, iamConfigPrefix)
			for event := range ch {
				if err := sys.loadWatchedEvent(ctx, event); err != nil {
					// we simply log errors
					logger.LogIf(ctx, fmt.Errorf("Failure in loading watch event: %v", err))
				}
			}
		}()
	}

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	// Add a random interval of up to 20% of the base interval.
	randInterval := func() time.Duration {
		return time.Duration(r.Float64() * float64(baseInterval) * 0.2)
	}

	// Threshold (seconds) above which a refresh/purge duration is logged.
	var maxDurationSecondsForLog float64 = 5
	timer := time.NewTimer(baseInterval + randInterval())
	defer timer.Stop()

	for {
		select {
		case <-timer.C:
			// Load all IAM items (except STS creds) periodically.
			refreshStart := time.Now()
			if err := sys.Load(ctx, false); err != nil {
				logger.LogIf(ctx, fmt.Errorf("Failure in periodic refresh for IAM (took %.2fs): %v", time.Since(refreshStart).Seconds(), err))
			} else {
				took := time.Since(refreshStart).Seconds()
				if took > maxDurationSecondsForLog {
					// Log if we took a lot of time to load.
					logger.Info("IAM refresh took %.2fs", took)
				}
			}

			// The following actions are performed about once in 4 times that
			// IAM is refreshed:
			if r.Intn(4) == 0 {
				// Purge expired STS credentials.
				purgeStart := time.Now()
				if err := sys.store.PurgeExpiredSTS(ctx); err != nil {
					logger.LogIf(ctx, fmt.Errorf("Failure in periodic STS purge for IAM (took %.2fs): %v", time.Since(purgeStart).Seconds(), err))
				} else {
					took := time.Since(purgeStart).Seconds()
					if took > maxDurationSecondsForLog {
						// Log if we took a lot of time to load.
						logger.Info("IAM expired STS purge took %.2fs", took)
					}
				}

				// Poll and remove accounts for those users who were removed
				// from LDAP/OpenID.
				if sys.LDAPConfig.Enabled() {
					sys.purgeExpiredCredentialsForLDAP(ctx)
					sys.updateGroupMembershipsForLDAP(ctx)
				}
				if sys.OpenIDConfig.ProviderEnabled() {
					sys.purgeExpiredCredentialsForExternalSSO(ctx)
				}
			}

			// Re-arm with fresh jitter only after the work completes, so a
			// slow cycle cannot overlap the next one.
			timer.Reset(baseInterval + randInterval())
		case <-ctx.Done():
			return
		}
	}
}
|
||||
|
||||
func (sys *IAMSys) validateAndAddRolePolicyMappings(ctx context.Context, m map[arn.ARN]string) {
|
||||
// Validate that policies associated with roles are defined. If
|
||||
// authZ plugin is set, role policies are just claims sent to
|
||||
@ -428,45 +464,6 @@ func (sys *IAMSys) HasWatcher() bool {
|
||||
return sys.store.HasWatcher()
|
||||
}
|
||||
|
||||
func (sys *IAMSys) watch(ctx context.Context) {
|
||||
watcher, ok := sys.store.IAMStorageAPI.(iamStorageWatcher)
|
||||
if ok {
|
||||
ch := watcher.watch(ctx, iamConfigPrefix)
|
||||
for event := range ch {
|
||||
if err := sys.loadWatchedEvent(ctx, event); err != nil {
|
||||
// we simply log errors
|
||||
logger.LogIf(ctx, fmt.Errorf("Failure in loading watch event: %v", err))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var maxRefreshDurationSecondsForLog float64 = 10
|
||||
|
||||
// Load all items periodically
|
||||
timer := time.NewTimer(sys.iamRefreshInterval)
|
||||
defer timer.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-timer.C:
|
||||
refreshStart := time.Now()
|
||||
if err := sys.Load(ctx, false); err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Failure in periodic refresh for IAM (took %.2fs): %v", time.Since(refreshStart).Seconds(), err))
|
||||
} else {
|
||||
took := time.Since(refreshStart).Seconds()
|
||||
if took > maxRefreshDurationSecondsForLog {
|
||||
// Log if we took a lot of time to load.
|
||||
logger.Info("IAM refresh took %.2fs", took)
|
||||
}
|
||||
}
|
||||
|
||||
timer.Reset(sys.iamRefreshInterval)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (sys *IAMSys) loadWatchedEvent(ctx context.Context, event iamWatchEvent) (err error) {
|
||||
usersPrefix := strings.HasPrefix(event.keyPath, iamConfigUsersPrefix)
|
||||
groupsPrefix := strings.HasPrefix(event.keyPath, iamConfigGroupsPrefix)
|
||||
|
Loading…
Reference in New Issue
Block a user