allow purge expired STS while loading credentials (#19905)

The reason for this is to avoid STS mappings being purged without a successful load of the other policies, and to ensure that only successfully loaded credentials are handled.

This also removes the separately cached IAM listing that was added earlier as an optimization and is no longer necessary.
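
To make the intent concrete, here is a minimal, self-contained Go sketch of the new flow. It uses simplified, hypothetical types and helpers rather than MinIO's actual API: expired credentials are dropped while the credential and policy-mapping maps are rebuilt in one pass, so a mapping is only purged together with a successful load.

// Hypothetical simplification of the change described above: purge expired
// STS credentials as part of the single load pass, instead of a separate
// periodic purge that depended on a cached IAM listing.
package main

import (
	"fmt"
	"time"
)

// stsCredential stands in for a stored STS identity (not a MinIO type).
type stsCredential struct {
	accessKey string
	expiresAt time.Time
}

// loadAndPurge rebuilds the in-memory maps from a fresh listing. Expired
// credentials are skipped during the load, and the policy-mapping map is
// rebuilt only from credentials that loaded successfully, so stale mappings
// disappear without a separate purge step.
func loadAndPurge(listing []stsCredential, storedMappings map[string]string) (map[string]stsCredential, map[string]string) {
	now := time.Now()
	accounts := make(map[string]stsCredential)
	for _, cred := range listing {
		if cred.expiresAt.Before(now) {
			continue // expired: dropped while loading
		}
		accounts[cred.accessKey] = cred
	}
	mappings := make(map[string]string)
	for accessKey, policy := range storedMappings {
		if _, ok := accounts[accessKey]; ok {
			mappings[accessKey] = policy
		}
	}
	return accounts, mappings
}

func main() {
	listing := []stsCredential{
		{accessKey: "live-key", expiresAt: time.Now().Add(time.Hour)},
		{accessKey: "stale-key", expiresAt: time.Now().Add(-time.Hour)},
	}
	stored := map[string]string{"live-key": "readwrite", "stale-key": "readonly"}
	accounts, mappings := loadAndPurge(listing, stored)
	fmt.Println(len(accounts), len(mappings)) // 1 1: the expired entry and its mapping are gone
}

Because the purge becomes a by-product of rebuilding the maps from a fresh listing, the separately cached IAM listing is no longer needed, which is why the diff below also removes cachedIAMListing.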
Harshavardhana 2024-06-10 11:45:50 -07:00 committed by GitHub
parent b8b956a05d
commit 614981e566
4 changed files with 54 additions and 106 deletions

View File

@@ -25,7 +25,6 @@ import (
 	"path"
 	"strings"
 	"sync"
-	"sync/atomic"
 	"time"
 	"unicode/utf8"
@@ -34,6 +33,7 @@ import (
 	"github.com/minio/minio/internal/config"
 	xioutil "github.com/minio/minio/internal/ioutil"
 	"github.com/minio/minio/internal/kms"
+	"github.com/minio/minio/internal/logger"
 	"github.com/puzpuzpuz/xsync/v3"
 )
@@ -44,8 +44,6 @@ type IAMObjectStore struct {
 	*iamCache

-	cachedIAMListing atomic.Value
-
 	usersSysType UsersSysType

 	objAPI ObjectLayer
@@ -421,8 +419,8 @@ func splitPath(s string, lastIndex bool) (string, string) {
 	return s[:i+1], s[i+1:]
 }

-func (iamOS *IAMObjectStore) listAllIAMConfigItems(ctx context.Context) (map[string][]string, error) {
-	res := make(map[string][]string)
+func (iamOS *IAMObjectStore) listAllIAMConfigItems(ctx context.Context) (res map[string][]string, err error) {
+	res = make(map[string][]string)
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 	for item := range listIAMConfigItems(ctx, iamOS.objAPI, iamConfigPrefix+SlashSeparator) {
@@ -438,62 +436,10 @@ func (iamOS *IAMObjectStore) listAllIAMConfigItems(ctx context.Context) (map[str
 		res[listKey] = append(res[listKey], trimmedItem)
 	}

-	// Store the listing for later re-use.
-	iamOS.cachedIAMListing.Store(res)
-
 	return res, nil
 }

-// PurgeExpiredSTS - purge expired STS credentials from object store.
-func (iamOS *IAMObjectStore) PurgeExpiredSTS(ctx context.Context) error {
-	if iamOS.objAPI == nil {
-		return errServerNotInitialized
-	}
-
-	bootstrapTraceMsg("purging expired STS credentials")
-
-	iamListing, ok := iamOS.cachedIAMListing.Load().(map[string][]string)
-	if !ok {
-		// There has been no store yet. This should never happen!
-		iamLogIf(GlobalContext, errors.New("WARNING: no cached IAM listing found"))
-		return nil
-	}
-
-	// Scan STS users on disk and purge expired ones. We do not need to hold a
-	// lock with store.lock() here.
-	stsAccountsFromStore := map[string]UserIdentity{}
-	stsAccPoliciesFromStore := xsync.NewMapOf[string, MappedPolicy]()
-	for _, item := range iamListing[stsListKey] {
-		userName := path.Dir(item)
-		// loadUser() will delete expired user during the load.
-		err := iamOS.loadUser(ctx, userName, stsUser, stsAccountsFromStore)
-		if err != nil && !errors.Is(err, errNoSuchUser) {
-			iamLogIf(GlobalContext,
-				fmt.Errorf("unable to load user during STS purge: %w (%s)", err, item))
-		}
-	}
-	// Loading the STS policy mappings from disk ensures that stale entries
-	// (removed during loadUser() in the loop above) are removed from memory.
-	for _, item := range iamListing[policyDBSTSUsersListKey] {
-		stsName := strings.TrimSuffix(item, ".json")
-		err := iamOS.loadMappedPolicy(ctx, stsName, stsUser, false, stsAccPoliciesFromStore)
-		if err != nil && !errors.Is(err, errNoSuchPolicy) {
-			iamLogIf(GlobalContext,
-				fmt.Errorf("unable to load policies during STS purge: %w (%s)", err, item))
-		}
-	}
-
-	// Store the newly populated map in the iam cache. This takes care of
-	// removing stale entries from the existing map.
-	iamOS.Lock()
-	defer iamOS.Unlock()
-	iamOS.iamCache.iamSTSAccountsMap = stsAccountsFromStore
-	iamOS.iamCache.iamSTSPolicyMap = stsAccPoliciesFromStore
-
-	return nil
-}
-
 // Assumes cache is locked by caller.
 func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iamCache) error {
 	if iamOS.objAPI == nil {
@@ -594,6 +540,45 @@ func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iam
 	}

 	cache.buildUserGroupMemberships()

+	purgeStart := time.Now()
+
+	// Purge expired STS credentials.
+	// Scan STS users on disk and purge expired ones.
+	stsAccountsFromStore := map[string]UserIdentity{}
+	stsAccPoliciesFromStore := xsync.NewMapOf[string, MappedPolicy]()
+	for _, item := range listedConfigItems[stsListKey] {
+		userName := path.Dir(item)
+		// loadUser() will delete expired user during the load.
+		err := iamOS.loadUser(ctx, userName, stsUser, stsAccountsFromStore)
+		if err != nil && !errors.Is(err, errNoSuchUser) {
+			return fmt.Errorf("unable to load user during STS purge: %w (%s)", err, item)
+		}
+	}
+
+	// Loading the STS policy mappings from disk ensures that stale entries
+	// (removed during loadUser() in the loop above) are removed from memory.
+	for _, item := range listedConfigItems[policyDBSTSUsersListKey] {
+		stsName := strings.TrimSuffix(item, ".json")
+		err := iamOS.loadMappedPolicy(ctx, stsName, stsUser, false, stsAccPoliciesFromStore)
+		if err != nil && !errors.Is(err, errNoSuchPolicy) {
+			return fmt.Errorf("unable to load policies during STS purge: %w (%s)", err, item)
+		}
+	}
+
+	took := time.Since(purgeStart).Seconds()
+	if took > maxDurationSecondsForLog {
+		// Log if we took a lot of time to load.
+		logger.Info("IAM expired STS purge took %.2fs", took)
+	}
+
+	// Store the newly populated map in the iam cache. This takes care of
+	// removing stale entries from the existing map.
+	cache.iamSTSAccountsMap = stsAccountsFromStore
+	cache.iamSTSPolicyMap = stsAccPoliciesFromStore
+
 	return nil
 }

View File

@@ -532,16 +532,6 @@ func setDefaultCannedPolicies(policies map[string]PolicyDoc) {
 	}
 }

-// PurgeExpiredSTS - purges expired STS credentials.
-func (store *IAMStoreSys) PurgeExpiredSTS(ctx context.Context) error {
-	iamOS, ok := store.IAMStorageAPI.(*IAMObjectStore)
-	if !ok {
-		// No purging is done for non-object storage.
-		return nil
-	}
-	return iamOS.PurgeExpiredSTS(ctx)
-}
-
 // LoadIAMCache reads all IAM items and populates a new iamCache object and
 // replaces the in-memory cache object.
 func (store *IAMStoreSys) LoadIAMCache(ctx context.Context, firstTime bool) error {

View File

@@ -352,6 +352,8 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc
 	bootstrapTraceMsg("finishing IAM loading")
 }

+const maxDurationSecondsForLog = 5
+
 func (sys *IAMSys) periodicRoutines(ctx context.Context, baseInterval time.Duration) {
 	// Watch for IAM config changes for iamStorageWatcher.
 	watcher, isWatcher := sys.store.IAMStorageAPI.(iamStorageWatcher)
@@ -384,7 +386,6 @@ func (sys *IAMSys) periodicRoutines(ctx context.Context, baseInterval time.Durat
 		return baseInterval/2 + randAmt
 	}

-	var maxDurationSecondsForLog float64 = 5

 	timer := time.NewTimer(waitInterval())
 	defer timer.Stop()
@@ -403,18 +404,6 @@ func (sys *IAMSys) periodicRoutines(ctx context.Context, baseInterval time.Durat
 				}
 			}

-			// Purge expired STS credentials.
-			purgeStart := time.Now()
-			if err := sys.store.PurgeExpiredSTS(ctx); err != nil {
-				iamLogIf(ctx, fmt.Errorf("Failure in periodic STS purge for IAM (took %.2fs): %v", time.Since(purgeStart).Seconds(), err))
-			} else {
-				took := time.Since(purgeStart).Seconds()
-				if took > maxDurationSecondsForLog {
-					// Log if we took a lot of time to load.
-					logger.Info("IAM expired STS purge took %.2fs", took)
-				}
-			}
-
 			// The following actions are performed about once in 4 times that
 			// IAM is refreshed:
 			if r.Intn(4) == 0 {
@@ -1578,31 +1567,16 @@ func (sys *IAMSys) NormalizeLDAPAccessKeypairs(ctx context.Context, accessKeyMap
 func (sys *IAMSys) getStoredLDAPPolicyMappingKeys(ctx context.Context, isGroup bool) set.StringSet {
 	entityKeysInStorage := set.NewStringSet()
-	if iamOS, ok := sys.store.IAMStorageAPI.(*IAMObjectStore); ok {
-		// Load existing mapping keys from the cached listing for
-		// `IAMObjectStore`.
-		iamFilesListing := iamOS.cachedIAMListing.Load().(map[string][]string)
-		listKey := policyDBSTSUsersListKey
-		if isGroup {
-			listKey = policyDBGroupsListKey
-		}
-		for _, item := range iamFilesListing[listKey] {
-			stsUserName := strings.TrimSuffix(item, ".json")
-			entityKeysInStorage.Add(stsUserName)
-		}
-	} else {
-		// For non-iam object store, we copy the mapping keys from the cache.
-		cache := sys.store.rlock()
-		defer sys.store.runlock()
-		cachedPolicyMap := cache.iamSTSPolicyMap
-		if isGroup {
-			cachedPolicyMap = cache.iamGroupPolicyMap
-		}
-		cachedPolicyMap.Range(func(k string, v MappedPolicy) bool {
-			entityKeysInStorage.Add(k)
-			return true
-		})
-	}
+	cache := sys.store.rlock()
+	defer sys.store.runlock()
+
+	cachedPolicyMap := cache.iamSTSPolicyMap
+	if isGroup {
+		cachedPolicyMap = cache.iamGroupPolicyMap
+	}
+	cachedPolicyMap.Range(func(k string, v MappedPolicy) bool {
+		entityKeysInStorage.Add(k)
+		return true
+	})
+
 	return entityKeysInStorage
 }

View File

@@ -50,7 +50,6 @@ func runAllIAMSTSTests(suite *TestSuiteIAM, c *check) {
 }

 func TestIAMInternalIDPSTSServerSuite(t *testing.T) {
-	t.Skip("FIXME: Skipping internal IDP tests. Flaky test, needs to be fixed.")
 	baseTestCases := []TestSuiteCommon{
 		// Init and run test on ErasureSD backend with signature v4.
 		{serverType: "ErasureSD", signer: signerV4},