Mirror of https://github.com/minio/minio.git
fix: refactor locks to apply them uniquely per node (#11052)
This refactor is done for a few reasons:

- To avoid deadlocks when the number of nodes is smaller than the actual erasure stripe count, where the N participating local lockers can deadlock across systems.
- Avoids expiry routines running thousands of separate network operations and routes per disk, even though each of them is still accessing a single local entity.
- It is ideal to have a single globalLockServer per instance.
- In a 32-node deployment, each server group is still concentrated on the same set of lockers that participate during the write/read phase, unlike the previous minio/dsync implementation. This avoids sending 32 requests; instead we send at most as many requests as there are unique nodes participating in a write/read phase (see the de-duplication sketch below).
- Reduces overall chattiness on smaller setups.
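As a rough illustration of the per-node de-duplication described above, here is a minimal, hypothetical Go sketch. It is not the actual minio code; uniqueLockerHosts and the endpoint strings are invented names. The idea is that multiple per-disk lock endpoints on the same host collapse to one locker per unique node, so a write/read phase contacts each node at most once.

package main

import "fmt"

// uniqueLockerHosts returns the distinct hosts among the given lock endpoints,
// preserving first-seen order. Each unique node then receives a single lock
// request regardless of how many of its disks participate.
func uniqueLockerHosts(lockEndpoints []string) []string {
	seen := make(map[string]struct{}, len(lockEndpoints))
	hosts := make([]string, 0, len(lockEndpoints))
	for _, ep := range lockEndpoints {
		if _, ok := seen[ep]; ok {
			continue // a locker for this node has already been selected
		}
		seen[ep] = struct{}{}
		hosts = append(hosts, ep)
	}
	return hosts
}

func main() {
	// Four participating disks spread across two nodes: only two lock
	// requests are needed instead of four.
	endpoints := []string{"node1:9000", "node1:9000", "node2:9000", "node2:9000"}
	fmt.Println(uniqueLockerHosts(endpoints)) // [node1:9000 node2:9000]
}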
@@ -84,18 +84,16 @@ func (client *peerRESTClient) Close() error {
 	return nil
 }
 
-// GetLocksResp stores various info from the client for each lock that is requested.
-type GetLocksResp []map[string][]lockRequesterInfo
-
 // GetLocks - fetch older locks for a remote node.
-func (client *peerRESTClient) GetLocks() (locks GetLocksResp, err error) {
+func (client *peerRESTClient) GetLocks() (lockMap map[string][]lockRequesterInfo, err error) {
 	respBody, err := client.call(peerRESTMethodGetLocks, nil, nil, -1)
 	if err != nil {
 		return
 	}
+	lockMap = map[string][]lockRequesterInfo{}
 	defer http.DrainBody(respBody)
-	err = gob.NewDecoder(respBody).Decode(&locks)
-	return locks, err
+	err = gob.NewDecoder(respBody).Decode(&lockMap)
+	return lockMap, err
 }
 
 // ServerInfo - fetch server information for a remote node.
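For context, here is a hedged sketch of how a caller might consume the new GetLocks signature, merging the per-peer lock maps into a single view. This assumes the minio cmd package, where peerRESTClient and lockRequesterInfo are defined; allPeerClients is a hypothetical slice of peer clients, not an identifier from this diff.

// aggregateLocks merges the lock maps reported by each reachable peer into a
// single resource -> lock-requester view. Unreachable peers are skipped here;
// a real caller may prefer to log or surface those errors.
func aggregateLocks(allPeerClients []*peerRESTClient) map[string][]lockRequesterInfo {
	merged := make(map[string][]lockRequesterInfo)
	for _, client := range allPeerClients {
		if client == nil {
			continue
		}
		lockMap, err := client.GetLocks()
		if err != nil {
			continue
		}
		for resource, lris := range lockMap {
			merged[resource] = append(merged[resource], lris...)
		}
	}
	return merged
}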