fix: handle concurrent lockers with multiple optimizations (#10640)

- select lockers which are non-local and online to have
  affinity towards remote servers for lock contention

- optimize lock retry interval to avoid sending too many
  messages during lock contention, reduces average CPU
  usage as well

- when deleteObject fails, make sure setPutObjHeaders()
  honors lifecycle only if the bucket name is set.

- fix top locks to always list the oldest lockers,
  avoiding getting bogged down in the map's unordered iteration.
This commit is contained in:
Harshavardhana
2020-10-08 12:32:32 -07:00
committed by GitHub
parent 907a171edd
commit 736e58dd68
20 changed files with 105 additions and 65 deletions

View File

@@ -534,13 +534,13 @@ func (sys *NotificationSys) GetLocks(ctx context.Context, r *http.Request) []*Pe
}
// Once we have received all the locks currently used from peers
// add the local peer locks list as well.
var getRespLocks GetLocksResp
llockers := make(GetLocksResp, 0, len(globalLockServers))
for _, llocker := range globalLockServers {
getRespLocks = append(getRespLocks, llocker.DupLockMap())
llockers = append(llockers, llocker.DupLockMap())
}
locksResp = append(locksResp, &PeerLocks{
Addr: getHostName(r),
Locks: getRespLocks,
Locks: llockers,
})
return locksResp
}