Implement RLock, RUnlock rpc handlers (#2437)
This simplifies dsync.RWMutex's algorithm for acquiring a distributed read lock while remaining tolerant to N/2-1 failures.
parent 43098df9d8
commit 229600ce9b
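For context, here is a minimal sketch of the quorum idea the description refers to, not the actual dsync.RWMutex implementation: a read lock is requested from every lock server, and the acquisition counts as successful only when a majority of them answer, which is what tolerates N/2-1 failed nodes. The "Dsync" service name, the node addresses, and dialing over lockRPCPath are assumptions made only for illustration.

```go
package main

import (
	"fmt"
	"net/rpc"
)

// acquireReadLockQuorum asks every lock server for a read lock on name and
// reports success only when a simple majority answered, so up to N/2-1
// nodes may be down or unreachable.
func acquireReadLockQuorum(nodes []string, name string) bool {
	granted := 0
	for _, addr := range nodes {
		client, err := rpc.DialHTTPPath("tcp", addr, "/minio/lock") // lockRPCPath
		if err != nil {
			continue // an unreachable node simply contributes nothing to the quorum
		}
		var reply bool
		// The RLock handler in this change always records the read lock and
		// returns nil, so a nil error counts as a grant here.
		if err := client.Call("Dsync.RLock", &name, &reply); err == nil {
			granted++
		}
		client.Close()
	}
	return granted >= len(nodes)/2+1 // majority: tolerates N/2-1 failures
}

func main() {
	nodes := []string{"node1:9000", "node2:9000", "node3:9000"} // hypothetical peers
	fmt.Println("read lock acquired:", acquireReadLockQuorum(nodes, "bucket/object"))
}
```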
```diff
@@ -31,7 +31,9 @@ const lockRPCPath = "/minio/lock"
 type lockServer struct {
 	rpcPath string
 	mutex   sync.Mutex
-	lockMap map[string]struct{}
+	// e.g, when a Lock(name) is held, map[string][]bool{"name" : []bool{true}}
+	// when one or more RLock() is held, map[string][]bool{"name" : []bool{false, false}}
+	lockMap map[string][]bool
 }
 
 /// Distributed lock handlers
```
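The comments on the new field define the encoding this change relies on: a write lock is a single true element, and each held read lock adds one false element. A small standalone sketch of that invariant (the helper names are hypothetical, not part of the diff):

```go
package main

import "fmt"

// isWriteLocked reports whether the entry encodes an exclusive Lock():
// a single element that is true.
func isWriteLocked(entry []bool) bool {
	return len(entry) == 1 && entry[0]
}

// readerCount reports how many RLock() holders the entry encodes:
// one false element per read lock.
func readerCount(entry []bool) int {
	if isWriteLocked(entry) {
		return 0
	}
	return len(entry)
}

func main() {
	lockMap := map[string][]bool{
		"bucket/a": {true},         // held by one Lock()
		"bucket/b": {false, false}, // held by two RLock() callers
	}
	fmt.Println(isWriteLocked(lockMap["bucket/a"]), readerCount(lockMap["bucket/b"])) // true 2
}
```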
```diff
@@ -43,7 +45,7 @@ func (l *lockServer) Lock(name *string, reply *bool) error {
 	_, ok := l.lockMap[*name]
 	if !ok {
 		*reply = true
-		l.lockMap[*name] = struct{}{}
+		l.lockMap[*name] = []bool{true}
 		return nil
 	}
 	*reply = false
```
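With the new representation, Lock still grants only when the name has no entry at all, so outstanding read locks (or a previously granted write lock) make it reply false. A standalone sketch of that decision, mirroring the handler's branches:

```go
package main

import "fmt"

// tryLock grants a write lock only when no entry exists for name,
// otherwise it refuses, as in the handler's *reply = false path.
func tryLock(lockMap map[string][]bool, name string) bool {
	if _, ok := lockMap[name]; ok {
		return false // already locked, whether by readers or a writer
	}
	lockMap[name] = []bool{true}
	return true
}

func main() {
	lockMap := map[string][]bool{"bucket/obj": {false}} // one read lock held
	fmt.Println(tryLock(lockMap, "bucket/obj")) // false: readers block the writer
	fmt.Println(tryLock(lockMap, "bucket/new")) // true: fresh name gets []bool{true}
}
```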
```diff
@@ -63,6 +65,40 @@ func (l *lockServer) Unlock(name *string, reply *bool) error {
 	return nil
 }
 
+func (l *lockServer) RLock(name *string, reply *bool) error {
+	l.mutex.Lock()
+	defer l.mutex.Unlock()
+	locksHeld, ok := l.lockMap[*name]
+	if !ok {
+		// First read-lock to be held on *name.
+		l.lockMap[*name] = []bool{false}
+	} else {
+		// Add an entry for this read lock.
+		l.lockMap[*name] = append(locksHeld, false)
+	}
+
+	return nil
+}
+
+func (l *lockServer) RUnlock(name *string, reply *bool) error {
+	l.mutex.Lock()
+	defer l.mutex.Unlock()
+	locksHeld, ok := l.lockMap[*name]
+	if !ok {
+		return fmt.Errorf("RUnlock attempted on an un-locked entity: %s", *name)
+	}
+	if len(locksHeld) > 1 {
+		// Remove one of the read locks held.
+		locksHeld = locksHeld[1:]
+		l.lockMap[*name] = locksHeld
+	} else {
+		// Delete the map entry since this is the last read lock held
+		// on *name.
+		delete(l.lockMap, *name)
+	}
+	return nil
+}
+
 // Initialize distributed lock.
 func initDistributedNSLock(mux *router.Router, serverConfig serverCmdConfig) {
 	lockServers := newLockServers(serverConfig)
```
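Taken together, RLock and RUnlock behave like a per-name reference count: each RLock appends one false entry, each RUnlock removes one, and the map key is deleted when the last reader leaves. A test-style sketch exercising the handlers above (hypothetical, assumed to live in the same package and to use the file's existing sync and testing imports):

```go
func TestReadLockRefCount(t *testing.T) {
	l := &lockServer{
		rpcPath: "/minio/lock",
		mutex:   sync.Mutex{},
		lockMap: make(map[string][]bool),
	}
	name, reply := "bucket/object", false

	// Two read locks: the entry grows to two false elements.
	l.RLock(&name, &reply)
	l.RLock(&name, &reply)
	if len(l.lockMap[name]) != 2 {
		t.Fatalf("expected 2 read-lock entries, got %d", len(l.lockMap[name]))
	}

	// Releasing both removes the map entry entirely.
	l.RUnlock(&name, &reply)
	l.RUnlock(&name, &reply)
	if _, ok := l.lockMap[name]; ok {
		t.Fatal("expected map entry to be deleted after last RUnlock")
	}

	// A further RUnlock is an error, as in the handler.
	if err := l.RUnlock(&name, &reply); err == nil {
		t.Fatal("expected error for RUnlock on an un-locked entity")
	}
}
```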
```diff
@@ -91,7 +127,7 @@ func newLockServers(serverConfig serverCmdConfig) (lockServers []*lockServer) {
 		lockServers = append(lockServers, &lockServer{
 			rpcPath: export,
 			mutex:   sync.Mutex{},
-			lockMap: make(map[string]struct{}),
+			lockMap: make(map[string][]bool),
 		})
 	}
 }
```
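Not shown in this diff is how each lockServer gets exposed over the wire. A hypothetical wiring sketch follows: it assumes net/rpc registration under a "Dsync" service name and a gorilla mux router (imported as router), with each server mounted at lockRPCPath joined with its export path; none of those details are confirmed by this change.

```go
// registerLockServers mounts every lockServer on the router as an RPC
// endpoint. Hypothetical: the "Dsync" name and routing calls are assumptions.
func registerLockServers(mux *router.Router, lockServers []*lockServer) error {
	for _, lockSrv := range lockServers {
		rpcServer := rpc.NewServer()
		if err := rpcServer.RegisterName("Dsync", lockSrv); err != nil {
			return err
		}
		// *rpc.Server implements http.Handler, so it can be mounted directly.
		mux.Path(path.Join(lockRPCPath, lockSrv.rpcPath)).Handler(rpcServer)
	}
	return nil
}
```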