e9b2bf00ad
This PR moves locking from a global entity to a more localized, set-level entity, allowing locks to be held only on the resources writing to a given collection of disks rather than at a global level. In the process, this PR also removes the top-level limit of 32 nodes, allowing an unlimited number of nodes. This is a precursor change before bringing in bucket expansion.
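As a rough sketch of the idea only (the names below, such as erasureSet and namespaceLock, are illustrative and not the actual minio or dsync types), per-set locking keeps a separate lock table for each collection of disks instead of one global lock:

package sketch

import "sync"

// Illustrative only: a per-set lock table, so that writes to one set of disks
// never contend with writes to a different set, and the number of sets (and
// hence nodes) is not capped at a fixed limit such as 32.
type namespaceLock struct{ sync.RWMutex }

type erasureSet struct {
	disks   []string                  // disks backing this particular set
	mu      sync.Mutex                // guards the lockers map
	lockers map[string]*namespaceLock // per-resource locks, scoped to this set
}

// lockerFor returns the lock for a resource within this set, creating it on
// first use. Other sets keep their own, independent lockers.
func (s *erasureSet) lockerFor(resource string) *namespaceLock {
	s.mu.Lock()
	defer s.mu.Unlock()
	if l, ok := s.lockers[resource]; ok {
		return l
	}
	l := &namespaceLock{}
	s.lockers[resource] = l
	return l
}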
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package dsync_test

import (
	"fmt"
	"sync"

	. "github.com/minio/minio/pkg/dsync"
)

// WriteLock is the lockMap value that marks an exclusive write lock.
const WriteLock = -1

// lockServer implements a simple in-memory lock table with RPC-style
// method signatures for use by the dsync tests.
type lockServer struct {
	mutex sync.Mutex
	// Map of locks, with negative value indicating (exclusive) write lock
	// and positive values indicating number of read locks
	lockMap map[string]int64
}
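
// Note (assumption): these exported methods follow the net/rpc convention
// (an args struct plus a pointer reply, returning error), so an instance of
// lockServer can be exposed over RPC by the tests. A minimal sketch, with
// the service name "Dsync" chosen only for illustration:
//
//	locksrv := &lockServer{lockMap: make(map[string]int64)}
//	rpcServer := rpc.NewServer()
//	rpcServer.RegisterName("Dsync", locksrv)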

// Lock claims an exclusive write lock on args.Resource. *reply reports
// whether the lock was granted.
func (l *lockServer) Lock(args *LockArgs, reply *bool) error {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	if _, *reply = l.lockMap[args.Resource]; !*reply {
		l.lockMap[args.Resource] = WriteLock // No locks held on the given name, so claim write lock
	}
	*reply = !*reply // Negate *reply to return true when lock is granted or false otherwise
	return nil
}

// Unlock releases the write lock held on args.Resource. It returns an error
// if the resource is unlocked or only read locks are held.
func (l *lockServer) Unlock(args *LockArgs, reply *bool) error {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	var locksHeld int64
	if locksHeld, *reply = l.lockMap[args.Resource]; !*reply { // No lock is held on the given name
		return fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.Resource)
	}
	if *reply = locksHeld == WriteLock; !*reply { // Unless it is a write lock
		return fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.Resource, locksHeld)
	}
	delete(l.lockMap, args.Resource) // Remove the write lock
	return nil
}

// ReadLock is the amount added to a lockMap entry for each shared read lock held.
const ReadLock = 1

// RLock claims a shared read lock on args.Resource. *reply is true when the
// read lock is granted and false when a write lock is already held.
func (l *lockServer) RLock(args *LockArgs, reply *bool) error {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	var locksHeld int64
	if locksHeld, *reply = l.lockMap[args.Resource]; !*reply {
		l.lockMap[args.Resource] = ReadLock // No locks held on the given name, so claim (first) read lock
		*reply = true
	} else {
		if *reply = locksHeld != WriteLock; *reply { // Unless there is a write lock
			l.lockMap[args.Resource] = locksHeld + ReadLock // Grant another read lock
		}
	}
	return nil
}

// RUnlock releases one read lock held on args.Resource. It returns an error
// if the resource is unlocked or write locked.
func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	var locksHeld int64
	if locksHeld, *reply = l.lockMap[args.Resource]; !*reply { // No lock is held on the given name
		return fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.Resource)
	}
	if *reply = locksHeld != WriteLock; !*reply { // A write-lock is held, cannot release a read lock
		return fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.Resource)
	}
	if locksHeld > ReadLock {
		l.lockMap[args.Resource] = locksHeld - ReadLock // Remove one of the read locks held
	} else {
		delete(l.lockMap, args.Resource) // Remove the (last) read lock
	}
	return nil
}

// ForceUnlock releases whatever lock is held on args.Resource, whether read
// or write. It expects an empty UID in args.
func (l *lockServer) ForceUnlock(args *LockArgs, reply *bool) error {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	if len(args.UID) != 0 {
		return fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.UID)
	}
	delete(l.lockMap, args.Resource) // Remove the lock (irrespective of write or read lock)
	*reply = true
	return nil
}
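
For reference, a minimal sketch (not part of the original file) of how this lock server's read/write-lock accounting behaves when its methods are called directly; in the dsync tests the methods are normally reached over RPC, so the direct calls below are for illustration only:

package dsync_test

import . "github.com/minio/minio/pkg/dsync"

func exampleLockServerUsage() {
	l := &lockServer{lockMap: make(map[string]int64)}

	var granted bool
	// Two read locks on the same resource bump the counter to 2.
	l.RLock(&LockArgs{Resource: "bucket/object"}, &granted) // granted == true, lockMap value == 1
	l.RLock(&LockArgs{Resource: "bucket/object"}, &granted) // granted == true, lockMap value == 2

	// A write lock is refused while read locks are held.
	l.Lock(&LockArgs{Resource: "bucket/object"}, &granted) // granted == false

	// Releasing both read locks frees the resource again.
	l.RUnlock(&LockArgs{Resource: "bucket/object"}, &granted)
	l.RUnlock(&LockArgs{Resource: "bucket/object"}, &granted)

	// Now an exclusive write lock can be claimed.
	l.Lock(&LockArgs{Resource: "bucket/object"}, &granted) // granted == true, lockMap value == WriteLock
}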