Mirror of https://github.com/minio/minio.git
Bypass network in lock requests to local server (#4465)
This makes lock RPCs similar to other RPCs, where requests to the local server bypass the network: requests to the local lock subsystem skip the network layer and access the locking data structures directly. This incidentally fixes #4451.
parent 2559614bfd
commit 986aa8fabf
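In outline, the change works like this: every endpoint gets a locker that satisfies one common interface; remote endpoints get an RPC-backed client, while local endpoints get an in-process locker that works on the lock map directly, so no network hop is involved. The sketch below illustrates only that selection step, under simplified, hypothetical names (netLocker, inProcessLocker, rpcLocker, newLockers); the real code uses dsync.NetLocker, localLocker, newLockRPCClient and newDsyncNodes, as the diff that follows shows.

package main

import (
	"fmt"
	"sync"
)

// netLocker is a stand-in for the common locker interface
// (dsync.NetLocker in the real code).
type netLocker interface {
	Lock(resource, uid string) (bool, error)
	Unlock(resource, uid string) (bool, error)
}

// inProcessLocker plays the role of localLocker: it grants and releases
// locks by mutating a map in this process, with no network round trip.
type inProcessLocker struct {
	mu    sync.Mutex
	locks map[string]string // resource -> owning uid
}

func (l *inProcessLocker) Lock(resource, uid string) (bool, error) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if _, taken := l.locks[resource]; taken {
		return false, nil
	}
	l.locks[resource] = uid
	return true, nil
}

func (l *inProcessLocker) Unlock(resource, uid string) (bool, error) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if owner, taken := l.locks[resource]; !taken || owner != uid {
		return false, fmt.Errorf("no matching lock for %s", resource)
	}
	delete(l.locks, resource)
	return true, nil
}

// rpcLocker plays the role of the lock RPC client (newLockRPCClient in the
// real code); its methods would issue network calls, stubbed out here.
type rpcLocker struct{ addr string }

func (r *rpcLocker) Lock(resource, uid string) (bool, error) {
	return false, fmt.Errorf("rpc call to %s not implemented in this sketch", r.addr)
}

func (r *rpcLocker) Unlock(resource, uid string) (bool, error) {
	return false, fmt.Errorf("rpc call to %s not implemented in this sketch", r.addr)
}

type endpoint struct {
	Host    string
	IsLocal bool
}

// newLockers mirrors the shape of newDsyncNodes: one locker per endpoint,
// in-process for local endpoints, RPC-backed for remote ones.
func newLockers(endpoints []endpoint) []netLocker {
	clnts := make([]netLocker, len(endpoints))
	for i, ep := range endpoints {
		if ep.IsLocal {
			clnts[i] = &inProcessLocker{locks: make(map[string]string)}
			continue
		}
		clnts[i] = &rpcLocker{addr: ep.Host}
	}
	return clnts
}

func main() {
	lockers := newLockers([]endpoint{
		{Host: "127.0.0.1:9000", IsLocal: true},
		{Host: "10.0.0.2:9000", IsLocal: false},
	})
	granted, err := lockers[0].Lock("bucket/object", "uid-1")
	fmt.Println("local lock granted:", granted, "err:", err)
}

Because both implementations satisfy the same interface, the dsync layer needs no special case for the local node; bypassing the network is purely a construction-time decision.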
@@ -22,7 +22,7 @@ import (
 )
 
 // Similar to removeEntry but only removes an entry only if the lock entry exists in map.
-func (l *lockServer) removeEntryIfExists(nlrip nameLockRequesterInfoPair) {
+func (l *localLocker) removeEntryIfExists(nlrip nameLockRequesterInfoPair) {
     // Check if entry is still in map (could have been removed altogether by 'concurrent' (R)Unlock of last entry)
     if lri, ok := l.lockMap[nlrip.name]; ok {
         if !l.removeEntry(nlrip.name, nlrip.lri.uid, &lri) {
@@ -38,7 +38,7 @@ func (l *lockServer) removeEntryIfExists(nlrip nameLockRequesterInfoPair) {
 
 // removeEntry either, based on the uid of the lock message, removes a single entry from the
 // lockRequesterInfo array or the whole array from the map (in case of a write lock or last read lock)
-func (l *lockServer) removeEntry(name, uid string, lri *[]lockRequesterInfo) bool {
+func (l *localLocker) removeEntry(name, uid string, lri *[]lockRequesterInfo) bool {
     // Find correct entry to remove based on uid.
     for index, entry := range *lri {
         if entry.uid == uid {
@@ -38,9 +38,9 @@ func TestLockRpcServerRemoveEntryIfExists(t *testing.T) {
     nlrip := nameLockRequesterInfoPair{name: "name", lri: lri}
 
     // first test by simulating item has already been deleted
-    locker.removeEntryIfExists(nlrip)
+    locker.ll.removeEntryIfExists(nlrip)
     {
-        gotLri, _ := locker.lockMap["name"]
+        gotLri, _ := locker.ll.lockMap["name"]
         expectedLri := []lockRequesterInfo(nil)
         if !reflect.DeepEqual(expectedLri, gotLri) {
             t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
@@ -48,10 +48,10 @@ func TestLockRpcServerRemoveEntryIfExists(t *testing.T) {
     }
 
     // then test normal deletion
-    locker.lockMap["name"] = []lockRequesterInfo{lri} // add item
-    locker.removeEntryIfExists(nlrip)
+    locker.ll.lockMap["name"] = []lockRequesterInfo{lri} // add item
+    locker.ll.removeEntryIfExists(nlrip)
     {
-        gotLri, _ := locker.lockMap["name"]
+        gotLri, _ := locker.ll.lockMap["name"]
         expectedLri := []lockRequesterInfo(nil)
         if !reflect.DeepEqual(expectedLri, gotLri) {
             t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
@@ -81,32 +81,32 @@ func TestLockRpcServerRemoveEntry(t *testing.T) {
         timeLastCheck: UTCNow(),
     }
 
-    locker.lockMap["name"] = []lockRequesterInfo{
+    locker.ll.lockMap["name"] = []lockRequesterInfo{
         lockRequesterInfo1,
         lockRequesterInfo2,
     }
 
-    lri, _ := locker.lockMap["name"]
+    lri, _ := locker.ll.lockMap["name"]
 
     // test unknown uid
-    if locker.removeEntry("name", "unknown-uid", &lri) {
+    if locker.ll.removeEntry("name", "unknown-uid", &lri) {
         t.Errorf("Expected %#v, got %#v", false, true)
     }
 
-    if !locker.removeEntry("name", "0123-4567", &lri) {
+    if !locker.ll.removeEntry("name", "0123-4567", &lri) {
         t.Errorf("Expected %#v, got %#v", true, false)
     } else {
-        gotLri, _ := locker.lockMap["name"]
+        gotLri, _ := locker.ll.lockMap["name"]
         expectedLri := []lockRequesterInfo{lockRequesterInfo2}
         if !reflect.DeepEqual(expectedLri, gotLri) {
             t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
         }
     }
 
-    if !locker.removeEntry("name", "89ab-cdef", &lri) {
+    if !locker.ll.removeEntry("name", "89ab-cdef", &lri) {
         t.Errorf("Expected %#v, got %#v", true, false)
     } else {
-        gotLri, _ := locker.lockMap["name"]
+        gotLri, _ := locker.ll.lockMap["name"]
         expectedLri := []lockRequesterInfo(nil)
         if !reflect.DeepEqual(expectedLri, gotLri) {
             t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
@@ -60,9 +60,7 @@ func isWriteLock(lri []lockRequesterInfo) bool {
 // lockServer is type for RPC handlers
 type lockServer struct {
     AuthRPCServer
-    serviceEndpoint string
-    mutex           sync.Mutex
-    lockMap         map[string][]lockRequesterInfo
+    ll localLocker
 }
 
 // Start lock maintenance from all lock servers.
@@ -91,30 +89,11 @@ func startLockMaintainence(lockServers []*lockServer) {
 
 // Register distributed NS lock handlers.
 func registerDistNSLockRouter(mux *router.Router, endpoints EndpointList) error {
-    // Initialize a new set of lock servers.
-    lockServers := newLockServers(endpoints)
-
     // Start lock maintenance from all lock servers.
-    startLockMaintainence(lockServers)
+    startLockMaintainence(globalLockServers)
 
     // Register initialized lock servers to their respective rpc endpoints.
-    return registerStorageLockers(mux, lockServers)
-}
-
-// Create one lock server for every local storage rpc server.
-func newLockServers(endpoints EndpointList) (lockServers []*lockServer) {
-    for _, endpoint := range endpoints {
-        // Initialize new lock server for each local node.
-        if endpoint.IsLocal {
-            lockServers = append(lockServers, &lockServer{
-                serviceEndpoint: endpoint.Path,
-                mutex:           sync.Mutex{},
-                lockMap:         make(map[string][]lockRequesterInfo),
-            })
-        }
-    }
-
-    return lockServers
+    return registerStorageLockers(mux, globalLockServers)
 }
 
 // registerStorageLockers - register locker rpc handlers for net/rpc library clients
@@ -125,129 +104,178 @@ func registerStorageLockers(mux *router.Router, lockServers []*lockServer) error
             return traceError(err)
         }
         lockRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter()
-        lockRouter.Path(path.Join(lockServicePath, lockServer.serviceEndpoint)).Handler(lockRPCServer)
+        lockRouter.Path(path.Join(lockServicePath, lockServer.ll.serviceEndpoint)).Handler(lockRPCServer)
     }
     return nil
 }
 
-/// Distributed lock handlers
+// localLocker implements Dsync.NetLocker
+type localLocker struct {
+    mutex           sync.Mutex
+    serviceEndpoint string
+    serverAddr      string
+    lockMap         map[string][]lockRequesterInfo
+}
 
-// Lock - rpc handler for (single) write lock operation.
-func (l *lockServer) Lock(args *LockArgs, reply *bool) error {
+func (l *localLocker) ServerAddr() string {
+    return l.serverAddr
+}
+
+func (l *localLocker) ServiceEndpoint() string {
+    return l.serviceEndpoint
+}
+
+func (l *localLocker) Lock(args dsync.LockArgs) (reply bool, err error) {
     l.mutex.Lock()
     defer l.mutex.Unlock()
-    if err := args.IsAuthenticated(); err != nil {
-        return err
-    }
-    _, *reply = l.lockMap[args.LockArgs.Resource]
-    if !*reply { // No locks held on the given name, so claim write lock
-        l.lockMap[args.LockArgs.Resource] = []lockRequesterInfo{
+    _, isLockTaken := l.lockMap[args.Resource]
+    if !isLockTaken { // No locks held on the given name, so claim write lock
+        l.lockMap[args.Resource] = []lockRequesterInfo{
             {
                 writer: true,
-                node: args.LockArgs.ServerAddr,
-                serviceEndpoint: args.LockArgs.ServiceEndpoint,
-                uid: args.LockArgs.UID,
+                node: args.ServerAddr,
+                serviceEndpoint: args.ServiceEndpoint,
+                uid: args.UID,
                 timestamp: UTCNow(),
                 timeLastCheck: UTCNow(),
             },
         }
     }
-    *reply = !*reply // Negate *reply to return true when lock is granted or false otherwise
-    return nil
+    // return reply=true if lock was granted.
+    return !isLockTaken, nil
 }
 
-// Unlock - rpc handler for (single) write unlock operation.
-func (l *lockServer) Unlock(args *LockArgs, reply *bool) error {
+func (l *localLocker) Unlock(args dsync.LockArgs) (reply bool, err error) {
     l.mutex.Lock()
     defer l.mutex.Unlock()
-    if err := args.IsAuthenticated(); err != nil {
-        return err
-    }
     var lri []lockRequesterInfo
-    if lri, *reply = l.lockMap[args.LockArgs.Resource]; !*reply { // No lock is held on the given name
-        return fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.LockArgs.Resource)
+    if lri, reply = l.lockMap[args.Resource]; !reply {
+        // No lock is held on the given name
+        return reply, fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.Resource)
     }
-    if *reply = isWriteLock(lri); !*reply { // Unless it is a write lock
-        return fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.LockArgs.Resource, len(lri))
+    if reply = isWriteLock(lri); !reply {
+        // Unless it is a write lock
+        return reply, fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.Resource, len(lri))
     }
-    if !l.removeEntry(args.LockArgs.Resource, args.LockArgs.UID, &lri) {
-        return fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.LockArgs.UID)
+    if !l.removeEntry(args.Resource, args.UID, &lri) {
+        return false, fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.UID)
     }
-    return nil
+    return true, nil
+
 }
 
-// RLock - rpc handler for read lock operation.
-func (l *lockServer) RLock(args *LockArgs, reply *bool) error {
+func (l *localLocker) RLock(args dsync.LockArgs) (reply bool, err error) {
     l.mutex.Lock()
     defer l.mutex.Unlock()
-    if err := args.IsAuthenticated(); err != nil {
-        return err
-    }
     lrInfo := lockRequesterInfo{
         writer: false,
-        node: args.LockArgs.ServerAddr,
-        serviceEndpoint: args.LockArgs.ServiceEndpoint,
-        uid: args.LockArgs.UID,
+        node: args.ServerAddr,
+        serviceEndpoint: args.ServiceEndpoint,
+        uid: args.UID,
         timestamp: UTCNow(),
        timeLastCheck: UTCNow(),
    }
-    if lri, ok := l.lockMap[args.LockArgs.Resource]; ok {
-        if *reply = !isWriteLock(lri); *reply { // Unless there is a write lock
-            l.lockMap[args.LockArgs.Resource] = append(l.lockMap[args.LockArgs.Resource], lrInfo)
+    if lri, ok := l.lockMap[args.Resource]; ok {
+        if reply = !isWriteLock(lri); reply {
+            // Unless there is a write lock
+            l.lockMap[args.Resource] = append(l.lockMap[args.Resource], lrInfo)
         }
-    } else { // No locks held on the given name, so claim (first) read lock
-        l.lockMap[args.LockArgs.Resource] = []lockRequesterInfo{lrInfo}
-        *reply = true
+    } else {
+        // No locks held on the given name, so claim (first) read lock
+        l.lockMap[args.Resource] = []lockRequesterInfo{lrInfo}
+        reply = true
     }
-    return nil
+    return reply, nil
 }
 
+func (l *localLocker) RUnlock(args dsync.LockArgs) (reply bool, err error) {
+    l.mutex.Lock()
+    defer l.mutex.Unlock()
+    var lri []lockRequesterInfo
+    if lri, reply = l.lockMap[args.Resource]; !reply {
+        // No lock is held on the given name
+        return reply, fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.Resource)
+    }
+    if reply = !isWriteLock(lri); !reply {
+        // A write-lock is held, cannot release a read lock
+        return reply, fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.Resource)
+    }
+    if !l.removeEntry(args.Resource, args.UID, &lri) {
+        return false, fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.UID)
+    }
+    return reply, nil
+}
+
+func (l *localLocker) ForceUnlock(args dsync.LockArgs) (reply bool, err error) {
+    l.mutex.Lock()
+    defer l.mutex.Unlock()
+    if len(args.UID) != 0 {
+        return false, fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.UID)
+    }
+    if _, ok := l.lockMap[args.Resource]; ok {
+        // Only clear lock when it is taken
+        // Remove the lock (irrespective of write or read lock)
+        delete(l.lockMap, args.Resource)
+    }
+    return true, nil
+}
+
+/// Distributed lock handlers
+
+// Lock - rpc handler for (single) write lock operation.
+func (l *lockServer) Lock(args *LockArgs, reply *bool) (err error) {
+    if err = args.IsAuthenticated(); err != nil {
+        return err
+    }
+    *reply, err = l.ll.Lock(args.LockArgs)
+    return err
+}
+
+// Unlock - rpc handler for (single) write unlock operation.
+func (l *lockServer) Unlock(args *LockArgs, reply *bool) (err error) {
+    if err = args.IsAuthenticated(); err != nil {
+        return err
+    }
+    *reply, err = l.ll.Unlock(args.LockArgs)
+    return err
+}
+
+// RLock - rpc handler for read lock operation.
+func (l *lockServer) RLock(args *LockArgs, reply *bool) (err error) {
+    if err = args.IsAuthenticated(); err != nil {
+        return err
+    }
+    *reply, err = l.ll.RLock(args.LockArgs)
+    return err
+}
+
 // RUnlock - rpc handler for read unlock operation.
-func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error {
-    l.mutex.Lock()
-    defer l.mutex.Unlock()
-    if err := args.IsAuthenticated(); err != nil {
+func (l *lockServer) RUnlock(args *LockArgs, reply *bool) (err error) {
+    if err = args.IsAuthenticated(); err != nil {
         return err
     }
-    var lri []lockRequesterInfo
-    if lri, *reply = l.lockMap[args.LockArgs.Resource]; !*reply { // No lock is held on the given name
-        return fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.LockArgs.Resource)
-    }
-    if *reply = !isWriteLock(lri); !*reply { // A write-lock is held, cannot release a read lock
-        return fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.LockArgs.Resource)
-    }
-    if !l.removeEntry(args.LockArgs.Resource, args.LockArgs.UID, &lri) {
-        return fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.LockArgs.UID)
-    }
-    return nil
+    *reply, err = l.ll.RUnlock(args.LockArgs)
+    return err
 }
 
 // ForceUnlock - rpc handler for force unlock operation.
-func (l *lockServer) ForceUnlock(args *LockArgs, reply *bool) error {
-    l.mutex.Lock()
-    defer l.mutex.Unlock()
-    if err := args.IsAuthenticated(); err != nil {
+func (l *lockServer) ForceUnlock(args *LockArgs, reply *bool) (err error) {
+    if err = args.IsAuthenticated(); err != nil {
         return err
     }
-    if len(args.LockArgs.UID) != 0 {
-        return fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.LockArgs.UID)
-    }
-    if _, ok := l.lockMap[args.LockArgs.Resource]; ok { // Only clear lock when set
-        delete(l.lockMap, args.LockArgs.Resource) // Remove the lock (irrespective of write or read lock)
-    }
-    *reply = true
-    return nil
+    *reply, err = l.ll.ForceUnlock(args.LockArgs)
+    return err
 }
 
 // Expired - rpc handler for expired lock status.
 func (l *lockServer) Expired(args *LockArgs, reply *bool) error {
-    l.mutex.Lock()
-    defer l.mutex.Unlock()
     if err := args.IsAuthenticated(); err != nil {
         return err
     }
+    l.ll.mutex.Lock()
+    defer l.ll.mutex.Unlock()
     // Lock found, proceed to verify if belongs to given uid.
-    if lri, ok := l.lockMap[args.LockArgs.Resource]; ok {
+    if lri, ok := l.ll.lockMap[args.LockArgs.Resource]; ok {
         // Check whether uid is still active
         for _, entry := range lri {
             if entry.uid == args.LockArgs.UID {
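For reference, the method set that localLocker now provides, read off the signatures in the hunk above. This is an illustrative restatement under hypothetical names (netLocker, and a local LockArgs mirroring only the fields used above); the actual dsync.NetLocker and dsync.LockArgs definitions in github.com/minio/dsync may differ in detail.

package lock

// LockArgs mirrors the dsync.LockArgs fields referenced by the handlers above.
type LockArgs struct {
	UID             string
	Resource        string
	ServerAddr      string
	ServiceEndpoint string
}

// netLocker is the interface shape that both the in-process localLocker and
// the remote lock RPC client satisfy, so the dsync layer can use them interchangeably.
type netLocker interface {
	Lock(args LockArgs) (bool, error)
	Unlock(args LockArgs) (bool, error)
	RLock(args LockArgs) (bool, error)
	RUnlock(args LockArgs) (bool, error)
	ForceUnlock(args LockArgs) (bool, error)
	ServerAddr() string
	ServiceEndpoint() string
}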
@@ -277,10 +305,10 @@ type nameLockRequesterInfoPair struct {
 //
 // We will ignore the error, and we will retry later to get a resolve on this lock
 func (l *lockServer) lockMaintenance(interval time.Duration) {
-    l.mutex.Lock()
+    l.ll.mutex.Lock()
     // Get list of long lived locks to check for staleness.
-    nlripLongLived := getLongLivedLocks(l.lockMap, interval)
-    l.mutex.Unlock()
+    nlripLongLived := getLongLivedLocks(l.ll.lockMap, interval)
+    l.ll.mutex.Unlock()
 
     serverCred := serverConfig.GetCredential()
     // Validate if long lived locks are indeed clean.
@@ -308,9 +336,9 @@ func (l *lockServer) lockMaintenance(interval time.Duration) {
         if expired {
             // The lock is no longer active at server that originated the lock
             // So remove the lock from the map.
-            l.mutex.Lock()
-            l.removeEntryIfExists(nlrip) // Purge the stale entry if it exists.
-            l.mutex.Unlock()
+            l.ll.mutex.Lock()
+            l.ll.removeEntryIfExists(nlrip) // Purge the stale entry if it exists.
+            l.ll.mutex.Unlock()
         }
     }
 }
@@ -49,10 +49,12 @@ func createLockTestServer(t *testing.T) (string, *lockServer, string) {
     }
 
     locker := &lockServer{
-        AuthRPCServer:   AuthRPCServer{},
-        serviceEndpoint: "rpc-path",
-        mutex:           sync.Mutex{},
-        lockMap:         make(map[string][]lockRequesterInfo),
+        AuthRPCServer: AuthRPCServer{},
+        ll: localLocker{
+            mutex:           sync.Mutex{},
+            serviceEndpoint: "rpc-path",
+            lockMap:         make(map[string][]lockRequesterInfo),
+        },
     }
     creds := serverConfig.GetCredential()
     loginArgs := LoginRPCArgs{
@@ -93,7 +95,7 @@ func TestLockRpcServerLock(t *testing.T) {
     if !result {
         t.Errorf("Expected %#v, got %#v", true, result)
     } else {
-        gotLri, _ := locker.lockMap["name"]
+        gotLri, _ := locker.ll.lockMap["name"]
         expectedLri := []lockRequesterInfo{
             {
                 writer: true,
@@ -163,7 +165,7 @@ func TestLockRpcServerUnlock(t *testing.T) {
     if !result {
         t.Errorf("Expected %#v, got %#v", true, result)
     } else {
-        gotLri, _ := locker.lockMap["name"]
+        gotLri, _ := locker.ll.lockMap["name"]
         expectedLri := []lockRequesterInfo(nil)
         if !testLockEquality(expectedLri, gotLri) {
             t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
@@ -194,7 +196,7 @@ func TestLockRpcServerRLock(t *testing.T) {
     if !result {
         t.Errorf("Expected %#v, got %#v", true, result)
     } else {
-        gotLri, _ := locker.lockMap["name"]
+        gotLri, _ := locker.ll.lockMap["name"]
         expectedLri := []lockRequesterInfo{
             {
                 writer: false,
@@ -281,7 +283,7 @@ func TestLockRpcServerRUnlock(t *testing.T) {
     if !result {
         t.Errorf("Expected %#v, got %#v", true, result)
     } else {
-        gotLri, _ := locker.lockMap["name"]
+        gotLri, _ := locker.ll.lockMap["name"]
         expectedLri := []lockRequesterInfo{
             {
                 writer: false,
@@ -305,7 +307,7 @@ func TestLockRpcServerRUnlock(t *testing.T) {
     if !result {
         t.Errorf("Expected %#v, got %#v", true, result)
     } else {
-        gotLri, _ := locker.lockMap["name"]
+        gotLri, _ := locker.ll.lockMap["name"]
         expectedLri := []lockRequesterInfo(nil)
         if !testLockEquality(expectedLri, gotLri) {
             t.Errorf("Expected %#v, got %#v", expectedLri, gotLri)
@@ -427,6 +429,12 @@ func TestLockServers(t *testing.T) {
         return
     }
 
+    rootPath, err := newTestConfig(globalMinioDefaultRegion)
+    if err != nil {
+        t.Fatalf("Init Test config failed")
+    }
+    defer removeAll(rootPath)
+
     currentIsDistXL := globalIsDistXL
     defer func() {
         globalIsDistXL = currentIsDistXL
@@ -471,9 +479,13 @@ func TestLockServers(t *testing.T) {
     // Validates lock server initialization.
     for i, testCase := range testCases {
         globalIsDistXL = testCase.isDistXL
-        lockServers := newLockServers(testCase.endpoints)
-        if len(lockServers) != testCase.totalLockServers {
-            t.Fatalf("Test %d: Expected total %d, got %d", i+1, testCase.totalLockServers, len(lockServers))
+        globalLockServers = nil
+        _, _ = newDsyncNodes(testCase.endpoints)
+        if err != nil {
+            t.Fatalf("Got unexpected error initializing lock servers: %v", err)
+        }
+        if len(globalLockServers) != testCase.totalLockServers {
+            t.Fatalf("Test %d: Expected total %d, got %d", i+1, testCase.totalLockServers, len(globalLockServers))
         }
     }
 }
@@ -27,6 +27,9 @@ import (
 // Global name space lock.
 var globalNSMutex *nsLockMap
 
+// Global lock servers
+var globalLockServers []*lockServer
+
 // RWLocker - locker interface extends sync.Locker
 // to introduce RLock, RUnlock.
 type RWLocker interface {
@@ -36,27 +39,45 @@ type RWLocker interface {
 }
 
 // Initialize distributed locking only in case of distributed setup.
-// Returns if the setup is distributed or not on success.
-func initDsyncNodes() error {
+// Returns lock clients and the node index for the current server.
+func newDsyncNodes(endpoints EndpointList) (clnts []dsync.NetLocker, myNode int) {
     cred := serverConfig.GetCredential()
     // Initialize rpc lock client information only if this instance is a distributed setup.
-    clnts := make([]dsync.NetLocker, len(globalEndpoints))
-    myNode := -1
-    for index, endpoint := range globalEndpoints {
-        clnts[index] = newLockRPCClient(authConfig{
-            accessKey: cred.AccessKey,
-            secretKey: cred.SecretKey,
-            serverAddr: endpoint.Host,
-            secureConn: globalIsSSL,
-            serviceEndpoint: pathutil.Join(minioReservedBucketPath, lockServicePath, endpoint.Path),
-            serviceName: lockServiceName,
-        })
-        if endpoint.IsLocal && myNode == -1 {
+    clnts = make([]dsync.NetLocker, len(endpoints))
+    myNode = -1
+    for index, endpoint := range endpoints {
+        if !endpoint.IsLocal {
+            // For a remote endpoints setup a lock RPC client.
+            clnts[index] = newLockRPCClient(authConfig{
+                accessKey: cred.AccessKey,
+                secretKey: cred.SecretKey,
+                serverAddr: endpoint.Host,
+                secureConn: globalIsSSL,
+                serviceEndpoint: pathutil.Join(minioReservedBucketPath, lockServicePath, endpoint.Path),
+                serviceName: lockServiceName,
+            })
+            continue
+        }
+
+        // Local endpoint
+        if myNode == -1 {
             myNode = index
         }
+        // For a local endpoint, setup a local lock server to
+        // avoid network requests.
+        localLockServer := lockServer{
+            AuthRPCServer: AuthRPCServer{},
+            ll: localLocker{
+                mutex:           sync.Mutex{},
+                serviceEndpoint: endpoint.Path,
+                serverAddr:      endpoint.Host,
+                lockMap:         make(map[string][]lockRequesterInfo),
+            },
+        }
+        globalLockServers = append(globalLockServers, &localLockServer)
+        clnts[index] = &(localLockServer.ll)
     }
-
-    return dsync.Init(clnts, myNode)
+    return clnts, myNode
 }
 
 // initNSLock - initialize name space lock map.
@@ -25,6 +25,7 @@ import (
     "time"
 
     "github.com/minio/cli"
+    "github.com/minio/dsync"
 )
 
 var serverFlags = []cli.Flag{
@@ -244,7 +245,8 @@ func serverMain(ctx *cli.Context) {
 
     // Set nodes for dsync for distributed setup.
     if globalIsDistXL {
-        fatalIf(initDsyncNodes(), "Unable to initialize distributed locking clients")
+        clnts, myNode := newDsyncNodes(globalEndpoints)
+        fatalIf(dsync.Init(clnts, myNode), "Unable to initialize distributed locking clients")
     }
 
     // Initialize name space lock.
@@ -340,10 +340,11 @@ func TestServerListenAndServePlain(t *testing.T) {
 }
 
 func TestServerListenAndServeTLS(t *testing.T) {
-    _, err := newTestConfig(globalMinioDefaultRegion)
+    rootPath, err := newTestConfig(globalMinioDefaultRegion)
     if err != nil {
         t.Fatalf("Init Test config failed")
     }
+    defer removeAll(rootPath)
 
     wait := make(chan struct{})
     addr := net.JoinHostPort("127.0.0.1", getFreePort())