lock/rpc: change rpcPath to be called serviceEndpoint. (#4088)
This is a cleanup to ensure proper naming.
commit 1b1b9e4801 (parent 1b0b2c1c76)
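In short: the "/lock" route constant lockRPCPath becomes lockServicePath, the literal "Dsync" RPC service name becomes the lockServiceName constant, and the rpcPath field and arguments become serviceEndpoint throughout. A minimal sketch of the renamed identifiers, derived from the diff below (not a complete listing):

package main

import "time"

const (
	// Lock rpc server endpoint (was lockRPCPath).
	lockServicePath = "/lock"
	// Lock rpc service name (was the string literal "Dsync").
	lockServiceName = "Dsync"
)

// lockRequesterInfo after the rename: the rpcPath field is now serviceEndpoint.
type lockRequesterInfo struct {
	writer          bool      // Whether write or read lock.
	node            string    // Network address of client claiming lock.
	serviceEndpoint string    // RPC path of client claiming lock (was rpcPath).
	uid             string    // UID to uniquely identify request of client.
	timestamp       time.Time // Timestamp set at the time of initialization.
	timeLastCheck   time.Time // Timestamp for last check of validity of lock.
}

func main() {
	// Construct a sample entry using the renamed field and constants.
	_ = lockRequesterInfo{serviceEndpoint: lockServicePath + "/mnt/disk1", timestamp: time.Now()}
	_ = lockServiceName
}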
@@ -29,9 +29,9 @@ func TestLockRPCClient(t *testing.T) {
 accessKey: "abcd",
 secretKey: "abcd123",
 serverAddr: fmt.Sprintf("%X", UTCNow().UnixNano()),
-serviceEndpoint: pathJoin(lockRPCPath, "/test/1"),
+serviceEndpoint: pathJoin(lockServicePath, "/test/1"),
 secureConn: false,
-serviceName: "Dsync",
+serviceName: lockServiceName,
 })

 // Attempt all calls.
@@ -28,12 +28,12 @@ func TestLockRpcServerRemoveEntryIfExists(t *testing.T) {
 defer removeAll(testPath)

 lri := lockRequesterInfo{
 writer: false,
 node: "host",
-rpcPath: "rpc-path",
+serviceEndpoint: "rpc-path",
 uid: "0123-4567",
 timestamp: UTCNow(),
 timeLastCheck: UTCNow(),
 }
 nlrip := nameLockRequesterInfoPair{name: "name", lri: lri}
@@ -65,20 +65,20 @@ func TestLockRpcServerRemoveEntry(t *testing.T) {
 defer removeAll(testPath)

 lockRequesterInfo1 := lockRequesterInfo{
 writer: true,
 node: "host",
-rpcPath: "rpc-path",
+serviceEndpoint: "rpc-path",
 uid: "0123-4567",
 timestamp: UTCNow(),
 timeLastCheck: UTCNow(),
 }
 lockRequesterInfo2 := lockRequesterInfo{
 writer: true,
 node: "host",
-rpcPath: "rpc-path",
+serviceEndpoint: "rpc-path",
 uid: "89ab-cdef",
 timestamp: UTCNow(),
 timeLastCheck: UTCNow(),
 }

 locker.lockMap["name"] = []lockRequesterInfo{
@@ -127,12 +127,12 @@ func TestLockRpcServerGetLongLivedLocks(t *testing.T) {
 {
 lockMap: map[string][]lockRequesterInfo{
 "test": {{
 writer: true,
 node: "10.1.10.21",
-rpcPath: "/lock/mnt/disk1",
+serviceEndpoint: "/lock/mnt/disk1",
 uid: "10000112",
 timestamp: ut,
 timeLastCheck: ut,
 }},
 },
 lockInterval: 1 * time.Minute,
@@ -142,12 +142,12 @@ func TestLockRpcServerGetLongLivedLocks(t *testing.T) {
 {
 lockMap: map[string][]lockRequesterInfo{
 "test": {{
 writer: true,
 node: "10.1.10.21",
-rpcPath: "/lock/mnt/disk1",
+serviceEndpoint: "/lock/mnt/disk1",
 uid: "10000112",
 timestamp: ut,
 timeLastCheck: ut.Add(-2 * time.Minute),
 }},
 },
 lockInterval: 1 * time.Minute,
@@ -155,12 +155,12 @@ func TestLockRpcServerGetLongLivedLocks(t *testing.T) {
 {
 name: "test",
 lri: lockRequesterInfo{
 writer: true,
 node: "10.1.10.21",
-rpcPath: "/lock/mnt/disk1",
+serviceEndpoint: "/lock/mnt/disk1",
 uid: "10000112",
 timestamp: ut,
 timeLastCheck: ut.Add(-2 * time.Minute),
 },
 },
 },
@@ -30,7 +30,10 @@ import (

 const (
 // Lock rpc server endpoint.
-lockRPCPath = "/lock"
+lockServicePath = "/lock"

+// Lock rpc service name.
+lockServiceName = "Dsync"
+
 // Lock maintenance interval.
 lockMaintenanceInterval = 1 * time.Minute // 1 minute.
@@ -39,17 +42,17 @@ const (
 lockValidityCheckInterval = 2 * time.Minute // 2 minutes.
 )

-// lockRequesterInfo stores various info from the client for each lock that is requested
+// lockRequesterInfo stores various info from the client for each lock that is requested.
 type lockRequesterInfo struct {
-writer bool // Bool whether write or read lock
+writer bool // Bool whether write or read lock.
-node string // Network address of client claiming lock
+node string // Network address of client claiming lock.
-rpcPath string // RPC path of client claiming lock
+serviceEndpoint string // RPC path of client claiming lock.
-uid string // Uid to uniquely identify request of client
+uid string // UID to uniquely identify request of client.
-timestamp time.Time // Timestamp set at the time of initialization
+timestamp time.Time // Timestamp set at the time of initialization.
-timeLastCheck time.Time // Timestamp for last check of validity of lock
+timeLastCheck time.Time // Timestamp for last check of validity of lock.
 }

-// isWriteLock returns whether the lock is a write or read lock
+// isWriteLock returns whether the lock is a write or read lock.
 func isWriteLock(lri []lockRequesterInfo) bool {
 return len(lri) == 1 && lri[0].writer
 }
@@ -57,9 +60,9 @@ func isWriteLock(lri []lockRequesterInfo) bool {
 // lockServer is type for RPC handlers
 type lockServer struct {
 AuthRPCServer
-rpcPath string
+serviceEndpoint string
 mutex sync.Mutex
 lockMap map[string][]lockRequesterInfo
 }

 // Start lock maintenance from all lock servers.
@@ -105,9 +108,9 @@ func newLockServers(srvConfig serverCmdConfig) (lockServers []*lockServer) {
 if isLocalStorage(ep) {
 // Create handler for lock RPCs
 locker := &lockServer{
-rpcPath: getPath(ep),
+serviceEndpoint: getPath(ep),
 mutex: sync.Mutex{},
 lockMap: make(map[string][]lockRequesterInfo),
 }
 lockServers = append(lockServers, locker)
 }
@@ -119,11 +122,11 @@ func newLockServers(srvConfig serverCmdConfig) (lockServers []*lockServer) {
 func registerStorageLockers(mux *router.Router, lockServers []*lockServer) error {
 for _, lockServer := range lockServers {
 lockRPCServer := rpc.NewServer()
-if err := lockRPCServer.RegisterName("Dsync", lockServer); err != nil {
+if err := lockRPCServer.RegisterName(lockServiceName, lockServer); err != nil {
 return traceError(err)
 }
 lockRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter()
-lockRouter.Path(path.Join(lockRPCPath, lockServer.rpcPath)).Handler(lockRPCServer)
+lockRouter.Path(path.Join(lockServicePath, lockServer.serviceEndpoint)).Handler(lockRPCServer)
 }
 return nil
 }
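Aside: the registration above means each local disk's lock handler ends up at the reserved-bucket prefix plus lockServicePath plus the per-disk endpoint, which matches the client path built in initDsyncNodes further down. A minimal, hedged Go sketch of that composition (the minioReservedBucketPath value and the sample disk path are assumed here for illustration):

package main

import (
	"fmt"
	"path"
)

// Assumed value for illustration only; the diff does not show it.
const minioReservedBucketPath = "/minio"

// From the diff above.
const lockServicePath = "/lock"

func main() {
	// Per-disk endpoint, e.g. what getPath(ep) would yield for a local disk.
	serviceEndpoint := "/mnt/disk1"
	// The server mounts the handler under the reserved bucket prefix; the
	// client joins the same three parts when dialing the lock service.
	fmt.Println(path.Join(minioReservedBucketPath, lockServicePath, serviceEndpoint))
	// Prints: /minio/lock/mnt/disk1
}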
@@ -141,12 +144,12 @@ func (l *lockServer) Lock(args *LockArgs, reply *bool) error {
 if !*reply { // No locks held on the given name, so claim write lock
 l.lockMap[args.LockArgs.Resource] = []lockRequesterInfo{
 {
 writer: true,
 node: args.LockArgs.ServerAddr,
-rpcPath: args.LockArgs.ServiceEndpoint,
+serviceEndpoint: args.LockArgs.ServiceEndpoint,
 uid: args.LockArgs.UID,
 timestamp: UTCNow(),
 timeLastCheck: UTCNow(),
 },
 }
 }
@@ -182,12 +185,12 @@ func (l *lockServer) RLock(args *LockArgs, reply *bool) error {
 return err
 }
 lrInfo := lockRequesterInfo{
 writer: false,
 node: args.LockArgs.ServerAddr,
-rpcPath: args.LockArgs.ServiceEndpoint,
+serviceEndpoint: args.LockArgs.ServiceEndpoint,
 uid: args.LockArgs.UID,
 timestamp: UTCNow(),
 timeLastCheck: UTCNow(),
 }
 if lri, ok := l.lockMap[args.LockArgs.Resource]; ok {
 if *reply = !isWriteLock(lri); *reply { // Unless there is a write lock
@@ -288,13 +291,16 @@ func (l *lockServer) lockMaintenance(interval time.Duration) {
 accessKey: serverCred.AccessKey,
 secretKey: serverCred.SecretKey,
 serverAddr: nlrip.lri.node,
-serviceEndpoint: nlrip.lri.rpcPath,
 secureConn: globalIsSSL,
-serviceName: "Dsync",
+serviceEndpoint: nlrip.lri.serviceEndpoint,
+serviceName: lockServiceName,
 })

 // Call back to original server verify whether the lock is still active (based on name & uid)
-expired, _ := c.Expired(dsync.LockArgs{UID: nlrip.lri.uid, Resource: nlrip.name})
+expired, _ := c.Expired(dsync.LockArgs{
+UID: nlrip.lri.uid,
+Resource: nlrip.name,
+})

 // Close the connection regardless of the call response.
 c.rpcClient.Close()
@@ -34,7 +34,7 @@ func testLockEquality(lriLeft, lriRight []lockRequesterInfo) bool {
 for i := 0; i < len(lriLeft); i++ {
 if lriLeft[i].writer != lriRight[i].writer ||
 lriLeft[i].node != lriRight[i].node ||
-lriLeft[i].rpcPath != lriRight[i].rpcPath ||
+lriLeft[i].serviceEndpoint != lriRight[i].serviceEndpoint ||
 lriLeft[i].uid != lriRight[i].uid {
 return false
 }
@@ -50,10 +50,10 @@ func createLockTestServer(t *testing.T) (string, *lockServer, string) {
 }

 locker := &lockServer{
 AuthRPCServer: AuthRPCServer{},
-rpcPath: "rpc-path",
+serviceEndpoint: "rpc-path",
 mutex: sync.Mutex{},
 lockMap: make(map[string][]lockRequesterInfo),
 }
 creds := serverConfig.GetCredential()
 loginArgs := LoginRPCArgs{
@@ -97,10 +97,10 @@ func TestLockRpcServerLock(t *testing.T) {
 gotLri, _ := locker.lockMap["name"]
 expectedLri := []lockRequesterInfo{
 {
 writer: true,
 node: "node",
-rpcPath: "rpc-path",
+serviceEndpoint: "rpc-path",
 uid: "0123-4567",
 },
 }
 if !testLockEquality(expectedLri, gotLri) {
@@ -198,10 +198,10 @@ func TestLockRpcServerRLock(t *testing.T) {
 gotLri, _ := locker.lockMap["name"]
 expectedLri := []lockRequesterInfo{
 {
 writer: false,
 node: "node",
-rpcPath: "rpc-path",
+serviceEndpoint: "rpc-path",
 uid: "0123-4567",
 },
 }
 if !testLockEquality(expectedLri, gotLri) {
@@ -285,10 +285,10 @@ func TestLockRpcServerRUnlock(t *testing.T) {
 gotLri, _ := locker.lockMap["name"]
 expectedLri := []lockRequesterInfo{
 {
 writer: false,
 node: "node",
-rpcPath: "rpc-path",
+serviceEndpoint: "rpc-path",
 uid: "89ab-cdef",
 },
 }
 if !testLockEquality(expectedLri, gotLri) {
@@ -50,9 +50,9 @@ func initDsyncNodes() error {
 accessKey: cred.AccessKey,
 secretKey: cred.SecretKey,
 serverAddr: ep.Host,
-serviceEndpoint: pathutil.Join(minioReservedBucketPath, lockRPCPath, getPath(ep)),
 secureConn: globalIsSSL,
-serviceName: "Dsync",
+serviceEndpoint: pathutil.Join(minioReservedBucketPath, lockServicePath, getPath(ep)),
+serviceName: lockServiceName,
 })
 if isLocalStorage(ep) && myNode == -1 {
 myNode = index