Adopt dsync interface changes and major cleanup on RPC server/client.
* Rename GenericArgs to AuthRPCArgs
* Rename GenericReply to AuthRPCReply
* Remove authConfig.loginMethod and add authConfig.ServiceName
* Rename loginServer to AuthRPCServer
* Rename RPCLoginArgs to LoginRPCArgs
* Rename RPCLoginReply to LoginRPCReply
* Version and RequestTime are added to LoginRPCArgs and verified by the server side, not the client side.
* Fix data race in the lockMaintenance loop.
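The last two bullets describe behavior, not just renames. As a hedged illustration of the server-side check (only the names LoginRPCArgs, Version, and RequestTime come from this commit; the other fields, error texts, and the three-minute tolerance window are assumptions), verification could look like:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// LoginRPCArgs carries credentials plus the two fields the server verifies.
// Username/Password and the tolerance constant are illustrative assumptions.
type LoginRPCArgs struct {
	Username    string
	Password    string
	Version     string    // client's RPC version, must match the server's
	RequestTime time.Time // client clock, bounded by a drift tolerance
}

const rpcTimeTolerance = 3 * time.Minute // assumed tolerance window

// IsValid sketches the server-side check: reject version mismatches and
// requests whose timestamp drifts too far from the server clock.
func (args LoginRPCArgs) IsValid(serverVersion string) error {
	if args.Version != serverVersion {
		return errors.New("version mismatch in login request")
	}
	drift := time.Since(args.RequestTime.UTC())
	if drift < -rpcTimeTolerance || drift > rpcTimeTolerance {
		return errors.New("login request time too far from server time")
	}
	return nil
}

func main() {
	args := LoginRPCArgs{Version: "1.0", RequestTime: time.Now().UTC()}
	fmt.Println(args.IsValid("1.0")) // <nil>
}
```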
vendor/github.com/minio/dsync/README.md (generated, vendored): 2 lines changed
@@ -193,7 +193,7 @@ The basic steps in the lock process are as follows:
 ### Unlock process
 
 The unlock process is really simple:
 
-- boardcast unlock message to all nodes that granted lock
+- broadcast unlock message to all nodes that granted lock
 - if a destination is not available, retry with gradually longer back-off window to still deliver
 - ignore the 'result' (cover for cases where destination node has gone down and came back up)
vendor/github.com/minio/dsync/drwmutex.go (generated, vendored): 121 lines changed
@@ -19,7 +19,7 @@ package dsync
 import (
 	cryptorand "crypto/rand"
 	"fmt"
-	"log"
+	golog "log"
 	"math"
 	"math/rand"
 	"net"
@@ -36,6 +36,12 @@ func init() {
 	dsyncLog = os.Getenv("DSYNC_LOG") == "1"
 }
 
+func log(msg ...interface{}) {
+	if dsyncLog {
+		golog.Println(msg...)
+	}
+}
+
 // DRWMutexAcquireTimeout - tolerance limit to wait for lock acquisition before.
 const DRWMutexAcquireTimeout = 25 * time.Millisecond // 25ms.
 
@@ -60,23 +66,6 @@ func isLocked(uid string) bool {
 	return len(uid) > 0
 }
 
-type LockArgs struct {
-	Token     string
-	Timestamp time.Time
-	Name      string
-	Node      string
-	RPCPath   string
-	UID       string
-}
-
-func (l *LockArgs) SetToken(token string) {
-	l.Token = token
-}
-
-func (l *LockArgs) SetTimestamp(tstamp time.Time) {
-	l.Timestamp = tstamp
-}
-
 func NewDRWMutex(name string) *DRWMutex {
 	return &DRWMutex{
 		Name: name,
@@ -152,7 +141,7 @@ func (dm *DRWMutex) lockBlocking(isReadLock bool) {
 
 // lock tries to acquire the distributed lock, returning true or false
 //
-func lock(clnts []RPC, locks *[]string, lockName string, isReadLock bool) bool {
+func lock(clnts []NetLocker, locks *[]string, lockName string, isReadLock bool) bool {
 
 	// Create buffered channel of size equal to total number of nodes.
 	ch := make(chan Granted, dnodeCount)
@@ -160,25 +149,29 @@ func lock(clnts []RPC, locks *[]string, lockName string, isReadLock bool) bool {
 	for index, c := range clnts {
 
 		// broadcast lock request to all nodes
-		go func(index int, isReadLock bool, c RPC) {
+		go func(index int, isReadLock bool, c NetLocker) {
 			// All client methods issuing RPCs are thread-safe and goroutine-safe,
 			// i.e. it is safe to call them from multiple concurrently running go routines.
-			var locked bool
 			bytesUid := [16]byte{}
 			cryptorand.Read(bytesUid[:])
 			uid := fmt.Sprintf("%X", bytesUid[:])
-			args := LockArgs{Name: lockName, Node: clnts[ownNode].Node(), RPCPath: clnts[ownNode].RPCPath(), UID: uid}
+
+			args := LockArgs{
+				UID:             uid,
+				Resource:        lockName,
+				ServerAddr:      clnts[ownNode].ServerAddr(),
+				ServiceEndpoint: clnts[ownNode].ServiceEndpoint(),
+			}
+
+			var locked bool
+			var err error
 			if isReadLock {
-				if err := c.Call("Dsync.RLock", &args, &locked); err != nil {
-					if dsyncLog {
-						log.Println("Unable to call Dsync.RLock", err)
-					}
+				if locked, err = c.RLock(args); err != nil {
+					log("Unable to call RLock", err)
 				}
 			} else {
-				if err := c.Call("Dsync.Lock", &args, &locked); err != nil {
-					if dsyncLog {
-						log.Println("Unable to call Dsync.Lock", err)
-					}
+				if locked, err = c.Lock(args); err != nil {
+					log("Unable to call Lock", err)
 				}
 			}
 
@@ -284,7 +277,7 @@ func quorumMet(locks *[]string, isReadLock bool) bool {
 }
 
 // releaseAll releases all locks that are marked as locked
-func releaseAll(clnts []RPC, locks *[]string, lockName string, isReadLock bool) {
+func releaseAll(clnts []NetLocker, locks *[]string, lockName string, isReadLock bool) {
 	for lock := 0; lock < dnodeCount; lock++ {
 		if isLocked((*locks)[lock]) {
 			sendRelease(clnts[lock], lockName, (*locks)[lock], isReadLock)
@@ -385,7 +378,7 @@ func (dm *DRWMutex) ForceUnlock() {
 }
 
 // sendRelease sends a release message to a node that previously granted a lock
-func sendRelease(c RPC, name, uid string, isReadLock bool) {
+func sendRelease(c NetLocker, name, uid string, isReadLock bool) {
 
 	backOffArray := []time.Duration{
 		30 * time.Second, // 30secs.
@@ -396,55 +389,47 @@ func sendRelease(c RPC, name, uid string, isReadLock bool) {
 		1 * time.Hour, // 1hr.
 	}
 
-	go func(c RPC, name string) {
+	go func(c NetLocker, name string) {
 
 		for _, backOff := range backOffArray {
 
 			// All client methods issuing RPCs are thread-safe and goroutine-safe,
 			// i.e. it is safe to call them from multiple concurrently running goroutines.
-			var unlocked bool
-			args := LockArgs{Name: name, UID: uid} // Just send name & uid (and leave out node and rpcPath; unimportant for unlocks)
+			args := LockArgs{
+				UID:             uid,
+				Resource:        name,
+				ServerAddr:      clnts[ownNode].ServerAddr(),
+				ServiceEndpoint: clnts[ownNode].ServiceEndpoint(),
+			}
+
+			var err error
 			if len(uid) == 0 {
-				if err := c.Call("Dsync.ForceUnlock", &args, &unlocked); err == nil {
-					// ForceUnlock delivered, exit out
-					return
-				} else if err != nil {
-					if dsyncLog {
-						log.Println("Unable to call Dsync.ForceUnlock", err)
-					}
-					if nErr, ok := err.(net.Error); ok && nErr.Timeout() {
-						// ForceUnlock possibly failed with server timestamp mismatch, server may have restarted.
-						return
-					}
+				if _, err = c.ForceUnlock(args); err != nil {
+					log("Unable to call ForceUnlock", err)
 				}
 			} else if isReadLock {
-				if err := c.Call("Dsync.RUnlock", &args, &unlocked); err == nil {
-					// RUnlock delivered, exit out
-					return
-				} else if err != nil {
-					if dsyncLog {
-						log.Println("Unable to call Dsync.RUnlock", err)
-					}
-					if nErr, ok := err.(net.Error); ok && nErr.Timeout() {
-						// RUnlock possibly failed with server timestamp mismatch, server may have restarted.
-						return
-					}
+				if _, err = c.RUnlock(args); err != nil {
+					log("Unable to call RUnlock", err)
 				}
 			} else {
-				if err := c.Call("Dsync.Unlock", &args, &unlocked); err == nil {
-					// Unlock delivered, exit out
-					return
-				} else if err != nil {
-					if dsyncLog {
-						log.Println("Unable to call Dsync.Unlock", err)
-					}
-					if nErr, ok := err.(net.Error); ok && nErr.Timeout() {
-						// Unlock possibly failed with server timestamp mismatch, server may have restarted.
-						return
-					}
+				if _, err = c.Unlock(args); err != nil {
+					log("Unable to call Unlock", err)
 				}
 			}
 
+			if err != nil {
+				// Ignore if err is net.Error and it is occurred due to timeout.
+				// The cause could have been server timestamp mismatch or server may have restarted.
+				// FIXME: This is minio specific behaviour and we would need a way to make it generically.
+				if nErr, ok := err.(net.Error); ok && nErr.Timeout() {
+					err = nil
+				}
+			}
+
+			if err == nil {
+				return
+			}
+
 			// Wait..
 			time.Sleep(backOff)
 		}
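Taken together, the drwmutex.go changes alter how callers wire the package up. A hedged usage sketch follows (not code from this commit; constructing real RPC-backed clients is elided, and index 0 is assumed to be the local node):

```go
package main

import "github.com/minio/dsync"

func main() {
	// One NetLocker per lock server. A non-empty, even-sized slice of
	// 4 to 16 clients is required; construction is elided here.
	var clients []dsync.NetLocker

	// Init replaces the old SetNodesWithClients; the second argument is
	// the index of the local node in the slice.
	if err := dsync.Init(clients, 0); err != nil {
		panic(err) // with the empty slice above, Init fails by design
	}

	m := dsync.NewDRWMutex("my-resource")
	m.Lock()   // broadcasts lock requests, blocks until a write quorum grants
	m.Unlock() // broadcasts release to the nodes that granted the lock
}
```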
vendor/github.com/minio/dsync/dsync.go (generated, vendored): 30 lines changed
@@ -18,16 +18,11 @@ package dsync
 
 import "errors"
 
-const RpcPath = "/dsync"
-const DebugPath = "/debug"
+const DefaultPath = "/rpc/dsync"
 
 // Number of nodes participating in the distributed locking.
 var dnodeCount int
 
 // List of rpc client objects, one per lock server.
-var clnts []RPC
+var clnts []NetLocker
 
 // Index into rpc client array for server running on localhost
 var ownNode int
@@ -38,20 +33,21 @@ var dquorum
 // Simple quorum for read operations, set to dNodeCount/2
 var dquorumReads int
 
-// SetNodesWithPath - initializes package-level global state variables such as clnts.
-// N B - This function should be called only once inside any program that uses
-// dsync.
-func SetNodesWithClients(rpcClnts []RPC, rpcOwnNode int) (err error) {
+// Init - initializes package-level global state variables such as clnts.
+// N B - This function should be called only once inside any program
+// that uses dsync.
+func Init(rpcClnts []NetLocker, rpcOwnNode int) (err error) {
 
 	// Validate if number of nodes is within allowable range.
 	if dnodeCount != 0 {
 		return errors.New("Cannot reinitialize dsync package")
-	} else if len(rpcClnts) < 4 {
-		return errors.New("Dsync not designed for less than 4 nodes")
+	}
+	if len(rpcClnts) < 4 {
+		return errors.New("Dsync is not designed for less than 4 nodes")
 	} else if len(rpcClnts) > 16 {
-		return errors.New("Dsync not designed for more than 16 nodes")
-	} else if len(rpcClnts)&1 == 1 {
-		return errors.New("Dsync not designed for an uneven number of nodes")
+		return errors.New("Dsync is not designed for more than 16 nodes")
+	} else if len(rpcClnts)%2 != 0 {
+		return errors.New("Dsync is not designed for an uneven number of nodes")
 	}
 
 	if rpcOwnNode > len(rpcClnts) {
@@ -61,8 +57,8 @@ func SetNodesWithClients(rpcClnts []RPC, rpcOwnNode int) (err error) {
 	dnodeCount = len(rpcClnts)
 	dquorum = dnodeCount/2 + 1
 	dquorumReads = dnodeCount / 2
-	// Initialize node name and rpc path for each RPCClient object.
-	clnts = make([]RPC, dnodeCount)
+	// Initialize node name and rpc path for each NetLocker object.
+	clnts = make([]NetLocker, dnodeCount)
 	copy(clnts, rpcClnts)
 
 	ownNode = rpcOwnNode
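The quorum arithmetic set up here is easy to check by hand; a minimal standalone example with 8 nodes:

```go
package main

import "fmt"

func main() {
	dnodeCount := 8
	fmt.Println("write quorum:", dnodeCount/2+1) // 5 of 8 grants needed
	fmt.Println("read quorum:", dnodeCount/2)    // 4 of 8 grants needed
}
```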
vendor/github.com/minio/dsync/rpc-client-interface.go (generated, vendored): 56 lines changed
@@ -16,15 +16,51 @@
 
 package dsync
 
-import "time"
+// LockArgs is minimal required values for any dsync compatible lock operation.
+type LockArgs struct {
+	// Unique ID of lock/unlock request.
+	UID string
 
-// RPC - is dsync compatible client interface.
-type RPC interface {
-	Call(serviceMethod string, args interface {
-		SetToken(token string)
-		SetTimestamp(tstamp time.Time)
-	}, reply interface{}) error
-	Node() string
-	RPCPath() string
-	Close() error
+	// Resource contains a entity to be locked/unlocked.
+	Resource string
+
+	// ServerAddr contains the address of the server who requested lock/unlock of the above resource.
+	ServerAddr string
+
+	// ServiceEndpoint contains the network path of above server to do lock/unlock.
+	ServiceEndpoint string
 }
+
+// NetLocker is dsync compatible locker interface.
+type NetLocker interface {
+	// Do read lock for given LockArgs. It should return
+	// * a boolean to indicate success/failure of the operation
+	// * an error on failure of lock request operation.
+	RLock(args LockArgs) (bool, error)
+
+	// Do write lock for given LockArgs. It should return
+	// * a boolean to indicate success/failure of the operation
+	// * an error on failure of lock request operation.
+	Lock(args LockArgs) (bool, error)
+
+	// Do read unlock for given LockArgs. It should return
+	// * a boolean to indicate success/failure of the operation
+	// * an error on failure of unlock request operation.
+	RUnlock(args LockArgs) (bool, error)
+
+	// Do write unlock for given LockArgs. It should return
+	// * a boolean to indicate success/failure of the operation
+	// * an error on failure of unlock request operation.
+	Unlock(args LockArgs) (bool, error)
+
+	// Unlock (read/write) forcefully for given LockArgs. It should return
+	// * a boolean to indicate success/failure of the operation
+	// * an error on failure of unlock request operation.
+	ForceUnlock(args LockArgs) (bool, error)
+
+	// Return this lock server address.
+	ServerAddr() string
+
+	// Return this lock server service endpoint on which the server runs.
+	ServiceEndpoint() string
+}
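Any type satisfying this interface can be handed to Init. As a hedged sketch (not the net/rpc-backed client minio actually plugs in; read locks are treated as exclusive purely to keep it short), a single-process stub might look like:

```go
package main

import (
	"fmt"
	"sync"

	"github.com/minio/dsync"
)

// localLocker grants and releases locks from in-process state.
type localLocker struct {
	mu    sync.Mutex
	locks map[string]string // resource -> UID of the current holder
}

// Compile-time proof that localLocker satisfies NetLocker.
var _ dsync.NetLocker = (*localLocker)(nil)

func newLocalLocker() *localLocker {
	return &localLocker{locks: make(map[string]string)}
}

func (l *localLocker) Lock(args dsync.LockArgs) (bool, error) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if _, held := l.locks[args.Resource]; held {
		return false, nil // resource already locked
	}
	l.locks[args.Resource] = args.UID
	return true, nil
}

// RLock is simplified to an exclusive lock in this sketch.
func (l *localLocker) RLock(args dsync.LockArgs) (bool, error) { return l.Lock(args) }

func (l *localLocker) Unlock(args dsync.LockArgs) (bool, error) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if uid, held := l.locks[args.Resource]; !held || uid != args.UID {
		return false, nil // not held, or held under a different UID
	}
	delete(l.locks, args.Resource)
	return true, nil
}

func (l *localLocker) RUnlock(args dsync.LockArgs) (bool, error) { return l.Unlock(args) }

func (l *localLocker) ForceUnlock(args dsync.LockArgs) (bool, error) {
	l.mu.Lock()
	defer l.mu.Unlock()
	delete(l.locks, args.Resource) // drop the lock regardless of holder
	return true, nil
}

func (l *localLocker) ServerAddr() string      { return "127.0.0.1:9000" }
func (l *localLocker) ServiceEndpoint() string { return "/rpc/dsync" }

func main() {
	l := newLocalLocker()
	ok, _ := l.Lock(dsync.LockArgs{UID: "abc", Resource: "bucket/object"})
	fmt.Println("locked:", ok) // locked: true
}
```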
vendor/vendor.json (vendored): 6 lines changed
@@ -148,10 +148,10 @@
 			"revisionTime": "2015-11-18T20:00:48-08:00"
 		},
 		{
-			"checksumSHA1": "ddMyebkzU3xB7K8dAhM1S+Mflmo=",
+			"checksumSHA1": "NBGyq2+iTtJvJ+ElG4FzHLe1WSY=",
 			"path": "github.com/minio/dsync",
-			"revision": "dd0da3743e6668b03559c2905cc661bc0fceeae3",
-			"revisionTime": "2016-11-28T22:07:34Z"
+			"revision": "9cafd4d729eb71b31ef7851a8c8f6ceb855d0915",
+			"revisionTime": "2016-12-23T07:07:24Z"
 		},
 		{
 			"path": "github.com/minio/go-homedir",
||||
Reference in New Issue
Block a user