locking: Add Refresh for better locking cleanup (#11535)

Co-authored-by: Anis Elleuch <anis@min.io>
Co-authored-by: Harshavardhana <harsha@minio.io>
Authored by Anis Elleuch on 2021-03-04 03:36:43 +01:00, committed by GitHub
parent 464fa08f2e
commit 7be7109471
23 changed files with 597 additions and 313 deletions
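With this change every held DRWMutex is refreshed in the background: a goroutine re-asserts the lock on all lockers every drwMutexRefreshInterval, and if a round shows that a quorum of lockers no longer knows the lock, a caller-supplied context.CancelFunc is invoked. To carry that callback, GetLock and GetRLock gain a context.CancelFunc parameter. A minimal usage sketch, not part of this commit (written as if inside the dsync package, with context/time/errors imports elided; ds, the resource name, id and source are illustrative):

func doProtectedWork(ds *Dsync, id, source string) error {
    ctx, cancel := context.WithCancel(context.Background())
    dm := NewDRWMutex(ds, "my-resource")
    if !dm.GetLock(ctx, cancel, id, source, Options{Timeout: 10 * time.Second}) {
        return errors.New("could not acquire lock")
    }
    defer dm.Unlock() // also stops the background refresh goroutine

    for step := 0; step < 10; step++ {
        if ctx.Err() != nil {
            return ctx.Err() // the lock lost quorum mid-operation
        }
        // ... one unit of work on the protected resource ...
    }
    return nil
}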

View File

@@ -41,17 +41,25 @@ func log(format string, data ...interface{}) {
}
}
// DRWMutexAcquireTimeout - tolerance limit to wait for lock acquisition before.
const DRWMutexAcquireTimeout = 1 * time.Second // 1 second.
// drwMutexAcquireTimeout - tolerance limit to wait for lock acquisition before timing out.
const drwMutexAcquireTimeout = 1 * time.Second // 1 second.
// drwMutexRefreshTimeout - timeout for the refresh call
const drwMutexRefreshTimeout = 5 * time.Second
// drwMutexRefreshInterval - the interval between two refresh calls
const drwMutexRefreshInterval = 10 * time.Second
const drwMutexInfinite = 1<<63 - 1
// A DRWMutex is a distributed mutual exclusion lock.
type DRWMutex struct {
Names []string
writeLocks []string // Array of nodes that granted a write lock
readersLocks [][]string // Array of array of nodes that granted reader locks
m sync.Mutex // Mutex to prevent multiple simultaneous locks from this node
clnt *Dsync
Names []string
writeLocks []string // Array of nodes that granted a write lock
readersLocks [][]string // Array of array of nodes that granted reader locks
m sync.Mutex // Mutex to prevent multiple simultaneous locks from this node
clnt *Dsync
cancelRefresh context.CancelFunc
}
// Granted - represents a structure of a granted lock.
@@ -85,7 +93,7 @@ func NewDRWMutex(clnt *Dsync, names ...string) *DRWMutex {
func (dm *DRWMutex) Lock(id, source string) {
isReadLock := false
dm.lockBlocking(context.Background(), id, source, isReadLock, Options{
dm.lockBlocking(context.Background(), nil, id, source, isReadLock, Options{
Timeout: drwMutexInfinite,
})
}
@@ -100,10 +108,10 @@ type Options struct {
// If the lock is already in use, the calling goroutine
// blocks until either the mutex becomes available (returning true) or
// more time has passed than the timeout value (returning false).
func (dm *DRWMutex) GetLock(ctx context.Context, id, source string, opts Options) (locked bool) {
func (dm *DRWMutex) GetLock(ctx context.Context, cancel context.CancelFunc, id, source string, opts Options) (locked bool) {
isReadLock := false
return dm.lockBlocking(ctx, id, source, isReadLock, opts)
return dm.lockBlocking(ctx, cancel, id, source, isReadLock, opts)
}
// RLock holds a read lock on dm.
@@ -113,7 +121,7 @@ func (dm *DRWMutex) GetLock(ctx context.Context, id, source string, opts Options
func (dm *DRWMutex) RLock(id, source string) {
isReadLock := true
dm.lockBlocking(context.Background(), id, source, isReadLock, Options{
dm.lockBlocking(context.Background(), nil, id, source, isReadLock, Options{
Timeout: drwMutexInfinite,
})
}
@@ -124,10 +132,10 @@ func (dm *DRWMutex) RLock(id, source string) {
// Otherwise the calling goroutine blocks until either the mutex becomes
// available (returning true) or more time has passed than the timeout
// value (returning false).
func (dm *DRWMutex) GetRLock(ctx context.Context, id, source string, opts Options) (locked bool) {
func (dm *DRWMutex) GetRLock(ctx context.Context, cancel context.CancelFunc, id, source string, opts Options) (locked bool) {
isReadLock := true
return dm.lockBlocking(ctx, id, source, isReadLock, opts)
return dm.lockBlocking(ctx, cancel, id, source, isReadLock, opts)
}
const (
@@ -139,7 +147,7 @@ const (
// The function will loop using a randomized back-off algorithm
// until either the lock is acquired successfully or more time has
// elapsed than the timeout value.
func (dm *DRWMutex) lockBlocking(ctx context.Context, id, source string, isReadLock bool, opts Options) (locked bool) {
func (dm *DRWMutex) lockBlocking(ctx context.Context, lockLossCallback func(), id, source string, isReadLock bool, opts Options) (locked bool) {
restClnts, _ := dm.clnt.GetLockers()
r := rand.New(rand.NewSource(time.Now().UnixNano()))
@@ -191,6 +199,10 @@ func (dm *DRWMutex) lockBlocking(ctx context.Context, id, source string, isReadL
dm.m.Unlock()
log("lockBlocking %s/%s for %#v: granted\n", id, source, dm.Names)
// Refresh the lock continuously, and invoke the loss callback if the lock no longer has quorum
dm.startContinousLockRefresh(lockLossCallback, id, source, quorum)
return locked
}
@@ -199,6 +211,132 @@ func (dm *DRWMutex) lockBlocking(ctx context.Context, id, source string, isReadL
}
}
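The randomized back-off retry loop that wraps each acquisition attempt is unchanged by this commit and elided from the hunk above. For orientation, a sketch of its shape (assuming a package-level lockRetryInterval constant; illustrative, not the verbatim loop):

for {
    select {
    case <-ctx.Done():
        return false // context canceled (includes the opts.Timeout bound)
    default:
        // (bookkeeping of granted lockers elided; see the hunk above)
        if locked = lock(ctx, dm.clnt, &locks, id, source, isReadLock, tolerance, quorum, dm.Names...); locked {
            return locked
        }
        // Sleep a random fraction of the retry interval so competing
        // nodes do not retry in lock-step.
        time.Sleep(time.Duration(r.Float64() * float64(lockRetryInterval)))
    }
}

// startContinousLockRefresh refreshes the held lock in the background
// until its context is canceled or the lock loses quorum.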
func (dm *DRWMutex) startContinousLockRefresh(lockLossCallback func(), id, source string, quorum int) {
ctx, cancel := context.WithCancel(context.Background())
dm.m.Lock()
dm.cancelRefresh = cancel
dm.m.Unlock()
go func() {
defer cancel()
for {
select {
case <-ctx.Done():
return
case <-time.NewTimer(drwMutexRefreshInterval).C:
refreshed, err := refresh(ctx, dm.clnt, id, source, quorum, dm.Names...)
if err == nil && !refreshed {
if lockLossCallback != nil {
lockLossCallback()
}
return
}
}
}
}()
}
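The loss callback wired through lockBlocking is simply whatever context.CancelFunc the caller handed to GetLock or GetRLock, and the refresh goroutine invokes it at most once before exiting. A caller that wants to observe the loss explicitly can wrap the cancel function, as the new TestFailedRefreshLock further down does; a sketch with illustrative names:

lockLost := make(chan struct{})
ctx, cl := context.WithCancel(context.Background())
cancel := func() {
    cl()            // cancel any work tied to the lock
    close(lockLost) // safe: the refresh goroutine calls this at most once
}
if dm.GetLock(ctx, cancel, id, source, Options{Timeout: time.Minute}) {
    defer dm.Unlock()
    // ... do work; ctx.Done() (or lockLost) fires if quorum is lost ...
}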
type refreshResult struct {
offline bool
succeeded bool
}
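// refresh asks every locker to refresh the given lock and reports
// whether the lock can still be considered held by a quorum; offline
// lockers are counted as neither success nor failure.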
func refresh(ctx context.Context, ds *Dsync, id, source string, quorum int, lockNames ...string) (bool, error) {
restClnts, owner := ds.GetLockers()
// Create buffered channel of size equal to total number of nodes.
ch := make(chan refreshResult, len(restClnts))
var wg sync.WaitGroup
for index, c := range restClnts {
wg.Add(1)
// Send refresh request to all nodes
go func(index int, c NetLocker) {
defer wg.Done()
if c == nil {
ch <- refreshResult{offline: true}
return
}
args := LockArgs{
Owner: owner,
UID: id,
Resources: lockNames,
Source: source,
Quorum: quorum,
}
ctx, cancel := context.WithTimeout(ctx, drwMutexRefreshTimeout)
defer cancel()
refreshed, err := c.Refresh(ctx, args)
if refreshed && err == nil {
ch <- refreshResult{succeeded: true}
} else {
if err != nil {
ch <- refreshResult{offline: true}
log("dsync: Unable to call Refresh failed with %s for %#v at %s\n", err, args, c)
} else {
ch <- refreshResult{succeeded: false}
log("dsync: Refresh returned false for %#v at %s\n", args, c)
}
}
}(index, c)
}
// Wait until we have either
//
// a) received all refresh responses
// b) received too many refresh failures for quorum to still be possible
// c) timed out
//
i, refreshFailed, refreshSucceeded := 0, 0, 0
done := false
for ; i < len(restClnts); i++ {
select {
case refresh := <-ch:
if refresh.offline {
continue
}
if refresh.succeeded {
refreshSucceeded++
} else {
refreshFailed++
}
if refreshFailed > quorum {
// We know that we are not going to succeed with refresh
done = true
}
case <-ctx.Done():
// Refreshing is canceled
return false, ctx.Err()
}
if done {
break
}
}
refreshQuorum := refreshSucceeded >= quorum
if !refreshQuorum {
refreshQuorum = refreshFailed < quorum
}
// We may have some unused results in ch, release them async.
go func() {
wg.Wait()
close(ch)
for range ch {
}
}()
return refreshQuorum, nil
}
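The final decision above treats offline lockers as abstentions: the lock survives unless a quorum of lockers explicitly reports it missing. A worked sketch of the rule (hypothetical helper, not code from this commit):

// refreshDecision mirrors the quorum rule in refresh(): keep the lock
// if a quorum confirmed it, or if fewer than a quorum explicitly
// denied it (offline lockers count toward neither side).
func refreshDecision(succeeded, failed, quorum int) bool {
    if succeeded >= quorum {
        return true
    }
    return failed < quorum
}

// With 5 lockers and quorum = 3:
//   refreshDecision(3, 0, 3) == true  // quorum of confirmations
//   refreshDecision(1, 2, 3) == true  // 2 denials, 2 offline: keep the lock
//   refreshDecision(1, 3, 3) == false // 3 denials: the lock is gone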
// lock tries to acquire the distributed lock, returning true or false.
func lock(ctx context.Context, ds *Dsync, locks *[]string, id, source string, isReadLock bool, tolerance, quorum int, lockNames ...string) bool {
for i := range *locks {
@@ -212,7 +350,7 @@ func lock(ctx context.Context, ds *Dsync, locks *[]string, id, source string, is
var wg sync.WaitGroup
// Combined timeout for the lock attempt.
ctx, cancel := context.WithTimeout(ctx, DRWMutexAcquireTimeout)
ctx, cancel := context.WithTimeout(ctx, drwMutexAcquireTimeout)
defer cancel()
for index, c := range restClnts {
wg.Add(1)
@@ -383,6 +521,9 @@ func releaseAll(ds *Dsync, tolerance int, owner string, locks *[]string, isReadL
//
// It is a run-time error if dm is not locked on entry to Unlock.
func (dm *DRWMutex) Unlock() {
dm.m.Lock()
dm.cancelRefresh()
dm.m.Unlock()
restClnts, owner := dm.clnt.GetLockers()
// create temp array on stack
@@ -422,6 +563,9 @@ func (dm *DRWMutex) Unlock() {
//
// It is a run-time error if dm is not locked on entry to RUnlock.
func (dm *DRWMutex) RUnlock() {
dm.m.Lock()
dm.cancelRefresh()
dm.m.Unlock()
// create temp array on stack
restClnts, owner := dm.clnt.GetLockers()

View File

@@ -36,12 +36,14 @@ func testSimpleWriteLock(t *testing.T, duration time.Duration) (locked bool) {
drwm := NewDRWMutex(ds, "simplelock")
if !drwm.GetRLock(context.Background(), id, source, Options{Timeout: time.Second}) {
ctx1, cancel1 := context.WithCancel(context.Background())
if !drwm.GetRLock(ctx1, cancel1, id, source, Options{Timeout: time.Second}) {
panic("Failed to acquire read lock")
}
// fmt.Println("1st read lock acquired, waiting...")
if !drwm.GetRLock(context.Background(), id, source, Options{Timeout: time.Second}) {
ctx2, cancel2 := context.WithCancel(context.Background())
if !drwm.GetRLock(ctx2, cancel2, id, source, Options{Timeout: time.Second}) {
panic("Failed to acquire read lock")
}
// fmt.Println("2nd read lock acquired, waiting...")
@@ -59,7 +61,8 @@ func testSimpleWriteLock(t *testing.T, duration time.Duration) (locked bool) {
}()
// fmt.Println("Trying to acquire write lock, waiting...")
locked = drwm.GetLock(context.Background(), id, source, Options{Timeout: duration})
ctx3, cancel3 := context.WithCancel(context.Background())
locked = drwm.GetLock(ctx3, cancel3, id, source, Options{Timeout: duration})
if locked {
// fmt.Println("Write lock acquired, waiting...")
time.Sleep(time.Second)
@@ -93,7 +96,8 @@ func testDualWriteLock(t *testing.T, duration time.Duration) (locked bool) {
drwm := NewDRWMutex(ds, "duallock")
// fmt.Println("Getting initial write lock")
if !drwm.GetLock(context.Background(), id, source, Options{Timeout: time.Second}) {
ctx1, cancel1 := context.WithCancel(context.Background())
if !drwm.GetLock(ctx1, cancel1, id, source, Options{Timeout: time.Second}) {
panic("Failed to acquire initial write lock")
}
@@ -104,7 +108,8 @@ func testDualWriteLock(t *testing.T, duration time.Duration) (locked bool) {
}()
// fmt.Println("Trying to acquire 2nd write lock, waiting...")
locked = drwm.GetLock(context.Background(), id, source, Options{Timeout: duration})
ctx2, cancel2 := context.WithCancel(context.Background())
locked = drwm.GetLock(ctx2, cancel2, id, source, Options{Timeout: duration})
if locked {
// fmt.Println("2nd write lock acquired, waiting...")
time.Sleep(time.Second)
@@ -139,7 +144,7 @@ func TestDualWriteLockTimedOut(t *testing.T) {
// Borrowed from rwmutex_test.go
func parallelReader(ctx context.Context, m *DRWMutex, clocked, cunlock, cdone chan bool) {
if m.GetRLock(ctx, id, source, Options{Timeout: time.Second}) {
if m.GetRLock(ctx, nil, id, source, Options{Timeout: time.Second}) {
clocked <- true
<-cunlock
m.RUnlock()
@@ -182,7 +187,7 @@ func TestParallelReaders(t *testing.T) {
// Borrowed from rwmutex_test.go
func reader(rwm *DRWMutex, numIterations int, activity *int32, cdone chan bool) {
for i := 0; i < numIterations; i++ {
if rwm.GetRLock(context.Background(), id, source, Options{Timeout: time.Second}) {
if rwm.GetRLock(context.Background(), nil, id, source, Options{Timeout: time.Second}) {
n := atomic.AddInt32(activity, 1)
if n < 1 || n >= 10000 {
panic(fmt.Sprintf("wlock(%d)\n", n))
@@ -199,7 +204,7 @@ func reader(rwm *DRWMutex, numIterations int, activity *int32, cdone chan bool)
// Borrowed from rwmutex_test.go
func writer(rwm *DRWMutex, numIterations int, activity *int32, cdone chan bool) {
for i := 0; i < numIterations; i++ {
if rwm.GetLock(context.Background(), id, source, Options{Timeout: time.Second}) {
if rwm.GetLock(context.Background(), nil, id, source, Options{Timeout: time.Second}) {
n := atomic.AddInt32(activity, 10000)
if n != 10000 {
panic(fmt.Sprintf("wlock(%d)\n", n))

View File

@@ -30,6 +30,15 @@ type lockServer struct {
// Map of locks, with negative value indicating (exclusive) write lock
// and positive values indicating number of read locks
lockMap map[string]int64
// When set to true, Refresh replies that the lock was not found
lockNotFound bool
}
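// setRefreshReply controls whether subsequent Refresh calls report the
// lock as still present (true) or as not found (false).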
func (l *lockServer) setRefreshReply(refreshed bool) {
l.mutex.Lock()
defer l.mutex.Unlock()
l.lockNotFound = !refreshed
}
func (l *lockServer) Lock(args *LockArgs, reply *bool) error {
@@ -91,6 +100,13 @@ func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error {
return nil
}
func (l *lockServer) Refresh(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
*reply = !l.lockNotFound
return nil
}
func (l *lockServer) ForceUnlock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()

View File

@@ -19,6 +19,7 @@
package dsync_test
import (
"context"
"fmt"
"log"
"math/rand"
@@ -32,19 +33,26 @@ import (
"time"
"github.com/google/uuid"
"github.com/minio/minio/pkg/dsync"
. "github.com/minio/minio/pkg/dsync"
)
const numberOfNodes = 5
var ds *Dsync
var rpcPaths []string // list of rpc paths where lock server is serving.
func startRPCServers(nodes []string) {
var nodes = make([]string, numberOfNodes) // list of node IP addrs or hostname with ports.
var lockServers []*lockServer
func startRPCServers() {
for i := range nodes {
server := rpc.NewServer()
server.RegisterName("Dsync", &lockServer{
ls := &lockServer{
mutex: sync.Mutex{},
lockMap: make(map[string]int64),
})
}
server.RegisterName("Dsync", ls)
// For some reason the registration paths need to be different (even for different server objs)
server.HandleHTTP(rpcPaths[i], fmt.Sprintf("%s-debug", rpcPaths[i]))
l, e := net.Listen("tcp", ":"+strconv.Itoa(i+12345))
@@ -52,6 +60,8 @@ func startRPCServers(nodes []string) {
log.Fatal("listen error:", e)
}
go http.Serve(l, nil)
lockServers = append(lockServers, ls)
}
// Let servers start
@@ -64,7 +74,6 @@ func TestMain(m *testing.M) {
rand.Seed(time.Now().UTC().UnixNano())
nodes := make([]string, 5) // list of node IP addrs or hostname with ports.
for i := range nodes {
nodes[i] = fmt.Sprintf("127.0.0.1:%d", i+12345)
}
@@ -82,7 +91,7 @@ func TestMain(m *testing.M) {
GetLockers: func() ([]NetLocker, string) { return clnts, uuid.New().String() },
}
startRPCServers(nodes)
startRPCServers()
os.Exit(m.Run())
}
@@ -231,6 +240,42 @@ func TestTwoSimultaneousLocksForDifferentResources(t *testing.T) {
time.Sleep(10 * time.Millisecond)
}
// Test that a lock whose refresh loses quorum is canceled
func TestFailedRefreshLock(t *testing.T) {
// Simulate Refresh RPC responses that report the lock as not found
for i := range lockServers {
lockServers[i].setRefreshReply(false)
}
dm := NewDRWMutex(ds, "aap")
wg := sync.WaitGroup{}
wg.Add(1)
ctx, cl := context.WithCancel(context.Background())
cancel := func() {
cl()
wg.Done()
}
if !dm.GetLock(ctx, cancel, id, source, dsync.Options{Timeout: 5 * time.Minute}) {
t.Fatal("GetLock() should be successful")
}
// Wait until context is canceled
wg.Wait()
if ctx.Err() == nil {
t.Fatal("Lock context should have been canceled")
}
// Should be safe operation in all cases
dm.Unlock()
// Revert Refresh RPC response to locking found
for i := range lockServers {
lockServers[i].setRefreshReply(true)
}
}
// Borrowed from mutex_test.go
func HammerMutex(m *DRWMutex, loops int, cdone chan bool) {
for i := 0; i < loops; i++ {

View File

@@ -114,9 +114,9 @@ func (rpcClient *ReconnectRPCClient) Unlock(args LockArgs) (status bool, err err
return status, err
}
func (rpcClient *ReconnectRPCClient) Expired(ctx context.Context, args LockArgs) (expired bool, err error) {
err = rpcClient.Call("Dsync.Expired", &args, &expired)
return expired, err
func (rpcClient *ReconnectRPCClient) Refresh(ctx context.Context, args LockArgs) (refreshed bool, err error) {
err = rpcClient.Call("Dsync.Refresh", &args, &refreshed)
return refreshed, err
}
func (rpcClient *ReconnectRPCClient) ForceUnlock(ctx context.Context, args LockArgs) (reply bool, err error) {

View File

@@ -60,8 +60,8 @@ type NetLocker interface {
// * an error on failure of unlock request operation.
Unlock(args LockArgs) (bool, error)
// Expired returns if current lock args has expired.
Expired(ctx context.Context, args LockArgs) (bool, error)
// Refresh the given lock to prevent it from becoming stale
Refresh(ctx context.Context, args LockArgs) (bool, error)
// Unlock (read/write) forcefully for given LockArgs. It should return
// * a boolean to indicate success/failure of the operation