locking: Add Refresh for better locking cleanup (#11535)

Co-authored-by: Anis Elleuch <anis@min.io>
Co-authored-by: Harshavardhana <harsha@minio.io>
Anis Elleuch authored on 2021-03-04 03:36:43 +01:00, committed by GitHub
parent 464fa08f2e
commit 7be7109471
23 changed files with 597 additions and 313 deletions


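The new behaviour, exercised by TestFailedRefreshLock further down, is that a held distributed lock is refreshed against the lock servers in the background, and when the servers report the lock as gone, the lock's context is canceled so the holder can clean up. As orientation only, here is a minimal sketch of that refresh-and-cancel pattern; keepLockAlive and refreshFn are hypothetical names, not part of the dsync API.

// Sketch only: a held lock is refreshed periodically; when a refresh reports
// that the lock servers no longer know the lock, the owner's context is
// canceled so it stops relying on the lock. Names are hypothetical.
package main

import (
	"context"
	"fmt"
	"time"
)

// refreshFn stands in for an RPC that asks the lock servers whether the
// lock entry still exists; it returns false when the lock is gone.
type refreshFn func() bool

// keepLockAlive cancels ctx as soon as a refresh reports the lock missing.
func keepLockAlive(ctx context.Context, cancel context.CancelFunc, refresh refreshFn, interval time.Duration) {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			if !refresh() {
				cancel() // lock lost on the servers: tell the owner to back off
				return
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	// Simulate servers that have forgotten the lock, as the new test does
	// by calling setRefreshReply(false) on every lock server.
	go keepLockAlive(ctx, cancel, func() bool { return false }, 10*time.Millisecond)
	<-ctx.Done()
	fmt.Println("lock context canceled:", ctx.Err())
}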
@@ -19,6 +19,7 @@
 package dsync_test
 
 import (
+	"context"
 	"fmt"
 	"log"
 	"math/rand"
@@ -32,19 +33,26 @@ import (
 	"time"
 
 	"github.com/google/uuid"
+	"github.com/minio/minio/pkg/dsync"
 	. "github.com/minio/minio/pkg/dsync"
 )
 
+const numberOfNodes = 5
+
 var ds *Dsync
 var rpcPaths []string // list of rpc paths where lock server is serving.
 
-func startRPCServers(nodes []string) {
+var nodes = make([]string, numberOfNodes) // list of node IP addrs or hostname with ports.
+var lockServers []*lockServer
+
+func startRPCServers() {
 	for i := range nodes {
 		server := rpc.NewServer()
-		server.RegisterName("Dsync", &lockServer{
+		ls := &lockServer{
 			mutex:   sync.Mutex{},
 			lockMap: make(map[string]int64),
-		})
+		}
+		server.RegisterName("Dsync", ls)
 		// For some reason the registration paths need to be different (even for different server objs)
 		server.HandleHTTP(rpcPaths[i], fmt.Sprintf("%s-debug", rpcPaths[i]))
 		l, e := net.Listen("tcp", ":"+strconv.Itoa(i+12345))
@@ -52,6 +60,8 @@ func startRPCServers(nodes []string) {
 			log.Fatal("listen error:", e)
 		}
 		go http.Serve(l, nil)
+
+		lockServers = append(lockServers, ls)
 	}
 
 	// Let servers start
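The "registration paths need to be different" comment above reflects how net/rpc works: Server.HandleHTTP registers its handlers on http.DefaultServeMux, and http.Serve(l, nil) serves that default mux, so registering the same path for a second server object would panic with a duplicate-pattern error. A small sketch (the paths here are made up):

// Sketch only: rpc.Server.HandleHTTP registers on http.DefaultServeMux,
// which panics on duplicate patterns, so every server needs its own paths.
package main

import "net/rpc"

func main() {
	a, b := rpc.NewServer(), rpc.NewServer()
	a.HandleHTTP("/dsync-0", "/dsync-0-debug") // registers on http.DefaultServeMux
	b.HandleHTTP("/dsync-1", "/dsync-1-debug") // reusing "/dsync-0" here would panic
	// http.Serve(listener, nil), as in the test, then serves both via the default mux.
}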
@@ -64,7 +74,6 @@ func TestMain(m *testing.M) {
 	rand.Seed(time.Now().UTC().UnixNano())
 
-	nodes := make([]string, 5) // list of node IP addrs or hostname with ports.
 	for i := range nodes {
 		nodes[i] = fmt.Sprintf("127.0.0.1:%d", i+12345)
 	}
 
@@ -82,7 +91,7 @@
 		GetLockers: func() ([]NetLocker, string) { return clnts, uuid.New().String() },
 	}
 
-	startRPCServers(nodes)
+	startRPCServers()
 
 	os.Exit(m.Run())
 }
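The test in the next hunk drives the failure through setRefreshReply, which belongs to the lockServer changes made in another file of this commit and is therefore not shown in this diff. A plausible minimal shape, purely as an illustration (the refreshReply field and the standalone main are assumptions):

// Illustrative only: the refreshReply field and this standalone program are
// assumptions; the real lockServer for this commit lives in a file that is
// not part of this diff.
package main

import "sync"

type lockServer struct {
	mutex   sync.Mutex
	lockMap map[string]int64
	// When false, the Refresh handler replies that the lock entry is unknown.
	refreshReply bool
}

// setRefreshReply lets tests force Refresh to report a missing (or present) lock.
func (l *lockServer) setRefreshReply(reply bool) {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	l.refreshReply = reply
}

func main() {
	ls := &lockServer{lockMap: make(map[string]int64)}
	ls.setRefreshReply(false) // simulate "lock not found", as TestFailedRefreshLock does
	ls.setRefreshReply(true)  // restore normal behaviour afterwards
}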
@@ -231,6 +240,42 @@ func TestTwoSimultaneousLocksForDifferentResources(t *testing.T) {
 	time.Sleep(10 * time.Millisecond)
 }
 
+// Test refreshing lock
+func TestFailedRefreshLock(t *testing.T) {
+	// Simulate Refresh RPC response to return no locking found
+	for i := range lockServers {
+		lockServers[i].setRefreshReply(false)
+	}
+
+	dm := NewDRWMutex(ds, "aap")
+	wg := sync.WaitGroup{}
+	wg.Add(1)
+
+	ctx, cl := context.WithCancel(context.Background())
+	cancel := func() {
+		cl()
+		wg.Done()
+	}
+
+	if !dm.GetLock(ctx, cancel, id, source, dsync.Options{Timeout: 5 * time.Minute}) {
+		t.Fatal("GetLock() should be successful")
+	}
+
+	// Wait until context is canceled
+	wg.Wait()
+	if ctx.Err() == nil {
+		t.Fatal("Unexpected error", ctx.Err())
+	}
+
+	// Should be safe operation in all cases
+	dm.Unlock()
+
+	// Revert Refresh RPC response to locking found
+	for i := range lockServers {
+		lockServers[i].setRefreshReply(true)
+	}
+}
+
 // Borrowed from mutex_test.go
 func HammerMutex(m *DRWMutex, loops int, cdone chan bool) {
 	for i := 0; i < loops; i++ {