run gofumpt cleanup across code-base (#14015)

Harshavardhana
2022-01-02 09:15:06 -08:00
committed by GitHub
parent 6f474982ed
commit f527c708f2
250 changed files with 1201 additions and 1264 deletions
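These changes are mechanical formatting fixes. As the hunks below show, the cleanup strips empty lines at the start and end of blocks (the hunks that drop a line without any visible code change), folds adjacent package-level var declarations into a single var ( ... ) group, and turns var x = f() into x := f() inside function bodies. A minimal before/after sketch of those patterns (hypothetical code, not taken from this repository):

Before:

	package example

	var a int
	var b string // related state

	func f() int {

		var x = a + len(b)
		return x

	}

After:

	package example

	var (
		a int
		b string // related state
	)

	func f() int {
		x := a + len(b)
		return x
	}

To check or reproduce this kind of cleanup locally, gofumpt -l . lists files whose formatting differs and gofumpt -w . rewrites them in place; gofumpt is a stricter drop-in replacement for gofmt.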

View File

@@ -103,7 +103,6 @@ func NewDRWMutex(clnt *Dsync, names ...string) *DRWMutex {
// If the lock is already in use, the calling go routine
// blocks until the mutex is available.
func (dm *DRWMutex) Lock(id, source string) {
	isReadLock := false
	dm.lockBlocking(context.Background(), nil, id, source, isReadLock, Options{
		Timeout: drwMutexInfinite,
@@ -121,7 +120,6 @@ type Options struct {
// blocks until either the mutex becomes available and return success or
// more time has passed than the timeout value and return false.
func (dm *DRWMutex) GetLock(ctx context.Context, cancel context.CancelFunc, id, source string, opts Options) (locked bool) {
	isReadLock := false
	return dm.lockBlocking(ctx, cancel, id, source, isReadLock, opts)
}
@@ -131,7 +129,6 @@ func (dm *DRWMutex) GetLock(ctx context.Context, cancel context.CancelFunc, id,
// If one or more read locks are already in use, it will grant another lock.
// Otherwise the calling go routine blocks until the mutex is available.
func (dm *DRWMutex) RLock(id, source string) {
	isReadLock := true
	dm.lockBlocking(context.Background(), nil, id, source, isReadLock, Options{
		Timeout: drwMutexInfinite,
@@ -145,7 +142,6 @@ func (dm *DRWMutex) RLock(id, source string) {
// available and return success or more time has passed than the timeout
// value and return false.
func (dm *DRWMutex) GetRLock(ctx context.Context, cancel context.CancelFunc, id, source string, opts Options) (locked bool) {
	isReadLock := true
	return dm.lockBlocking(ctx, cancel, id, source, isReadLock, opts)
}
@@ -317,7 +313,6 @@ func refresh(ctx context.Context, ds *Dsync, id, source string, quorum int) (boo
log("dsync: Refresh returned false for %#v at %s\n", args, c)
}
}
}(index, c)
}
@@ -422,7 +417,6 @@ func lock(ctx context.Context, ds *Dsync, locks *[]string, id, source string, is
				g.lockUID = args.UID
			}
			ch <- g
		}(index, isReadLock, c)
	}
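The doc comments above spell out the two acquisition styles on DRWMutex: Lock and RLock block until the lock can be granted, while GetLock and GetRLock take a context, a cancel function and an Options timeout and return false if the lock is not granted in time. A rough usage sketch against those signatures, assuming an initialized *Dsync plus caller id and source strings as in this package's tests; the resource name and five-second timeout are illustrative only:

	// Sketch only: ds is assumed to be a configured *Dsync, id and source
	// identify the caller, and context and time are assumed to be imported.
	func exampleLocking(ds *Dsync, id, source string) {
		dm := NewDRWMutex(ds, "example-resource")

		// Blocking write lock: waits until the mutex is available.
		dm.Lock(id, source)
		dm.Unlock()

		// Timed write lock: gives up once more time than opts.Timeout has passed.
		ctx, cancel := context.WithCancel(context.Background())
		if dm.GetLock(ctx, cancel, id, source, Options{Timeout: 5 * time.Second}) {
			dm.Unlock()
		}
	}

RLock and GetRLock follow the same shape for the read side, where multiple read locks may be held at once.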

View File

@@ -32,7 +32,6 @@ const (
)
func testSimpleWriteLock(t *testing.T, duration time.Duration) (locked bool) {
	drwm := NewDRWMutex(ds, "simplelock")
	ctx1, cancel1 := context.WithCancel(context.Background())
@@ -91,7 +90,6 @@ func TestSimpleWriteLockTimedOut(t *testing.T) {
}
func testDualWriteLock(t *testing.T, duration time.Duration) (locked bool) {
	drwm := NewDRWMutex(ds, "duallock")
	// fmt.Println("Getting initial write lock")
@@ -126,7 +124,6 @@ func TestDualWriteLockAcquired(t *testing.T) {
	if locked != expected {
		t.Errorf("TestDualWriteLockAcquired(): \nexpected %#v\ngot %#v", expected, locked)
	}
}
func TestDualWriteLockTimedOut(t *testing.T) {
@@ -136,7 +133,6 @@ func TestDualWriteLockTimedOut(t *testing.T) {
	if locked != expected {
		t.Errorf("TestDualWriteLockTimedOut(): \nexpected %#v\ngot %#v", expected, locked)
	}
}
// Test cases below are copied 1 to 1 from sync/rwmutex_test.go (adapted to use DRWMutex)

View File

@@ -36,11 +36,15 @@ import (
const numberOfNodes = 5
-var ds *Dsync
-var rpcPaths []string // list of rpc paths where lock server is serving.
+var (
+	ds       *Dsync
+	rpcPaths []string // list of rpc paths where lock server is serving.
+)

-var nodes = make([]string, numberOfNodes) // list of node IP addrs or hostname with ports.
-var lockServers []*lockServer
+var (
+	nodes       = make([]string, numberOfNodes) // list of node IP addrs or hostname with ports.
+	lockServers []*lockServer
+)
func startRPCServers() {
	for i := range nodes {
@@ -94,7 +98,6 @@ func TestMain(m *testing.M) {
}
func TestSimpleLock(t *testing.T) {
	dm := NewDRWMutex(ds, "test")
	dm.Lock(id, source)
@@ -106,7 +109,6 @@ func TestSimpleLock(t *testing.T) {
}
func TestSimpleLockUnlockMultipleTimes(t *testing.T) {
	dm := NewDRWMutex(ds, "test")
	dm.Lock(id, source)
@@ -132,7 +134,6 @@ func TestSimpleLockUnlockMultipleTimes(t *testing.T) {
// Test two locks for same resource, one succeeds, one fails (after timeout)
func TestTwoSimultaneousLocksForSameResource(t *testing.T) {
	dm1st := NewDRWMutex(ds, "aap")
	dm2nd := NewDRWMutex(ds, "aap")
@@ -156,7 +157,6 @@ func TestTwoSimultaneousLocksForSameResource(t *testing.T) {
// Test three locks for same resource, one succeeds, one fails (after timeout)
func TestThreeSimultaneousLocksForSameResource(t *testing.T) {
	dm1st := NewDRWMutex(ds, "aap")
	dm2nd := NewDRWMutex(ds, "aap")
	dm3rd := NewDRWMutex(ds, "aap")
@@ -221,7 +221,6 @@ func TestThreeSimultaneousLocksForSameResource(t *testing.T) {
// Test two locks for different resources, both succeed
func TestTwoSimultaneousLocksForDifferentResources(t *testing.T) {
	dm1 := NewDRWMutex(ds, "aap")
	dm2 := NewDRWMutex(ds, "noot")
@@ -332,7 +331,7 @@ func BenchmarkMutexUncontended(b *testing.B) {
		*DRWMutex
	}
	b.RunParallel(func(pb *testing.PB) {
-		var mu = PaddedMutex{NewDRWMutex(ds, "")}
+		mu := PaddedMutex{NewDRWMutex(ds, "")}
		for pb.Next() {
			mu.Lock(id, source)
			mu.Unlock()
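The one-line change in this last hunk, var mu = PaddedMutex{...} becoming mu := PaddedMutex{...}, is the short-declaration rewrite mentioned at the top of the commit. Since the hunk shows only the middle of BenchmarkMutexUncontended, here is how such a b.RunParallel benchmark presumably fits together; the enclosing function and closing braces are implied by the fragment rather than shown in the diff, and the comments are added here:

	func BenchmarkMutexUncontended(b *testing.B) {
		// Wrapper type; each parallel worker builds its own value below.
		type PaddedMutex struct {
			*DRWMutex
		}
		b.RunParallel(func(pb *testing.PB) {
			// One mutex per worker, so the benchmark measures uncontended
			// Lock/Unlock round-trips rather than lock contention.
			mu := PaddedMutex{NewDRWMutex(ds, "")}
			for pb.Next() {
				mu.Lock(id, source)
				mu.Unlock()
			}
		})
	}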