Mirror of https://github.com/minio/minio.git
rename all remaining packages to internal/ (#12418)
This ensures that no outside projects try to import `minio/minio/pkg` into their own repositories. Any such common packages should instead go to `https://github.com/minio/pkg`.
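For context, a minimal sketch (not part of the commit) of what the move to internal/ changes for external consumers; the pre-rename pkg/ path is an assumption based on the commit description, since this diff only shows the new location:

// Sketch only: illustrates the effect of moving packages under internal/.
package main

func main() {
	// Before this change, external projects could write
	//     import "github.com/minio/minio/pkg/<somepkg>"
	// After it, the same code lives under internal/, e.g.
	//     import "github.com/minio/minio/internal/lsync"
	// which the Go toolchain rejects from outside the minio/minio module.
	// Reusable packages are published separately at https://github.com/minio/pkg.
}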
internal/lsync/lrwmutex.go (new file, 193 lines)
@@ -0,0 +1,193 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package lsync

import (
	"context"
	"math"
	"math/rand"
	"sync"
	"time"
)

// A LRWMutex is a mutual exclusion lock with timeouts.
type LRWMutex struct {
	id          string
	source      string
	isWriteLock bool
	ref         int
	mu          sync.Mutex // Mutex to prevent multiple simultaneous locks
}

// NewLRWMutex - initializes a new lsync RW mutex.
func NewLRWMutex() *LRWMutex {
	return &LRWMutex{}
}

// Lock holds a write lock on lm.
//
// If the lock is already in use, the calling go routine
// blocks until the mutex is available.
func (lm *LRWMutex) Lock() {
	const isWriteLock = true
	lm.lockLoop(context.Background(), lm.id, lm.source, math.MaxInt64, isWriteLock)
}

// GetLock tries to get a write lock on lm before the timeout occurs.
func (lm *LRWMutex) GetLock(ctx context.Context, id string, source string, timeout time.Duration) (locked bool) {
	const isWriteLock = true
	return lm.lockLoop(ctx, id, source, timeout, isWriteLock)
}

// RLock holds a read lock on lm.
//
// If one or more read locks are already in use, it will grant another lock.
// Otherwise the calling go routine blocks until the mutex is available.
func (lm *LRWMutex) RLock() {
	const isWriteLock = false
	lm.lockLoop(context.Background(), lm.id, lm.source, 1<<63-1, isWriteLock)
}

// GetRLock tries to get a read lock on lm before the timeout occurs.
func (lm *LRWMutex) GetRLock(ctx context.Context, id string, source string, timeout time.Duration) (locked bool) {
	const isWriteLock = false
	return lm.lockLoop(ctx, id, source, timeout, isWriteLock)
}

func (lm *LRWMutex) lock(id, source string, isWriteLock bool) (locked bool) {
	lm.mu.Lock()
	defer lm.mu.Unlock()

	lm.id = id
	lm.source = source
	if isWriteLock {
		if lm.ref == 0 && !lm.isWriteLock {
			lm.ref = 1
			lm.isWriteLock = true
			locked = true
		}
	} else {
		if !lm.isWriteLock {
			lm.ref++
			locked = true
		}
	}

	return locked
}

const (
	lockRetryInterval = 50 * time.Millisecond
)

// lockLoop will acquire either a read or a write lock.
//
// The call will block until the lock is granted, using a built-in
// timing randomized back-off algorithm to try again until successful.
func (lm *LRWMutex) lockLoop(ctx context.Context, id, source string, timeout time.Duration, isWriteLock bool) (locked bool) {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))

	retryCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	for {
		select {
		case <-retryCtx.Done():
			// Caller context canceled or we timed out,
			// return false anyways for both situations.
			return false
		default:
			if lm.lock(id, source, isWriteLock) {
				return true
			}
			time.Sleep(time.Duration(r.Float64() * float64(lockRetryInterval)))
		}
	}
}

// Unlock unlocks the write lock.
//
// It is a run-time error if lm is not locked on entry to Unlock.
func (lm *LRWMutex) Unlock() {
	isWriteLock := true
	success := lm.unlock(isWriteLock)
	if !success {
		panic("Trying to Unlock() while no Lock() is active")
	}
}

// RUnlock releases a read lock held on lm.
//
// It is a run-time error if lm is not locked on entry to RUnlock.
func (lm *LRWMutex) RUnlock() {
	isWriteLock := false
	success := lm.unlock(isWriteLock)
	if !success {
		panic("Trying to RUnlock() while no RLock() is active")
	}
}

func (lm *LRWMutex) unlock(isWriteLock bool) (unlocked bool) {
	lm.mu.Lock()
	defer lm.mu.Unlock()

	// Try to release lock.
	if isWriteLock {
		if lm.isWriteLock && lm.ref == 1 {
			lm.ref = 0
			lm.isWriteLock = false
			unlocked = true
		}
	} else {
		if !lm.isWriteLock {
			if lm.ref > 0 {
				lm.ref--
				unlocked = true
			}
		}
	}

	return unlocked
}

// ForceUnlock will forcefully clear a write or read lock.
func (lm *LRWMutex) ForceUnlock() {
	lm.mu.Lock()
	defer lm.mu.Unlock()

	lm.ref = 0
	lm.isWriteLock = false
}

// DRLocker returns a sync.Locker interface that implements
// the Lock and Unlock methods by calling drw.RLock and drw.RUnlock.
func (lm *LRWMutex) DRLocker() sync.Locker {
	return (*drlocker)(lm)
}

type drlocker LRWMutex

func (dr *drlocker) Lock()   { (*LRWMutex)(dr).RLock() }
func (dr *drlocker) Unlock() { (*LRWMutex)(dr).RUnlock() }
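Before the test file below, a minimal usage sketch of this API (not part of the commit). It assumes the import path shown in the tests, github.com/minio/minio/internal/lsync; the id and source arguments are arbitrary placeholder strings that the mutex stores for diagnostics (the tests simply pass empty strings).

// Sketch: acquiring and releasing LRWMutex write and read locks with timeouts.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/minio/minio/internal/lsync"
)

func main() {
	lk := lsync.NewLRWMutex()

	// Try to take the write lock, giving up after one second.
	if lk.GetLock(context.Background(), "lock-id", "example.go:17", time.Second) {
		fmt.Println("write lock acquired")
		lk.Unlock()
	} else {
		fmt.Println("timed out waiting for write lock")
	}

	// Multiple read locks may be held concurrently.
	if lk.GetRLock(context.Background(), "lock-id", "example.go:25", time.Second) {
		defer lk.RUnlock()
		fmt.Println("read lock acquired")
	}
}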
internal/lsync/lrwmutex_test.go (new file, 339 lines)
@@ -0,0 +1,339 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package lsync_test

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"runtime"

	. "github.com/minio/minio/internal/lsync"
)

func testSimpleWriteLock(t *testing.T, duration time.Duration) (locked bool) {
	ctx := context.Background()
	lrwm := NewLRWMutex()

	if !lrwm.GetRLock(ctx, "", "object1", time.Second) {
		panic("Failed to acquire read lock")
	}
	// fmt.Println("1st read lock acquired, waiting...")

	if !lrwm.GetRLock(ctx, "", "object1", time.Second) {
		panic("Failed to acquire read lock")
	}
	// fmt.Println("2nd read lock acquired, waiting...")

	go func() {
		time.Sleep(2 * time.Second)
		lrwm.RUnlock()
		// fmt.Println("1st read lock released, waiting...")
	}()

	go func() {
		time.Sleep(3 * time.Second)
		lrwm.RUnlock()
		// fmt.Println("2nd read lock released, waiting...")
	}()

	// fmt.Println("Trying to acquire write lock, waiting...")
	locked = lrwm.GetLock(ctx, "", "", duration)
	if locked {
		// fmt.Println("Write lock acquired, waiting...")
		time.Sleep(1 * time.Second)

		lrwm.Unlock()
	} else {
		// fmt.Println("Write lock failed due to timeout")
	}
	return
}

func TestSimpleWriteLockAcquired(t *testing.T) {
	locked := testSimpleWriteLock(t, 5*time.Second)

	expected := true
	if locked != expected {
		t.Errorf("TestSimpleWriteLockAcquired(): \nexpected %#v\ngot %#v", expected, locked)
	}
}

func TestSimpleWriteLockTimedOut(t *testing.T) {
	locked := testSimpleWriteLock(t, time.Second)

	expected := false
	if locked != expected {
		t.Errorf("TestSimpleWriteLockTimedOut(): \nexpected %#v\ngot %#v", expected, locked)
	}
}

func testDualWriteLock(t *testing.T, duration time.Duration) (locked bool) {
	ctx := context.Background()
	lrwm := NewLRWMutex()

	// fmt.Println("Getting initial write lock")
	if !lrwm.GetLock(ctx, "", "", time.Second) {
		panic("Failed to acquire initial write lock")
	}

	go func() {
		time.Sleep(2 * time.Second)
		lrwm.Unlock()
		// fmt.Println("Initial write lock released, waiting...")
	}()

	// fmt.Println("Trying to acquire 2nd write lock, waiting...")
	locked = lrwm.GetLock(ctx, "", "", duration)
	if locked {
		// fmt.Println("2nd write lock acquired, waiting...")
		time.Sleep(time.Second)

		lrwm.Unlock()
	} else {
		// fmt.Println("2nd write lock failed due to timeout")
	}
	return
}

func TestDualWriteLockAcquired(t *testing.T) {
	locked := testDualWriteLock(t, 3*time.Second)

	expected := true
	if locked != expected {
		t.Errorf("TestDualWriteLockAcquired(): \nexpected %#v\ngot %#v", expected, locked)
	}
}

func TestDualWriteLockTimedOut(t *testing.T) {
	locked := testDualWriteLock(t, time.Second)

	expected := false
	if locked != expected {
		t.Errorf("TestDualWriteLockTimedOut(): \nexpected %#v\ngot %#v", expected, locked)
	}
}

// Test cases below are copied 1 to 1 from sync/rwmutex_test.go (adapted to use LRWMutex)

// Borrowed from rwmutex_test.go
func parallelReader(ctx context.Context, m *LRWMutex, clocked, cunlock, cdone chan bool) {
	if m.GetRLock(ctx, "", "", time.Second) {
		clocked <- true
		<-cunlock
		m.RUnlock()
		cdone <- true
	}
}

// Borrowed from rwmutex_test.go
func doTestParallelReaders(numReaders, gomaxprocs int) {
	runtime.GOMAXPROCS(gomaxprocs)
	m := NewLRWMutex()

	clocked := make(chan bool)
	cunlock := make(chan bool)
	cdone := make(chan bool)
	for i := 0; i < numReaders; i++ {
		go parallelReader(context.Background(), m, clocked, cunlock, cdone)
	}
	// Wait for all parallel RLock()s to succeed.
	for i := 0; i < numReaders; i++ {
		<-clocked
	}
	for i := 0; i < numReaders; i++ {
		cunlock <- true
	}
	// Wait for the goroutines to finish.
	for i := 0; i < numReaders; i++ {
		<-cdone
	}
}

// Borrowed from rwmutex_test.go
func TestParallelReaders(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
	doTestParallelReaders(1, 4)
	doTestParallelReaders(3, 4)
	doTestParallelReaders(4, 2)
}

// Borrowed from rwmutex_test.go
func reader(rwm *LRWMutex, numIterations int, activity *int32, cdone chan bool) {
	for i := 0; i < numIterations; i++ {
		if rwm.GetRLock(context.Background(), "", "", time.Second) {
			n := atomic.AddInt32(activity, 1)
			if n < 1 || n >= 10000 {
				panic(fmt.Sprintf("wlock(%d)\n", n))
			}
			for i := 0; i < 100; i++ {
			}
			atomic.AddInt32(activity, -1)
			rwm.RUnlock()
		}
	}
	cdone <- true
}

// Borrowed from rwmutex_test.go
func writer(rwm *LRWMutex, numIterations int, activity *int32, cdone chan bool) {
	for i := 0; i < numIterations; i++ {
		if rwm.GetLock(context.Background(), "", "", time.Second) {
			n := atomic.AddInt32(activity, 10000)
			if n != 10000 {
				panic(fmt.Sprintf("wlock(%d)\n", n))
			}
			for i := 0; i < 100; i++ {
			}
			atomic.AddInt32(activity, -10000)
			rwm.Unlock()
		}
	}
	cdone <- true
}

// Borrowed from rwmutex_test.go
func HammerRWMutex(gomaxprocs, numReaders, numIterations int) {
	runtime.GOMAXPROCS(gomaxprocs)
	// Number of active readers + 10000 * number of active writers.
	var activity int32
	rwm := NewLRWMutex()
	cdone := make(chan bool)
	go writer(rwm, numIterations, &activity, cdone)
	var i int
	for i = 0; i < numReaders/2; i++ {
		go reader(rwm, numIterations, &activity, cdone)
	}
	go writer(rwm, numIterations, &activity, cdone)
	for ; i < numReaders; i++ {
		go reader(rwm, numIterations, &activity, cdone)
	}
	// Wait for the 2 writers and all readers to finish.
	for i := 0; i < 2+numReaders; i++ {
		<-cdone
	}
}

// Borrowed from rwmutex_test.go
func TestRWMutex(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
	n := 1000
	if testing.Short() {
		n = 5
	}
	HammerRWMutex(1, 1, n)
	HammerRWMutex(1, 3, n)
	HammerRWMutex(1, 10, n)
	HammerRWMutex(4, 1, n)
	HammerRWMutex(4, 3, n)
	HammerRWMutex(4, 10, n)
	HammerRWMutex(10, 1, n)
	HammerRWMutex(10, 3, n)
	HammerRWMutex(10, 10, n)
	HammerRWMutex(10, 5, n)
}

// Borrowed from rwmutex_test.go
func TestDRLocker(t *testing.T) {
	wl := NewLRWMutex()
	var rl sync.Locker
	wlocked := make(chan bool, 1)
	rlocked := make(chan bool, 1)
	rl = wl.DRLocker()
	n := 10
	go func() {
		for i := 0; i < n; i++ {
			rl.Lock()
			rl.Lock()
			rlocked <- true
			wl.Lock()
			wlocked <- true
		}
	}()
	for i := 0; i < n; i++ {
		<-rlocked
		rl.Unlock()
		select {
		case <-wlocked:
			t.Fatal("RLocker() didn't read-lock it")
		default:
		}
		rl.Unlock()
		<-wlocked
		select {
		case <-rlocked:
			t.Fatal("RLocker() didn't respect the write lock")
		default:
		}
		wl.Unlock()
	}
}

// Borrowed from rwmutex_test.go
func TestUnlockPanic(t *testing.T) {
	defer func() {
		if recover() == nil {
			t.Fatalf("unlock of unlocked RWMutex did not panic")
		}
	}()
	mu := NewLRWMutex()
	mu.Unlock()
}

// Borrowed from rwmutex_test.go
func TestUnlockPanic2(t *testing.T) {
	defer func() {
		if recover() == nil {
			t.Fatalf("unlock of unlocked RWMutex did not panic")
		}
	}()
	mu := NewLRWMutex()
	mu.RLock()
	mu.Unlock()
}

// Borrowed from rwmutex_test.go
func TestRUnlockPanic(t *testing.T) {
	defer func() {
		if recover() == nil {
			t.Fatalf("read unlock of unlocked RWMutex did not panic")
		}
	}()
	mu := NewLRWMutex()
	mu.RUnlock()
}

// Borrowed from rwmutex_test.go
func TestRUnlockPanic2(t *testing.T) {
	defer func() {
		if recover() == nil {
			t.Fatalf("read unlock of unlocked RWMutex did not panic")
		}
	}()
	mu := NewLRWMutex()
	mu.Lock()
	mu.RUnlock()
}