mirror of https://github.com/minio/minio.git
synced 2024-12-25 14:45:54 -05:00
commit a2a48cc065
When objects hold a lot of read locks, cleanup time grows exponentially. ``` BEFORE: Unable to complete tests. AFTER: === RUN Test_localLocker_expireOldLocksExpire/100-locks/1-read local-locker_test.go:298: Scan Took: 0s. Left: 100/100 local-locker_test.go:317: Expire 50% took: 0s. Left: 44/44 local-locker_test.go:331: Expire rest took: 0s. Left: 0/0 === RUN Test_localLocker_expireOldLocksExpire/100-locks/100-read local-locker_test.go:298: Scan Took: 0s. Left: 10000/100 local-locker_test.go:317: Expire 50% took: 1ms. Left: 5000/100 local-locker_test.go:331: Expire rest took: 1ms. Left: 0/0 === RUN Test_localLocker_expireOldLocksExpire/100-locks/1000-read local-locker_test.go:298: Scan Took: 2ms. Left: 100000/100 local-locker_test.go:317: Expire 50% took: 55ms. Left: 50038/100 local-locker_test.go:331: Expire rest took: 29ms. Left: 0/0 === RUN Test_localLocker_expireOldLocksExpire/10000-locks/1-read local-locker_test.go:298: Scan Took: 1ms. Left: 10000/10000 local-locker_test.go:317: Expire 50% took: 2ms. Left: 5019/5019 local-locker_test.go:331: Expire rest took: 2ms. Left: 0/0 === RUN Test_localLocker_expireOldLocksExpire/10000-locks/100-read local-locker_test.go:298: Scan Took: 23ms. Left: 1000000/10000 local-locker_test.go:317: Expire 50% took: 160ms. Left: 499798/10000 local-locker_test.go:331: Expire rest took: 138ms. Left: 0/0 === RUN Test_localLocker_expireOldLocksExpire/10000-locks/1000-read local-locker_test.go:298: Scan Took: 200ms. Left: 10000000/10000 local-locker_test.go:317: Expire 50% took: 5.888s. Left: 5000196/10000 local-locker_test.go:331: Expire rest took: 3.417s. Left: 0/0 === RUN Test_localLocker_expireOldLocksExpire/1000000-locks/1-read local-locker_test.go:298: Scan Took: 133ms. Left: 1000000/1000000 local-locker_test.go:317: Expire 50% took: 348ms. Left: 500255/500255 local-locker_test.go:331: Expire rest took: 307ms. Left: 0/0 ```
347 lines · 9.1 KiB · Go
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"encoding/hex"
	"fmt"
	"math/rand"
	"testing"
	"time"

	"github.com/google/uuid"
	"github.com/minio/minio/internal/dsync"
)

func TestLocalLockerExpire(t *testing.T) {
|
|
wResources := make([]string, 1000)
|
|
rResources := make([]string, 1000)
|
|
l := newLocker()
|
|
ctx := context.Background()
|
|
for i := range wResources {
|
|
arg := dsync.LockArgs{
|
|
UID: mustGetUUID(),
|
|
Resources: []string{mustGetUUID()},
|
|
Source: t.Name(),
|
|
Owner: "owner",
|
|
Quorum: 0,
|
|
}
|
|
ok, err := l.Lock(ctx, arg)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !ok {
|
|
t.Fatal("did not get write lock")
|
|
}
|
|
wResources[i] = arg.Resources[0]
|
|
}
|
|
for i := range rResources {
|
|
name := mustGetUUID()
|
|
arg := dsync.LockArgs{
|
|
UID: mustGetUUID(),
|
|
Resources: []string{name},
|
|
Source: t.Name(),
|
|
Owner: "owner",
|
|
Quorum: 0,
|
|
}
|
|
ok, err := l.RLock(ctx, arg)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !ok {
|
|
t.Fatal("did not get write lock")
|
|
}
|
|
// RLock twice
|
|
ok, err = l.RLock(ctx, arg)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !ok {
|
|
t.Fatal("did not get write lock")
|
|
}
|
|
|
|
rResources[i] = arg.Resources[0]
|
|
}
|
|
if len(l.lockMap) != len(rResources)+len(wResources) {
|
|
t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), len(rResources), len(wResources))
|
|
}
|
|
if len(l.lockUID) != len(rResources)+len(wResources) {
|
|
t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), len(rResources), len(wResources))
|
|
}
|
|
// Expire an hour from now, should keep all
|
|
l.expireOldLocks(time.Hour)
|
|
if len(l.lockMap) != len(rResources)+len(wResources) {
|
|
t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), len(rResources), len(wResources))
|
|
}
|
|
if len(l.lockUID) != len(rResources)+len(wResources) {
|
|
t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), len(rResources), len(wResources))
|
|
}
|
|
|
|
// Expire a minute ago.
|
|
l.expireOldLocks(-time.Minute)
|
|
if len(l.lockMap) != 0 {
|
|
t.Fatalf("after cleanup should be empty, got %d", len(l.lockMap))
|
|
}
|
|
if len(l.lockUID) != 0 {
|
|
t.Fatalf("lockUID len, got %d, want %d", len(l.lockUID), 0)
|
|
}
|
|
}
|
|
|
|
func TestLocalLockerUnlock(t *testing.T) {
|
|
const n = 1000
|
|
const m = 5
|
|
wResources := make([][m]string, n)
|
|
rResources := make([]string, n)
|
|
wUIDs := make([]string, n)
|
|
rUIDs := make([]string, 0, n*2)
|
|
l := newLocker()
|
|
ctx := context.Background()
|
|
for i := range wResources {
|
|
names := [m]string{}
|
|
for j := range names {
|
|
names[j] = mustGetUUID()
|
|
}
|
|
uid := mustGetUUID()
|
|
arg := dsync.LockArgs{
|
|
UID: uid,
|
|
Resources: names[:],
|
|
Source: t.Name(),
|
|
Owner: "owner",
|
|
Quorum: 0,
|
|
}
|
|
ok, err := l.Lock(ctx, arg)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !ok {
|
|
t.Fatal("did not get write lock")
|
|
}
|
|
wResources[i] = names
|
|
wUIDs[i] = uid
|
|
|
|
}
|
|
for i := range rResources {
|
|
name := mustGetUUID()
|
|
uid := mustGetUUID()
|
|
arg := dsync.LockArgs{
|
|
UID: uid,
|
|
Resources: []string{name},
|
|
Source: t.Name(),
|
|
Owner: "owner",
|
|
Quorum: 0,
|
|
}
|
|
ok, err := l.RLock(ctx, arg)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !ok {
|
|
t.Fatal("did not get write lock")
|
|
}
|
|
rUIDs = append(rUIDs, uid)
|
|
|
|
// RLock twice, different uid
|
|
uid = mustGetUUID()
|
|
arg.UID = uid
|
|
ok, err = l.RLock(ctx, arg)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !ok {
|
|
t.Fatal("did not get write lock")
|
|
}
|
|
rResources[i] = name
|
|
rUIDs = append(rUIDs, uid)
|
|
}
|
|
// Each Lock has m entries
|
|
if len(l.lockMap) != len(rResources)+len(wResources)*m {
|
|
t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), len(rResources), len(wResources)*m)
|
|
}
|
|
// A UID is added for every resource
|
|
if len(l.lockUID) != len(rResources)*2+len(wResources)*m {
|
|
t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), len(rResources)*2, len(wResources)*m)
|
|
}
|
|
// RUnlock once...
|
|
for i, name := range rResources {
|
|
arg := dsync.LockArgs{
|
|
UID: rUIDs[i*2],
|
|
Resources: []string{name},
|
|
Source: t.Name(),
|
|
Owner: "owner",
|
|
Quorum: 0,
|
|
}
|
|
ok, err := l.RUnlock(ctx, arg)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !ok {
|
|
t.Fatal("did not get write lock")
|
|
}
|
|
}
|
|
|
|
// Each Lock has m entries
|
|
if len(l.lockMap) != len(rResources)+len(wResources)*m {
|
|
t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), len(rResources), len(wResources)*m)
|
|
}
|
|
// A UID is added for every resource.
|
|
// We removed len(rResources) read sources.
|
|
if len(l.lockUID) != len(rResources)+len(wResources)*m {
|
|
t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), len(rResources), len(wResources)*m)
|
|
}
|
|
// RUnlock again, different uids
|
|
for i, name := range rResources {
|
|
arg := dsync.LockArgs{
|
|
UID: rUIDs[i*2+1],
|
|
Resources: []string{name},
|
|
Source: "minio",
|
|
Owner: "owner",
|
|
Quorum: 0,
|
|
}
|
|
ok, err := l.RUnlock(ctx, arg)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !ok {
|
|
t.Fatal("did not get write lock")
|
|
}
|
|
}
|
|
|
|
// Each Lock has m entries
|
|
if len(l.lockMap) != 0+len(wResources)*m {
|
|
t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), 0, len(wResources)*m)
|
|
}
|
|
// A UID is added for every resource.
|
|
// We removed Add Rlocked entries
|
|
if len(l.lockUID) != len(wResources)*m {
|
|
t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), 0, len(wResources)*m)
|
|
}
|
|
|
|
// Remove write locked
|
|
for i, names := range wResources {
|
|
arg := dsync.LockArgs{
|
|
UID: wUIDs[i],
|
|
Resources: names[:],
|
|
Source: "minio",
|
|
Owner: "owner",
|
|
Quorum: 0,
|
|
}
|
|
ok, err := l.Unlock(ctx, arg)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !ok {
|
|
t.Fatal("did not get write lock")
|
|
}
|
|
}
|
|
|
|
// All should be gone now...
|
|
// Each Lock has m entries
|
|
if len(l.lockMap) != 0 {
|
|
t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), 0, 0)
|
|
}
|
|
if len(l.lockUID) != 0 {
|
|
t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), 0, 0)
|
|
}
|
|
}
|
|
|
|
// Test_localLocker_expireOldLocksExpire stress-tests expireOldLocks with many
// resources and many read locks per resource, logging how long each sweep
// takes. It checks three phases: a future-dated sweep keeps every entry,
// back-dating ~50% of the refresh times removes some (but not all) entries,
// and a negative interval clears both maps completely.
func Test_localLocker_expireOldLocksExpire(t *testing.T) {
	// Deterministic seed so generated resource names and the 50%-expiry
	// selection are reproducible across runs.
	rng := rand.New(rand.NewSource(0))
	// Numbers of unique locks
	for _, locks := range []int{100, 1000, 1e6} {
		if testing.Short() && locks > 100 {
			continue
		}
		t.Run(fmt.Sprintf("%d-locks", locks), func(t *testing.T) {
			// Number of readers per lock...
			for _, readers := range []int{1, 10, 100} {
				// Skip the biggest combinations to keep runtime bounded.
				if locks > 1000 && readers > 1 {
					continue
				}
				if testing.Short() && readers > 10 {
					continue
				}
				t.Run(fmt.Sprintf("%d-read", readers), func(t *testing.T) {
					l := newLocker()
					for i := 0; i < locks; i++ {
						var tmp [16]byte
						rng.Read(tmp[:])
						res := []string{hex.EncodeToString(tmp[:])}

						// Take 'readers' read locks on the same resource,
						// each under a fresh UID and random source/owner.
						for i := 0; i < readers; i++ {
							rng.Read(tmp[:])
							ok, err := l.RLock(context.Background(), dsync.LockArgs{
								UID:       uuid.NewString(),
								Resources: res,
								Source:    hex.EncodeToString(tmp[:8]),
								Owner:     hex.EncodeToString(tmp[8:]),
								Quorum:    0,
							})
							if !ok || err != nil {
								t.Fatal("failed:", err, ok)
							}
						}
					}
					// One-hour window: no lock is old enough to expire,
					// so the sweep must be a no-op on both maps.
					start := time.Now()
					l.expireOldLocks(time.Hour)
					t.Logf("Scan Took: %v. Left: %d/%d", time.Since(start).Round(time.Millisecond), len(l.lockUID), len(l.lockMap))
					if len(l.lockMap) != locks {
						t.Fatalf("objects deleted, want %d != got %d", locks, len(l.lockMap))
					}
					// lockUID holds one entry per reader UID.
					if len(l.lockUID) != locks*readers {
						t.Fatalf("objects deleted, want %d != got %d", locks*readers, len(l.lockUID))
					}

					// Expire 50%: back-date the refresh time of a random
					// half of the individual lock entries.
					expired := time.Now().Add(-time.Hour * 2)
					for _, v := range l.lockMap {
						for i := range v {
							if rng.Intn(2) == 0 {
								v[i].TimeLastRefresh = expired
							}
						}
					}
					start = time.Now()
					l.expireOldLocks(time.Hour)
					t.Logf("Expire 50%% took: %v. Left: %d/%d", time.Since(start).Round(time.Millisecond), len(l.lockUID), len(l.lockMap))

					// With ~half the entries back-dated it is statistically
					// (near-)certain that some, but not all, were removed.
					if len(l.lockUID) == locks*readers {
						t.Fatalf("objects uids all remain, unlikely")
					}
					if len(l.lockMap) == 0 {
						t.Fatalf("objects all deleted, 0 remains")
					}
					if len(l.lockUID) == 0 {
						t.Fatalf("objects uids all deleted, 0 remains")
					}

					// Negative interval: every remaining lock is "too old",
					// so the sweep must empty both maps.
					start = time.Now()
					l.expireOldLocks(-time.Minute)
					t.Logf("Expire rest took: %v. Left: %d/%d", time.Since(start).Round(time.Millisecond), len(l.lockUID), len(l.lockMap))

					if len(l.lockMap) != 0 {
						t.Fatalf("objects not deleted, want %d != got %d", 0, len(l.lockMap))
					}
					if len(l.lockUID) != 0 {
						t.Fatalf("objects not deleted, want %d != got %d", 0, len(l.lockUID))
					}
				})
			}
		})
	}
}