// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"errors"
	"fmt"
	pathutil "path"
	"runtime"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/minio/minio/internal/dsync"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/minio/internal/lsync"
)

// local lock servers
var globalLockServer *localLocker

// RWLocker - locker interface to introduce GetRLock, RUnlock.
type RWLocker interface {
	GetLock(ctx context.Context, timeout *dynamicTimeout) (lkCtx LockContext, timedOutErr error)
	Unlock(lkCtx LockContext)
	GetRLock(ctx context.Context, timeout *dynamicTimeout) (lkCtx LockContext, timedOutErr error)
	RUnlock(lkCtx LockContext)
}
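
// Typical caller-side sketch (illustrative; objAPI, bucket, object and
// globalOperationTimeout are assumptions from elsewhere in the codebase,
// not defined in this file):
//
//	lk := objAPI.NewNSLock(bucket, object)
//	lkCtx, err := lk.GetLock(ctx, globalOperationTimeout)
//	if err != nil {
//		return err
//	}
//	defer lk.Unlock(lkCtx)
//	ctx = lkCtx.Context() // use the lock-backed context for the guarded work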

// LockContext holds the lock-backed context and the cancel function
// for that context.
type LockContext struct {
	ctx    context.Context
	cancel context.CancelFunc
}

// Context returns the lock-backed context.
func (l LockContext) Context() context.Context {
	return l.ctx
}

// Cancel calls the underlying context cancel function, if any.
func (l LockContext) Cancel() {
	if l.cancel != nil {
		l.cancel()
	}
}

// newNSLock - return a new namespace lock map.
func newNSLock(isDistErasure bool) *nsLockMap {
	nsMutex := nsLockMap{
		isDistErasure: isDistErasure,
	}
	if isDistErasure {
		return &nsMutex
	}
	nsMutex.lockMap = make(map[string]*nsLock)
	return &nsMutex
}

// nsLock - provides primitives for locking critical namespace regions.
type nsLock struct {
	ref int32
	*lsync.LRWMutex
}

// nsLockMap - namespace lock map, provides primitives to Lock,
// Unlock, RLock and RUnlock.
type nsLockMap struct {
	// Indicates if namespace is part of a distributed setup.
	isDistErasure bool
	lockMap       map[string]*nsLock
	lockMapMutex  sync.Mutex
}

// Lock the namespace resource.
func (n *nsLockMap) lock(ctx context.Context, volume string, path string, lockSource, opsID string, readLock bool, timeout time.Duration) (locked bool) {
	resource := pathJoin(volume, path)

	n.lockMapMutex.Lock()
	nsLk, found := n.lockMap[resource]
	if !found {
		nsLk = &nsLock{
			LRWMutex: lsync.NewLRWMutex(),
		}
		// Add a count to indicate that a parallel unlock doesn't clear this entry.
	}
	nsLk.ref++
	n.lockMap[resource] = nsLk
	n.lockMapMutex.Unlock()

	// Locking here will block (until timeout).
	if readLock {
		locked = nsLk.GetRLock(ctx, opsID, lockSource, timeout)
	} else {
		locked = nsLk.GetLock(ctx, opsID, lockSource, timeout)
	}

	if !locked { // We failed to get the lock
		// Decrement ref count since we failed to get the lock
		n.lockMapMutex.Lock()
		n.lockMap[resource].ref--
		if n.lockMap[resource].ref < 0 {
			logger.CriticalIf(GlobalContext, errors.New("resource reference count was lower than 0"))
		}
		if n.lockMap[resource].ref == 0 {
			// Remove from the map if there are no more references.
			delete(n.lockMap, resource)
		}
		n.lockMapMutex.Unlock()
	}

	return
}
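
// Reference-count lifecycle sketch (illustrative; nsMutex, lockSource and
// opsID are placeholders): lock() bumps the entry's ref count, a failed
// acquisition or a matching unlock() drops it, and the map entry is deleted
// once the count reaches zero.
//
//	ok := nsMutex.lock(ctx, "bucket", "object", lockSource, opsID, false, 5*time.Second)
//	if ok {
//		// ... critical section on bucket/object ...
//		nsMutex.unlock("bucket", "object", false) // ref--; entry freed at zero
//	}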

// Unlock the namespace resource.
func (n *nsLockMap) unlock(volume string, path string, readLock bool) {
	resource := pathJoin(volume, path)

	n.lockMapMutex.Lock()
	defer n.lockMapMutex.Unlock()
	if _, found := n.lockMap[resource]; !found {
		return
	}
	if readLock {
		n.lockMap[resource].RUnlock()
	} else {
		n.lockMap[resource].Unlock()
	}
	n.lockMap[resource].ref--
	if n.lockMap[resource].ref < 0 {
		logger.CriticalIf(GlobalContext, errors.New("resource reference count was lower than 0"))
	}
	if n.lockMap[resource].ref == 0 {
		// Remove from the map if there are no more references.
		delete(n.lockMap, resource)
	}
}

// dsync's distributed lock instance.
type distLockInstance struct {
	rwMutex *dsync.DRWMutex
	opsID   string
}

// GetLock - blocks until write lock is taken or timeout has occurred.
func (di *distLockInstance) GetLock(ctx context.Context, timeout *dynamicTimeout) (LockContext, error) {
	lockSource := getSource(2)
	start := UTCNow()

	newCtx, cancel := context.WithCancel(ctx)
	if !di.rwMutex.GetLock(newCtx, cancel, di.opsID, lockSource, dsync.Options{
		Timeout:       timeout.Timeout(),
		RetryInterval: timeout.RetryInterval(),
	}) {
		timeout.LogFailure()
		defer cancel()
		if errors.Is(newCtx.Err(), context.Canceled) {
			return LockContext{ctx: ctx, cancel: func() {}}, newCtx.Err()
		}
		return LockContext{ctx: ctx, cancel: func() {}}, OperationTimedOut{}
	}
	timeout.LogSuccess(UTCNow().Sub(start))
	return LockContext{ctx: newCtx, cancel: cancel}, nil
}
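
// Note on the returned context (an assumption about dsync behavior, not a
// guarantee made by this file): the successful LockContext wraps newCtx,
// whose cancel function was handed to dsync; if the lock can no longer be
// refreshed, canceling it lets long-running callers observe
// lkCtx.Context().Done() and stop work that assumed the lock was held.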

// Unlock - block until write lock is released.
func (di *distLockInstance) Unlock(lc LockContext) {
	if lc.cancel != nil {
		lc.cancel()
	}
	di.rwMutex.Unlock(context.Background())
}

// GetRLock - blocks until read lock is taken or timeout has occurred.
func (di *distLockInstance) GetRLock(ctx context.Context, timeout *dynamicTimeout) (LockContext, error) {
	lockSource := getSource(2)
	start := UTCNow()

	newCtx, cancel := context.WithCancel(ctx)
	if !di.rwMutex.GetRLock(newCtx, cancel, di.opsID, lockSource, dsync.Options{
		Timeout:       timeout.Timeout(),
		RetryInterval: timeout.RetryInterval(),
	}) {
		timeout.LogFailure()
		defer cancel()
		if errors.Is(newCtx.Err(), context.Canceled) {
			return LockContext{ctx: ctx, cancel: func() {}}, newCtx.Err()
		}
		return LockContext{ctx: ctx, cancel: func() {}}, OperationTimedOut{}
	}
	timeout.LogSuccess(UTCNow().Sub(start))
	return LockContext{ctx: newCtx, cancel: cancel}, nil
}

// RUnlock - block until read lock is released.
func (di *distLockInstance) RUnlock(lc LockContext) {
	if lc.cancel != nil {
		lc.cancel()
	}
	di.rwMutex.RUnlock(lc.ctx)
}

// localLockInstance - frontend/top-level interface for namespace locks.
type localLockInstance struct {
	ns     *nsLockMap
	volume string
	paths  []string
	opsID  string
}

// NewNSLock - returns a lock instance for a given volume and
// path. The returned lockInstance object encapsulates the nsLockMap,
// volume, path and operation ID.
func (n *nsLockMap) NewNSLock(lockers func() ([]dsync.NetLocker, string), volume string, paths ...string) RWLocker {
	sort.Strings(paths)
	opsID := mustGetUUID()
	if n.isDistErasure {
		drwmutex := dsync.NewDRWMutex(&dsync.Dsync{
			GetLockers: lockers,
			Timeouts:   dsync.DefaultTimeouts,
		}, pathsJoinPrefix(volume, paths...)...)
		return &distLockInstance{drwmutex, opsID}
	}
	return &localLockInstance{n, volume, paths, opsID}
}
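
// Construction sketch (illustrative; globalIsDistErasure, minioMetaBucket and
// newDynamicTimeout come from elsewhere in the codebase, and the nil lockers
// callback assumes a non-distributed setup): the same call site transparently
// gets a dsync-backed locker in distributed erasure mode and an lsync-backed
// one otherwise.
//
//	nsMutex := newNSLock(globalIsDistErasure)
//	lk := nsMutex.NewNSLock(nil, minioMetaBucket, "config/iam/format.json")
//	lkCtx, err := lk.GetLock(ctx, newDynamicTimeout(5*time.Second, time.Second))
//	if err == nil {
//		defer lk.Unlock(lkCtx)
//	}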

// GetLock - blocks until write lock is taken or timeout has occurred.
func (li *localLockInstance) GetLock(ctx context.Context, timeout *dynamicTimeout) (_ LockContext, timedOutErr error) {
	lockSource := getSource(2)
	start := UTCNow()
	const readLock = false
	success := make([]int, len(li.paths))
	for i, path := range li.paths {
		if !li.ns.lock(ctx, li.volume, path, lockSource, li.opsID, readLock, timeout.Timeout()) {
			timeout.LogFailure()
			for si, sint := range success {
				if sint == 1 {
					li.ns.unlock(li.volume, li.paths[si], readLock)
				}
			}
			if errors.Is(ctx.Err(), context.Canceled) {
				return LockContext{}, ctx.Err()
			}
			return LockContext{}, OperationTimedOut{}
		}
		success[i] = 1
	}
	timeout.LogSuccess(UTCNow().Sub(start))
	return LockContext{ctx: ctx, cancel: func() {}}, nil
}

// Unlock - block until write lock is released.
func (li *localLockInstance) Unlock(lc LockContext) {
	if lc.cancel != nil {
		lc.cancel()
	}
	const readLock = false
	for _, path := range li.paths {
		li.ns.unlock(li.volume, path, readLock)
	}
}

// GetRLock - blocks until read lock is taken or timeout has occurred.
func (li *localLockInstance) GetRLock(ctx context.Context, timeout *dynamicTimeout) (_ LockContext, timedOutErr error) {
	lockSource := getSource(2)
	start := UTCNow()
	const readLock = true
	success := make([]int, len(li.paths))
	for i, path := range li.paths {
		if !li.ns.lock(ctx, li.volume, path, lockSource, li.opsID, readLock, timeout.Timeout()) {
			timeout.LogFailure()
			for si, sint := range success {
				if sint == 1 {
					li.ns.unlock(li.volume, li.paths[si], readLock)
				}
			}
			if errors.Is(ctx.Err(), context.Canceled) {
				return LockContext{}, ctx.Err()
			}
			return LockContext{}, OperationTimedOut{}
		}
		success[i] = 1
	}
	timeout.LogSuccess(UTCNow().Sub(start))
	return LockContext{ctx: ctx, cancel: func() {}}, nil
}

// RUnlock - block until read lock is released.
func (li *localLockInstance) RUnlock(lc LockContext) {
	if lc.cancel != nil {
		lc.cancel()
	}
	const readLock = true
	for _, path := range li.paths {
		li.ns.unlock(li.volume, path, readLock)
	}
}

// getSource returns the caller's source location n frames up the stack,
// formatted as "[file:line:function()]".
func getSource(n int) string {
	var funcName string
	pc, filename, lineNum, ok := runtime.Caller(n)
	if ok {
		filename = pathutil.Base(filename)
		funcName = strings.TrimPrefix(runtime.FuncForPC(pc).Name(),
			"github.com/minio/minio/cmd.")
	} else {
		filename = "<unknown>"
		lineNum = 0
	}

	return fmt.Sprintf("[%s:%d:%s()]", filename, lineNum, funcName)
}
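
// For example, with n == 2 a lock acquired inside GetLock is attributed to
// GetLock's caller, yielding a tag such as
// "[object-handlers.go:1523:objectAPIHandlers.PutObjectHandler()]"
// (file, line and function shown are illustrative values).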