Mirror of https://github.com/minio/minio.git (synced 2025-04-29)
Added clear subcommand for control lock (#3013)
Added clear subcommand for control lock with the following options:

```
3. Clear lock named 'bucket/object' (exact match).
  $ minio control lock clear http://localhost:9000/bucket/object

4. Clear all locks with names that start with 'bucket/prefix' (wildcard match).
  $ minio control lock --recursive clear http://localhost:9000/bucket/prefix

5. Clear all locks older than 10minutes.
  $ minio control lock --older-than=10m clear http://localhost:9000/

6. Clear all locks with names that start with 'bucket/a' and that are older than 1hour.
  $ minio control lock --recursive --older-than=1h clear http://localhost:9000/bucket/a
```
parent 6274727b71
commit 0e2cd1a64d
```diff
@@ -261,7 +261,7 @@ func (c *controlAPIHandlers) ServiceHandler(args *ServiceArgs, reply *ServiceRep
     return nil
 }
 
-// LockInfo - RPC control handler for `minio control lock`. Returns the info of the locks held in the system.
+// TryInitHandler - generic RPC control handler
 func (c *controlAPIHandlers) TryInitHandler(args *GenericArgs, reply *GenericReply) error {
     if !isRPCTokenValid(args.Token) {
         return errInvalidToken
```
```diff
@@ -17,8 +17,10 @@
 package cmd
 
 import (
+    "errors"
     "net/url"
     "path"
+    "strings"
     "time"
 
     "github.com/minio/cli"
```
```diff
@@ -28,13 +30,17 @@ import (
 var lockFlags = []cli.Flag{
     cli.StringFlag{
         Name:  "older-than",
-        Usage: "List locks older than given time.",
+        Usage: "Include locks older than given time.",
         Value: "24h",
     },
     cli.BoolFlag{
         Name:  "verbose",
         Usage: "Lists more information about locks.",
     },
+    cli.BoolFlag{
+        Name:  "recursive",
+        Usage: "Recursively clear locks.",
+    },
 }
 
 var lockCmd = cli.Command{
```
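The `older-than` flag takes a Go duration string (default `"24h"`). The parsing code is not part of this diff; a minimal sketch of the age comparison it implies, assuming the value goes through the standard `time.ParseDuration` (the `main` scaffolding here is illustrative only, not minio code):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// "24h" is the flag default from lockFlags above; any string that
	// time.ParseDuration accepts ("10m", "1h30m", ...) would work.
	olderThan, err := time.ParseDuration("24h")
	if err != nil {
		fmt.Println("invalid duration:", err)
		return
	}
	// A lock held for 25 hours passes the age filter; one held for
	// 10 minutes does not.
	fmt.Println(25*time.Hour >= olderThan)   // true
	fmt.Println(10*time.Minute >= olderThan) // false
}
```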
```diff
@@ -55,8 +61,20 @@ EAMPLES:
 1. List all currently active locks from all nodes. Defaults to list locks held longer than 24hrs.
   $ minio control {{.Name}} list http://localhost:9000/
 
-2. List all currently active locks from all nodes. Request locks from older than 1minute.
+2. List all currently active locks from all nodes. Request locks older than 1minute.
   $ minio control {{.Name}} --older-than=1m list http://localhost:9000/
 
+3. Clear lock named 'bucket/object' (exact match).
+  $ minio control {{.Name}} clear http://localhost:9000/bucket/object
+
+4. Clear all locks with names that start with 'bucket/prefix' (wildcard match).
+  $ minio control {{.Name}} --recursive clear http://localhost:9000/bucket/prefix
+
+5. Clear all locks older than 10minutes.
+  $ minio control {{.Name}} --older-than=10m clear http://localhost:9000/
+
+6. Clear all locks with names that start with 'bucket/a' and that are older than 1hour.
+  $ minio control {{.Name}} --recursive --older-than=1h clear http://localhost:9000/bucket/a
+
 `,
 }
 
```
```diff
@@ -96,6 +114,33 @@ func printLockState(lkStateRep map[string]SystemLockState, olderThan time.Durati
     }
 }
 
+// clearLockState - clear locks based on a filter for a given duration and a name or prefix to match
+func clearLockState(f func(bucket, object string), lkStateRep map[string]SystemLockState, olderThan time.Duration, match string, recursive bool) {
+    console.Println("Status Duration Server LockType Resource")
+    for server, lockState := range lkStateRep {
+        for _, lockInfo := range lockState.LocksInfoPerObject {
+            lockedResource := path.Join(lockInfo.Bucket, lockInfo.Object)
+            for _, lockDetails := range lockInfo.LockDetailsOnObject {
+                if lockDetails.Duration < olderThan {
+                    continue
+                }
+                if match != "" {
+                    if recursive {
+                        if !strings.HasPrefix(lockedResource, match) {
+                            continue
+                        }
+                    } else if lockedResource != match {
+                        continue
+                    }
+                }
+                f(lockInfo.Bucket, lockInfo.Object)
+                console.Println("CLEARED", lockDetails.Duration, server,
+                    lockDetails.LockType, lockedResource)
+            }
+        }
+    }
+}
+
 // "minio control lock" entry point.
 func lockControl(c *cli.Context) {
     if !c.Args().Present() && len(c.Args()) != 2 {
```
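The filter inside `clearLockState` combines an age threshold with either an exact name match or, with `--recursive`, a prefix match. A self-contained restatement of just that matching logic, for illustration (the `matches` helper is not part of the diff):

```go
package main

import (
	"fmt"
	"strings"
)

// matches mirrors the name filter in clearLockState above: with recursive
// set, `match` is treated as a prefix; otherwise it must equal the locked
// resource exactly. An empty match string disables name filtering, so the
// age threshold alone decides.
func matches(lockedResource, match string, recursive bool) bool {
	if match == "" {
		return true
	}
	if recursive {
		return strings.HasPrefix(lockedResource, match)
	}
	return lockedResource == match
}

func main() {
	fmt.Println(matches("bucket/object", "bucket/object", false))  // true: exact match
	fmt.Println(matches("bucket/prefix-a", "bucket/prefix", true)) // true: wildcard match
	fmt.Println(matches("bucket/other", "bucket/object", false))   // false
}
```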
```diff
@@ -113,6 +158,9 @@ func lockControl(c *cli.Context) {
     // Verbose flag.
     verbose := c.Bool("verbose")
 
+    // Recursive flag.
+    recursive := c.Bool("recursive")
+
     authCfg := &authConfig{
         accessKey: serverConfig.GetCredential().AccessKeyID,
         secretKey: serverConfig.GetCredential().SecretAccessKey,
```
```diff
@@ -142,9 +190,36 @@ func lockControl(c *cli.Context) {
             printLockStateVerbose(lkStateRep, olderThan)
         }
     case "clear":
-        // TODO. Defaults to clearing all locks.
+        path := parsedURL.Path
+        if strings.HasPrefix(path, "/") {
+            path = path[1:] // Strip leading slash
+        }
+        if path == "" && c.NumFlags() == 0 {
+            fatalIf(errors.New("Bad arguments"), "Need to either pass a path or older-than argument")
+        }
+        if !c.IsSet("older-than") { // If not set explicitly, change default to 0 instead of 24h
+            olderThan = 0
+        }
+        lkStateRep := make(map[string]SystemLockState)
+        // Request lock info, fetches from all the nodes in the cluster.
+        err = client.Call("Control.LockInfo", args, &lkStateRep)
+        fatalIf(err, "Unable to fetch system lockInfo.")
+
+        // Helper function to call server for actual removal of lock
+        f := func(bucket, object string) {
+            args := LockClearArgs{
+                Bucket: bucket,
+                Object: object,
+            }
+            reply := GenericReply{}
+            // Call server to clear the lock based on the name of the object.
+            err := client.Call("Control.LockClear", &args, &reply)
+            fatalIf(err, "Unable to clear lock.")
+        }
+
+        // Loop over all locks and determine whether to clear or not.
+        clearLockState(f, lkStateRep, olderThan, path, recursive)
     default:
         fatalIf(errInvalidArgument, "Unsupported lock control operation %s", c.Args().Get(0))
     }
 
 }
```
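The `clear` case derives the match string from the URL path given on the command line, dropping the leading slash so it compares against `bucket/object`-style resource names. A standalone sketch of that normalization; the URL is an example, and `strings.TrimPrefix` is used here as an equivalent to the slice-based strip in the diff:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// Hypothetical control URL as it would be passed on the command line.
	parsedURL, err := url.Parse("http://localhost:9000/bucket/prefix")
	if err != nil {
		fmt.Println(err)
		return
	}
	// Same normalization as the "clear" case above: strip the leading
	// slash so the path matches locked resource names like "bucket/object".
	match := strings.TrimPrefix(parsedURL.Path, "/")
	fmt.Println(match) // "bucket/prefix"
}
```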
```diff
@@ -44,3 +44,134 @@ func TestPrintLockState(t *testing.T) {
     // Does not print any lock state in debug print mode.
     printLockStateVerbose(sysLockStateMap, 10*time.Second)
 }
+
+// Helper function to test equality of locks (without taking timing info into account)
+func testLockStateEquality(vliLeft, vliRight VolumeLockInfo) bool {
+
+    if vliLeft.Bucket != vliRight.Bucket ||
+        vliLeft.Object != vliRight.Object ||
+        vliLeft.LocksOnObject != vliRight.LocksOnObject ||
+        vliLeft.LocksAcquiredOnObject != vliRight.LocksAcquiredOnObject ||
+        vliLeft.TotalBlockedLocks != vliRight.TotalBlockedLocks {
+        return false
+    }
+    return true
+}
+
+// Test clearing of locks.
+func TestLockStateClear(t *testing.T) {
+
+    // Helper function to circumvent RPC call to LockClear and call nsMutex.ForceUnlock immediately.
+    f := func(bucket, object string) {
+        nsMutex.ForceUnlock(bucket, object)
+    }
+
+    nsMutex.Lock("testbucket", "1.txt", "11-11")
+
+    sysLockState, err := getSystemLockState()
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    expectedVli := VolumeLockInfo{
+        Bucket:                "testbucket",
+        Object:                "1.txt",
+        LocksOnObject:         1,
+        LocksAcquiredOnObject: 1,
+        TotalBlockedLocks:     0,
+    }
+
+    // Test initial condition.
+    if !testLockStateEquality(expectedVli, sysLockState.LocksInfoPerObject[0]) {
+        t.Errorf("Expected %#v, got %#v", expectedVli, sysLockState.LocksInfoPerObject[0])
+    }
+
+    sysLockStateMap := map[string]SystemLockState{}
+    sysLockStateMap["testnode1"] = sysLockState
+
+    // Clear locks that are 10 seconds old (which is a no-op in this case)
+    clearLockState(f, sysLockStateMap, 10*time.Second, "", false)
+
+    if sysLockState, err = getSystemLockState(); err != nil {
+        t.Fatal(err)
+    }
+    if !testLockStateEquality(expectedVli, sysLockState.LocksInfoPerObject[0]) {
+        t.Errorf("Expected %#v, got %#v", expectedVli, sysLockState.LocksInfoPerObject[0])
+    }
+
+    // Clear all locks (older than 0 seconds)
+    clearLockState(f, sysLockStateMap, 0, "", false)
+
+    // Verify that there are no locks
+    if sysLockState, err = getSystemLockState(); err != nil {
+        t.Fatal(err)
+    }
+    if len(sysLockState.LocksInfoPerObject) != 0 {
+        t.Errorf("Expected no locks, got %#v", sysLockState.LocksInfoPerObject)
+    }
+
+    // Create another lock
+    nsMutex.RLock("testbucket", "blob.txt", "22-22")
+
+    if sysLockState, err = getSystemLockState(); err != nil {
+        t.Fatal(err)
+    }
+    sysLockStateMap["testnode1"] = sysLockState
+
+    // Correct wildcard match but bad age.
+    clearLockState(f, sysLockStateMap, 10*time.Second, "testbucket/blob", true)
+
+    // Ensure lock is still there.
+    if sysLockState, err = getSystemLockState(); err != nil {
+        t.Fatal(err)
+    }
+    expectedVli.Object = "blob.txt"
+    if !testLockStateEquality(expectedVli, sysLockState.LocksInfoPerObject[0]) {
+        t.Errorf("Expected %#v, got %#v", expectedVli, sysLockState.LocksInfoPerObject[0])
+    }
+
+    // Clear lock based on wildcard match.
+    clearLockState(f, sysLockStateMap, 0, "testbucket/blob", true)
+
+    // Verify that there are no locks
+    if sysLockState, err = getSystemLockState(); err != nil {
+        t.Fatal(err)
+    }
+    if len(sysLockState.LocksInfoPerObject) != 0 {
+        t.Errorf("Expected no locks, got %#v", sysLockState.LocksInfoPerObject)
+    }
+
+    // Create yet another lock
+    nsMutex.RLock("testbucket", "exact.txt", "33-33")
+
+    if sysLockState, err = getSystemLockState(); err != nil {
+        t.Fatal(err)
+    }
+    sysLockStateMap["testnode1"] = sysLockState
+
+    // Make sure that exact match can fail.
+    clearLockState(f, sysLockStateMap, 0, "testbucket/exact.txT", false)
+
+    // Ensure lock is still there.
+    if sysLockState, err = getSystemLockState(); err != nil {
+        t.Fatal(err)
+    }
+    expectedVli.Object = "exact.txt"
+    if !testLockStateEquality(expectedVli, sysLockState.LocksInfoPerObject[0]) {
+        t.Errorf("Expected %#v, got %#v", expectedVli, sysLockState.LocksInfoPerObject[0])
+    }
+
+    // Clear lock based on exact match.
+    clearLockState(f, sysLockStateMap, 0, "testbucket/exact.txt", false)
+
+    // Verify that there are no locks
+    if sysLockState, err = getSystemLockState(); err != nil {
+        t.Fatal(err)
+    }
+    if len(sysLockState.LocksInfoPerObject) != 0 {
+        t.Errorf("Expected no locks, got %#v", sysLockState.LocksInfoPerObject)
+    }
+
+    // reset lock states for further tests
+    initNSLock(false)
+}
```
```diff
@@ -99,7 +99,7 @@ func getSystemLockState() (SystemLockState, error) {
 func (c *controlAPIHandlers) remoteLockInfoCall(args *GenericArgs, replies []SystemLockState) error {
     var wg sync.WaitGroup
     var errs = make([]error, len(c.RemoteControls))
-    // Send remote call to all neighboring peers to restart minio servers.
+    // Send remote call to all neighboring peers to fetch control lock info.
     for index, clnt := range c.RemoteControls {
         wg.Add(1)
         go func(index int, client *AuthRPCClient) {
```
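`remoteLockInfoCall` fans out one goroutine per remote control endpoint and joins them with a `sync.WaitGroup`, collecting per-peer errors by index. A self-contained sketch of that pattern with stand-in types (strings instead of minio's `AuthRPCClient`):

```go
package main

import (
	"fmt"
	"sync"
)

// fanOut issues one goroutine per peer and collects replies and errors
// by index, exactly the join pattern used in remoteLockInfoCall above.
func fanOut(peers []string, call func(peer string) (string, error)) ([]string, []error) {
	var wg sync.WaitGroup
	replies := make([]string, len(peers))
	errs := make([]error, len(peers))
	for index, peer := range peers {
		wg.Add(1)
		go func(index int, peer string) {
			defer wg.Done()
			// Writing to distinct indices is safe without extra locking.
			replies[index], errs[index] = call(peer)
		}(index, peer)
	}
	wg.Wait()
	return replies, errs
}

func main() {
	replies, errs := fanOut([]string{"node1", "node2"}, func(p string) (string, error) {
		return "lock state from " + p, nil
	})
	fmt.Println(replies, errs)
}
```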
```diff
@@ -133,7 +133,7 @@ func (c *controlAPIHandlers) RemoteLockInfo(args *GenericArgs, reply *SystemLock
     return nil
 }
 
-// LockInfo - RPC control handler for `minio control lock`. Returns the info of the locks held in the cluster.
+// LockInfo - RPC control handler for `minio control lock list`. Returns the info of the locks held in the cluster.
 func (c *controlAPIHandlers) LockInfo(args *GenericArgs, reply *map[string]SystemLockState) error {
     if !isRPCTokenValid(args.Token) {
         return errInvalidToken
```
```diff
@@ -167,3 +167,20 @@ func (c *controlAPIHandlers) LockInfo(args *GenericArgs, reply *map[string]Syste
     // Success.
     return nil
 }
+
+// LockClearArgs - arguments for LockClear handler
+type LockClearArgs struct {
+    GenericArgs
+    Bucket string
+    Object string
+}
+
+// LockClear - RPC control handler for `minio control lock clear`.
+func (c *controlAPIHandlers) LockClear(args *LockClearArgs, reply *GenericReply) error {
+    if !isRPCTokenValid(args.Token) {
+        return errInvalidToken
+    }
+    nsMutex.ForceUnlock(args.Bucket, args.Object)
+    *reply = GenericReply{}
+    return nil
+}
```
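The handler above follows Go's `net/rpc` method conventions (exported method, exported args type, reply pointer, `error` return), which is what makes `client.Call("Control.LockClear", ...)` in the command work. A self-contained sketch of that round trip with stand-in types; the actual minio wire-up (authenticated RPC, token check, `nsMutex.ForceUnlock`) is replaced here by a print:

```go
package main

import (
	"fmt"
	"net"
	"net/rpc"
)

// Stand-ins for the minio types in the diff; only the shapes matter here.
type GenericArgs struct{ Token string }
type GenericReply struct{}
type LockClearArgs struct {
	GenericArgs
	Bucket string
	Object string
}

type Control struct{}

// LockClear mirrors the handler's shape; the force-unlock is simulated.
func (c *Control) LockClear(args *LockClearArgs, reply *GenericReply) error {
	fmt.Printf("force-unlocking %s/%s\n", args.Bucket, args.Object)
	*reply = GenericReply{}
	return nil
}

func main() {
	srv := rpc.NewServer()
	srv.Register(&Control{})
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	go srv.Accept(ln)

	client, err := rpc.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Same call shape as the "clear" case in lockControl above.
	err = client.Call("Control.LockClear",
		&LockClearArgs{Bucket: "bucket", Object: "object"}, &GenericReply{})
	fmt.Println("err:", err)
}
```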
```diff
@@ -251,6 +251,20 @@ func (n *nsLockMap) ForceUnlock(volume, path string) {
     n.lockMapMutex.Lock()
     defer n.lockMapMutex.Unlock()
 
+    // Clarification on operation:
+    // - In case of FS or XL we call ForceUnlock on the local nsMutex
+    //   (since there is only a single server) which will cause the 'stuck'
+    //   mutex to be removed from the map. Existing operations for this
+    //   will continue to be blocked (and timeout). New operations on this
+    //   resource will use a new mutex and proceed normally.
+    //
+    // - In case of Distributed setup (using dsync), there is no need to call
+    //   ForceUnlock on the server where the lock was acquired and is presumably
+    //   'stuck'. Instead dsync.ForceUnlock() will release the underlying locks
+    //   that participated in granting the lock. Any pending dsync locks that
+    //   are blocking can now proceed as normal and any new locks will also
+    //   participate normally.
+
     if n.isDist { // For distributed mode, broadcast ForceUnlock message.
         dsync.NewDRWMutex(pathutil.Join(volume, path)).ForceUnlock()
     }
```
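A minimal sketch of the FS/XL behavior the new comment describes. This is not minio's actual `nsLockMap`, just the idea under its stated assumptions: locks live in a map keyed by resource, force-unlock deletes the entry, already-blocked waiters on the old mutex stay blocked, and new lockers get a fresh mutex:

```go
package main

import (
	"fmt"
	"sync"
)

// lockMap is a toy stand-in for a per-resource lock registry.
type lockMap struct {
	mu    sync.Mutex
	locks map[string]*sync.Mutex
}

// get returns the mutex for a resource, creating one on first use.
func (l *lockMap) get(resource string) *sync.Mutex {
	l.mu.Lock()
	defer l.mu.Unlock()
	if _, ok := l.locks[resource]; !ok {
		l.locks[resource] = &sync.Mutex{}
	}
	return l.locks[resource]
}

// forceUnlock removes the (possibly stuck) mutex from the map; the next
// get() for this resource hands out a brand-new mutex.
func (l *lockMap) forceUnlock(resource string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	delete(l.locks, resource)
}

func main() {
	lm := &lockMap{locks: map[string]*sync.Mutex{}}
	lm.get("testbucket/1.txt").Lock() // simulate a stuck lock
	lm.forceUnlock("testbucket/1.txt")
	lm.get("testbucket/1.txt").Lock() // fresh mutex, acquired immediately
	fmt.Println("re-acquired after force unlock")
}
```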