unify single-node and distributed namespace locking (#2401)
parent: b7c169d71d
commit: b0f3f94163
@@ -18,10 +18,65 @@ package cmd
 import (
     "errors"
-    "github.com/minio/dsync"
+    pathpkg "path"
+    "strconv"
+    "strings"
     "sync"
+
+    "github.com/minio/dsync"
 )
+
+// Global name space lock.
+var nsMutex *nsLockMap
+
+// Initialize distributed locking only in case of distributed setup.
+// Returns whether the setup is distributed or not on success.
+func initDsyncNodes(disks []string, port int) (bool, error) {
+    // Holds a bool indicating whether this server instance is part of
+    // a distributed setup or not.
+    var isDist = false
+    // List of lock servers that take part in the co-operative namespace locking.
+    var dsyncNodes []string
+    // Corresponding rpc paths needed for communication over net/rpc.
+    var rpcPaths []string
+
+    // Port to connect to for the lock servers in a distributed setup.
+    serverPort := strconv.Itoa(port)
+
+    for _, disk := range disks {
+        if idx := strings.LastIndex(disk, ":"); idx != -1 {
+            dsyncNodes = append(dsyncNodes, disk[:idx]+":"+serverPort)
+            rpcPaths = append(rpcPaths, pathpkg.Join(lockRPCPath, disk[idx+1:]))
+        }
+        if !isLocalStorage(disk) {
+            // One or more disks supplied as arguments are not
+            // attached to the local node.
+            isDist = true
+        }
+    }
+    // Initialize rpc lock client information only if this instance is a
+    // distributed setup.
+    if isDist {
+        return isDist, dsync.SetNodesWithPath(dsyncNodes, rpcPaths)
+    }
+    return isDist, nil
+}
+
+// initNSLock - initialize name space lock map.
+func initNSLock(isDist bool) {
+    nsMutex = &nsLockMap{
+        isDist:  isDist,
+        lockMap: make(map[nsParam]*nsLock),
+    }
+}
+
+// RWLocker - interface that any read-write locking library should implement.
+type RWLocker interface {
+    sync.Locker
+    RLock()
+    RUnlock()
+}
+
 // nsParam - carries name space resource.
 type nsParam struct {
     volume string
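The new initDsyncNodes treats each disk argument of the form host:export as a remote endpoint: the host before the last ':' is paired with the server port to form the lock-server address, and the export path after it keeps the net/rpc path unique per disk. A minimal standalone sketch of that derivation, with hypothetical disk arguments and an assumed lockRPCPath value:

package main

import (
    "fmt"
    pathpkg "path"
    "strconv"
    "strings"
)

const lockRPCPath = "/lock" // assumed value, for illustration only

func main() {
    disks := []string{"node1:/export/disk1", "node2:/export/disk2"} // hypothetical
    serverPort := strconv.Itoa(9000)

    var dsyncNodes, rpcPaths []string
    for _, disk := range disks {
        if idx := strings.LastIndex(disk, ":"); idx != -1 {
            // Host before the last ':' plus the minio server port.
            dsyncNodes = append(dsyncNodes, disk[:idx]+":"+serverPort)
            // Export path after ':' yields one RPC path per disk.
            rpcPaths = append(rpcPaths, pathpkg.Join(lockRPCPath, disk[idx+1:]))
        }
    }
    fmt.Println(dsyncNodes) // [node1:9000 node2:9000]
    fmt.Println(rpcPaths)   // [/lock/export/disk1 /lock/export/disk2]
}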
@@ -30,43 +85,40 @@ type nsParam struct {
 
 // nsLock - provides primitives for locking critical namespace regions.
 type nsLock struct {
-    *dsync.DRWMutex
+    RWLocker
     ref uint
 }
 
 // nsLockMap - namespace lock map, provides primitives to Lock,
 // Unlock, RLock and RUnlock.
 type nsLockMap struct {
-    lockMap map[nsParam]*nsLock
-    mutex   sync.Mutex
-}
-
-// Global name space lock.
-var nsMutex *nsLockMap
-
-// initNSLock - initialize name space lock map.
-func initNSLock() {
-    nsMutex = &nsLockMap{
-        lockMap: make(map[nsParam]*nsLock),
-    }
+    isDist       bool // indicates whether the locking service is part of a distributed setup or not.
+    lockMap      map[nsParam]*nsLock
+    lockMapMutex sync.Mutex
 }
 
 // Lock the namespace resource.
 func (n *nsLockMap) lock(volume, path string, readLock bool) {
-    n.mutex.Lock()
+    var nsLk *nsLock
+    n.lockMapMutex.Lock()
+
     param := nsParam{volume, path}
     nsLk, found := n.lockMap[param]
     if !found {
         nsLk = &nsLock{
-            DRWMutex: dsync.NewDRWMutex(volume + path),
+            RWLocker: func() RWLocker {
+                if n.isDist {
+                    return dsync.NewDRWMutex(pathpkg.Join(volume, path))
+                }
+                return &sync.RWMutex{}
+            }(),
             ref: 0,
         }
         n.lockMap[param] = nsLk
     }
     nsLk.ref++ // Update ref count here to avoid multiple races.
     // Unlock map before Locking NS which might block.
-    n.mutex.Unlock()
+    n.lockMapMutex.Unlock()
 
     // Locking here can block.
     if readLock {
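The pivot of the whole change is the RWLocker interface introduced above: *sync.RWMutex already satisfies it, and dsync.DRWMutex exposes the same four methods, so one nsLock type can carry either flavor. A self-contained sketch of the pattern, with a hypothetical loggingRWMutex standing in for the distributed mutex:

package main

import (
    "fmt"
    "sync"
)

// RWLocker mirrors the interface from the commit; any read-write lock
// with these four methods can back an nsLock.
type RWLocker interface {
    sync.Locker
    RLock()
    RUnlock()
}

// loggingRWMutex is a hypothetical stand-in for dsync.DRWMutex, showing
// that a non-standard locker drops in transparently.
type loggingRWMutex struct {
    sync.RWMutex
    name string
}

func (l *loggingRWMutex) Lock()   { fmt.Println("lock", l.name); l.RWMutex.Lock() }
func (l *loggingRWMutex) Unlock() { fmt.Println("unlock", l.name); l.RWMutex.Unlock() }

// pick chooses the lock flavor the same way the commit's closure does.
func pick(isDist bool, name string) RWLocker {
    if isDist {
        return &loggingRWMutex{name: name} // dsync.NewDRWMutex(name) in the real code
    }
    return &sync.RWMutex{}
}

func main() {
    lk := pick(true, "bucket/object")
    lk.Lock()
    lk.Unlock()
    lk.RLock()
    lk.RUnlock()
}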
@@ -79,8 +131,8 @@ func (n *nsLockMap) lock(volume, path string, readLock bool) {
 
 // Unlock the namespace resource.
 func (n *nsLockMap) unlock(volume, path string, readLock bool) {
     // nsLk.Unlock() will not block, hence locking the map for the entire function is fine.
-    n.mutex.Lock()
-    defer n.mutex.Unlock()
+    n.lockMapMutex.Lock()
+    defer n.lockMapMutex.Unlock()
 
     param := nsParam{volume, path}
     if nsLk, found := n.lockMap[param]; found {
@@ -21,13 +21,11 @@ import (
     "net"
     "net/http"
     "os"
-    "path"
     "strconv"
     "strings"
     "time"
 
     "github.com/minio/cli"
-    "github.com/minio/dsync"
 )
 
 var srvConfig serverCmdConfig
@@ -222,29 +220,6 @@ func getPort(address string) int {
     return portInt
 }
 
-// Initialize distributed locking only in case of distributed setup.
-func initDsyncNodes(disks []string, port int) error {
-    var isDist bool = false
-    var dsyncNodes []string
-    var rpcPaths []string
-    serverPort := strconv.Itoa(port)
-
-    for _, disk := range disks {
-        if idx := strings.LastIndex(disk, ":"); idx != -1 {
-            dsyncNodes = append(dsyncNodes, disk[:idx]+":"+serverPort)
-            rpcPaths = append(rpcPaths, path.Join(lockRPCPath, disk[idx+1:]))
-        }
-        if !isLocalStorage(disk) {
-            // One or more disks supplied as arguments are remote.
-            isDist = true
-        }
-    }
-    if isDist {
-        return dsync.SetNodesWithPath(dsyncNodes, rpcPaths)
-    }
-    return nil
-}
-
 // serverMain handler called for 'minio server' command.
 func serverMain(c *cli.Context) {
     // Check 'server' cli arguments.
@@ -271,12 +246,12 @@ func serverMain(c *cli.Context) {
     disks := c.Args()
 
     // Set nodes for dsync
-    err = initDsyncNodes(disks, port)
+    var isDist bool
+    isDist, err = initDsyncNodes(disks, port)
     fatalIf(err, "Unable to initialize distributed locking")
 
     // Initialize name space lock.
-    // FIXME: add logic to switch between distributed and single-node namespace locking.
-    initNSLock()
+    initNSLock(isDist)
 
     // Configure server.
     srvConfig = serverCmdConfig{
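The order in serverMain is significant: peer detection has to run before any namespace lock exists, because initNSLock(isDist) fixes the lock flavor for the lifetime of the process. A hypothetical standalone sketch of that wiring (the helper names are stand-ins, not minio code):

package main

import "fmt"

var nsMutexKind string

// initDsyncNodesSketch crudely mimics remote-disk detection: any disk
// that is not an absolute local path counts as remote (assumption).
func initDsyncNodesSketch(disks []string) (bool, error) {
    for _, d := range disks {
        if d != "" && d[0] != '/' {
            return true, nil
        }
    }
    return false, nil
}

// initNSLockSketch records which mutex flavor would back the lock map.
func initNSLockSketch(isDist bool) {
    if isDist {
        nsMutexKind = "dsync.DRWMutex"
    } else {
        nsMutexKind = "sync.RWMutex"
    }
}

func main() {
    isDist, err := initDsyncNodesSketch([]string{"/export/disk1", "node2:/export/disk2"})
    if err != nil {
        panic(err)
    }
    initNSLockSketch(isDist) // must happen before serving requests
    fmt.Println("namespace locks backed by", nsMutexKind)
}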
@@ -45,7 +45,8 @@ import (
 // Tests should initNSLock only once.
 func init() {
     // Initialize name space lock.
-    initNSLock()
+    isDist := false
+    initNSLock(isDist)
 }
 
 // TestErrHandler - Golang Testing.T and Testing.B, and gocheck.C satisfy this interface.
vendor/github.com/minio/dsync/dmutex.go (generated, vendored) · 17 lines changed
@@ -35,7 +35,6 @@ type DMutex struct {
     m sync.Mutex // Mutex to prevent multiple simultaneous locks from this node
 
     // TODO: Decide: create per object or create once for whole class
-    clnts []*rpc.Client
 }
 
 type Granted struct {
@@ -45,13 +44,13 @@ type Granted struct {
 }
 
 func connectLazy(dm *DMutex) {
-    if dm.clnts == nil {
-        dm.clnts = make([]*rpc.Client, n)
+    if clnts == nil {
+        clnts = make([]*rpc.Client, n)
     }
-    for i := range dm.clnts {
-        if dm.clnts[i] == nil {
+    for i := range clnts {
+        if clnts[i] == nil {
             // pass in unique path (as required by server.HandleHTTP())
-            dm.clnts[i], _ = rpc.DialHTTPPath("tcp", nodes[i], rpcPaths[i])
+            clnts[i], _ = rpc.DialHTTPPath("tcp", nodes[i], rpcPaths[i])
         }
     }
 }
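The vendored dsync change in this hunk and the ones below moves the *rpc.Client pool from a per-DMutex field to a package-level var, so every mutex shares one lazily dialed connection per node. A sketch of that idea under assumed endpoints (errors are ignored just as in the original, so unreachable nodes simply stay nil):

package main

import "net/rpc"

var (
    nodes    = []string{"node1:9000", "node2:9000"} // hypothetical peers
    rpcPaths = []string{"/lock/a", "/lock/b"}       // hypothetical RPC paths
    clnts    []*rpc.Client                          // package-level, shared by all mutexes
)

// connectLazy dials each node at most once; later callers reuse the
// established connection instead of opening a pool per mutex.
func connectLazy() {
    if clnts == nil {
        clnts = make([]*rpc.Client, len(nodes))
    }
    for i := range clnts {
        if clnts[i] == nil {
            clnts[i], _ = rpc.DialHTTPPath("tcp", nodes[i], rpcPaths[i])
        }
    }
}

func main() {
    connectLazy()
}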
@@ -78,7 +77,7 @@ func (dm *DMutex) Lock() {
     ids := make([]string, n)
 
     // try to acquire the lock
-    success := lock(dm.clnts, &locks, &ids, dm.Name)
+    success := lock(clnts, &locks, &ids, dm.Name)
     if success {
         // if success, copy array to object
         dm.locks = make([]bool, n)
@@ -118,7 +117,7 @@ func (dm *DMutex) tryLockTimeout() bool {
     ids := make([]string, n)
 
     // try to acquire the lock
-    success := lock(dm.clnts, &locks, &ids, dm.Name)
+    success := lock(clnts, &locks, &ids, dm.Name)
     if success {
         // if success, copy array to object
         dm.locks = make([]bool, n)
@@ -286,7 +285,7 @@ func (dm *DMutex) Unlock() {
     // We don't need to wait until we have released all the locks (or the quorum)
     // (a subsequent lock will retry automatically in case it would fail to get
     // quorum)
-    for index, c := range dm.clnts {
+    for index, c := range clnts {
 
         if dm.locks[index] {
             // broadcast lock release to all nodes that granted the lock
vendor/github.com/minio/dsync/dsync.go (generated, vendored) · 1 line changed
@@ -29,6 +29,7 @@ const DefaultPath = "/rpc/dsync"
 var n int
 var nodes []string
 var rpcPaths []string
+var clnts []*rpc.Client
 
 func closeClients(clients []*rpc.Client) {
     for _, clnt := range clients {
vendor/vendor.json (vendored) · 6 lines changed
@@ -98,10 +98,10 @@
             "revisionTime": "2015-11-18T20:00:48-08:00"
         },
         {
-            "checksumSHA1": "r1Vf/vQTkMsZrDVORBGAAIlOMP4=",
+            "checksumSHA1": "BqEf+ElZXcofLdav5iGfHH93vMY=",
             "path": "github.com/minio/dsync",
-            "revision": "6bfa8c0c1c37959c1bda15bfdae228a986d3cca8",
-            "revisionTime": "2016-08-07T19:01:27Z"
+            "revision": "9c1a398a7d687901939a31d50f8639b11bb3c5fe",
+            "revisionTime": "2016-08-10T17:09:05Z"
         },
         {
             "path": "github.com/minio/go-homedir",
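The vendor.json hunk re-pins github.com/minio/dsync to the newer upstream revision that contains the shared client-pool change above; govendor records checksumSHA1, revision, and revisionTime together so the vendored tree can be verified against the pinned commit.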