object layer initialization using distributed locking (#2397)

* vendored latest minio/dsync

* wip - object layer initialization using distributed locking
This commit is contained in:
Krishnan Parthasarathi
2016-08-09 23:57:16 -07:00
committed by Harshavardhana
parent 3939c75345
commit b7c169d71d
7 changed files with 64 additions and 24 deletions

View File

@@ -49,6 +49,11 @@ func newObjectLayerFactory(disks, ignoredDisks []string) func() ObjectLayer {
if objAPI != nil {
return objAPI
}
// Acquire a distributed lock to ensure only one of the nodes
// initializes the format.json.
nsMutex.Lock(minioMetaBucket, formatConfigFile)
defer nsMutex.Unlock(minioMetaBucket, formatConfigFile)
objAPI, err = newObjectLayer(disks, ignoredDisks)
if err != nil {
return nil

View File

@@ -92,7 +92,7 @@ func (s *storageServer) ReadAllHandler(arg *ReadFileArgs, reply *[]byte) error {
if err != nil {
return err
}
reply = &buf
*reply = buf
return nil
}
@@ -102,7 +102,7 @@ func (s *storageServer) ReadFileHandler(arg *ReadFileArgs, reply *int64) error {
if err != nil {
return err
}
reply = &n
*reply = n
return nil
}
@@ -160,9 +160,9 @@ func newRPCServer(serverConfig serverCmdConfig) (servers []*storageServer, err e
// registerStorageRPCRouter - register storage rpc router.
func registerStorageRPCRouters(mux *router.Router, stServers []*storageServer) {
storageRPCServer := rpc.NewServer()
// Create a unique route for each disk exported from this node.
for _, stServer := range stServers {
storageRPCServer := rpc.NewServer()
storageRPCServer.RegisterName("Storage", stServer)
// Add minio storage routes.
storageRouter := mux.PathPrefix(reservedBucket).Subrouter()

View File

@@ -21,11 +21,13 @@ import (
"net"
"net/http"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/minio/cli"
"github.com/minio/dsync"
)
var srvConfig serverCmdConfig
@@ -220,6 +222,29 @@ func getPort(address string) int {
return portInt
}
// initDsyncNodes configures the dsync package for distributed locking.
//
// Each disk argument of the form "host:/path" contributes one dsync node
// address ("host:<port>") and one lock RPC path derived from the exported
// path. Distributed locking is initialized only when at least one of the
// supplied disks is remote; for a purely local setup this is a no-op and
// returns nil.
func initDsyncNodes(disks []string, port int) error {
	// Zero value is false; set to true once any remote disk is seen.
	var isDist bool
	var dsyncNodes []string
	var rpcPaths []string
	serverPort := strconv.Itoa(port)
	for _, disk := range disks {
		// Split "host:/path" on the last ':' so IPv6-style hosts and
		// plain local paths (no ':') are handled consistently.
		if idx := strings.LastIndex(disk, ":"); idx != -1 {
			dsyncNodes = append(dsyncNodes, disk[:idx]+":"+serverPort)
			rpcPaths = append(rpcPaths, path.Join(lockRPCPath, disk[idx+1:]))
		}
		if !isLocalStorage(disk) {
			// One or more disks supplied as arguments are remote.
			isDist = true
		}
	}
	if isDist {
		// Hand the derived peer list and RPC paths to dsync.
		return dsync.SetNodesWithPath(dsyncNodes, rpcPaths)
	}
	return nil
}
// serverMain handler called for 'minio server' command.
func serverMain(c *cli.Context) {
// Check 'server' cli arguments.
@@ -245,6 +270,14 @@ func serverMain(c *cli.Context) {
// Disks to be used in server init.
disks := c.Args()
// Set nodes for dsync
err = initDsyncNodes(disks, port)
fatalIf(err, "Unable to initialize distributed locking")
// Initialize name space lock.
// FIXME: add logic to switch between distributed and single-node namespace locking.
initNSLock()
// Configure server.
srvConfig = serverCmdConfig{
serverAddr: serverAddress,