Fix a bug in dsync initialization and communication (#5428)

In the current implementation we created as many dsync
clients as there are endpoints (host along with path),
which is not the intended design. Dsync was meant to
operate per endpoint host alone, so that with 4 servers
of 4 disks each we need only 4 dsync clients and 4 dsync
servers. Instead we ended up with 8 clients and servers,
which is unexpected and should be avoided.

This PR brings the implementation back to its original
intention. This issue was found in #5160.
Authored by Harshavardhana on 2018-01-22 10:25:10 -08:00, committed by kannappanr
parent bb73c84b10
commit f3f09ed14e
8 changed files with 159 additions and 148 deletions

@@ -28,13 +28,17 @@ import (
 	"github.com/minio/dsync"
 	"github.com/minio/lsync"
+	"github.com/minio/minio-go/pkg/set"
 )
 
 // Global name space lock.
 var globalNSMutex *nsLockMap
 
-// Global lock servers
-var globalLockServers []*lockServer
+// Global lock server one per server.
+var globalLockServer *lockServer
+
+// Instance of dsync for distributed clients.
+var globalDsync *dsync.Dsync
 
 // RWLocker - locker interface to introduce GetRLock, RUnlock.
 type RWLocker interface {
@@ -56,39 +60,41 @@ type RWLockerSync interface {
 // Returns lock clients and the node index for the current server.
 func newDsyncNodes(endpoints EndpointList) (clnts []dsync.NetLocker, myNode int) {
 	cred := globalServerConfig.GetCredential()
-	clnts = make([]dsync.NetLocker, len(endpoints))
 	myNode = -1
-	for index, endpoint := range endpoints {
+	seenHosts := set.NewStringSet()
+	for _, endpoint := range endpoints {
+		if seenHosts.Contains(endpoint.Host) {
+			continue
+		}
+		seenHosts.Add(endpoint.Host)
 		if !endpoint.IsLocal {
 			// For a remote endpoints setup a lock RPC client.
-			clnts[index] = newLockRPCClient(authConfig{
+			clnts = append(clnts, newLockRPCClient(authConfig{
 				accessKey:       cred.AccessKey,
 				secretKey:       cred.SecretKey,
 				serverAddr:      endpoint.Host,
 				secureConn:      globalIsSSL,
-				serviceEndpoint: pathutil.Join(minioReservedBucketPath, lockServicePath, endpoint.Path),
+				serviceEndpoint: pathutil.Join(minioReservedBucketPath, lockServicePath),
 				serviceName:     lockServiceName,
-			})
+			}))
 			continue
 		}
 
 		// Local endpoint
-		if myNode == -1 {
-			myNode = index
-		}
+		myNode = len(clnts)
 		// For a local endpoint, setup a local lock server to
 		// avoid network requests.
 		localLockServer := lockServer{
 			AuthRPCServer: AuthRPCServer{},
 			ll: localLocker{
 				mutex:           sync.Mutex{},
-				serviceEndpoint: endpoint.Path,
 				serverAddr:      endpoint.Host,
+				serviceEndpoint: pathutil.Join(minioReservedBucketPath, lockServicePath),
 				lockMap:         make(map[string][]lockRequesterInfo),
 			},
 		}
-		globalLockServers = append(globalLockServers, &localLockServer)
-		clnts[index] = &(localLockServer.ll)
+		globalLockServer = &localLockServer
+		clnts = append(clnts, &(localLockServer.ll))
 	}
 
 	return clnts, myNode
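
For orientation between hunks: the clients and node index returned above are meant to feed one shared Dsync instance. The sketch below shows that wiring under stated assumptions; initGlobalDsync is a hypothetical helper and the dsync.New constructor (clients plus local-node index, returning an error) is an assumption, not taken from this diff. Only newDsyncNodes, globalDsync and dsync.NewDRWMutex appear in the changes themselves.

// Hypothetical helper, not part of this diff: build the per-host lock
// clients and hand them to a single Dsync value shared by all mutexes.
func initGlobalDsync(endpoints EndpointList) error {
	clnts, myNode := newDsyncNodes(endpoints)

	// Assumed constructor signature: the per-host NetLockers plus the
	// index of the local node within that slice.
	ds, err := dsync.New(clnts, myNode)
	if err != nil {
		return err
	}
	globalDsync = ds
	return nil
}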
@@ -149,7 +155,7 @@ func (n *nsLockMap) lock(volume, path string, lockSource, opsID string, readLock
 		nsLk = &nsLock{
 			RWLockerSync: func() RWLockerSync {
 				if n.isDistXL {
-					return dsync.NewDRWMutex(pathJoin(volume, path))
+					return dsync.NewDRWMutex(pathJoin(volume, path), globalDsync)
 				}
 				return &lsync.LRWMutex{}
 			}(),
@@ -303,7 +309,7 @@ func (n *nsLockMap) ForceUnlock(volume, path string) {
 	// are blocking can now proceed as normal and any new locks will also
 	// participate normally.
 	if n.isDistXL { // For distributed mode, broadcast ForceUnlock message.
-		dsync.NewDRWMutex(pathJoin(volume, path)).ForceUnlock()
+		dsync.NewDRWMutex(pathJoin(volume, path), globalDsync).ForceUnlock()
 	}
 
 	param := nsParam{volume, path}
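
Downstream of this change, every distributed mutex is built against that one shared instance. A small hedged sketch of the caller-side pattern follows; the helper is hypothetical, only the NewDRWMutex and ForceUnlock calls shown in the hunks above are taken from the diff, and globalIsDistXL mirrors the n.isDistXL check.

// Hypothetical caller: both changed call sites now follow this shape, so a
// 4-server deployment coordinates through the same 4 per-host lock clients
// no matter which object is being locked or force-unlocked.
func forceUnlockObject(bucket, object string) {
	if globalIsDistXL { // broadcast only in distributed mode
		dsync.NewDRWMutex(pathJoin(bucket, object), globalDsync).ForceUnlock()
	}
}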