Move to URL based syntax formatting. (#3092)
For command line arguments we currently follow the form <node-1>:/path ... <node-n>:/path. This patch changes the syntax to http://<node-1>/path ... http://<node-n>/path.
parent 30dc11a931
commit 9e2d0ac50b
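The gist of the change, as a standalone sketch: endpoint arguments are now plain URLs, so net/url does the parsing that the old host:port:path splitter did by hand. The default port value below is illustrative, not taken from the patch.

package main

import (
	"fmt"
	"net"
	"net/url"
)

func main() {
	// Old style: "192.168.1.11:/mnt/export" needed hand-rolled splitting.
	// New style: a URL that the standard library already understands.
	u, err := url.Parse("http://192.168.1.11/mnt/export")
	if err != nil {
		panic(err)
	}
	// The patch appends a default port when the URL omits one.
	u.Host = net.JoinHostPort(u.Host, "9000")
	fmt.Println(u.Host, u.Path) // 192.168.1.11:9000 /mnt/export
}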
@@ -35,7 +35,7 @@ func prepareBenchmarkBackend(instanceType string) (ObjectLayer, []string, error)
 	if err != nil {
 		return nil, nil, err
 	}
-	endpoints, err := parseStorageEndPoints(disks, 0)
+	endpoints, err := parseStorageEndpoints(disks)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -17,7 +17,6 @@
 package cmd

 import (
-	"fmt"
 	"net"
 	"os"
 	"syscall"
@@ -32,10 +31,16 @@ import (
 // This causes confusion on Mac OSX that minio server is not reachable
 // on 127.0.0.1 even though minio server is running. So before we start
 // the minio server we make sure that the port is free on each tcp network.
-func checkPortAvailability(port int) error {
+//
+// Port is string on purpose here.
+// https://github.com/golang/go/issues/16142#issuecomment-245912773
+//
+// "Keep in mind that ports in Go are strings: https://play.golang.org/p/zk2WEri_E9"
+// - @bradfitz
+func checkPortAvailability(portStr string) error {
 	network := [3]string{"tcp", "tcp4", "tcp6"}
 	for _, n := range network {
-		l, err := net.Listen(n, fmt.Sprintf(":%d", port))
+		l, err := net.Listen(n, net.JoinHostPort("", portStr))
 		if err != nil {
 			if isAddrInUse(err) {
 				// Return error if another process is listening on the
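Why the port becomes a string here: net.Listen and net.JoinHostPort take the port as part of an address string, so keeping it a string avoids strconv round-trips. A minimal standalone probe in the same spirit (names are illustrative):

package main

import (
	"fmt"
	"net"
)

// checkPort reports whether the given port (a string, per the Go
// convention cited in the patch comment) is free on "tcp".
func checkPort(portStr string) error {
	l, err := net.Listen("tcp", net.JoinHostPort("", portStr))
	if err != nil {
		return err
	}
	// Close immediately; we only wanted to probe availability.
	return l.Close()
}

func main() {
	fmt.Println(checkPort("9000"))
}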
@@ -17,7 +17,6 @@
 package cmd

 import (
-	"fmt"
 	"net"
 	"runtime"
 	"testing"
@@ -26,7 +25,7 @@ import (
 // Tests for port availability logic written for server startup sequence.
 func TestCheckPortAvailability(t *testing.T) {
 	tests := []struct {
-		port int
+		port string
 	}{
 		{getFreePort()},
 		{getFreePort()},
@@ -35,11 +34,11 @@ func TestCheckPortAvailability(t *testing.T) {
 		// This test should pass if the ports are available
 		err := checkPortAvailability(test.port)
 		if err != nil {
-			t.Fatalf("checkPortAvailability test failed for port: %d. Error: %v", test.port, err)
+			t.Fatalf("checkPortAvailability test failed for port: %s. Error: %v", test.port, err)
 		}

 		// Now use the ports and check again
-		ln, err := net.Listen("tcp", fmt.Sprintf(":%d", test.port))
+		ln, err := net.Listen("tcp", net.JoinHostPort("", test.port))
 		if err != nil {
 			t.Fail()
 		}
@@ -49,7 +48,7 @@ func TestCheckPortAvailability(t *testing.T) {

 		// Skip if the os is windows due to https://github.com/golang/go/issues/7598
 		if err == nil && runtime.GOOS != "windows" {
-			t.Fatalf("checkPortAvailability should fail for port: %d. Error: %v", test.port, err)
+			t.Fatalf("checkPortAvailability should fail for port: %s. Error: %v", test.port, err)
 		}
 	}
 }
@@ -73,7 +73,7 @@ func TestControlHealMain(t *testing.T) {
 	}

 	// Remove the object - to simulate the case where the disk was down when the object was created.
-	err = os.RemoveAll(path.Join(testServer.Disks[0].path, bucket, object))
+	err = os.RemoveAll(path.Join(testServer.Disks[0].Path, bucket, object))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -17,7 +17,6 @@
 package cmd

 import (
-	"fmt"
 	"net/rpc"
 	"path"

@@ -36,24 +35,21 @@ func initRemoteControlClients(srvCmdConfig serverCmdConfig) []*AuthRPCClient {
 	}
 	// Initialize auth rpc clients.
 	var remoteControlClnts []*AuthRPCClient
-	localMap := make(map[storageEndPoint]int)
-	for _, ep := range srvCmdConfig.endPoints {
-		// Set path to "" so that it is not used for filtering the
-		// unique entries.
-		ep.path = ""
+	localMap := make(map[string]int)
+	for _, ep := range srvCmdConfig.endpoints {
 		// Validates if remote disk is local.
 		if isLocalStorage(ep) {
 			continue
 		}
-		if localMap[ep] == 1 {
+		if localMap[ep.Host] == 1 {
 			continue
 		}
-		localMap[ep]++
+		localMap[ep.Host]++
 		remoteControlClnts = append(remoteControlClnts, newAuthClient(&authConfig{
 			accessKey: serverConfig.GetCredential().AccessKeyID,
 			secretKey: serverConfig.GetCredential().SecretAccessKey,
 			secureConn: isSSL(),
-			address: fmt.Sprintf("%s:%d", ep.host, ep.port),
+			address: ep.Host,
 			path: path.Join(reservedBucket, controlPath),
 			loginMethod: "Control.LoginHandler",
 		}))
@@ -16,7 +16,10 @@

 package cmd

-import "testing"
+import (
+	"net/url"
+	"testing"
+)

 // Tests initialization of remote controller clients.
 func TestInitRemoteControlClients(t *testing.T) {
@@ -41,11 +44,19 @@ func TestInitRemoteControlClients(t *testing.T) {
 		{
 			srvCmdConfig: serverCmdConfig{
 				isDistXL: true,
-				endPoints: []storageEndPoint{
-					{"10.1.10.1", 9000, "/mnt/disk1"},
-					{"10.1.10.1", 9000, "/mnt/disk2"},
-					{"10.1.10.2", 9000, "/mnt/disk1"},
-					{"10.1.10.2", 9000, "/mnt/disk2"},
+				endpoints: []*url.URL{{
+					Scheme: "http",
+					Host: "10.1.10.1:9000",
+					Path: "/mnt/disk1",
+				}, {
+					Scheme: "http",
+					Host: "10.1.10.1:9000", Path: "/mnt/disk2",
+				}, {
+					Scheme: "http",
+					Host: "10.1.10.2:9000", Path: "/mnt/disk1",
+				}, {
+					Scheme: "http",
+					Host: "10.1.10.2:9000", Path: "/mnt/disk2"},
 				},
 			},
 			totalClients: 2,
@@ -54,11 +65,18 @@ func TestInitRemoteControlClients(t *testing.T) {
 		{
 			srvCmdConfig: serverCmdConfig{
 				isDistXL: true,
-				endPoints: []storageEndPoint{
-					{"10.1.10.1", 9000, "/mnt/disk1"},
-					{"10.1.10.2", 9000, "/mnt/disk2"},
-					{"10.1.10.3", 9000, "/mnt/disk3"},
-					{"10.1.10.4", 9000, "/mnt/disk4"},
+				endpoints: []*url.URL{{
+					Scheme: "http",
+					Host: "10.1.10.1:9000", Path: "/mnt/disk1",
+				}, {
+					Scheme: "http",
+					Host: "10.1.10.2:9000", Path: "/mnt/disk2",
+				}, {
+					Scheme: "http",
+					Host: "10.1.10.3:9000", Path: "/mnt/disk1",
+				}, {
+					Scheme: "http",
+					Host: "10.1.10.4:9000", Path: "/mnt/disk2"},
 				},
 			},
 			totalClients: 4,
@@ -222,7 +222,7 @@ func TestErasureReadUtils(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	endpoints, err := parseStorageEndPoints(disks, 0)
+	endpoints, err := parseStorageEndpoints(disks)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -20,7 +20,6 @@ import (
 	"fmt"
 	"net"
 	"reflect"
-	"strconv"
 	"testing"
 	"time"
 )
@@ -40,7 +39,7 @@ func TestInitEventNotifierFaultyDisks(t *testing.T) {
 		t.Fatal("Unable to create directories for FS backend. ", err)
 	}
 	defer removeAll(disks[0])
-	endpoints, err := parseStorageEndPoints(disks, 0)
+	endpoints, err := parseStorageEndpoints(disks)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -94,7 +93,7 @@ func TestInitEventNotifierWithAMQP(t *testing.T) {
 	if err != nil {
 		t.Fatal("Unable to create directories for FS backend. ", err)
 	}
-	endpoints, err := parseStorageEndPoints(disks, 0)
+	endpoints, err := parseStorageEndpoints(disks)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -125,7 +124,7 @@ func TestInitEventNotifierWithElasticSearch(t *testing.T) {
 	if err != nil {
 		t.Fatal("Unable to create directories for FS backend. ", err)
 	}
-	endpoints, err := parseStorageEndPoints(disks, 0)
+	endpoints, err := parseStorageEndpoints(disks)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -156,7 +155,7 @@ func TestInitEventNotifierWithRedis(t *testing.T) {
 	if err != nil {
 		t.Fatal("Unable to create directories for FS backend. ", err)
 	}
-	endpoints, err := parseStorageEndPoints(disks, 0)
+	endpoints, err := parseStorageEndpoints(disks)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -180,14 +179,12 @@ func (s *TestPeerRPCServerData) Setup(t *testing.T) {
 	s.testServer = StartTestPeersRPCServer(t, s.serverType)

 	// setup port and minio addr
-	_, portStr, err := net.SplitHostPort(s.testServer.Server.Listener.Addr().String())
-	if err != nil {
-		t.Fatalf("Initialisation error: %v", err)
-	}
-	globalMinioPort, err = strconv.Atoi(portStr)
+	host, port, err := net.SplitHostPort(s.testServer.Server.Listener.Addr().String())
 	if err != nil {
 		t.Fatalf("Initialisation error: %v", err)
 	}
+	globalMinioHost = host
+	globalMinioPort = port
 	globalMinioAddr = getLocalAddress(
 		s.testServer.SrvCmdCfg,
 	)
@@ -200,7 +197,7 @@ func (s *TestPeerRPCServerData) TearDown() {
 	s.testServer.Stop()
 	_ = removeAll(s.testServer.Root)
 	for _, d := range s.testServer.Disks {
-		_ = removeAll(d.path)
+		_ = removeAll(d.Path)
 	}
 }

@@ -275,7 +275,7 @@ func TestFormatXLHealFreshDisks(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	endpoints, err := parseStorageEndPoints(fsDirs, 0)
+	endpoints, err := parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -311,7 +311,7 @@ func TestFormatXLHealFreshDisksErrorExpected(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	endpoints, err := parseStorageEndPoints(fsDirs, 0)
+	endpoints, err := parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -600,7 +600,7 @@ func TestInitFormatXLErrors(t *testing.T) {
 		t.Fatal(err)
 	}
 	defer removeRoots(fsDirs)
-	endpoints, err := parseStorageEndPoints(fsDirs, 0)
+	endpoints, err := parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -706,7 +706,7 @@ func TestLoadFormatXLErrs(t *testing.T) {
 	}
 	defer removeRoots(fsDirs)

-	endpoints, err := parseStorageEndPoints(fsDirs, 0)
+	endpoints, err := parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -737,7 +737,7 @@ func TestLoadFormatXLErrs(t *testing.T) {
 	}
 	defer removeRoots(fsDirs)

-	endpoints, err = parseStorageEndPoints(fsDirs, 0)
+	endpoints, err = parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -765,7 +765,7 @@ func TestLoadFormatXLErrs(t *testing.T) {
 	}
 	defer removeRoots(fsDirs)

-	endpoints, err = parseStorageEndPoints(fsDirs, 0)
+	endpoints, err = parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -791,7 +791,7 @@ func TestLoadFormatXLErrs(t *testing.T) {
 	}
 	defer removeRoots(fsDirs)

-	endpoints, err = parseStorageEndPoints(fsDirs, 0)
+	endpoints, err = parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -818,7 +818,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
 		t.Fatal(err)
 	}

-	endpoints, err := parseStorageEndPoints(fsDirs, 0)
+	endpoints, err := parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -840,7 +840,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
 		t.Fatal(err)
 	}

-	endpoints, err = parseStorageEndPoints(fsDirs, 0)
+	endpoints, err = parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -864,7 +864,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
 		t.Fatal(err)
 	}

-	endpoints, err = parseStorageEndPoints(fsDirs, 0)
+	endpoints, err = parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -890,7 +890,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
 		t.Fatal(err)
 	}

-	endpoints, err = parseStorageEndPoints(fsDirs, 0)
+	endpoints, err = parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -912,7 +912,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
 		t.Fatal(err)
 	}

-	endpoints, err = parseStorageEndPoints(fsDirs, 0)
+	endpoints, err = parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -938,7 +938,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
 		t.Fatal(err)
 	}

-	endpoints, err = parseStorageEndPoints(fsDirs, 0)
+	endpoints, err = parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -968,7 +968,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
 		t.Fatal(err)
 	}

-	endpoints, err := parseStorageEndPoints(fsDirs, 0)
+	endpoints, err := parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -989,7 +989,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
 		t.Fatal(err)
 	}

-	endpoints, err = parseStorageEndPoints(fsDirs, 0)
+	endpoints, err = parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1013,7 +1013,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
 		t.Fatal(err)
 	}

-	endpoints, err = parseStorageEndPoints(fsDirs, 0)
+	endpoints, err = parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1039,7 +1039,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
 		t.Fatal(err)
 	}

-	endpoints, err = parseStorageEndPoints(fsDirs, 0)
+	endpoints, err = parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1061,7 +1061,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
 		t.Fatal(err)
 	}

-	endpoints, err = parseStorageEndPoints(fsDirs, 0)
+	endpoints, err = parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1087,7 +1087,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
 		t.Fatal(err)
 	}

-	endpoints, err = parseStorageEndPoints(fsDirs, 0)
+	endpoints, err = parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -68,7 +68,7 @@ func TestHasExtendedHeader(t *testing.T) {
 }

 func initFSObjects(disk string, t *testing.T) (obj ObjectLayer) {
-	endpoints, err := parseStorageEndPoints([]string{disk}, 0)
+	endpoints, err := parseStorageEndpoints([]string{disk})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -40,7 +40,7 @@ func TestNewFS(t *testing.T) {
 		disks = append(disks, xlDisk)
 	}

-	endpoints, err := parseStorageEndPoints([]string{disk}, 0)
+	endpoints, err := parseStorageEndpoints([]string{disk})
 	if err != nil {
 		t.Fatal("Uexpected error: ", err)
 	}
@@ -50,7 +50,7 @@ func TestNewFS(t *testing.T) {
 		t.Fatal("Uexpected error: ", err)
 	}

-	endpoints, err = parseStorageEndPoints(disks, 0)
+	endpoints, err = parseStorageEndpoints(disks)
 	if err != nil {
 		t.Fatal("Uexpected error: ", err)
 	}
@@ -61,7 +61,7 @@ func TestNewFS(t *testing.T) {
 	}

 	// Initializes all disks with XL
-	err = waitForFormatDisks(true, "", xlStorageDisks)
+	err = waitForFormatDisks(true, endpoints[0], xlStorageDisks)
 	if err != nil {
 		t.Fatalf("Unable to format XL %s", err)
 	}
@@ -79,7 +79,7 @@ func TestNewFS(t *testing.T) {
 	}

 	for _, testCase := range testCases {
-		if err = waitForFormatDisks(true, "", []StorageAPI{testCase.disk}); err != testCase.expectedErr {
+		if err = waitForFormatDisks(true, endpoints[0], []StorageAPI{testCase.disk}); err != testCase.expectedErr {
 			t.Errorf("expected: %s, got :%s", testCase.expectedErr, err)
 		}
 	}
@@ -56,7 +56,7 @@ var (
 	// Minio local server address (in `host:port` format)
 	globalMinioAddr = ""
 	// Minio default port, can be changed through command line.
-	globalMinioPort = 9000
+	globalMinioPort = "9000"
 	// Holds the host that was passed using --address
 	globalMinioHost = ""
 	// Peer communication struct
@@ -81,10 +81,10 @@ func registerDistNSLockRouter(mux *router.Router, serverConfig serverCmdConfig)
 }

 // Create one lock server for every local storage rpc server.
-func newLockServers(serverConfig serverCmdConfig) (lockServers []*lockServer) {
-	for _, ep := range serverConfig.endPoints {
-		if ep.presentIn(serverConfig.ignoredEndPoints) {
-			// Skip initializing ignored end point.
+func newLockServers(srvConfig serverCmdConfig) (lockServers []*lockServer) {
+	for _, ep := range srvConfig.endpoints {
+		if containsEndpoint(srvConfig.ignoredEndpoints, ep) {
+			// Skip initializing ignored endpoint.
 			continue
 		}

@@ -92,9 +92,10 @@ func newLockServers(serverConfig serverCmdConfig) (lockServers []*lockServer) {
 		if !isLocalStorage(ep) {
 			continue
 		}

 		// Create handler for lock RPCs
 		locker := &lockServer{
-			rpcPath: ep.path,
+			rpcPath: getPath(ep),
 			mutex: sync.Mutex{},
 			lockMap: make(map[string][]lockRequesterInfo),
 		}
@@ -17,6 +17,7 @@
 package cmd

 import (
+	"net/url"
 	"runtime"
 	"sync"
 	"testing"
@@ -444,6 +445,7 @@ func TestLockServers(t *testing.T) {
 	if runtime.GOOS == "windows" {
 		return
 	}
+	globalMinioHost = ""
 	testCases := []struct {
 		srvCmdConfig serverCmdConfig
 		totalLockServers int
@@ -452,12 +454,23 @@ func TestLockServers(t *testing.T) {
 		{
 			srvCmdConfig: serverCmdConfig{
 				isDistXL: true,
-				endPoints: []storageEndPoint{
-					{"localhost", 9000, "/mnt/disk1"},
-					{"1.1.1.2", 9000, "/mnt/disk2"},
-					{"1.1.2.1", 9000, "/mnt/disk3"},
-					{"1.1.2.2", 9000, "/mnt/disk4"},
-				},
+				endpoints: []*url.URL{{
+					Scheme: "http",
+					Host: "localhost:9000",
+					Path: "/mnt/disk1",
+				}, {
+					Scheme: "http",
+					Host: "1.1.1.2:9000",
+					Path: "/mnt/disk2",
+				}, {
+					Scheme: "http",
+					Host: "1.1.2.1:9000",
+					Path: "/mnt/disk3",
+				}, {
+					Scheme: "http",
+					Host: "1.1.2.2:9000",
+					Path: "/mnt/disk4",
+				}},
 			},
 			totalLockServers: 1,
 		},
@@ -465,15 +478,28 @@ func TestLockServers(t *testing.T) {
 		{
 			srvCmdConfig: serverCmdConfig{
 				isDistXL: true,
-				endPoints: []storageEndPoint{
-					{"localhost", 9000, "/mnt/disk1"},
-					{"localhost", 9000, "/mnt/disk2"},
-					{"1.1.2.1", 9000, "/mnt/disk3"},
-					{"1.1.2.2", 9000, "/mnt/disk4"},
-				},
-				ignoredEndPoints: []storageEndPoint{
-					{"localhost", 9000, "/mnt/disk2"},
-				},
+				endpoints: []*url.URL{{
+					Scheme: "http",
+					Host: "localhost:9000",
+					Path: "/mnt/disk1",
+				}, {
+					Scheme: "http",
+					Host: "localhost:9000",
+					Path: "/mnt/disk2",
+				}, {
+					Scheme: "http",
+					Host: "1.1.2.1:9000",
+					Path: "/mnt/disk3",
+				}, {
+					Scheme: "http",
+					Host: "1.1.2.2:9000",
+					Path: "/mnt/disk4",
+				}},
+				ignoredEndpoints: []*url.URL{{
+					Scheme: "http",
+					Host: "localhost:9000",
+					Path: "/mnt/disk2",
+				}},
 			},
 			totalLockServers: 1,
 		},
@@ -18,9 +18,9 @@ package cmd

 import (
 	"errors"
+	"net/url"
 	pathutil "path"
 	"runtime"
-	"strconv"
 	"sync"

 	"github.com/minio/dsync"
@@ -31,13 +31,13 @@ var nsMutex *nsLockMap

 // Initialize distributed locking only in case of distributed setup.
 // Returns if the setup is distributed or not on success.
-func initDsyncNodes(eps []storageEndPoint) error {
+func initDsyncNodes(eps []*url.URL) error {
 	cred := serverConfig.GetCredential()
 	// Initialize rpc lock client information only if this instance is a distributed setup.
 	var clnts []dsync.RPC
 	myNode := -1
 	for _, ep := range eps {
-		if ep.host == "" || ep.port == 0 || ep.path == "" {
+		if ep == nil {
 			return errInvalidArgument
 		}
 		clnts = append(clnts, newAuthClient(&authConfig{
@@ -45,9 +45,9 @@ func initDsyncNodes(eps []*url.URL) error {
 			secretKey: cred.SecretAccessKey,
 			// Construct a new dsync server addr.
 			secureConn: isSSL(),
-			address: ep.host + ":" + strconv.Itoa(ep.port),
+			address: ep.Host,
 			// Construct a new rpc path for the endpoint.
-			path: pathutil.Join(lockRPCPath, ep.path),
+			path: pathutil.Join(lockRPCPath, getPath(ep)),
 			loginMethod: "Dsync.LoginHandler",
 		}))
 		if isLocalStorage(ep) && myNode == -1 {
@@ -565,7 +565,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
 }

 func initFSObjectsB(disk string, t *testing.B) (obj ObjectLayer) {
-	endPoints, err := parseStorageEndPoints([]string{disk}, 0)
+	endPoints, err := parseStorageEndpoints([]string{disk})
 	if err != nil {
 		t.Fatal("Unexpected err: ", err)
 	}
@@ -18,6 +18,8 @@ package cmd

 import (
 	"net"
+	"net/url"
+	"runtime"
 	"strings"
 	"sync"
 )
@@ -104,21 +106,27 @@ func houseKeeping(storageDisks []StorageAPI) error {
 }

 // Check if a network path is local to this node.
-func isLocalStorage(ep storageEndPoint) bool {
-	if ep.host == "" {
+func isLocalStorage(ep *url.URL) bool {
+	if ep.Host == "" {
 		return true
 	}
-	if globalMinioHost != "" {
-		// if --address host:port was specified for distXL we short circuit only the endPoint
-		// that matches host:port
-		if globalMinioHost == ep.host && globalMinioPort == ep.port {
+	if globalMinioHost != "" && globalMinioPort != "" {
+		// if --address host:port was specified for distXL we short
+		// circuit only the endPoint that matches host:port
+		if net.JoinHostPort(globalMinioHost, globalMinioPort) == ep.Host {
 			return true
 		}
 		return false
 	}
+	// Split host to extract host information.
+	host, _, err := net.SplitHostPort(ep.Host)
+	if err != nil {
+		errorIf(err, "Cannot split host port")
+		return false
+	}
 	// Resolve host to address to check if the IP is loopback.
 	// If address resolution fails, assume it's a non-local host.
-	addrs, err := net.LookupHost(ep.host)
+	addrs, err := net.LookupHost(host)
 	if err != nil {
 		errorIf(err, "Failed to lookup host")
 		return false
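The short-circuit above can be restated in isolation: when --address host:port is set, only the endpoint whose Host equals that exact pair counts as local. A hedged standalone sketch (function and parameter names are illustrative):

package main

import (
	"fmt"
	"net"
)

// isAddressedEndpoint mirrors the short-circuit in isLocalStorage:
// the endpoint is local only when its Host matches the configured
// host:port pair exactly.
func isAddressedEndpoint(host, port, epHost string) bool {
	return net.JoinHostPort(host, port) == epHost
}

func main() {
	fmt.Println(isAddressedEndpoint("192.168.1.11", "9000", "192.168.1.11:9000")) // true
	fmt.Println(isAddressedEndpoint("192.168.1.11", "9000", "192.168.1.12:9000")) // false
}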
@@ -149,12 +157,37 @@ func isLocalStorage(ep *url.URL) bool {
 	return false
 }

-// Depending on the disk type network or local, initialize storage API.
-func newStorageAPI(ep storageEndPoint) (storage StorageAPI, err error) {
-	if isLocalStorage(ep) {
-		return newPosix(ep.path)
-	}
-	return newRPCClient(ep)
+// Fetch the path component from *url.URL*.
+func getPath(ep *url.URL) string {
+	if ep == nil {
+		return ""
+	}
+	var diskPath string
+	// For windows ep.Path is usually empty
+	if runtime.GOOS == "windows" {
+		// For full URLs windows drive is part of URL path.
+		// Eg: http://ip:port/C:\mydrive
+		if ep.Scheme == "http" || ep.Scheme == "https" {
+			// For windows trim off the preceding "/".
+			diskPath = ep.Path[1:]
+		} else {
+			// For the rest url splits drive letter into
+			// Scheme contruct the disk path back.
+			diskPath = ep.Scheme + ":" + ep.Opaque
+		}
+	} else {
+		// For other operating systems ep.Path is non empty.
+		diskPath = ep.Path
+	}
+	return diskPath
+}
+
+// Depending on the disk type network or local, initialize storage API.
+func newStorageAPI(ep *url.URL) (storage StorageAPI, err error) {
+	if isLocalStorage(ep) {
+		return newPosix(getPath(ep))
+	}
+	return newStorageRPC(ep)
 }

 // Initializes meta volume on all input storage disks.
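The Windows branch of getPath above is the subtle part: in a full URL such as http://host:9000/C:\mydrive the drive letter sits in the URL path behind a leading slash, while a bare C:\mydrive ends up with the drive letter as the Scheme and the rest as Opaque. A standalone restatement of that decoding (illustrative, not the patch code):

package main

import (
	"fmt"
	"net/url"
)

// diskPath mirrors the decoding rules of getPath for the two
// Windows URL shapes discussed above.
func diskPath(u *url.URL) string {
	if u.Scheme == "http" || u.Scheme == "https" {
		// Full URL: the drive is in Path with a leading "/" to trim.
		return u.Path[1:]
	}
	// Bare path: stitch the drive letter (Scheme) and Opaque back together.
	return u.Scheme + ":" + u.Opaque
}

func main() {
	fmt.Println(diskPath(&url.URL{Scheme: "http", Path: `/C:\mydrive`})) // C:\mydrive
	fmt.Println(diskPath(&url.URL{Scheme: "C", Opaque: `\mydrive`}))     // C:\mydrive
}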
@@ -17,6 +17,8 @@
 package cmd

 import (
+	"net/url"
+	"runtime"
 	"sync"
 	"testing"
 )
@@ -35,8 +37,9 @@ func TestHouseKeeping(t *testing.T) {
 	defer removeRoots(noSpaceDirs)

 	properStorage := []StorageAPI{}
-	for _, fs := range fsDirs {
-		sd, err := newPosix(fs)
+	for _, fsDir := range fsDirs {
+		var sd StorageAPI
+		sd, err = newPosix(fsDir)
 		if err != nil {
 			t.Fatalf("Failed to create a local disk-based storage layer <ERROR> %v", err)
 		}
@@ -44,8 +47,8 @@ func TestHouseKeeping(t *testing.T) {
 	}

 	noSpaceBackend := []StorageAPI{}
-	for _, noSpaceFS := range noSpaceDirs {
-		sd, err := newPosix(noSpaceFS)
+	for _, noSpaceDir := range noSpaceDirs {
+		sd, err := newPosix(noSpaceDir)
 		if err != nil {
 			t.Fatalf("Failed to create a local disk-based storage layer <ERROR> %v", err)
 		}
@@ -68,7 +71,6 @@ func TestHouseKeeping(t *testing.T) {
 		if errs[index] != nil {
 			return
 		}
-
 		errs[index] = store.AppendFile(pathJoin(minioMetaBucket, tmpMetaPrefix), "hello.txt", []byte("hello"))
 	}(i, store)
 }
@@ -97,3 +99,52 @@ func TestHouseKeeping(t *testing.T) {
 		}
 	}
 }
+
+// Test constructing the final path.
+func TestGetPath(t *testing.T) {
+	var testCases []struct {
+		ep   *url.URL
+		path string
+	}
+	if runtime.GOOS != "windows" {
+		testCases = []struct {
+			ep   *url.URL
+			path string
+		}{
+			{
+				ep:   nil,
+				path: "",
+			},
+			{
+				ep:   &url.URL{Path: "/test1"},
+				path: "/test1",
+			},
+		}
+	} else {
+		testCases = []struct {
+			ep   *url.URL
+			path string
+		}{
+			{
+				ep:   nil,
+				path: "",
+			},
+			{
+				ep:   &url.URL{Opaque: "\\test1", Scheme: "C"},
+				path: "C:\\test1",
+			},
+			{
+				ep:   &url.URL{Scheme: "http", Path: "/C:\\test1"},
+				path: "C:\\test1",
+			},
+		}
+	}
+
+	// Validate all the test cases.
+	for i, testCase := range testCases {
+		path := getPath(testCase.ep)
+		if path != testCase.path {
+			t.Fatalf("Test: %d Expected path %s, got %s", i+1, testCase.path, path)
+		}
+	}
+}
@@ -48,7 +48,7 @@ func TestUNCPaths(t *testing.T) {
 	defer os.RemoveAll("c:\\testdisk")

 	var fs StorageAPI
-	fs, err = newPosix("c:\\testdisk")
+	fs, err = newPosix(`c:\testdisk`)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -66,7 +66,6 @@ func TestUNCPaths(t *testing.T) {
 		} else if err == nil && !test.pass {
 			t.Error(err)
 		}
-
 		fs.DeleteFile("voldir", test.objName)
 	}
 }
@@ -81,8 +80,9 @@ func TestUNCPathENOTDIR(t *testing.T) {
 	}
 	// Cleanup on exit of test
 	defer os.RemoveAll("c:\\testdisk")
+
 	var fs StorageAPI
-	fs, err = newPosix("c:\\testdisk")
+	fs, err = newPosix(`c:\testdisk`)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -106,58 +106,30 @@ func TestUNCPathENOTDIR(t *testing.T) {
 	}
 }

-// Test to validate that path name in UNC form works
-func TestUNCPathDiskName(t *testing.T) {
-	var err error
-	// Instantiate posix object to manage a disk
-	longPathDisk := `\\?\c:\testdisk`
-	err = mkdirAll(longPathDisk, 0777)
-	if err != nil {
-		t.Fatal(err)
-	}
-	// Cleanup on exit of test
-	defer removeAll(longPathDisk)
-	var fs StorageAPI
-	fs, err = newPosix(longPathDisk)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// Create volume to use in conjunction with other StorageAPI's file API(s)
-	err = fs.MakeVol("voldir")
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
 // Test to validate 32k path works on windows platform
 func Test32kUNCPath(t *testing.T) {
 	var err error
-	// Instantiate posix object to manage a disk
-	longDiskName := `\\?\c:`
+	// The following calculation was derived empirically. It is not exactly MAX_PATH - len(longDiskName)
+	// possibly due to expansion rules as mentioned here -
+	// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
+	var longPathName string
 	for {
 		compt := strings.Repeat("a", 255)
-		if len(compt)+len(longDiskName)+1 > 32767 {
+		if len(compt)+len(longPathName)+1 > 32767 {
 			break
 		}
-		longDiskName = longDiskName + `\` + compt
+		longPathName = longPathName + `\` + compt
 	}
-	if len(longDiskName) < 32767 {
-		// The following calculation was derived empirically. It is not exactly MAX_PATH - len(longDiskName)
-		// possibly due to expansion rules as mentioned here -
-		// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
-		remaining := 32767 - 25 - len(longDiskName) - 10
-		longDiskName = longDiskName + `\` + strings.Repeat("a", remaining)
-	}
-	err = mkdirAll(longDiskName, 0777)
+	longPathName = "C:" + longPathName
+	err = mkdirAll(longPathName, 0777)
 	if err != nil {
 		t.Fatal(err)
 	}

 	// Cleanup on exit of test
-	defer removeAll(longDiskName)
-	_, err = newPosix(longDiskName)
+	defer removeAll(longPathName)
+	_, err = newPosix(longPathName)
 	if err != nil {
 		t.Fatal(err)
 	}
cmd/posix.go (28 changed lines)
@@ -41,12 +41,11 @@ const (

 // posix - implements StorageAPI interface.
 type posix struct {
 	ioErrCount int32 // ref: https://golang.org/pkg/sync/atomic/#pkg-note-BUG
 	diskPath string
-	suppliedDiskPath string
 	minFreeSpace int64
 	minFreeInodes int64
 	pool sync.Pool
 }

 var errFaultyDisk = errors.New("Faulty disk")
@@ -100,22 +99,19 @@ func isDirEmpty(dirname string) bool {
 }

 // Initialize a new storage disk.
-func newPosix(diskPath string) (StorageAPI, error) {
-	if diskPath == "" {
+func newPosix(path string) (StorageAPI, error) {
+	if path == "" {
 		return nil, errInvalidArgument
 	}
-	suppliedDiskPath := diskPath
-	var err error
 	// Disallow relative paths, figure out absolute paths.
-	diskPath, err = filepath.Abs(diskPath)
+	diskPath, err := filepath.Abs(path)
 	if err != nil {
 		return nil, err
 	}
 	fs := &posix{
-		suppliedDiskPath: suppliedDiskPath,
 		diskPath: diskPath,
 		minFreeSpace: fsMinFreeSpace,
 		minFreeInodes: fsMinFreeInodesPercent,
 		// 1MiB buffer pool for posix internal operations.
 		pool: sync.Pool{
 			New: func() interface{} {
@@ -182,7 +178,7 @@ func (s *posix) checkDiskFree() (err error) {

 // Implements stringer compatible interface.
 func (s *posix) String() string {
-	return s.suppliedDiskPath
+	return s.diskPath
 }

 // DiskInfo provides current information about disk space usage,
@@ -35,6 +35,7 @@ func newPosixTestSetup() (StorageAPI, string, error) {
 	if err != nil {
 		return nil, "", err
 	}
+	// Initialize a new posix layer.
 	posixStorage, err := newPosix(diskPath)
 	if err != nil {
 		return nil, "", err
@@ -179,8 +180,8 @@ func TestNewPosix(t *testing.T) {

 	// List of all tests for posix initialization.
 	testCases := []struct {
-		diskPath string
+		name string
 		err error
 	}{
 		// Validates input argument cannot be empty.
 		{
@@ -203,7 +204,8 @@ func TestNewPosix(t *testing.T) {

 	// Validate all test cases.
 	for i, testCase := range testCases {
-		_, err := newPosix(testCase.diskPath)
+		// Initialize a new posix layer.
+		_, err := newPosix(testCase.name)
 		if err != testCase.err {
 			t.Fatalf("Test %d failed wanted: %s, got: %s", i+1, err, testCase.err)
 		}
@@ -17,6 +17,7 @@
 package cmd

 import (
+	"net/url"
 	"time"

 	"github.com/minio/mc/pkg/console"
@@ -184,7 +185,10 @@ func prepForInitXL(firstDisk bool, sErrs []error, diskCount int) InitActions {

 // Implements a jitter backoff loop for formatting all disks during
 // initialization of the server.
-func retryFormattingDisks(firstDisk bool, firstEndpoint string, storageDisks []StorageAPI) error {
+func retryFormattingDisks(firstDisk bool, firstEndpoint *url.URL, storageDisks []StorageAPI) error {
+	if firstEndpoint == nil {
+		return errInvalidArgument
+	}
 	if storageDisks == nil {
 		return errInvalidArgument
 	}
@@ -227,7 +231,7 @@ func retryFormattingDisks(firstDisk bool, firstEndpoint string, storageDisks []StorageAPI) error {
 		// Validate formats load before proceeding forward.
 		err := genericFormatCheck(formatConfigs, sErrs)
 		if err == nil {
-			printHealMsg(firstEndpoint, storageDisks, printOnceFn())
+			printHealMsg(firstEndpoint.String(), storageDisks, printOnceFn())
 		}
 		return err
 	case WaitForQuorum:
@@ -256,23 +260,28 @@ func retryFormattingDisks(firstDisk bool, firstEndpoint string, storageDisks []StorageAPI) error {
 }

 // Initialize storage disks based on input arguments.
-func initStorageDisks(endPoints, ignoredEndPoints []storageEndPoint) ([]StorageAPI, error) {
+func initStorageDisks(endpoints, ignoredEndpoints []*url.URL) ([]StorageAPI, error) {
 	// Single disk means we will use FS backend.
-	if len(endPoints) == 1 {
-		storage, err := newStorageAPI(endPoints[0])
+	if len(endpoints) == 1 {
+		if endpoints[0] == nil {
+			return nil, errInvalidArgument
+		}
+		storage, err := newStorageAPI(endpoints[0])
 		if err != nil && err != errDiskNotFound {
 			return nil, err
 		}
 		return []StorageAPI{storage}, nil
 	}
-	// Otherwise proceed with XL setup.
-	// Bootstrap disks.
-	storageDisks := make([]StorageAPI, len(endPoints))
-	for index, ep := range endPoints {
+	// Otherwise proceed with XL setup. Bootstrap disks.
+	storageDisks := make([]StorageAPI, len(endpoints))
+	for index, ep := range endpoints {
+		if ep == nil {
+			return nil, errInvalidArgument
+		}
 		// Check if disk is ignored.
 		ignored := false
-		for _, iep := range ignoredEndPoints {
-			if ep == iep {
+		for _, iep := range ignoredEndpoints {
+			if *ep == *iep {
 				ignored = true
 				break
 			}
@@ -294,7 +303,10 @@ func initStorageDisks(endPoints, ignoredEndPoints []storageEndPoint) ([]StorageAPI, error) {
 }

 // Format disks before initialization object layer.
-func waitForFormatDisks(firstDisk bool, firstEndpoint string, storageDisks []StorageAPI) (err error) {
+func waitForFormatDisks(firstDisk bool, firstEndpoint *url.URL, storageDisks []StorageAPI) (err error) {
+	if firstEndpoint == nil {
+		return errInvalidArgument
+	}
 	if storageDisks == nil {
 		return errInvalidArgument
 	}
@@ -20,6 +20,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/rpc"
+	"net/url"
 	"path"
 	"sync"
 	"time"
@@ -39,7 +40,7 @@ type s3Peers struct {
 	peers []string
 }

-func initGlobalS3Peers(eps []storageEndPoint) {
+func initGlobalS3Peers(eps []*url.URL) {
 	// Get list of de-duplicated peers.
 	peers := getAllPeers(eps)

@@ -111,16 +112,17 @@ func (s3p *s3Peers) Close() error {
 }

 // Returns the network addresses of all Minio servers in the cluster in `host:port` format.
-func getAllPeers(eps []storageEndPoint) (peers []string) {
+func getAllPeers(eps []*url.URL) (peers []string) {
 	if eps == nil {
 		return nil
 	}
 	peers = []string{globalMinioAddr} // Starts with a default peer.
 	for _, ep := range eps {
-		// Rest of the peers configured.
-		if ep.host != "" {
-			peers = append(peers, fmt.Sprintf("%s:%d", ep.host, ep.port))
+		if ep == nil {
+			return nil
 		}
+		// Rest of the peers configured.
+		peers = append(peers, ep.Host)
 	}
 	return peers
 }
@@ -17,6 +17,7 @@
 package cmd

 import (
+	"net/url"
 	"reflect"
 	"testing"
 )
@@ -24,16 +25,14 @@ import (
 // Validates getAllPeers, fetches all peers based on list of storage endpoints.
 func TestGetAllPeers(t *testing.T) {
 	testCases := []struct {
-		eps []storageEndPoint
+		eps []*url.URL
 		peers []string
 	}{
 		{nil, nil},
-		{[]storageEndPoint{{path: "/mnt/disk1"}}, []string{globalMinioAddr}},
-		{[]storageEndPoint{{
-			host: "localhost",
-			port: 9001,
-		}}, []string{
-			globalMinioAddr, "localhost:9001",
+		{[]*url.URL{nil}, nil},
+		{[]*url.URL{{Path: "/mnt/disk1"}}, []string{globalMinioAddr, ""}},
+		{[]*url.URL{{Host: "localhost:9001"}}, []string{globalMinioAddr,
+			"localhost:9001",
 		}},
 	}

@@ -17,18 +17,15 @@
 package cmd

 import (
-	"errors"
 	"fmt"
 	"net"
 	"net/http"
+	"net/url"
 	"os"
-	"runtime"
 	"strconv"
 	"strings"
 	"time"

-	"regexp"
-
 	"github.com/minio/cli"
 )

@ -90,126 +87,44 @@ EXAMPLES:
|
|||||||
/mnt/export3/ /mnt/export4/ /mnt/export5/ /mnt/export6/ /mnt/export7/ \
|
/mnt/export3/ /mnt/export4/ /mnt/export5/ /mnt/export6/ /mnt/export7/ \
|
||||||
/mnt/export8/ /mnt/export9/ /mnt/export10/ /mnt/export11/ /mnt/export12/
|
/mnt/export8/ /mnt/export9/ /mnt/export10/ /mnt/export11/ /mnt/export12/
|
||||||
|
|
||||||
6. Start minio server on a 4 node distributed setup. Type the following command on all the 4 nodes.
|
6. Start minio server for a 4 node distributed setup. Type the following command on all the 4 nodes exactly.
|
||||||
$ export MINIO_ACCESS_KEY=minio
|
$ export MINIO_ACCESS_KEY=minio
|
||||||
$ export MINIO_SECRET_KEY=miniostorage
|
$ export MINIO_SECRET_KEY=miniostorage
|
||||||
$ minio {{.Name}} 192.168.1.11:/mnt/export/ 192.168.1.12:/mnt/export/ \
|
$ minio {{.Name}} http://192.168.1.11/mnt/export/ http://192.168.1.12/mnt/export/ \
|
||||||
192.168.1.13:/mnt/export/ 192.168.1.14:/mnt/export/
|
http://192.168.1.13/mnt/export/ http://192.168.1.14/mnt/export/
|
||||||
|
|
||||||
|
7. Start minio server on a 4 node distributed setup. Type the following command on all the 4 nodes exactly.
|
||||||
|
$ minio {{.Name}} http://minio:miniostorage@192.168.1.11/mnt/export/ \
|
||||||
|
http://minio:miniostorage@192.168.1.12/mnt/export/ \
|
||||||
|
http://minio:miniostorage@192.168.1.13/mnt/export/ \
|
||||||
|
http://minio:miniostorage@192.168.1.14/mnt/export/
|
||||||
|
|
||||||
`,
|
`,
|
||||||
}
|
}
|
||||||
|
|
||||||
type serverCmdConfig struct {
|
type serverCmdConfig struct {
|
||||||
serverAddr string
|
serverAddr string
|
||||||
-	endPoints        []storageEndPoint
-	ignoredEndPoints []storageEndPoint
+	endpoints        []*url.URL
+	ignoredEndpoints []*url.URL
 	isDistXL     bool // True only if its distributed XL.
 	storageDisks []StorageAPI
 }
 
-// End point is specified in the command line as host:port:path or host:path or path
-// host:port:path or host:path - for distributed XL. Default port is 9000.
-// just path - for single node XL or FS.
-type storageEndPoint struct {
-	host string // Will be empty for single node XL and FS
-	port int    // Will be valid for distributed XL
-	path string // Will be valid for all configs
-}
-
-// Returns string form.
-func (ep storageEndPoint) String() string {
-	var str []string
-	if ep.host != "" {
-		str = append(str, ep.host)
-	}
-	if ep.port != 0 {
-		str = append(str, strconv.Itoa(ep.port))
-	}
-	if ep.path != "" {
-		str = append(str, ep.path)
-	}
-	return strings.Join(str, ":")
-}
-
-// Returns if ep is present in the eps list.
-func (ep storageEndPoint) presentIn(eps []storageEndPoint) bool {
-	for _, entry := range eps {
-		if entry == ep {
-			return true
-		}
-	}
-	return false
-}
-
-// Parse end-point (of the form host:port:path or host:path or path)
-func parseStorageEndPoint(ep string, defaultPort int) (storageEndPoint, error) {
-	if runtime.GOOS == "windows" {
-		// Try to match path, ex. C:\export or export
-		matched, err := regexp.MatchString(`^([a-zA-Z]:\\[^:]+|[^:]+)$`, ep)
-		if err != nil {
-			return storageEndPoint{}, err
-		}
-		if matched {
-			return storageEndPoint{path: ep}, nil
-		}
-
-		// Try to match host:path ex. 127.0.0.1:C:\export
-		re, err := regexp.Compile(`^([^:]+):([a-zA-Z]:\\[^:]+)$`)
-		if err != nil {
-			return storageEndPoint{}, err
-		}
-		result := re.FindStringSubmatch(ep)
-		if len(result) != 0 {
-			return storageEndPoint{host: result[1], port: defaultPort, path: result[2]}, nil
-		}
-
-		// Try to match host:port:path ex. 127.0.0.1:443:C:\export
-		re, err = regexp.Compile(`^([^:]+):([0-9]+):([a-zA-Z]:\\[^:]+)$`)
-		if err != nil {
-			return storageEndPoint{}, err
-		}
-		result = re.FindStringSubmatch(ep)
-		if len(result) != 0 {
-			portInt, err := strconv.Atoi(result[2])
-			if err != nil {
-				return storageEndPoint{}, err
-			}
-			return storageEndPoint{host: result[1], port: portInt, path: result[3]}, nil
-		}
-		return storageEndPoint{}, errors.New("Unable to parse endpoint " + ep)
-	}
-	// For *nix OSes
-	parts := strings.Split(ep, ":")
-	var parsedep storageEndPoint
-	switch len(parts) {
-	case 1:
-		parsedep = storageEndPoint{path: parts[0]}
-	case 2:
-		parsedep = storageEndPoint{host: parts[0], port: defaultPort, path: parts[1]}
-	case 3:
-		port, err := strconv.Atoi(parts[1])
-		if err != nil {
-			return storageEndPoint{}, err
-		}
-		parsedep = storageEndPoint{host: parts[0], port: port, path: parts[2]}
-	default:
-		return storageEndPoint{}, errors.New("Unable to parse " + ep)
-	}
-	return parsedep, nil
-}
-
-// Parse an array of end-points (passed on the command line)
-func parseStorageEndPoints(eps []string, defaultPort int) (endpoints []storageEndPoint, err error) {
+// Parse an array of end-points (from the command line)
+func parseStorageEndpoints(eps []string) (endpoints []*url.URL, err error) {
 	for _, ep := range eps {
 		if ep == "" {
-			continue
+			return nil, errInvalidArgument
 		}
-		var endpoint storageEndPoint
-		endpoint, err = parseStorageEndPoint(ep, defaultPort)
+		var u *url.URL
+		u, err = url.Parse(ep)
 		if err != nil {
 			return nil, err
 		}
-		endpoints = append(endpoints, endpoint)
+		if u.Host != "" && globalMinioHost == "" {
+			u.Host = net.JoinHostPort(u.Host, globalMinioPort)
+		}
+		endpoints = append(endpoints, u)
 	}
 	return endpoints, nil
 }
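For reference, a quick sketch of what the new URL-based syntax parses to. This is not part of the change; the node name and paths below are made-up examples:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// New syntax: http://<node>/path. url.Parse yields the pieces the
	// server code above reads back: Scheme, Host and Path.
	u, err := url.Parse("http://node1:9000/mnt/disk1")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Scheme, u.Host, u.Path) // http node1:9000 /mnt/disk1

	// A bare path parses to a URL with an empty Host, which is how
	// single node XL/FS arguments continue to work.
	u, _ = url.Parse("/mnt/disk1")
	fmt.Println(u.Host == "", u.Path) // true /mnt/disk1
}

The loop above additionally rewrites u.Host with net.JoinHostPort and globalMinioPort, so remote endpoints end up carrying an explicit port.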
@@ -305,7 +220,7 @@ func initServerConfig(c *cli.Context) {
 }
 
 // Validate if input disks are sufficient for initializing XL.
-func checkSufficientDisks(eps []storageEndPoint) error {
+func checkSufficientDisks(eps []*url.URL) error {
 	// Verify total number of disks.
 	total := len(eps)
 	if total > maxErasureBlocks {
@@ -331,30 +246,31 @@ func checkSufficientDisks(eps []storageEndPoint) error {
 }
 
 // Validate input disks.
-func validateDisks(endPoints []storageEndPoint, ignoredEndPoints []storageEndPoint) []StorageAPI {
-	isXL := len(endPoints) > 1
+func validateDisks(endpoints []*url.URL, ignoredEndpoints []*url.URL) []StorageAPI {
+	isXL := len(endpoints) > 1
 	if isXL {
 		// Validate if input disks have duplicates in them.
-		err := checkDuplicateEndPoints(endPoints)
+		err := checkDuplicateEndpoints(endpoints)
 		fatalIf(err, "Invalid disk arguments for server.")
 
 		// Validate if input disks are sufficient for erasure coded setup.
-		err = checkSufficientDisks(endPoints)
-		fatalIf(err, "Invalid disk arguments for server.")
+		err = checkSufficientDisks(endpoints)
+		fatalIf(err, "Invalid disk arguments for server. %#v", endpoints)
 	}
-	storageDisks, err := initStorageDisks(endPoints, ignoredEndPoints)
+	storageDisks, err := initStorageDisks(endpoints, ignoredEndpoints)
 	fatalIf(err, "Unable to initialize storage disks.")
 	return storageDisks
 }
 
 // Returns if slice of disks is a distributed setup.
-func isDistributedSetup(eps []storageEndPoint) (isDist bool) {
-	// Port to connect to for the lock servers in a distributed setup.
+func isDistributedSetup(eps []*url.URL) (isDist bool) {
+	// Validate if one the disks is not local.
 	for _, ep := range eps {
 		if !isLocalStorage(ep) {
 			// One or more disks supplied as arguments are not
 			// attached to the local node.
 			isDist = true
+			break
 		}
 	}
 	return isDist
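isLocalStorage is called on the parsed *url.URL but its body is outside this diff. The sketch below is a hypothetical, simplified stand-in to illustrate the kind of check involved; the hostname comparison is an assumption, not the repository's actual logic:

package main

import (
	"fmt"
	"net"
	"net/url"
	"os"
)

// isLocalURL is a hypothetical, simplified stand-in for the
// isLocalStorage check used above. An endpoint counts as local when it
// has no host at all, or when its host is loopback or this machine's
// own hostname. The real check in this tree may differ.
func isLocalURL(ep *url.URL) bool {
	if ep.Host == "" {
		return true // bare path argument, always local
	}
	host, _, err := net.SplitHostPort(ep.Host)
	if err != nil {
		host = ep.Host // URL carried no port
	}
	if host == "localhost" || host == "127.0.0.1" {
		return true
	}
	name, err := os.Hostname()
	return err == nil && host == name
}

func main() {
	u, _ := url.Parse("http://4.4.4.4:9000/mnt/disk1")
	fmt.Println(isLocalURL(u)) // false: a remote host makes the setup distributed
}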
@@ -372,29 +288,28 @@ func serverMain(c *cli.Context) {
 	// Check if requested port is available.
 	host, portStr, err := net.SplitHostPort(serverAddr)
 	fatalIf(err, "Unable to parse %s.", serverAddr)
-
-	portInt, err := strconv.Atoi(portStr)
-	fatalIf(err, "Invalid port number.")
-
-	fatalIf(checkPortAvailability(portInt), "Port unavailable %d", portInt)
-
-	// Saves host and port in a globally accessible value.
-	globalMinioPort = portInt
 	globalMinioHost = host
 
+	// Check if requested port is available.
+	fatalIf(checkPortAvailability(portStr), "Port unavailable %s", portStr)
+	globalMinioPort = portStr
+
 	// Disks to be ignored in server init, to skip format healing.
-	ignoredDisks, err := parseStorageEndPoints(strings.Split(c.String("ignore-disks"), ","), portInt)
-	fatalIf(err, "Unable to parse storage endpoints %s", strings.Split(c.String("ignore-disks"), ","))
+	var ignoredEndpoints []*url.URL
+	if len(c.String("ignore-disks")) > 0 {
+		ignoredEndpoints, err = parseStorageEndpoints(strings.Split(c.String("ignore-disks"), ","))
+		fatalIf(err, "Unable to parse storage endpoints %s", strings.Split(c.String("ignore-disks"), ","))
+	}
 
 	// Disks to be used in server init.
-	disks, err := parseStorageEndPoints(c.Args(), portInt)
+	endpoints, err := parseStorageEndpoints(c.Args())
 	fatalIf(err, "Unable to parse storage endpoints %s", c.Args())
 
-	// Initialize server config.
-	initServerConfig(c)
-
 	// Check 'server' cli arguments.
-	storageDisks := validateDisks(disks, ignoredDisks)
+	storageDisks := validateDisks(endpoints, ignoredEndpoints)
+
+	// Check if endpoints are part of distributed setup.
+	isDistXL := isDistributedSetup(endpoints)
 
 	// Cleanup objects that weren't successfully written into the namespace.
 	fatalIf(houseKeeping(storageDisks), "Unable to purge temporary files.")
@@ -402,16 +317,28 @@ func serverMain(c *cli.Context) {
 	// If https.
 	tls := isSSL()
 
+	// Fail if SSL is not configured and ssl endpoints are provided for distributed setup.
+	if !tls && isDistXL {
+		for _, ep := range endpoints {
+			if ep.Host != "" && ep.Scheme == "https" {
+				fatalIf(errInvalidArgument, "Cannot use secure endpoints when SSL is not configured %s", ep)
+			}
+		}
+	}
+
+	// Initialize server config.
+	initServerConfig(c)
+
 	// First disk argument check if it is local.
-	firstDisk := isLocalStorage(disks[0])
+	firstDisk := isLocalStorage(endpoints[0])
 
 	// Configure server.
 	srvConfig := serverCmdConfig{
 		serverAddr:       serverAddr,
-		endPoints:        disks,
-		ignoredEndPoints: ignoredDisks,
+		endpoints:        endpoints,
+		ignoredEndpoints: ignoredEndpoints,
 		storageDisks:     storageDisks,
-		isDistXL:         isDistributedSetup(disks),
+		isDistXL:         isDistXL,
 	}
 
 	// Configure server.
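To illustrate the new guard: with TLS off, any argument that parsed with Scheme == "https" aborts startup. A minimal standalone sketch, with isSSL and fatalIf replaced by plain values and prints, and made-up node names:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Stand-ins for the server's own state: pretend isSSL() reported
	// no certs configured and the setup was detected as distributed.
	tls, isDistXL := false, true

	args := []string{"http://node1:9000/mnt/d1", "https://node2:9000/mnt/d2"}
	if !tls && isDistXL {
		for _, arg := range args {
			ep, err := url.Parse(arg)
			if err != nil {
				panic(err)
			}
			if ep.Host != "" && ep.Scheme == "https" {
				// The real code calls fatalIf(errInvalidArgument, ...) here.
				fmt.Println("would reject:", ep)
			}
		}
	}
}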
@@ -419,12 +346,12 @@ func serverMain(c *cli.Context) {
 	fatalIf(err, "Unable to configure one of server's RPC services.")
 
 	// Set nodes for dsync for distributed setup.
-	if srvConfig.isDistXL {
-		fatalIf(initDsyncNodes(disks), "Unable to initialize distributed locking")
+	if isDistXL {
+		fatalIf(initDsyncNodes(endpoints), "Unable to initialize distributed locking")
 	}
 
 	// Initialize name space lock.
-	initNSLock(srvConfig.isDistXL)
+	initNSLock(isDistXL)
 
 	// Initialize a new HTTP server.
 	apiServer := NewServerMux(serverAddr, handler)
@@ -436,7 +363,7 @@ func serverMain(c *cli.Context) {
 	globalMinioAddr = getLocalAddress(srvConfig)
 
 	// Initialize S3 Peers inter-node communication
-	initGlobalS3Peers(disks)
+	initGlobalS3Peers(endpoints)
 
 	// Start server, automatically configures TLS if certs are available.
 	go func(tls bool) {
@@ -451,7 +378,7 @@ func serverMain(c *cli.Context) {
 	}(tls)
 
 	// Wait for formatting of disks.
-	err = waitForFormatDisks(firstDisk, endPoints[0], storageDisks)
+	err = waitForFormatDisks(firstDisk, endpoints[0], storageDisks)
 	fatalIf(err, "formatting storage disks failed")
 
 	// Once formatted, initialize object layer.

@@ -167,7 +167,7 @@ func TestCheckSufficientDisks(t *testing.T) {
 
 	// Validates different variations of input disks.
 	for i, testCase := range testCases {
-		endpoints, err := parseStorageEndPoints(testCase.disks, 0)
+		endpoints, err := parseStorageEndpoints(testCase.disks)
 		if err != nil {
 			t.Fatalf("Unexpected error %s", err)
 		}
@@ -201,7 +201,7 @@ func TestCheckServerSyntax(t *testing.T) {
 			t.Errorf("Test %d failed to parse arguments %s", i+1, disks)
 		}
 		defer removeRoots(disks)
-		endpoints, err := parseStorageEndPoints(disks, 0)
+		endpoints, err := parseStorageEndpoints(disks)
 		if err != nil {
 			t.Fatalf("Unexpected error %s", err)
 		}
@@ -219,26 +219,25 @@ func TestIsDistributedSetup(t *testing.T) {
 			disks  []string
 			result bool
 		}{
-			{[]string{`4.4.4.4:c:\mnt\disk1`, `4.4.4.4:c:\mnt\disk2`}, true},
-			{[]string{`4.4.4.4:c:\mnt\disk1`, `localhost:c:\mnt\disk2`}, true},
-			{[]string{`localhost:c:\mnt\disk1`, `localhost:c:\mnt\disk2`}, false},
+			{[]string{`http://4.4.4.4:80/c:\mnt\disk1`, `http://4.4.4.4:80/c:\mnt\disk2`}, true},
+			{[]string{`http://4.4.4.4:9000/c:\mnt\disk1`, `http://127.0.0.1:9000/c:\mnt\disk2`}, true},
+			{[]string{`http://127.0.0.1:9000/c:\mnt\disk1`, `http://127.0.0.1:9001/c:\mnt\disk2`}, true},
 			{[]string{`c:\mnt\disk1`, `c:\mnt\disk2`}, false},
 		}
 
 	} else {
 		testCases = []struct {
 			disks  []string
 			result bool
 		}{
-			{[]string{"4.4.4.4:/mnt/disk1", "4.4.4.4:/mnt/disk2"}, true},
-			{[]string{"4.4.4.4:/mnt/disk1", "localhost:/mnt/disk2"}, true},
-			{[]string{"localhost:/mnt/disk1", "localhost:/mnt/disk2"}, false},
+			{[]string{"http://4.4.4.4:9000/mnt/disk1", "http://4.4.4.4:9000/mnt/disk2"}, true},
+			{[]string{"http://4.4.4.4:9000/mnt/disk1", "http://127.0.0.1:9000/mnt/disk2"}, true},
+			{[]string{"http://127.0.0.1:9000/mnt/disk1", "http://127.0.0.1:9000/mnt/disk2"}, true},
 			{[]string{"/mnt/disk1", "/mnt/disk2"}, false},
 		}
 
 	}
 	for i, test := range testCases {
-		endpoints, err := parseStorageEndPoints(test.disks, 0)
+		endpoints, err := parseStorageEndpoints(test.disks)
 		if err != nil {
 			t.Fatalf("Unexpected error %s", err)
 		}

@@ -31,7 +31,6 @@ import (
 	"net/http"
 	"net/http/httptest"
 	"os"
-	"strconv"
 	"sync"
 	"testing"
 	"time"
@@ -153,7 +152,7 @@ func TestServerCloseBlocking(t *testing.T) {
 
 func TestListenAndServePlain(t *testing.T) {
 	wait := make(chan struct{})
-	addr := "127.0.0.1:" + strconv.Itoa(getFreePort())
+	addr := net.JoinHostPort("127.0.0.1", getFreePort())
 	errc := make(chan error)
 	once := &sync.Once{}
 
@@ -203,7 +202,7 @@ func TestListenAndServePlain(t *testing.T) {
 
 func TestListenAndServeTLS(t *testing.T) {
 	wait := make(chan struct{})
-	addr := "127.0.0.1:" + strconv.Itoa(getFreePort())
+	addr := net.JoinHostPort("127.0.0.1", getFreePort())
 	errc := make(chan error)
 	once := &sync.Once{}
 
@@ -101,7 +101,7 @@ func calculateStreamContentLength(dataLen, chunkSize int64) int64 {
 }
 
 // Ask the kernel for a free open port.
-func getFreePort() int {
+func getFreePort() string {
 	addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
 	if err != nil {
 		panic(err)
@@ -112,7 +112,7 @@ func getFreePort() int {
 		panic(err)
 	}
 	defer l.Close()
-	return l.Addr().(*net.TCPAddr).Port
+	return fmt.Sprintf("%d", l.Addr().(*net.TCPAddr).Port)
 }
 
 func verifyError(c *C, response *http.Response, code, description string, statusCode int) {
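With getFreePort now returning a string, callers build addresses with net.JoinHostPort instead of string concatenation. Besides matching Go's string-typed ports, JoinHostPort also brackets IPv6 literals correctly. A small demonstration of standard library behavior:

package main

import (
	"fmt"
	"net"
)

func main() {
	// String ports compose cleanly with JoinHostPort, which emits
	// addresses in the form net.Dial and net.Listen expect.
	fmt.Println(net.JoinHostPort("127.0.0.1", "9000")) // 127.0.0.1:9000
	fmt.Println(net.JoinHostPort("::1", "9000"))       // [::1]:9000
	fmt.Println(net.JoinHostPort("", "9000"))          // :9000
}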

@@ -17,10 +17,10 @@
 package cmd
 
 import (
-	"fmt"
 	"io"
 	"net"
 	"net/rpc"
+	"net/url"
 	"path"
 
 	"github.com/minio/minio/pkg/disk"
@@ -92,22 +92,28 @@ func toStorageErr(err error) error {
 	return err
 }
 
-// Initialize new rpc client.
-func newRPCClient(ep storageEndPoint) (StorageAPI, error) {
-	// Input validation.
-	if ep.host == "" || ep.port == 0 || ep.path == "" {
+// Initialize new storage rpc client.
+func newStorageRPC(ep *url.URL) (StorageAPI, error) {
+	if ep == nil {
 		return nil, errInvalidArgument
 	}
 
 	// Dial minio rpc storage http path.
-	rpcPath := path.Join(storageRPCPath, ep.path)
-	rpcAddr := fmt.Sprintf("%s:%d", ep.host, ep.port)
+	rpcPath := path.Join(storageRPCPath, getPath(ep))
+	rpcAddr := ep.Host
 
 	// Initialize rpc client with network address and rpc path.
-	cred := serverConfig.GetCredential()
+	accessKeyID := serverConfig.GetCredential().AccessKeyID
+	secretAccessKey := serverConfig.GetCredential().SecretAccessKey
+	if ep.User != nil {
+		accessKeyID = ep.User.Username()
+		if key, set := ep.User.Password(); set {
+			secretAccessKey = key
+		}
+	}
 	rpcClient := newAuthClient(&authConfig{
-		accessKey:  cred.AccessKeyID,
-		secretKey:  cred.SecretAccessKey,
+		accessKey:  accessKeyID,
+		secretKey:  secretAccessKey,
 		secureConn: isSSL(),
 		address:    rpcAddr,
 		path:       rpcPath,
@@ -116,8 +122,8 @@ func newRPCClient(ep storageEndPoint) (StorageAPI, error) {
 
 	// Initialize network storage.
 	ndisk := &networkStorage{
-		netAddr:   ep.host,
-		netPath:   ep.path,
+		netAddr:   ep.Host,
+		netPath:   getPath(ep),
 		rpcClient: rpcClient,
 	}
 
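A side effect of URL endpoints worth noting: newStorageRPC above lets an endpoint override the configured credentials through the URL's userinfo component. Reading that userinfo is plain net/url behavior; the access/secret values below are dummies:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Dummy credentials embedded in an endpoint URL.
	u, err := url.Parse("http://access:secret123@node1:9000/mnt/disk1")
	if err != nil {
		panic(err)
	}
	if u.User != nil {
		fmt.Println("access key:", u.User.Username())
		if secret, set := u.User.Password(); set {
			fmt.Println("secret key:", secret)
		}
	}
}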

@@ -23,6 +23,7 @@ import (
 	"io"
 	"net"
 	"net/rpc"
+	"net/url"
 	"runtime"
 	"testing"
 )
@@ -144,17 +145,26 @@ func (s *TestRPCStorageSuite) SetUpSuite(c *testing.T) {
 	s.testServer = StartTestStorageRPCServer(c, s.serverType, 1)
 	listenAddress := s.testServer.Server.Listener.Addr().String()
 
-	for _, disk := range s.testServer.Disks {
-		remoteEndPoint, err := parseStorageEndPoint(listenAddress+":"+disk.path, 0)
-		if err != nil {
-			c.Fatalf("Unexpected error %s", err)
-		}
-		storageDisk, err := newRPCClient(remoteEndPoint)
+	for _, ep := range s.testServer.Disks {
+		ep.Host = listenAddress
+		storageDisk, err := newStorageRPC(ep)
 		if err != nil {
 			c.Fatal("Unable to initialize RPC client", err)
 		}
 		s.remoteDisks = append(s.remoteDisks, storageDisk)
 	}
+	_, err := newStorageRPC(nil)
+	if err != errInvalidArgument {
+		c.Fatalf("Unexpected error %s, expecting %s", err, errInvalidArgument)
+	}
+	u, err := url.Parse("http://abcd:abcd123@localhost/mnt/disk")
+	if err != nil {
+		c.Fatal("Unexpected error", err)
+	}
+	_, err = newStorageRPC(u)
+	if err != nil {
+		c.Fatal("Unexpected error", err)
+	}
 }
 
 // No longer used with gocheck, but used in explicit teardown code in
@@ -217,21 +217,24 @@ func (s *storageServer) TryInitHandler(args *GenericArgs, reply *GenericReply) e
 }
 
 // Initialize new storage rpc.
-func newRPCServer(serverConfig serverCmdConfig) (servers []*storageServer, err error) {
-	for _, ep := range serverConfig.endPoints {
-		if ep.presentIn(serverConfig.ignoredEndPoints) {
-			// Do not init ignored end point.
+func newRPCServer(srvConfig serverCmdConfig) (servers []*storageServer, err error) {
+	for _, ep := range srvConfig.endpoints {
+		if containsEndpoint(srvConfig.ignoredEndpoints, ep) {
+			// Do not init disk RPC for ignored end point.
 			continue
 		}
 		// e.g server:/mnt/disk1
 		if isLocalStorage(ep) {
-			storage, err := newPosix(ep.path)
+			// Get the posix path.
+			path := getPath(ep)
+			var storage StorageAPI
+			storage, err = newPosix(path)
 			if err != nil && err != errDiskNotFound {
 				return nil, err
 			}
 			servers = append(servers, &storageServer{
 				storage: storage,
-				path:    ep.path,
+				path:    path,
 			})
 		}
 	}

@@ -62,7 +62,7 @@ func prepareFS() (ObjectLayer, string, error) {
 	if err != nil {
 		return nil, "", err
 	}
-	endpoints, err := parseStorageEndPoints(fsDirs, 0)
+	endpoints, err := parseStorageEndpoints(fsDirs)
 	if err != nil {
 		return nil, "", err
 	}
@@ -80,7 +80,7 @@ func prepareXL() (ObjectLayer, []string, error) {
 	if err != nil {
 		return nil, nil, err
 	}
-	endpoints, err := parseStorageEndPoints(fsDirs, 0)
+	endpoints, err := parseStorageEndpoints(fsDirs)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -154,7 +154,7 @@ func isSameType(obj1, obj2 interface{}) bool {
 // defer s.Stop()
 type TestServer struct {
 	Root      string
-	Disks     []storageEndPoint
+	Disks     []*url.URL
 	AccessKey string
 	SecretKey string
 	Server    *httptest.Server
@@ -182,7 +182,7 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer {
 	credentials := serverConfig.GetCredential()
 
 	testServer.Root = root
-	testServer.Disks, err = parseStorageEndPoints(disks, 0)
+	testServer.Disks, err = parseStorageEndpoints(disks)
 	if err != nil {
 		t.Fatalf("Unexpected error %s", err)
 	}
@@ -195,7 +195,7 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer {
 	}
 
 	srvCmdCfg := serverCmdConfig{
-		endPoints:    testServer.Disks,
+		endpoints:    testServer.Disks,
 		storageDisks: storageDisks,
 	}
 	httpHandler, err := configureServerHandler(
@@ -216,16 +216,14 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer {
 	globalObjLayerMutex.Unlock()
 
 	// initialize peer rpc
-	_, portStr, err := net.SplitHostPort(srvCmdCfg.serverAddr)
-	if err != nil {
-		t.Fatal("Early setup error:", err)
-	}
-	globalMinioPort, err = strconv.Atoi(portStr)
+	host, port, err := net.SplitHostPort(srvCmdCfg.serverAddr)
 	if err != nil {
 		t.Fatal("Early setup error:", err)
 	}
+	globalMinioHost = host
+	globalMinioPort = port
 	globalMinioAddr = getLocalAddress(srvCmdCfg)
-	endpoints, err := parseStorageEndPoints(disks, 0)
+	endpoints, err := parseStorageEndpoints(disks)
 	if err != nil {
 		t.Fatal("Early setup error:", err)
 	}
@@ -331,7 +329,7 @@ func StartTestStorageRPCServer(t TestErrHandler, instanceType string, diskN int)
 	if err != nil {
 		t.Fatal("Failed to create disks for the backend")
 	}
-	endPoints, err := parseStorageEndPoints(disks, 0)
+	endpoints, err := parseStorageEndpoints(disks)
 	if err != nil {
 		t.Fatalf("%s", err)
 	}
@@ -347,13 +345,13 @@ func StartTestStorageRPCServer(t TestErrHandler, instanceType string, diskN int)
 	credentials := serverConfig.GetCredential()
 
 	testRPCServer.Root = root
-	testRPCServer.Disks = endPoints
+	testRPCServer.Disks = endpoints
 	testRPCServer.AccessKey = credentials.AccessKeyID
 	testRPCServer.SecretKey = credentials.SecretAccessKey
 
 	// Run TestServer.
 	testRPCServer.Server = httptest.NewServer(initTestStorageRPCEndPoint(serverCmdConfig{
-		endPoints: endPoints,
+		endpoints: endpoints,
 	}))
 	return testRPCServer
 }
@@ -366,7 +364,7 @@ func StartTestPeersRPCServer(t TestErrHandler, instanceType string) TestServer {
 	if err != nil {
 		t.Fatal("Failed to create disks for the backend")
 	}
-	endPoints, err := parseStorageEndPoints(disks, 0)
+	endpoints, err := parseStorageEndpoints(disks)
 	if err != nil {
 		t.Fatalf("%s", err)
 	}
@@ -382,12 +380,12 @@ func StartTestPeersRPCServer(t TestErrHandler, instanceType string) TestServer {
 	credentials := serverConfig.GetCredential()
 
 	testRPCServer.Root = root
-	testRPCServer.Disks = endPoints
+	testRPCServer.Disks = endpoints
 	testRPCServer.AccessKey = credentials.AccessKeyID
 	testRPCServer.SecretKey = credentials.SecretAccessKey
 
 	// create temporary backend for the test server.
-	objLayer, storageDisks, err := initObjectLayer(endPoints, nil)
+	objLayer, storageDisks, err := initObjectLayer(endpoints, nil)
 	if err != nil {
 		t.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
 	}
@@ -398,7 +396,7 @@ func StartTestPeersRPCServer(t TestErrHandler, instanceType string) TestServer {
 	globalObjLayerMutex.Unlock()
 
 	srvCfg := serverCmdConfig{
-		endPoints:    endPoints,
+		endpoints:    endpoints,
 		storageDisks: storageDisks,
 	}
 
@@ -438,7 +436,7 @@ func StartTestControlRPCServer(t TestErrHandler, instanceType string) TestServer
 	if err != nil {
 		t.Fatal("Failed to create disks for the backend")
 	}
-	endPoints, err := parseStorageEndPoints(disks, 0)
+	endpoints, err := parseStorageEndpoints(disks)
 	if err != nil {
 		t.Fatalf("%s", err)
 	}
@@ -454,12 +452,12 @@ func StartTestControlRPCServer(t TestErrHandler, instanceType string) TestServer
 	credentials := serverConfig.GetCredential()
 
 	testRPCServer.Root = root
-	testRPCServer.Disks = endPoints
+	testRPCServer.Disks = endpoints
 	testRPCServer.AccessKey = credentials.AccessKeyID
 	testRPCServer.SecretKey = credentials.SecretAccessKey
 
 	// create temporary backend for the test server.
-	objLayer, storageDisks, err := initObjectLayer(endPoints, nil)
+	objLayer, storageDisks, err := initObjectLayer(endpoints, nil)
 	if err != nil {
 		t.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
 	}
@@ -508,7 +506,7 @@ func newTestConfig(bucketLocation string) (rootPath string, err error) {
 func (testServer TestServer) Stop() {
 	removeAll(testServer.Root)
 	for _, disk := range testServer.Disks {
-		removeAll(disk.path)
+		removeAll(disk.Path)
 	}
 	testServer.Server.Close()
 }
@@ -1556,13 +1554,13 @@ func getRandomDisks(N int) ([]string, error) {
 }
 
 // initObjectLayer - Instantiates object layer and returns it.
-func initObjectLayer(endPoints []storageEndPoint, ignoredEndPoints []storageEndPoint) (ObjectLayer, []StorageAPI, error) {
-	storageDisks, err := initStorageDisks(endPoints, ignoredEndPoints)
+func initObjectLayer(endpoints, ignoredEndpoints []*url.URL) (ObjectLayer, []StorageAPI, error) {
+	storageDisks, err := initStorageDisks(endpoints, ignoredEndpoints)
 	if err != nil {
 		return nil, nil, err
 	}
 
-	err = waitForFormatDisks(true, "", storageDisks)
+	err = waitForFormatDisks(true, endpoints[0], storageDisks)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -1633,7 +1631,7 @@ func prepareXLStorageDisks(t *testing.T) ([]StorageAPI, []string) {
 	if err != nil {
 		t.Fatal("Unexpected error: ", err)
 	}
-	endpoints, err := parseStorageEndPoints(fsDirs, 0)
+	endpoints, err := parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatal("Unexpected error: ", err)
 	}
@@ -1650,7 +1648,7 @@ func prepareXLStorageDisks(t *testing.T) ([]StorageAPI, []string) {
 // initializes the specified API endpoints for the tests.
 // initialies the root and returns its path.
 // return credentials.
-func initAPIHandlerTest(obj ObjectLayer, endPoints []string) (bucketName string, apiRouter http.Handler, err error) {
+func initAPIHandlerTest(obj ObjectLayer, endpoints []string) (bucketName string, apiRouter http.Handler, err error) {
 	// get random bucket name.
 	bucketName = getRandomBucketName()
 
@@ -1662,7 +1660,7 @@ func initAPIHandlerTest(obj ObjectLayer, endPoints []string) (bucketName string,
 	}
 	// Register the API end points with XL/FS object layer.
 	// Registering only the GetObject handler.
-	apiRouter = initTestAPIEndPoints(obj, endPoints)
+	apiRouter = initTestAPIEndPoints(obj, endpoints)
 	return bucketName, apiRouter, nil
 }
 
@@ -1835,7 +1833,7 @@ func ExecObjectLayerAPINilTest(t TestErrHandler, bucketName, objectName, instanc
 
 // ExecObjectLayerAPITest - executes object layer API tests.
 // Creates single node and XL ObjectLayer instance, registers the specified API end points and runs test for both the layers.
-func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endPoints []string) {
+func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints []string) {
 	// initialize the server and obtain the credentials and root.
 	// credentials are necessary to sign the HTTP request.
 	rootPath, err := newTestConfig("us-east-1")
@@ -1846,7 +1844,7 @@ func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endPoints [
 	if err != nil {
 		t.Fatalf("Initialization of object layer failed for single node setup: %s", err)
 	}
-	bucketFS, fsAPIRouter, err := initAPIHandlerTest(objLayer, endPoints)
+	bucketFS, fsAPIRouter, err := initAPIHandlerTest(objLayer, endpoints)
 	if err != nil {
 		t.Fatalf("Initialzation of API handler tests failed: <ERROR> %s", err)
 	}
@@ -1858,7 +1856,7 @@ func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endPoints [
 	if err != nil {
 		t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
 	}
-	bucketXL, xlAPIRouter, err := initAPIHandlerTest(objLayer, endPoints)
+	bucketXL, xlAPIRouter, err := initAPIHandlerTest(objLayer, endpoints)
 	if err != nil {
 		t.Fatalf("Initialzation of API handler tests failed: <ERROR> %s", err)
 	}
@@ -1929,7 +1927,7 @@ func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType)
 	if err != nil {
 		t.Fatalf("Initialization of disks for XL setup: %s", err)
 	}
-	endpoints, err := parseStorageEndPoints(erasureDisks, 0)
+	endpoints, err := parseStorageEndpoints(erasureDisks)
 	if err != nil {
 		t.Fatalf("Initialization of disks for XL setup: %s", err)
 	}

@@ -165,11 +165,11 @@ func TestTreeWalk(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Unable to create tmp directory: %s", err)
 	}
-	endpoint, err := parseStorageEndPoint(fsDir, 0)
+	endpoints, err := parseStorageEndpoints([]string{fsDir})
 	if err != nil {
 		t.Fatalf("Unexpected error %s", err)
 	}
-	disk, err := newStorageAPI(endpoint)
+	disk, err := newStorageAPI(endpoints[0])
 	if err != nil {
 		t.Fatalf("Unable to create StorageAPI: %s", err)
 	}
@@ -206,11 +206,11 @@ func TestTreeWalkTimeout(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Unable to create tmp directory: %s", err)
 	}
-	endpoint, err := parseStorageEndPoint(fsDir, 0)
+	endpoints, err := parseStorageEndpoints([]string{fsDir})
 	if err != nil {
 		t.Fatalf("Unexpected error %s", err)
 	}
-	disk, err := newStorageAPI(endpoint)
+	disk, err := newStorageAPI(endpoints[0])
 	if err != nil {
 		t.Fatalf("Unable to create StorageAPI: %s", err)
 	}
@@ -286,23 +286,18 @@ func TestListDir(t *testing.T) {
 		t.Errorf("Unable to create tmp directory: %s", err)
 	}
 
-	endpoint1, err := parseStorageEndPoint(fsDir1, 0)
+	endpoints, err := parseStorageEndpoints([]string{fsDir1, fsDir2})
 	if err != nil {
 		t.Fatalf("Unexpected error %s", err)
 	}
 
 	// Create two StorageAPIs disk1 and disk2.
-	disk1, err := newStorageAPI(endpoint1)
+	disk1, err := newStorageAPI(endpoints[0])
 	if err != nil {
 		t.Errorf("Unable to create StorageAPI: %s", err)
 	}
 
-	endpoint2, err := parseStorageEndPoint(fsDir2, 0)
-	if err != nil {
-		t.Fatalf("Unexpected error %s", err)
-	}
-
-	disk2, err := newStorageAPI(endpoint2)
+	disk2, err := newStorageAPI(endpoints[1])
 	if err != nil {
 		t.Errorf("Unable to create StorageAPI: %s", err)
 	}
@@ -370,13 +365,11 @@ func TestRecursiveTreeWalk(t *testing.T) {
 		t.Fatalf("Unable to create tmp directory: %s", err)
 	}
 
-	endpoint1, err := parseStorageEndPoint(fsDir1, 0)
+	endpoints, err := parseStorageEndpoints([]string{fsDir1})
 	if err != nil {
 		t.Fatalf("Unexpected error %s", err)
 	}
-
-	// Create two StorageAPIs disk1.
-	disk1, err := newStorageAPI(endpoint1)
+	disk1, err := newStorageAPI(endpoints[0])
 	if err != nil {
 		t.Fatalf("Unable to create StorageAPI: %s", err)
 	}
@@ -482,15 +475,13 @@ func TestSortedness(t *testing.T) {
 		t.Errorf("Unable to create tmp directory: %s", err)
 	}
 
-	endpoint1, err := parseStorageEndPoint(fsDir1, 0)
+	endpoints, err := parseStorageEndpoints([]string{fsDir1})
 	if err != nil {
 		t.Fatalf("Unexpected error %s", err)
 	}
-
-	// Create two StorageAPIs disk1.
-	disk1, err := newStorageAPI(endpoint1)
+	disk1, err := newStorageAPI(endpoints[0])
 	if err != nil {
-		t.Errorf("Unable to create StorageAPI: %s", err)
+		t.Fatalf("Unable to create StorageAPI: %s", err)
 	}
 
 	// Simple isLeaf check, returns true if there is no trailing "/"
@@ -562,15 +553,13 @@ func TestTreeWalkIsEnd(t *testing.T) {
 		t.Errorf("Unable to create tmp directory: %s", err)
 	}
 
-	endpoint1, err := parseStorageEndPoint(fsDir1, 0)
+	endpoints, err := parseStorageEndpoints([]string{fsDir1})
 	if err != nil {
 		t.Fatalf("Unexpected error %s", err)
 	}
-
-	// Create two StorageAPIs disk1.
-	disk1, err := newStorageAPI(endpoint1)
+	disk1, err := newStorageAPI(endpoints[0])
 	if err != nil {
-		t.Errorf("Unable to create StorageAPI: %s", err)
+		t.Fatalf("Unable to create StorageAPI: %s", err)
 	}
 
 	isLeaf := func(volume, prefix string) bool {

cmd/utils.go
@@ -22,6 +22,7 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"net/url"
 	"os"
 	"strings"
 
@@ -70,24 +71,23 @@ func checkDuplicateStrings(list []string) error {
 }
 
 // checkDuplicates - function to validate if there are duplicates in a slice of endPoints.
-func checkDuplicateEndPoints(list []storageEndPoint) error {
+func checkDuplicateEndpoints(endpoints []*url.URL) error {
 	var strs []string
-	for _, ep := range list {
+	for _, ep := range endpoints {
 		strs = append(strs, ep.String())
 	}
 	return checkDuplicateStrings(strs)
 }
 
-// Find local node through the command line arguments. Returns in
-// `host:port` format.
+// Find local node through the command line arguments. Returns in `host:port` format.
 func getLocalAddress(srvCmdConfig serverCmdConfig) string {
 	if !srvCmdConfig.isDistXL {
 		return srvCmdConfig.serverAddr
 	}
-	for _, ep := range srvCmdConfig.endPoints {
-		// Validates if remote disk is local.
+	for _, ep := range srvCmdConfig.endpoints {
+		// Validates if remote endpoint is local.
 		if isLocalStorage(ep) {
-			return fmt.Sprintf("%s:%d", ep.host, ep.port)
+			return ep.Host
 		}
 	}
 	return ""
@@ -144,6 +144,16 @@ func contains(stringList []string, element string) bool {
 	return false
 }
 
+// Contains endpoint returns true if endpoint found in the list of input endpoints.
+func containsEndpoint(endpoints []*url.URL, endpoint *url.URL) bool {
+	for _, ep := range endpoints {
+		if *ep == *endpoint {
+			return true
+		}
+	}
+	return false
+}
+
 // urlPathSplit - split url path into bucket and object components.
 func urlPathSplit(urlPath string) (bucketName, prefixName string) {
 	if urlPath == "" {
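containsEndpoint compares dereferenced url.URL values, so two separately parsed but textually identical endpoints match. One caveat, which is standard library behavior rather than anything specific to this change: url.URL holds its userinfo as a *Userinfo pointer, and == compares that field by identity, so endpoints embedding credentials would not compare equal even when identical. A quick check:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	a, _ := url.Parse("http://node1:9000/mnt/disk1")
	b, _ := url.Parse("http://node1:9000/mnt/disk1")
	fmt.Println(a == b)   // false: distinct pointers
	fmt.Println(*a == *b) // true: equal values, as containsEndpoint sees them

	c, _ := url.Parse("http://user:pass@node1:9000/mnt/disk1")
	d, _ := url.Parse("http://user:pass@node1:9000/mnt/disk1")
	fmt.Println(*c == *d) // false: User is a pointer field, compared by identity
}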

@@ -18,7 +18,9 @@ package cmd
 
 import (
 	"fmt"
+	"net"
 	"net/http"
+	"net/url"
 	"reflect"
 	"runtime"
 	"testing"
@@ -224,7 +226,8 @@ func TestLocalAddress(t *testing.T) {
 		return
 	}
 	// need to set this to avoid stale values from other tests.
-	globalMinioPort = 9000
+	globalMinioPort = "9000"
+	globalMinioHost = ""
 	testCases := []struct {
 		srvCmdConfig serverCmdConfig
 		localAddr    string
@@ -233,39 +236,64 @@ func TestLocalAddress(t *testing.T) {
 		{
 			srvCmdConfig: serverCmdConfig{
 				isDistXL: true,
-				endPoints: []storageEndPoint{
-					{"localhost", 9000, "/mnt/disk1"},
-					{"1.1.1.2", 9000, "/mnt/disk2"},
-					{"1.1.2.1", 9000, "/mnt/disk3"},
-					{"1.1.2.2", 9000, "/mnt/disk4"},
-				},
+				endpoints: []*url.URL{{
+					Scheme: "http",
+					Host:   "localhost:9000",
+					Path:   "/mnt/disk1",
+				}, {
+					Scheme: "http",
+					Host:   "1.1.1.2:9000",
+					Path:   "/mnt/disk2",
+				}, {
+					Scheme: "http",
+					Host:   "1.1.2.1:9000",
+					Path:   "/mnt/disk3",
+				}, {
+					Scheme: "http",
+					Host:   "1.1.2.2:9000",
+					Path:   "/mnt/disk4",
+				}},
 			},
-			localAddr: fmt.Sprintf("localhost:%d", globalMinioPort),
+			localAddr: net.JoinHostPort("localhost", globalMinioPort),
 		},
 		// Test 2 - local address is everything.
 		{
 			srvCmdConfig: serverCmdConfig{
-				serverAddr: fmt.Sprintf(":%d", globalMinioPort),
+				serverAddr: net.JoinHostPort("", globalMinioPort),
 				isDistXL:   false,
-				endPoints: []storageEndPoint{
-					{path: "/mnt/disk1"},
-					{path: "/mnt/disk2"},
-					{path: "/mnt/disk3"},
-					{path: "/mnt/disk4"},
-				},
+				endpoints: []*url.URL{{
+					Path: "/mnt/disk1",
+				}, {
+					Path: "/mnt/disk2",
+				}, {
+					Path: "/mnt/disk3",
+				}, {
+					Path: "/mnt/disk4",
+				}},
 			},
-			localAddr: fmt.Sprintf(":%d", globalMinioPort),
+			localAddr: net.JoinHostPort("", globalMinioPort),
 		},
 		// Test 3 - local address is not found.
 		{
 			srvCmdConfig: serverCmdConfig{
 				isDistXL: true,
-				endPoints: []storageEndPoint{
-					{"1.1.1.1", 9000, "/mnt/disk1"},
-					{"1.1.1.2", 9000, "/mnt/disk2"},
-					{"1.1.2.1", 9000, "/mnt/disk3"},
-					{"1.1.2.2", 9000, "/mnt/disk4"},
-				},
+				endpoints: []*url.URL{{
+					Scheme: "http",
+					Host:   "1.1.1.1:9000",
+					Path:   "/mnt/disk2",
+				}, {
+					Scheme: "http",
+					Host:   "1.1.1.2:9000",
+					Path:   "/mnt/disk2",
+				}, {
+					Scheme: "http",
+					Host:   "1.1.2.1:9000",
+					Path:   "/mnt/disk3",
+				}, {
+					Scheme: "http",
+					Host:   "1.1.2.2:9000",
+					Path:   "/mnt/disk4",
+				}},
 			},
 			localAddr: "",
 		},
@@ -276,12 +304,15 @@ func TestLocalAddress(t *testing.T) {
 			srvCmdConfig: serverCmdConfig{
 				serverAddr: "play.minio.io:9000",
 				isDistXL:   false,
-				endPoints: []storageEndPoint{
-					{path: "/mnt/disk1"},
-					{path: "/mnt/disk2"},
-					{path: "/mnt/disk3"},
-					{path: "/mnt/disk4"},
-				},
+				endpoints: []*url.URL{{
+					Path: "/mnt/disk1",
+				}, {
+					Path: "/mnt/disk2",
+				}, {
+					Path: "/mnt/disk3",
+				}, {
+					Path: "/mnt/disk4",
+				}},
 			},
 			localAddr: "play.minio.io:9000",
 		},

@@ -51,12 +51,12 @@ func TestStorageInfo(t *testing.T) {
 		t.Fatalf("Diskinfo total values should be greater 0")
 	}
 
-	endpoints, err := parseStorageEndPoints(fsDirs, 0)
+	endpoints, err := parseStorageEndpoints(fsDirs)
 	if err != nil {
 		t.Fatalf("Unexpected error %s", err)
 	}
 
-	ignoredEndpoints, err := parseStorageEndPoints(fsDirs[:4], 0)
+	ignoredEndpoints, err := parseStorageEndpoints(fsDirs[:4])
 	if err != nil {
 		t.Fatalf("Unexpected error %s", err)
 	}
@@ -68,7 +68,7 @@ func TestStorageInfo(t *testing.T) {
 
 	objLayer, err = newXLObjects(storageDisks)
 	if err != nil {
-		t.Fatalf("Unable to initialize 'XL' object layer with ignored disks %s.", fsDirs[:4])
+		t.Fatalf("Unable to initialize 'XL' object layer with ignored disks %s. error %s", fsDirs[:4], err)
 	}
 
 	// Get storage info first attempt.
@@ -151,7 +151,7 @@ func TestNewXL(t *testing.T) {
 		t.Fatalf("Unable to initialize erasure, %s", err)
 	}
 
-	endpoints, err := parseStorageEndPoints(erasureDisks, 0)
+	endpoints, err := parseStorageEndpoints(erasureDisks)
 	if err != nil {
 		t.Fatalf("Unable to initialize erasure, %s", err)
 	}
@@ -161,13 +161,18 @@ func TestNewXL(t *testing.T) {
 		t.Fatal("Unexpected error: ", err)
 	}
 
-	err = waitForFormatDisks(true, "", nil)
+	err = waitForFormatDisks(true, endpoints[0], nil)
+	if err != errInvalidArgument {
+		t.Fatalf("Expecting error, got %s", err)
+	}
+
+	err = waitForFormatDisks(true, nil, storageDisks)
 	if err != errInvalidArgument {
 		t.Fatalf("Expecting error, got %s", err)
 	}
 
 	// Initializes all erasure disks
-	err = waitForFormatDisks(true, "", storageDisks)
+	err = waitForFormatDisks(true, endpoints[0], storageDisks)
 	if err != nil {
 		t.Fatalf("Unable to format disks for erasure, %s", err)
 	}
@@ -176,12 +181,12 @@ func TestNewXL(t *testing.T) {
 		t.Fatalf("Unable to initialize erasure, %s", err)
 	}
 
-	endpoints, err = parseStorageEndPoints(erasureDisks, 0)
+	endpoints, err = parseStorageEndpoints(erasureDisks)
 	if err != nil {
 		t.Fatalf("Unable to initialize erasure, %s", err)
 	}
 
-	ignoredEndpoints, err := parseStorageEndPoints(erasureDisks[:2], 0)
+	ignoredEndpoints, err := parseStorageEndpoints(erasureDisks[:2])
 	if err != nil {
 		t.Fatalf("Unable to initialize erasure, %s", err)
 	}