Mirror of https://github.com/minio/minio.git (synced 2024-12-24 22:25:54 -05:00)

commit 347b29d059 (parent 3a34d98db8)
Implement bucket expansion (#8509)
@@ -61,7 +61,7 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
 // Initialize boot time
 globalBootTime = UTCNow()
 
-globalEndpoints = mustGetNewEndpointList(xlDirs...)
+globalEndpoints = mustGetZoneEndpoints(xlDirs...)
 
 // Set globalIsXL to indicate that the setup uses an erasure
 // code backend.
@@ -113,7 +113,7 @@ func initTestXLObjLayer() (ObjectLayer, []string, error) {
 if err != nil {
 return nil, nil, err
 }
-endpoints := mustGetNewEndpointList(xlDirs...)
+endpoints := mustGetNewEndpoints(xlDirs...)
 format, err := waitForFormatXL(true, endpoints, 1, 16)
 if err != nil {
 removeRoots(xlDirs)
@@ -30,13 +30,13 @@ import (
 cpuhw "github.com/shirou/gopsutil/cpu"
 )
 
-// getLocalMemUsage - returns ServerMemUsageInfo for only the
-// local endpoints from given list of endpoints
-func getLocalMemUsage(endpoints EndpointList, r *http.Request) ServerMemUsageInfo {
+// getLocalMemUsage - returns ServerMemUsageInfo for all zones, endpoints.
+func getLocalMemUsage(endpointZones EndpointZones, r *http.Request) ServerMemUsageInfo {
 var memUsages []mem.Usage
 var historicUsages []mem.Usage
 seenHosts := set.NewStringSet()
-for _, endpoint := range endpoints {
+for _, ep := range endpointZones {
+for _, endpoint := range ep.Endpoints {
 if seenHosts.Contains(endpoint.Host) {
 continue
 }
@@ -48,9 +48,10 @@ func getLocalMemUsage(endpoints EndpointList, r *http.Request) ServerMemUsageInf
 historicUsages = append(historicUsages, mem.GetHistoricUsage())
 }
 }
+}
 addr := r.Host
 if globalIsDistXL {
-addr = GetLocalPeer(endpoints)
+addr = GetLocalPeer(endpointZones)
 }
 return ServerMemUsageInfo{
 Addr: addr,
@@ -59,13 +60,13 @@ func getLocalMemUsage(endpoints EndpointList, r *http.Request) ServerMemUsageInf
 }
 }
 
-// getLocalCPULoad - returns ServerCPULoadInfo for only the
-// local endpoints from given list of endpoints
-func getLocalCPULoad(endpoints EndpointList, r *http.Request) ServerCPULoadInfo {
+// getLocalCPULoad - returns ServerCPULoadInfo for all zones, endpoints.
+func getLocalCPULoad(endpointZones EndpointZones, r *http.Request) ServerCPULoadInfo {
 var cpuLoads []cpu.Load
 var historicLoads []cpu.Load
 seenHosts := set.NewStringSet()
-for _, endpoint := range endpoints {
+for _, ep := range endpointZones {
+for _, endpoint := range ep.Endpoints {
 if seenHosts.Contains(endpoint.Host) {
 continue
 }
@@ -77,9 +78,10 @@ func getLocalCPULoad(endpoints EndpointList, r *http.Request) ServerCPULoadInfo
 historicLoads = append(historicLoads, cpu.GetHistoricLoad())
 }
 }
+}
 addr := r.Host
 if globalIsDistXL {
-addr = GetLocalPeer(endpoints)
+addr = GetLocalPeer(endpointZones)
 }
 return ServerCPULoadInfo{
 Addr: addr,
@@ -88,11 +90,11 @@ func getLocalCPULoad(endpoints EndpointList, r *http.Request) ServerCPULoadInfo
 }
 }
 
-// getLocalDrivesPerf - returns ServerDrivesPerfInfo for only the
-// local endpoints from given list of endpoints
-func getLocalDrivesPerf(endpoints EndpointList, size int64, r *http.Request) madmin.ServerDrivesPerfInfo {
+// getLocalDrivesPerf - returns ServerDrivesPerfInfo for all zones, endpoints.
+func getLocalDrivesPerf(endpointZones EndpointZones, size int64, r *http.Request) madmin.ServerDrivesPerfInfo {
 var dps []disk.Performance
-for _, endpoint := range endpoints {
+for _, ep := range endpointZones {
+for _, endpoint := range ep.Endpoints {
 // Only proceed for local endpoints
 if endpoint.IsLocal {
 if _, err := os.Stat(endpoint.Path); err != nil {
@@ -105,9 +107,10 @@ func getLocalDrivesPerf(endpoints EndpointList, size int64, r *http.Request) mad
 dps = append(dps, dp)
 }
 }
+}
 addr := r.Host
 if globalIsDistXL {
-addr = GetLocalPeer(endpoints)
+addr = GetLocalPeer(endpointZones)
 }
 return madmin.ServerDrivesPerfInfo{
 Addr: addr,
@@ -116,12 +119,12 @@ func getLocalDrivesPerf(endpoints EndpointList, size int64, r *http.Request) mad
 }
 }
 
-// getLocalCPUInfo - returns ServerCPUHardwareInfo only for the
-// local endpoints from given list of endpoints
-func getLocalCPUInfo(endpoints EndpointList, r *http.Request) madmin.ServerCPUHardwareInfo {
+// getLocalCPUInfo - returns ServerCPUHardwareInfo for all zones, endpoints.
+func getLocalCPUInfo(endpointZones EndpointZones, r *http.Request) madmin.ServerCPUHardwareInfo {
 var cpuHardwares []cpuhw.InfoStat
 seenHosts := set.NewStringSet()
-for _, endpoint := range endpoints {
+for _, ep := range endpointZones {
+for _, endpoint := range ep.Endpoints {
 if seenHosts.Contains(endpoint.Host) {
 continue
 }
@@ -138,9 +141,10 @@ func getLocalCPUInfo(endpoints EndpointList, r *http.Request) madmin.ServerCPUHa
 cpuHardwares = append(cpuHardwares, cpuHardware...)
 }
 }
+}
 addr := r.Host
 if globalIsDistXL {
-addr = GetLocalPeer(endpoints)
+addr = GetLocalPeer(endpointZones)
 }
 
 return madmin.ServerCPUHardwareInfo{
@@ -149,12 +153,12 @@ func getLocalCPUInfo(endpoints EndpointList, r *http.Request) madmin.ServerCPUHa
 }
 }
 
-// getLocalNetworkInfo - returns ServerNetworkHardwareInfo only for the
-// local endpoints from given list of endpoints
-func getLocalNetworkInfo(endpoints EndpointList, r *http.Request) madmin.ServerNetworkHardwareInfo {
+// getLocalNetworkInfo - returns ServerNetworkHardwareInfo for all zones, endpoints.
+func getLocalNetworkInfo(endpointZones EndpointZones, r *http.Request) madmin.ServerNetworkHardwareInfo {
 var networkHardwares []net.Interface
 seenHosts := set.NewStringSet()
-for _, endpoint := range endpoints {
+for _, ep := range endpointZones {
+for _, endpoint := range ep.Endpoints {
 if seenHosts.Contains(endpoint.Host) {
 continue
 }
@@ -171,9 +175,10 @@ func getLocalNetworkInfo(endpoints EndpointList, r *http.Request) madmin.ServerN
 networkHardwares = append(networkHardwares, networkHardware...)
 }
 }
+}
 addr := r.Host
 if globalIsDistXL {
-addr = GetLocalPeer(endpoints)
+addr = GetLocalPeer(endpointZones)
 }
 
 return madmin.ServerNetworkHardwareInfo{
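
All of the getLocal* helpers above change in the same way: instead of ranging over a flat EndpointList, they range over zones and then over each zone's endpoints, still deduplicating by host. A minimal standalone sketch of that iteration pattern, with simplified stand-in types rather than the commit's actual definitions:

package main

import "fmt"

// Simplified stand-ins for the commit's Endpoint and ZoneEndpoints types.
type endpoint struct {
	Host string
}

type zoneEndpoints struct {
	Endpoints []endpoint
}

// uniqueHosts walks every zone and its endpoints, skipping hosts already
// seen, which mirrors the seenHosts pattern in the getLocal* helpers above.
func uniqueHosts(zones []zoneEndpoints) []string {
	seen := map[string]bool{}
	var hosts []string
	for _, z := range zones {
		for _, ep := range z.Endpoints {
			if seen[ep.Host] {
				continue
			}
			seen[ep.Host] = true
			hosts = append(hosts, ep.Host)
		}
	}
	return hosts
}

func main() {
	zones := []zoneEndpoints{
		{Endpoints: []endpoint{{Host: "node1:9000"}, {Host: "node2:9000"}}},
		{Endpoints: []endpoint{{Host: "node2:9000"}, {Host: "node3:9000"}}},
	}
	// Prints [node1:9000 node2:9000 node3:9000]: the duplicate host in the
	// second zone is visited only once.
	fmt.Println(uniqueHosts(zones))
}
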
@@ -65,7 +65,7 @@ func (h *healRoutine) run() {
 // Wait at max 10 minute for an inprogress request before proceeding to heal
 waitCount := 600
 // Any requests in progress, delay the heal.
-for (globalHTTPServer.GetRequestCount() >= int32(globalXLSetCount*globalXLSetDriveCount)) &&
+for (globalHTTPServer.GetRequestCount() >= int32(globalEndpoints.Nodes())) &&
 waitCount > 0 {
 waitCount--
 time.Sleep(1 * time.Second)
@@ -44,7 +44,7 @@ func monitorLocalDisksAndHeal() {
 break
 }
 
-sets, ok := objAPI.(*xlSets)
+z, ok := objAPI.(*xlZones)
 if !ok {
 return
 }
@@ -66,8 +66,10 @@ func monitorLocalDisksAndHeal() {
 for {
 time.Sleep(defaultMonitorNewDiskInterval)
 
-localDisksToHeal := []Endpoint{}
-for _, endpoint := range globalEndpoints {
+localDisksInZoneHeal := make([]Endpoints, len(z.zones))
+for i, ep := range globalEndpoints {
+localDisksToHeal := Endpoints{}
+for _, endpoint := range ep.Endpoints {
 if !endpoint.IsLocal {
 continue
 }
@@ -78,17 +80,20 @@ func monitorLocalDisksAndHeal() {
 localDisksToHeal = append(localDisksToHeal, endpoint)
 }
 }
 
 if len(localDisksToHeal) == 0 {
 continue
 }
+localDisksInZoneHeal[i] = localDisksToHeal
+}
 
 // Reformat disks
 bgSeq.sourceCh <- SlashSeparator
 // Ensure that reformatting disks is finished
 bgSeq.sourceCh <- nopHeal
 
+var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
 // Compute the list of erasure set to heal
+for i, localDisksToHeal := range localDisksInZoneHeal {
 var erasureSetToHeal []int
 for _, endpoint := range localDisksToHeal {
 // Load the new format of this passed endpoint
@@ -98,7 +103,7 @@ func monitorLocalDisksAndHeal() {
 continue
 }
 // Calculate the set index where the current endpoint belongs
-setIndex, _, err := findDiskIndex(sets.format, format)
+setIndex, _, err := findDiskIndex(z.zones[i].format, format)
 if err != nil {
 logger.LogIf(ctx, err)
 continue
@@ -106,14 +111,17 @@ func monitorLocalDisksAndHeal() {
 
 erasureSetToHeal = append(erasureSetToHeal, setIndex)
 }
+erasureSetInZoneToHeal[i] = erasureSetToHeal
+}
 
 // Heal all erasure sets that need
+for i, erasureSetToHeal := range erasureSetInZoneToHeal {
 for _, setIndex := range erasureSetToHeal {
-xlObj := sets.sets[setIndex]
-err := healErasureSet(ctx, setIndex, xlObj)
+err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex])
 if err != nil {
 logger.LogIf(ctx, err)
 }
 }
 }
+}
 }
@@ -383,12 +383,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 deleteObjectsFn = api.CacheAPI().DeleteObjects
 }
 
-type delObj struct {
-origIndex int
-name string
-}
-
-var objectsToDelete []delObj
+var objectsToDelete = map[string]int{}
 var dErrs = make([]APIErrorCode, len(deleteObjects.Objects))
 
 for index, object := range deleteObjects.Objects {
@@ -400,13 +395,16 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 continue
 }
 
-objectsToDelete = append(objectsToDelete, delObj{index, object.ObjectName})
+// Avoid duplicate objects, we use map to filter them out.
+if _, ok := objectsToDelete[object.ObjectName]; !ok {
+objectsToDelete[object.ObjectName] = index
+}
 }
 
-toNames := func(input []delObj) (output []string) {
+toNames := func(input map[string]int) (output []string) {
 output = make([]string, len(input))
-for i := range input {
-output[i] = input[i].name
+for name, index := range input {
+output[index] = name
 }
 return
 }
@@ -417,8 +415,8 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 return
 }
 
-for i, obj := range objectsToDelete {
-dErrs[obj.origIndex] = toAPIErrorCode(ctx, errs[i])
+for _, index := range objectsToDelete {
+dErrs[index] = toAPIErrorCode(ctx, errs[index])
 }
 
 // Collect deleted objects and errors if any.
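
The handler now collects object names into a map keyed by name, so a request that repeats an object deletes it only once while results are still reported at the original request index. A small hedged sketch of that idea with hypothetical data, not the handler's actual variables:

package main

import "fmt"

// A minimal sketch of the duplicate-filtering idea used above: remember the
// first request index per object name, then act on each unique name once.
func main() {
	requested := []string{"a.txt", "b.txt", "a.txt", "c.txt"}

	objectsToDelete := map[string]int{} // name -> original request index
	for index, name := range requested {
		if _, ok := objectsToDelete[name]; !ok {
			objectsToDelete[name] = index
		}
	}

	// Each unique name is handled once; its result is reported back at the
	// original request index, as the handler does with dErrs.
	results := make([]string, len(requested))
	for name, index := range objectsToDelete {
		results[index] = "deleted " + name
	}
	fmt.Println(len(objectsToDelete), results)
}
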
@@ -331,7 +331,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
 shouldPass: false,
 },
 // Test case -3.
-// Setting invalid delimiter, expecting the HTTP response status to be http.StatusNotImplemented.
+// Delimiter unsupported, but response is empty.
 {
 bucket: bucketName,
 prefix: "",
@@ -341,8 +341,8 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
 maxUploads: "0",
 accessKey: credentials.AccessKey,
 secretKey: credentials.SecretKey,
-expectedRespStatus: http.StatusNotImplemented,
-shouldPass: false,
+expectedRespStatus: http.StatusOK,
+shouldPass: true,
 },
 // Test case - 4.
 // Setting Invalid prefix and marker combination.
@@ -44,20 +44,6 @@ func handleEncryptedConfigBackend(objAPI ObjectLayer, server bool) error {
 var encrypted bool
 var err error
 
-// Construct path to config/transaction.lock for locking
-transactionConfigPrefix := minioConfigPrefix + "/transaction.lock"
-
-// Make sure to hold lock for entire migration to avoid
-// such that only one server should migrate the entire config
-// at a given time, this big transaction lock ensures this
-// appropriately. This is also true for rotation of encrypted
-// content.
-objLock := objAPI.NewNSLock(context.Background(), minioMetaBucket, transactionConfigPrefix)
-if err := objLock.GetLock(globalOperationTimeout); err != nil {
-return err
-}
-defer objLock.Unlock()
-
 // Migrating Config backend needs a retry mechanism for
 // the following reasons:
 // - Read quorum is lost just after the initialization
@@ -287,19 +287,6 @@ func initConfig(objAPI ObjectLayer) error {
 }
 }
 
-// Construct path to config/transaction.lock for locking
-transactionConfigPrefix := minioConfigPrefix + "/transaction.lock"
-
-// Hold lock only by one server and let that server alone migrate
-// all the config as necessary, this is to ensure that
-// redundant locks are not held for each migration - this allows
-// for a more predictable behavior while debugging.
-objLock := objAPI.NewNSLock(context.Background(), minioMetaBucket, transactionConfigPrefix)
-if err := objLock.GetLock(globalOperationTimeout); err != nil {
-return err
-}
-defer objLock.Unlock()
-
 // Migrates ${HOME}/.minio/config.json or config.json.deprecated
 // to '<export_path>/.minio.sys/config/config.json'
 // ignore if the file doesn't exist.
@@ -44,8 +44,8 @@ type HTTPConsoleLoggerSys struct {
 
 // NewConsoleLogger - creates new HTTPConsoleLoggerSys with all nodes subscribed to
 // the console logging pub sub system
-func NewConsoleLogger(ctx context.Context, endpoints EndpointList) *HTTPConsoleLoggerSys {
-host, err := xnet.ParseHost(GetLocalPeer(globalEndpoints))
+func NewConsoleLogger(ctx context.Context, endpointZones EndpointZones) *HTTPConsoleLoggerSys {
+host, err := xnet.ParseHost(GetLocalPeer(endpointZones))
 if err != nil {
 logger.FatalIf(err, "Unable to start console logging subsystem")
 }
@@ -58,31 +58,20 @@ func getDivisibleSize(totalSizes []uint64) (result uint64) {
 return result
 }
 
+// isValidSetSize - checks whether given count is a valid set size for erasure coding.
+var isValidSetSize = func(count uint64) bool {
+return (count >= setSizes[0] && count <= setSizes[len(setSizes)-1] && count%2 == 0)
+}
+
 // getSetIndexes returns list of indexes which provides the set size
 // on each index, this function also determines the final set size
 // The final set size has the affinity towards choosing smaller
 // indexes (total sets)
-func getSetIndexes(args []string, totalSizes []uint64) (setIndexes [][]uint64, err error) {
+func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint64) (setIndexes [][]uint64, err error) {
 if len(totalSizes) == 0 || len(args) == 0 {
 return nil, errInvalidArgument
 }
 
-// isValidSetSize - checks whether given count is a valid set size for erasure coding.
-isValidSetSize := func(count uint64) bool {
-return (count >= setSizes[0] && count <= setSizes[len(setSizes)-1] && count%2 == 0)
-}
-
-var customSetDriveCount uint64
-if v := env.Get("MINIO_ERASURE_SET_DRIVE_COUNT", ""); v != "" {
-customSetDriveCount, err = strconv.ParseUint(v, 10, 64)
-if err != nil {
-return nil, config.ErrInvalidErasureSetSize(err)
-}
-if !isValidSetSize(customSetDriveCount) {
-return nil, config.ErrInvalidErasureSetSize(nil)
-}
-}
-
 setIndexes = make([][]uint64, len(totalSizes))
 for _, totalSize := range totalSizes {
 // Check if totalSize has minimum range upto setSize
@@ -189,7 +178,7 @@ func getTotalSizes(argPatterns []ellipses.ArgPattern) []uint64 {
 // Parses all arguments and returns an endpointSet which is a collection
 // of endpoints following the ellipses pattern, this is what is used
 // by the object layer for initializing itself.
-func parseEndpointSet(args ...string) (ep endpointSet, err error) {
+func parseEndpointSet(customSetDriveCount uint64, args ...string) (ep endpointSet, err error) {
 var argPatterns = make([]ellipses.ArgPattern, len(args))
 for i, arg := range args {
 patterns, perr := ellipses.FindEllipsesPatterns(arg)
@@ -199,7 +188,7 @@ func parseEndpointSet(args ...string) (ep endpointSet, err error) {
 argPatterns[i] = patterns
 }
 
-ep.setIndexes, err = getSetIndexes(args, getTotalSizes(argPatterns))
+ep.setIndexes, err = getSetIndexes(args, getTotalSizes(argPatterns), customSetDriveCount)
 if err != nil {
 return endpointSet{}, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
 }
@@ -215,8 +204,15 @@ func parseEndpointSet(args ...string) (ep endpointSet, err error) {
 // For example: {1...64} is divided into 4 sets each of size 16.
 // This applies to even distributed setup syntax as well.
 func GetAllSets(args ...string) ([][]string, error) {
-if len(args) == 0 {
-return nil, errInvalidArgument
+var customSetDriveCount uint64
+if v := env.Get("MINIO_ERASURE_SET_DRIVE_COUNT", ""); v != "" {
+customSetDriveCount, err := strconv.ParseUint(v, 10, 64)
+if err != nil {
+return nil, config.ErrInvalidErasureSetSize(err)
+}
+if !isValidSetSize(customSetDriveCount) {
+return nil, config.ErrInvalidErasureSetSize(nil)
+}
 }
 
 var setArgs [][]string
@@ -225,7 +221,7 @@ func GetAllSets(args ...string) ([][]string, error) {
 // Check if we have more one args.
 if len(args) > 1 {
 var err error
-setIndexes, err = getSetIndexes(args, []uint64{uint64(len(args))})
+setIndexes, err = getSetIndexes(args, []uint64{uint64(len(args))}, customSetDriveCount)
 if err != nil {
 return nil, err
 }
@@ -239,7 +235,7 @@ func GetAllSets(args ...string) ([][]string, error) {
 }
 setArgs = s.Get()
 } else {
-s, err := parseEndpointSet(args...)
+s, err := parseEndpointSet(customSetDriveCount, args...)
 if err != nil {
 return nil, err
 }
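
isValidSetSize becomes a package-level helper and the MINIO_ERASURE_SET_DRIVE_COUNT override is now parsed once in GetAllSets and passed down to getSetIndexes. A rough illustration of how an override interacts with a drive count, as a simplified model and not the function's exact algorithm:

package main

import "fmt"

// setSizes mirrors the allowed erasure set sizes (4 through 16 drives).
var setSizes = []uint64{4, 6, 8, 10, 12, 14, 16}

// isValidSetSize - checks whether given count is a valid set size for erasure coding.
func isValidSetSize(count uint64) bool {
	return count >= setSizes[0] && count <= setSizes[len(setSizes)-1] && count%2 == 0
}

// splitIntoSets is a simplified model: an explicit override must be valid and
// divide the drive count evenly, otherwise the largest size that divides the
// total is chosen. The real getSetIndexes also weighs common divisors across
// all arguments.
func splitIntoSets(totalDrives, override uint64) ([]uint64, error) {
	var size uint64
	if override != 0 {
		if !isValidSetSize(override) || totalDrives%override != 0 {
			return nil, fmt.Errorf("invalid erasure set size %d for %d drives", override, totalDrives)
		}
		size = override
	} else {
		for _, s := range setSizes {
			if totalDrives%s == 0 {
				size = s
			}
		}
		if size == 0 {
			return nil, fmt.Errorf("no valid set size for %d drives", totalDrives)
		}
	}
	sets := make([]uint64, totalDrives/size)
	for i := range sets {
		sets[i] = size
	}
	return sets, nil
}

func main() {
	fmt.Println(splitIntoSets(64, 8)) // [8 8 8 8 8 8 8 8] <nil>, like the data{1...64} test case
	fmt.Println(splitIntoSets(60, 8)) // error: 8 does not divide 60, like the data{1...60} case
}
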
@@ -261,18 +257,60 @@ func GetAllSets(args ...string) ([][]string, error) {
 
 // CreateServerEndpoints - validates and creates new endpoints from input args, supports
 // both ellipses and without ellipses transparently.
-func createServerEndpoints(serverAddr string, args ...string) (string, EndpointList, SetupType, int, int, error) {
+func createServerEndpoints(serverAddr string, args ...string) (EndpointZones, SetupType, error) {
+if len(args) == 0 {
+return nil, -1, errInvalidArgument
+}
+
+var endpointZones EndpointZones
+var setupType SetupType
+if !ellipses.HasEllipses(args...) {
 setArgs, err := GetAllSets(args...)
 if err != nil {
-return serverAddr, nil, -1, 0, 0, err
+return nil, -1, err
 }
-
-var endpoints EndpointList
-var setupType SetupType
-serverAddr, endpoints, setupType, err = CreateEndpoints(serverAddr, setArgs...)
+endpointList, newSetupType, err := CreateEndpoints(serverAddr, setArgs...)
 if err != nil {
-return serverAddr, nil, -1, 0, 0, err
+return nil, -1, err
+}
+endpointZones = append(endpointZones, ZoneEndpoints{
+SetCount: len(setArgs),
+DrivesPerSet: len(setArgs[0]),
+Endpoints: endpointList,
+})
+globalXLSetDriveCount = len(setArgs[0])
+setupType = newSetupType
+return endpointZones, setupType, nil
 }
 
-return serverAddr, endpoints, setupType, len(setArgs), len(setArgs[0]), nil
+// Look for duplicate args.
+if _, err := GetAllSets(args...); err != nil {
+return nil, -1, err
+}
+for _, arg := range args {
+setArgs, err := GetAllSets(arg)
+if err != nil {
+return nil, -1, err
+}
+endpointList, newSetupType, err := CreateEndpoints(serverAddr, setArgs...)
+if err != nil {
+return nil, -1, err
+}
+if setupType != 0 && setupType != newSetupType {
+return nil, -1, fmt.Errorf("Mixed modes of operation %s and %s are not allowed",
+setupType, newSetupType)
+}
+if globalXLSetDriveCount != 0 && globalXLSetDriveCount != len(setArgs[0]) {
+return nil, -1, fmt.Errorf("All zones should have same drive per set ratio - expected %d, got %d",
+globalXLSetDriveCount, len(setArgs[0]))
+}
+endpointZones = append(endpointZones, ZoneEndpoints{
+SetCount: len(setArgs),
+DrivesPerSet: len(setArgs[0]),
+Endpoints: endpointList,
+})
+globalXLSetDriveCount = len(setArgs[0])
+setupType = newSetupType
+}
+return endpointZones, setupType, nil
 }
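
createServerEndpoints now produces one ZoneEndpoints entry per argument and insists that every zone keeps the same drives-per-set ratio and setup type. A hedged sketch of that per-argument validation loop, using stand-in types rather than the real CreateEndpoints machinery:

package main

import "fmt"

// zone is a stand-in for the commit's ZoneEndpoints; each server argument
// expands to the drives of one zone (reduced here to a single set per zone).
type zone struct {
	SetCount     int
	DrivesPerSet int
	Drives       []string
}

// buildZones mirrors the per-argument loop above: every zone must keep the
// same drives-per-set ratio or the whole configuration is rejected.
func buildZones(perArg [][]string) ([]zone, error) {
	var zones []zone
	drivesPerSet := 0
	for _, drives := range perArg {
		if drivesPerSet != 0 && drivesPerSet != len(drives) {
			return nil, fmt.Errorf("all zones should have same drive per set ratio - expected %d, got %d",
				drivesPerSet, len(drives))
		}
		drivesPerSet = len(drives)
		zones = append(zones, zone{SetCount: 1, DrivesPerSet: len(drives), Drives: drives})
	}
	return zones, nil
}

func main() {
	got, err := buildZones([][]string{
		{"z1-d1", "z1-d2", "z1-d3", "z1-d4"},
		{"z2-d1", "z2-d2", "z2-d3", "z2-d4"},
	})
	fmt.Println(len(got), err) // 2 <nil>

	_, err = buildZones([][]string{
		{"z1-d1", "z1-d2", "z1-d3", "z1-d4"},
		{"z2-d1", "z2-d2"},
	})
	fmt.Println(err) // rejected: 4 versus 2 drives per set
}
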
@@ -18,7 +18,6 @@ package cmd
 
 import (
 "fmt"
-"os"
 "reflect"
 "testing"
 
@@ -55,7 +54,7 @@ func TestCreateServerEndpoints(t *testing.T) {
 }
 
 for i, testCase := range testCases {
-_, _, _, _, _, err := createServerEndpoints(testCase.serverAddr, testCase.args...)
+_, _, err := createServerEndpoints(testCase.serverAddr, testCase.args...)
 if err != nil && testCase.success {
 t.Errorf("Test %d: Expected success but failed instead %s", i+1, err)
 }
@@ -74,8 +73,10 @@ func TestGetDivisibleSize(t *testing.T) {
 {[]uint64{8, 8, 8}, 8},
 {[]uint64{24}, 24},
 }
-for i, testCase := range testCases {
-t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) {
+for _, testCase := range testCases {
+testCase := testCase
+t.Run("", func(t *testing.T) {
 gotGCD := getDivisibleSize(testCase.totalSizes)
 if testCase.result != gotGCD {
 t.Errorf("Expected %v, got %v", testCase.result, gotGCD)
@@ -90,45 +91,43 @@ func TestGetSetIndexesEnvOverride(t *testing.T) {
 args []string
 totalSizes []uint64
 indexes [][]uint64
-envOverride string
+envOverride uint64
 success bool
 }{
 {
 []string{"data{1...64}"},
 []uint64{64},
 [][]uint64{{8, 8, 8, 8, 8, 8, 8, 8}},
-"8",
+8,
 true,
 },
 {
 []string{"data{1...60}"},
 nil,
 nil,
-"8",
+8,
 false,
 },
 {
 []string{"data{1...64}"},
 nil,
 nil,
-"-1",
+64,
 false,
 },
 {
 []string{"data{1...64}"},
 nil,
 nil,
-"2",
+2,
 false,
 },
 }
 
-for i, testCase := range testCases {
-t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) {
-if err := os.Setenv("MINIO_ERASURE_SET_DRIVE_COUNT", testCase.envOverride); err != nil {
-t.Fatal(err)
-}
-gotIndexes, err := getSetIndexes(testCase.args, testCase.totalSizes)
+for _, testCase := range testCases {
+testCase := testCase
+t.Run("", func(t *testing.T) {
+gotIndexes, err := getSetIndexes(testCase.args, testCase.totalSizes, testCase.envOverride)
 if err != nil && testCase.success {
 t.Errorf("Expected success but failed instead %s", err)
 }
@@ -138,7 +137,6 @@ func TestGetSetIndexesEnvOverride(t *testing.T) {
 if !reflect.DeepEqual(testCase.indexes, gotIndexes) {
 t.Errorf("Expected %v, got %v", testCase.indexes, gotIndexes)
 }
-os.Unsetenv("MINIO_ERASURE_SET_DRIVE_COUNT")
 })
 }
 }
@@ -209,9 +207,10 @@ func TestGetSetIndexes(t *testing.T) {
 },
 }
 
-for i, testCase := range testCases {
-t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) {
-gotIndexes, err := getSetIndexes(testCase.args, testCase.totalSizes)
+for _, testCase := range testCases {
+testCase := testCase
+t.Run("", func(t *testing.T) {
+gotIndexes, err := getSetIndexes(testCase.args, testCase.totalSizes, 0)
 if err != nil && testCase.success {
 t.Errorf("Expected success but failed instead %s", err)
 }
@@ -530,9 +529,10 @@ func TestParseEndpointSet(t *testing.T) {
 },
 }
 
-for i, testCase := range testCases {
-t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) {
-gotEs, err := parseEndpointSet(testCase.arg)
+for _, testCase := range testCases {
+testCase := testCase
+t.Run("", func(t *testing.T) {
+gotEs, err := parseEndpointSet(0, testCase.arg)
 if err != nil && testCase.success {
 t.Errorf("Expected success but failed instead %s", err)
 }
cmd/endpoint.go (196 changed lines)
@@ -55,7 +55,6 @@ type Endpoint struct {
 *url.URL
 IsLocal bool
 SetIndex int
-HostName string
 }
 
 func (endpoint Endpoint) String() string {
@@ -75,15 +74,15 @@ func (endpoint Endpoint) Type() EndpointType {
 return URLEndpointType
 }
 
-// IsHTTPS - returns true if secure for URLEndpointType.
-func (endpoint Endpoint) IsHTTPS() bool {
+// HTTPS - returns true if secure for URLEndpointType.
+func (endpoint Endpoint) HTTPS() bool {
 return endpoint.Scheme == "https"
 }
 
 // UpdateIsLocal - resolves the host and updates if it is local or not.
 func (endpoint *Endpoint) UpdateIsLocal() error {
 if !endpoint.IsLocal {
-isLocal, err := isLocalHost(endpoint.HostName)
+isLocal, err := isLocalHost(endpoint.Hostname())
 if err != nil {
 return err
 }
@@ -183,33 +182,44 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
 return Endpoint{
 URL: u,
 IsLocal: isLocal,
-HostName: host,
 }, nil
 }
 
-// EndpointList - list of same type of endpoint.
-type EndpointList []Endpoint
-
-// Nodes - returns number of unique servers.
-func (endpoints EndpointList) Nodes() int {
-uniqueNodes := set.NewStringSet()
-for _, endpoint := range endpoints {
-if uniqueNodes.Contains(endpoint.Host) {
-continue
-}
-uniqueNodes.Add(endpoint.Host)
-}
-return len(uniqueNodes)
+// ZoneEndpoints represent endpoints in a given zone
+// along with its setCount and drivesPerSet.
+type ZoneEndpoints struct {
+SetCount int
+DrivesPerSet int
+Endpoints Endpoints
 }
 
-// IsHTTPS - returns true if secure for URLEndpointType.
-func (endpoints EndpointList) IsHTTPS() bool {
-return endpoints[0].IsHTTPS()
+// EndpointZones - list of list of endpoints
+type EndpointZones []ZoneEndpoints
+
+// HTTPS - returns true if secure for URLEndpointType.
+func (l EndpointZones) HTTPS() bool {
+return l[0].Endpoints.HTTPS()
+}
+
+// Nodes - returns all nodes count
+func (l EndpointZones) Nodes() (count int) {
+for _, ep := range l {
+count += len(ep.Endpoints)
+}
+return count
+}
+
+// Endpoints - list of same type of endpoint.
+type Endpoints []Endpoint
+
+// HTTPS - returns true if secure for URLEndpointType.
+func (endpoints Endpoints) HTTPS() bool {
+return endpoints[0].HTTPS()
 }
 
 // GetString - returns endpoint string of i-th endpoint (0-based),
 // and empty string for invalid indexes.
-func (endpoints EndpointList) GetString(i int) string {
+func (endpoints Endpoints) GetString(i int) string {
 if i < 0 || i >= len(endpoints) {
 return ""
 }
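
The EndpointList type is replaced by Endpoints plus the zone-aware ZoneEndpoints/EndpointZones pair introduced above. A trimmed-down usage sketch, with the types reduced to what the example needs, showing how Nodes() now simply sums endpoints across zones rather than deduplicating unique hosts as the old EndpointList.Nodes did:

package main

import "fmt"

// Trimmed-down versions of the types introduced above, just enough to
// exercise the zone-level helper.
type Endpoint struct {
	Host string
}

type Endpoints []Endpoint

type ZoneEndpoints struct {
	SetCount     int
	DrivesPerSet int
	Endpoints    Endpoints
}

type EndpointZones []ZoneEndpoints

// Nodes - returns all nodes count. Unlike the old EndpointList.Nodes, which
// deduplicated unique hosts, this sums endpoints across every zone.
func (l EndpointZones) Nodes() (count int) {
	for _, ep := range l {
		count += len(ep.Endpoints)
	}
	return count
}

func main() {
	zones := EndpointZones{
		{SetCount: 1, DrivesPerSet: 4, Endpoints: Endpoints{{"n1:9000"}, {"n2:9000"}, {"n3:9000"}, {"n4:9000"}}},
		{SetCount: 1, DrivesPerSet: 4, Endpoints: Endpoints{{"n5:9000"}, {"n6:9000"}, {"n7:9000"}, {"n8:9000"}}},
	}
	fmt.Println(zones.Nodes()) // 8
}
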
@@ -217,7 +227,7 @@ func (endpoints EndpointList) GetString(i int) string {
 }
 
 // UpdateIsLocal - resolves the host and discovers the local host.
-func (endpoints EndpointList) UpdateIsLocal() error {
+func (endpoints Endpoints) UpdateIsLocal() error {
 var epsResolved int
 var foundLocal bool
 resolvedList := make([]bool, len(endpoints))
@@ -246,7 +256,7 @@ func (endpoints EndpointList) UpdateIsLocal() error {
 // return err if not Docker or Kubernetes
 // We use IsDocker() to check for Docker environment
 // We use IsKubernetes() to check for Kubernetes environment
-isLocal, err := isLocalHost(endpoints[i].HostName)
+isLocal, err := isLocalHost(endpoints[i].Hostname())
 if err != nil {
 if !IsDocker() && !IsKubernetes() {
 return err
@@ -256,8 +266,10 @@ func (endpoints EndpointList) UpdateIsLocal() error {
 // log error only if more than 1s elapsed
 if timeElapsed > time.Second {
 // Log the message to console about the host not being resolveable.
-reqInfo := (&logger.ReqInfo{}).AppendTags("host", endpoints[i].HostName)
-reqInfo.AppendTags("elapsedTime", humanize.RelTime(startTime, startTime.Add(timeElapsed), "elapsed", ""))
+reqInfo := (&logger.ReqInfo{}).AppendTags("host", endpoints[i].Hostname())
+reqInfo.AppendTags("elapsedTime",
+humanize.RelTime(startTime, startTime.Add(timeElapsed),
+"elapsed", ""))
 ctx := logger.SetReqInfo(context.Background(), reqInfo)
 logger.LogIf(ctx, err, logger.Application)
 }
@@ -301,8 +313,8 @@ func (endpoints EndpointList) UpdateIsLocal() error {
 return nil
 }
 
-// NewEndpointList - returns new endpoint list based on input args.
-func NewEndpointList(args ...string) (endpoints EndpointList, err error) {
+// NewEndpoints - returns new endpoint list based on input args.
+func NewEndpoints(args ...string) (endpoints Endpoints, err error) {
 var endpointType EndpointType
 var scheme string
 
@@ -335,14 +347,15 @@ func NewEndpointList(args ...string) (endpoints EndpointList, err error) {
 return endpoints, nil
 }
 
-func checkEndpointsSubOptimal(ctx *cli.Context, setupType SetupType, endpoints EndpointList) (err error) {
+func checkEndpointsSubOptimal(ctx *cli.Context, setupType SetupType, endpointZones EndpointZones) (err error) {
 // Validate sub optimal ordering only for distributed setup.
 if setupType != DistXLSetupType {
 return nil
 }
 var endpointOrder int
 err = fmt.Errorf("Too many disk args are local, input is in sub-optimal order. Please review input args: %s", ctx.Args())
-for _, endpoint := range endpoints {
+for _, endpoints := range endpointZones {
+for _, endpoint := range endpoints.Endpoints {
 if endpoint.IsLocal {
 endpointOrder++
 } else {
@@ -352,11 +365,12 @@ func checkEndpointsSubOptimal(ctx *cli.Context, setupType SetupType, endpoints E
 return err
 }
 }
+}
 return nil
 }
 
 // Checks if there are any cross device mounts.
-func checkCrossDeviceMounts(endpoints EndpointList) (err error) {
+func checkCrossDeviceMounts(endpoints Endpoints) (err error) {
 var absPaths []string
 for _, endpoint := range endpoints {
 if endpoint.IsLocal {
@@ -372,14 +386,14 @@ func checkCrossDeviceMounts(endpoints EndpointList) (err error) {
 }
 
 // CreateEndpoints - validates and creates new endpoints for given args.
-func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList, SetupType, error) {
-var endpoints EndpointList
+func CreateEndpoints(serverAddr string, args ...[]string) (Endpoints, SetupType, error) {
+var endpoints Endpoints
 var setupType SetupType
 var err error
 
 // Check whether serverAddr is valid for this host.
 if err = CheckLocalServerAddr(serverAddr); err != nil {
-return serverAddr, endpoints, setupType, err
+return endpoints, setupType, err
 }
 
 _, serverAddrPort := mustSplitHostPort(serverAddr)
@@ -389,36 +403,36 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
 var endpoint Endpoint
 endpoint, err = NewEndpoint(args[0][0])
 if err != nil {
-return serverAddr, endpoints, setupType, err
+return endpoints, setupType, err
 }
 if err := endpoint.UpdateIsLocal(); err != nil {
-return serverAddr, endpoints, setupType, err
+return endpoints, setupType, err
 }
 if endpoint.Type() != PathEndpointType {
-return serverAddr, endpoints, setupType, config.ErrInvalidFSEndpoint(nil).Msg("use path style endpoint for FS setup")
+return endpoints, setupType, config.ErrInvalidFSEndpoint(nil).Msg("use path style endpoint for FS setup")
 }
 endpoints = append(endpoints, endpoint)
 setupType = FSSetupType
 
 // Check for cross device mounts if any.
 if err = checkCrossDeviceMounts(endpoints); err != nil {
-return serverAddr, endpoints, setupType, config.ErrInvalidFSEndpoint(nil).Msg(err.Error())
+return endpoints, setupType, config.ErrInvalidFSEndpoint(nil).Msg(err.Error())
 }
-return serverAddr, endpoints, setupType, nil
+
+return endpoints, setupType, nil
 }
 
 for i, iargs := range args {
-var newEndpoints EndpointList
 // Convert args to endpoints
-var eps EndpointList
-eps, err = NewEndpointList(iargs...)
+var newEndpoints Endpoints
+eps, err := NewEndpoints(iargs...)
 if err != nil {
-return serverAddr, endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
+return endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
 }
 
 // Check for cross device mounts if any.
 if err = checkCrossDeviceMounts(eps); err != nil {
-return serverAddr, endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
+return endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
 }
 
 for _, ep := range eps {
@@ -431,54 +445,44 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
 // Return XL setup when all endpoints are path style.
 if endpoints[0].Type() == PathEndpointType {
 setupType = XLSetupType
-return serverAddr, endpoints, setupType, nil
+return endpoints, setupType, nil
 }
 
-if err := endpoints.UpdateIsLocal(); err != nil {
-return serverAddr, endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
+if err = endpoints.UpdateIsLocal(); err != nil {
+return endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
 }
 
 // Here all endpoints are URL style.
 endpointPathSet := set.NewStringSet()
 localEndpointCount := 0
-localServerAddrSet := set.NewStringSet()
+localServerHostSet := set.NewStringSet()
 localPortSet := set.NewStringSet()
 
 for _, endpoint := range endpoints {
 endpointPathSet.Add(endpoint.Path)
 if endpoint.IsLocal {
-localServerAddrSet.Add(endpoint.Host)
+localServerHostSet.Add(endpoint.Hostname())
 
 var port string
 _, port, err = net.SplitHostPort(endpoint.Host)
 if err != nil {
 port = serverAddrPort
 }
 
 localPortSet.Add(port)
 
 localEndpointCount++
 }
 }
 
-// No local endpoint found.
-if localEndpointCount == 0 {
-return serverAddr, endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg("no endpoint pointing to the local machine is found")
-}
-
 // Check whether same path is not used in endpoints of a host on different port.
 {
 pathIPMap := make(map[string]set.StringSet)
 for _, endpoint := range endpoints {
-var host string
-host, _, err = net.SplitHostPort(endpoint.Host)
-if err != nil {
-host = endpoint.Host
-}
+host := endpoint.Hostname()
 hostIPSet, _ := getHostIP(host)
 if IPSet, ok := pathIPMap[endpoint.Path]; ok {
 if !IPSet.Intersection(hostIPSet).IsEmpty() {
-return serverAddr, endpoints, setupType,
+return endpoints, setupType,
 config.ErrInvalidErasureEndpoints(nil).Msg(fmt.Sprintf("path '%s' can not be served by different port on same address", endpoint.Path))
 }
 pathIPMap[endpoint.Path] = IPSet.Union(hostIPSet)
@@ -496,42 +500,25 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
 continue
 }
 if localPathSet.Contains(endpoint.Path) {
-return serverAddr, endpoints, setupType,
+return endpoints, setupType,
 config.ErrInvalidErasureEndpoints(nil).Msg(fmt.Sprintf("path '%s' cannot be served by different address on same server", endpoint.Path))
 }
 localPathSet.Add(endpoint.Path)
 }
 }
 
-// Check whether serverAddrPort matches at least in one of port used in local endpoints.
-{
-if !localPortSet.Contains(serverAddrPort) {
-if len(localPortSet) > 1 {
-return serverAddr, endpoints, setupType,
-config.ErrInvalidErasureEndpoints(nil).Msg("port number in server address must match with one of the port in local endpoints")
-}
-return serverAddr, endpoints, setupType,
-config.ErrInvalidErasureEndpoints(nil).Msg("server address and local endpoint have different ports")
-}
-}
-
 // All endpoints are pointing to local host
 if len(endpoints) == localEndpointCount {
 // If all endpoints have same port number, then this is XL setup using URL style endpoints.
 if len(localPortSet) == 1 {
-if len(localServerAddrSet) > 1 {
-// TODO: Even though all endpoints are local, the local host is referred by different IP/name.
-// eg '172.0.0.1', 'localhost' and 'mylocalhostname' point to same local host.
-//
-// In this case, we bind to 0.0.0.0 ie to all interfaces.
-// The actual way to do is bind to only IPs in uniqueLocalHosts.
-serverAddr = net.JoinHostPort("", serverAddrPort)
+if len(localServerHostSet) > 1 {
+return endpoints, setupType,
+config.ErrInvalidErasureEndpoints(nil).Msg("all local endpoints should not have different hostnames/ips")
 }
 
 endpointPaths := endpointPathSet.ToSlice()
-endpoints, _ = NewEndpointList(endpointPaths...)
+endpoints, _ := NewEndpoints(endpointPaths...)
 setupType = XLSetupType
-return serverAddr, endpoints, setupType, nil
+return endpoints, setupType, nil
 }
 
 // Even though all endpoints are local, but those endpoints use different ports.
@@ -539,24 +526,20 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
 } else {
 // This is DistXL setup.
 // Check whether local server address are not 127.x.x.x
-for _, localServerAddr := range localServerAddrSet.ToSlice() {
-host, _, err := net.SplitHostPort(localServerAddr)
-if err != nil {
-host = localServerAddr
-}
-
-ipList, err := getHostIP(host)
-logger.FatalIf(err, "unexpected error when resolving host '%s'", host)
-
+for _, localHost := range localServerHostSet.ToSlice() {
+ipList, err := getHostIP(localHost)
+logger.FatalIf(err, "unexpected error when resolving host '%s'", localHost)
 // Filter ipList by IPs those start with '127.' or '::1'
 loopBackIPs := ipList.FuncMatch(func(ip string, matchString string) bool {
-return strings.HasPrefix(ip, "127.") || strings.HasPrefix(ip, "::1")
+return net.ParseIP(ip).IsLoopback()
 }, "")
 
 // If loop back IP is found and ipList contains only loop back IPs, then error out.
 if len(loopBackIPs) > 0 && len(loopBackIPs) == len(ipList) {
-err = fmt.Errorf("'%s' resolves to loopback address is not allowed for distributed XL", localServerAddr)
-return serverAddr, endpoints, setupType, err
+err = fmt.Errorf("'%s' resolves to loopback address is not allowed for distributed XL",
+localHost)
+return endpoints, setupType, err
 }
 }
 }
@@ -580,7 +563,7 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
 // Error out if we have less than 2 unique servers.
 if len(uniqueArgs.ToSlice()) < 2 && setupType == DistXLSetupType {
 err := fmt.Errorf("Unsupported number of endpoints (%s), minimum number of servers cannot be less than 2 in distributed setup", endpoints)
-return serverAddr, endpoints, setupType, err
+return endpoints, setupType, err
 }
 
 publicIPs := env.Get(config.EnvPublicIPs, "")
@ -589,7 +572,7 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
|
|||||||
}
|
}
|
||||||
|
|
||||||
setupType = DistXLSetupType
|
setupType = DistXLSetupType
|
||||||
return serverAddr, endpoints, setupType, nil
|
return endpoints, setupType, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetLocalPeer - returns local peer value, returns globalMinioAddr
|
// GetLocalPeer - returns local peer value, returns globalMinioAddr
|
||||||
@ -597,9 +580,10 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
|
|||||||
// the first element from the set of peers which indicate that
|
// the first element from the set of peers which indicate that
|
||||||
// they are local. There is always one entry that is local
|
// they are local. There is always one entry that is local
|
||||||
// even with repeated server endpoints.
|
// even with repeated server endpoints.
|
||||||
func GetLocalPeer(endpoints EndpointList) (localPeer string) {
|
func GetLocalPeer(endpointZones EndpointZones) (localPeer string) {
|
||||||
peerSet := set.NewStringSet()
|
peerSet := set.NewStringSet()
|
||||||
for _, endpoint := range endpoints {
|
for _, ep := range endpointZones {
|
||||||
|
for _, endpoint := range ep.Endpoints {
|
||||||
if endpoint.Type() != URLEndpointType {
|
if endpoint.Type() != URLEndpointType {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -607,6 +591,7 @@ func GetLocalPeer(endpoints EndpointList) (localPeer string) {
|
|||||||
peerSet.Add(endpoint.Host)
|
peerSet.Add(endpoint.Host)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
if peerSet.IsEmpty() {
|
if peerSet.IsEmpty() {
|
||||||
// Local peer can be empty in FS or Erasure coded mode.
|
// Local peer can be empty in FS or Erasure coded mode.
|
||||||
// If so, return globalMinioHost + globalMinioPort value.
|
// If so, return globalMinioHost + globalMinioPort value.
|
||||||
@ -620,9 +605,10 @@ func GetLocalPeer(endpoints EndpointList) (localPeer string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetRemotePeers - get hosts information other than this minio service.
|
// GetRemotePeers - get hosts information other than this minio service.
|
||||||
func GetRemotePeers(endpoints EndpointList) []string {
|
func GetRemotePeers(endpointZones EndpointZones) []string {
|
||||||
peerSet := set.NewStringSet()
|
peerSet := set.NewStringSet()
|
||||||
for _, endpoint := range endpoints {
|
for _, ep := range endpointZones {
|
||||||
|
for _, endpoint := range ep.Endpoints {
|
||||||
if endpoint.Type() != URLEndpointType {
|
if endpoint.Type() != URLEndpointType {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -636,7 +622,7 @@ func GetRemotePeers(endpoints EndpointList) []string {
|
|||||||
|
|
||||||
peerSet.Add(peer)
|
peerSet.Add(peer)
|
||||||
}
|
}
|
||||||
|
}
|
||||||
return peerSet.ToSlice()
|
return peerSet.ToSlice()
|
||||||
}
|
}
|
||||||
|
|
||||||
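Reviewer note: GetLocalPeer and GetRemotePeers now accept EndpointZones and walk two levels (zone, then endpoint) instead of a single flat list, while the per-endpoint filtering stays unchanged. A hedged sketch of that traversal; the type shapes below are simplified stand-ins inferred from usage in this diff, not copied from it:

    // Simplified stand-ins for the real types, for illustration only.
    type Endpoint struct {
        Host    string
        IsLocal bool
    }
    type ZoneEndpoints struct{ Endpoints []Endpoint }
    type EndpointZones []ZoneEndpoints

    // collectHosts shows the nested loop the peer helpers now use: every zone is
    // visited, hosts are de-duplicated, and local/remote filtering works as before.
    func collectHosts(zones EndpointZones, wantLocal bool) []string {
        seen := map[string]bool{}
        var hosts []string
        for _, zone := range zones {
            for _, endpoint := range zone.Endpoints {
                if endpoint.IsLocal != wantLocal || seen[endpoint.Host] {
                    continue
                }
                seen[endpoint.Host] = true
                hosts = append(hosts, endpoint.Host)
            }
        }
        return hosts
    }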
@ -664,6 +650,10 @@ func updateDomainIPs(endPoints set.StringSet) {
|
|||||||
ipList = ipList.Union(IPsWithPort)
|
ipList = ipList.Union(IPsWithPort)
|
||||||
}
|
}
|
||||||
globalDomainIPs = ipList.FuncMatch(func(ip string, matchString string) bool {
|
globalDomainIPs = ipList.FuncMatch(func(ip string, matchString string) bool {
|
||||||
return !(strings.HasPrefix(ip, "127.") || strings.HasPrefix(ip, "::1") || strings.HasPrefix(ip, "[::1]"))
|
host, _, err := net.SplitHostPort(ip)
|
||||||
|
if err != nil {
|
||||||
|
host = ip
|
||||||
|
}
|
||||||
|
return !net.ParseIP(host).IsLoopback()
|
||||||
}, "")
|
}, "")
|
||||||
}
|
}
|
||||||
|
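Reviewer note: entries reaching updateDomainIPs can carry a port ("10.0.0.1:9000"), and net.ParseIP returns nil for a host:port string, so the new filter splits the host out before calling IsLoopback. A small standalone sketch of that filter, illustrative only:

    import "net"

    // keepNonLoopback drops loopback entries from a list of "ip" or "ip:port" strings.
    func keepNonLoopback(entries []string) []string {
        var out []string
        for _, entry := range entries {
            host, _, err := net.SplitHostPort(entry)
            if err != nil {
                // No port present; treat the whole entry as the host.
                host = entry
            }
            if ip := net.ParseIP(host); ip != nil && ip.IsLoopback() {
                continue
            }
            out = append(out, entry)
        }
        return out
    }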
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
|
* MinIO Cloud Storage, (C) 2017,2018,2019 MinIO, Inc.
|
||||||
*
|
*
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
* you may not use this file except in compliance with the License.
|
* you may not use this file except in compliance with the License.
|
||||||
@ -19,6 +19,7 @@ package cmd
|
|||||||
import (
|
import (
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net"
|
||||||
"net/url"
|
"net/url"
|
||||||
"reflect"
|
"reflect"
|
||||||
"strings"
|
"strings"
|
||||||
@ -35,19 +36,19 @@ func TestSubOptimalEndpointInput(t *testing.T) {
|
|||||||
tests := []struct {
|
tests := []struct {
|
||||||
setupType SetupType
|
setupType SetupType
|
||||||
ctx *cli.Context
|
ctx *cli.Context
|
||||||
endpoints EndpointList
|
endpoints EndpointZones
|
||||||
isErr bool
|
isErr bool
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
setupType: DistXLSetupType,
|
setupType: DistXLSetupType,
|
||||||
ctx: cli.NewContext(cli.NewApp(), flag.NewFlagSet("", flag.ContinueOnError), nil),
|
ctx: cli.NewContext(cli.NewApp(), flag.NewFlagSet("", flag.ContinueOnError), nil),
|
||||||
endpoints: mustGetNewEndpointList(args1...),
|
endpoints: mustGetZoneEndpoints(args1...),
|
||||||
isErr: false,
|
isErr: false,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
setupType: DistXLSetupType,
|
setupType: DistXLSetupType,
|
||||||
ctx: cli.NewContext(cli.NewApp(), flag.NewFlagSet("", flag.ContinueOnError), nil),
|
ctx: cli.NewContext(cli.NewApp(), flag.NewFlagSet("", flag.ContinueOnError), nil),
|
||||||
endpoints: mustGetNewEndpointList(args2...),
|
endpoints: mustGetZoneEndpoints(args2...),
|
||||||
isErr: false,
|
isErr: false,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -90,11 +91,11 @@ func TestNewEndpoint(t *testing.T) {
|
|||||||
{"http:path", Endpoint{URL: &url.URL{Path: "http:path"}, IsLocal: true}, PathEndpointType, nil},
|
{"http:path", Endpoint{URL: &url.URL{Path: "http:path"}, IsLocal: true}, PathEndpointType, nil},
|
||||||
{"http:/path", Endpoint{URL: &url.URL{Path: "http:/path"}, IsLocal: true}, PathEndpointType, nil},
|
{"http:/path", Endpoint{URL: &url.URL{Path: "http:/path"}, IsLocal: true}, PathEndpointType, nil},
|
||||||
{"http:///path", Endpoint{URL: &url.URL{Path: "http:/path"}, IsLocal: true}, PathEndpointType, nil},
|
{"http:///path", Endpoint{URL: &url.URL{Path: "http:/path"}, IsLocal: true}, PathEndpointType, nil},
|
||||||
{"http://localhost/path", Endpoint{URL: u1, IsLocal: true, HostName: "localhost"}, URLEndpointType, nil},
|
{"http://localhost/path", Endpoint{URL: u1, IsLocal: true}, URLEndpointType, nil},
|
||||||
{"http://localhost/path//", Endpoint{URL: u1, IsLocal: true, HostName: "localhost"}, URLEndpointType, nil},
|
{"http://localhost/path//", Endpoint{URL: u1, IsLocal: true}, URLEndpointType, nil},
|
||||||
{"https://example.org/path", Endpoint{URL: u2, IsLocal: false, HostName: "example.org"}, URLEndpointType, nil},
|
{"https://example.org/path", Endpoint{URL: u2, IsLocal: false}, URLEndpointType, nil},
|
||||||
{"http://127.0.0.1:8080/path", Endpoint{URL: u3, IsLocal: true, HostName: "127.0.0.1"}, URLEndpointType, nil},
|
{"http://127.0.0.1:8080/path", Endpoint{URL: u3, IsLocal: true}, URLEndpointType, nil},
|
||||||
{"http://192.168.253.200/path", Endpoint{URL: u4, IsLocal: false, HostName: "192.168.253.200"}, URLEndpointType, nil},
|
{"http://192.168.253.200/path", Endpoint{URL: u4, IsLocal: false}, URLEndpointType, nil},
|
||||||
{"", Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
|
{"", Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
|
||||||
{SlashSeparator, Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
|
{SlashSeparator, Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
|
||||||
{`\`, Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
|
{`\`, Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
|
||||||
@ -136,7 +137,7 @@ func TestNewEndpoint(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewEndpointList(t *testing.T) {
|
func TestNewEndpoints(t *testing.T) {
|
||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
args []string
|
args []string
|
||||||
expectedErr error
|
expectedErr error
|
||||||
@ -159,7 +160,7 @@ func TestNewEndpointList(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, testCase := range testCases {
|
for _, testCase := range testCases {
|
||||||
_, err := NewEndpointList(testCase.args...)
|
_, err := NewEndpoints(testCase.args...)
|
||||||
if testCase.expectedErr == nil {
|
if testCase.expectedErr == nil {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("error: expected = <nil>, got = %v", err)
|
t.Fatalf("error: expected = <nil>, got = %v", err)
|
||||||
@ -175,7 +176,7 @@ func TestNewEndpointList(t *testing.T) {
|
|||||||
func TestCreateEndpoints(t *testing.T) {
|
func TestCreateEndpoints(t *testing.T) {
|
||||||
// Filter ipList by IPs those do not start with '127.'.
|
// Filter ipList by IPs those do not start with '127.'.
|
||||||
nonLoopBackIPs := localIP4.FuncMatch(func(ip string, matchString string) bool {
|
nonLoopBackIPs := localIP4.FuncMatch(func(ip string, matchString string) bool {
|
||||||
return !strings.HasPrefix(ip, "127.")
|
return !net.ParseIP(ip).IsLoopback()
|
||||||
}, "")
|
}, "")
|
||||||
if len(nonLoopBackIPs) == 0 {
|
if len(nonLoopBackIPs) == 0 {
|
||||||
t.Fatalf("No non-loop back IP address found for this host")
|
t.Fatalf("No non-loop back IP address found for this host")
|
||||||
@ -257,120 +258,111 @@ func TestCreateEndpoints(t *testing.T) {
|
|||||||
serverAddr string
|
serverAddr string
|
||||||
args [][]string
|
args [][]string
|
||||||
expectedServerAddr string
|
expectedServerAddr string
|
||||||
expectedEndpoints EndpointList
|
expectedEndpoints Endpoints
|
||||||
expectedSetupType SetupType
|
expectedSetupType SetupType
|
||||||
expectedErr error
|
expectedErr error
|
||||||
}{
|
}{
|
||||||
{"localhost", [][]string{}, "", EndpointList{}, -1, fmt.Errorf("address localhost: missing port in address")},
|
{"localhost", [][]string{}, "", Endpoints{}, -1, fmt.Errorf("address localhost: missing port in address")},
|
||||||
|
|
||||||
// FS Setup
|
// FS Setup
|
||||||
{"localhost:9000", [][]string{{"http://localhost/d1"}}, "", EndpointList{}, -1, fmt.Errorf("use path style endpoint for FS setup")},
|
{"localhost:9000", [][]string{{"http://localhost/d1"}}, "", Endpoints{}, -1, fmt.Errorf("use path style endpoint for FS setup")},
|
||||||
{":443", [][]string{{"d1"}}, ":443", EndpointList{Endpoint{URL: &url.URL{Path: "d1"}, IsLocal: true}}, FSSetupType, nil},
|
{":443", [][]string{{"d1"}}, ":443", Endpoints{Endpoint{URL: &url.URL{Path: "d1"}, IsLocal: true}}, FSSetupType, nil},
|
||||||
{"localhost:10000", [][]string{{"/d1"}}, "localhost:10000", EndpointList{Endpoint{URL: &url.URL{Path: "/d1"}, IsLocal: true}}, FSSetupType, nil},
|
{"localhost:10000", [][]string{{"/d1"}}, "localhost:10000", Endpoints{Endpoint{URL: &url.URL{Path: "/d1"}, IsLocal: true}}, FSSetupType, nil},
|
||||||
{"localhost:10000", [][]string{{"./d1"}}, "localhost:10000", EndpointList{Endpoint{URL: &url.URL{Path: "d1"}, IsLocal: true}}, FSSetupType, nil},
|
{"localhost:10000", [][]string{{"./d1"}}, "localhost:10000", Endpoints{Endpoint{URL: &url.URL{Path: "d1"}, IsLocal: true}}, FSSetupType, nil},
|
||||||
{"localhost:10000", [][]string{{`\d1`}}, "localhost:10000", EndpointList{Endpoint{URL: &url.URL{Path: `\d1`}, IsLocal: true}}, FSSetupType, nil},
|
{"localhost:10000", [][]string{{`\d1`}}, "localhost:10000", Endpoints{Endpoint{URL: &url.URL{Path: `\d1`}, IsLocal: true}}, FSSetupType, nil},
|
||||||
{"localhost:10000", [][]string{{`.\d1`}}, "localhost:10000", EndpointList{Endpoint{URL: &url.URL{Path: `.\d1`}, IsLocal: true}}, FSSetupType, nil},
|
{"localhost:10000", [][]string{{`.\d1`}}, "localhost:10000", Endpoints{Endpoint{URL: &url.URL{Path: `.\d1`}, IsLocal: true}}, FSSetupType, nil},
|
||||||
{":8080", [][]string{{"https://example.org/d1", "https://example.org/d2", "https://example.org/d3", "https://example.org/d4"}}, "", EndpointList{}, -1, fmt.Errorf("no endpoint pointing to the local machine is found")},
|
{"localhost:9000", [][]string{{"https://127.0.0.1:9000/d1", "https://localhost:9001/d1", "https://example.com/d1", "https://example.com/d2"}}, "", Endpoints{}, -1, fmt.Errorf("path '/d1' can not be served by different port on same address")},
|
||||||
{":8080", [][]string{{"https://example.org/d1", "https://example.com/d2", "https://example.net:8000/d3", "https://example.edu/d1"}}, "", EndpointList{}, -1, fmt.Errorf("no endpoint pointing to the local machine is found")},
|
|
||||||
{"localhost:9000", [][]string{{"https://127.0.0.1:9000/d1", "https://localhost:9001/d1", "https://example.com/d1", "https://example.com/d2"}}, "", EndpointList{}, -1, fmt.Errorf("path '/d1' can not be served by different port on same address")},
|
|
||||||
{"localhost:9000", [][]string{{"https://127.0.0.1:8000/d1", "https://localhost:9001/d2", "https://example.com/d1", "https://example.com/d2"}}, "", EndpointList{}, -1, fmt.Errorf("port number in server address must match with one of the port in local endpoints")},
|
|
||||||
{"localhost:10000", [][]string{{"https://127.0.0.1:8000/d1", "https://localhost:8000/d2", "https://example.com/d1", "https://example.com/d2"}}, "", EndpointList{}, -1, fmt.Errorf("server address and local endpoint have different ports")},
|
|
||||||
|
|
||||||
// XL Setup with PathEndpointType
|
// XL Setup with PathEndpointType
|
||||||
{":1234", [][]string{{"/d1", "/d2", "d3", "d4"}}, ":1234",
|
{":1234", [][]string{{"/d1", "/d2", "d3", "d4"}}, ":1234",
|
||||||
EndpointList{
|
Endpoints{
|
||||||
Endpoint{URL: &url.URL{Path: "/d1"}, IsLocal: true},
|
Endpoint{URL: &url.URL{Path: "/d1"}, IsLocal: true},
|
||||||
Endpoint{URL: &url.URL{Path: "/d2"}, IsLocal: true},
|
Endpoint{URL: &url.URL{Path: "/d2"}, IsLocal: true},
|
||||||
Endpoint{URL: &url.URL{Path: "d3"}, IsLocal: true},
|
Endpoint{URL: &url.URL{Path: "d3"}, IsLocal: true},
|
||||||
Endpoint{URL: &url.URL{Path: "d4"}, IsLocal: true},
|
Endpoint{URL: &url.URL{Path: "d4"}, IsLocal: true},
|
||||||
}, XLSetupType, nil},
|
}, XLSetupType, nil},
|
||||||
// XL Setup with URLEndpointType
|
// XL Setup with URLEndpointType
|
||||||
{":9000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://localhost/d3", "http://localhost/d4"}}, ":9000", EndpointList{
|
{":9000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://localhost/d3", "http://localhost/d4"}}, ":9000", Endpoints{
|
||||||
Endpoint{URL: &url.URL{Path: "/d1"}, IsLocal: true},
|
Endpoint{URL: &url.URL{Path: "/d1"}, IsLocal: true},
|
||||||
Endpoint{URL: &url.URL{Path: "/d2"}, IsLocal: true},
|
Endpoint{URL: &url.URL{Path: "/d2"}, IsLocal: true},
|
||||||
Endpoint{URL: &url.URL{Path: "/d3"}, IsLocal: true},
|
Endpoint{URL: &url.URL{Path: "/d3"}, IsLocal: true},
|
||||||
Endpoint{URL: &url.URL{Path: "/d4"}, IsLocal: true},
|
Endpoint{URL: &url.URL{Path: "/d4"}, IsLocal: true},
|
||||||
}, XLSetupType, nil},
|
}, XLSetupType, nil},
|
||||||
// XL Setup with URLEndpointType having mixed naming to local host.
|
// XL Setup with URLEndpointType having mixed naming to local host.
|
||||||
{"127.0.0.1:10000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://127.0.0.1/d3", "http://127.0.0.1/d4"}}, ":10000", EndpointList{
|
{"127.0.0.1:10000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://127.0.0.1/d3", "http://127.0.0.1/d4"}}, ":10000", Endpoints{
|
||||||
Endpoint{URL: &url.URL{Path: "/d1"}, IsLocal: true},
|
Endpoint{URL: &url.URL{Path: "/d1"}, IsLocal: true},
|
||||||
Endpoint{URL: &url.URL{Path: "/d2"}, IsLocal: true},
|
Endpoint{URL: &url.URL{Path: "/d2"}, IsLocal: true},
|
||||||
Endpoint{URL: &url.URL{Path: "/d3"}, IsLocal: true},
|
Endpoint{URL: &url.URL{Path: "/d3"}, IsLocal: true},
|
||||||
Endpoint{URL: &url.URL{Path: "/d4"}, IsLocal: true},
|
Endpoint{URL: &url.URL{Path: "/d4"}, IsLocal: true},
|
||||||
}, XLSetupType, nil},
|
}, XLSetupType, fmt.Errorf("all local endpoints should not have different hostname/ips")},
|
||||||
{":9001", [][]string{{"http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export", "http://" + nonLoopBackIP + ":9001/export", "http://10.0.0.2:9001/export"}}, "", EndpointList{}, -1, fmt.Errorf("path '/export' can not be served by different port on same address")},
|
{":9001", [][]string{{"http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export", "http://" + nonLoopBackIP + ":9001/export", "http://10.0.0.2:9001/export"}}, "", Endpoints{}, -1, fmt.Errorf("path '/export' can not be served by different port on same address")},
|
||||||
|
|
||||||
{":9000", [][]string{{"http://127.0.0.1:9000/export", "http://" + nonLoopBackIP + ":9000/export", "http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export"}}, "", EndpointList{}, -1, fmt.Errorf("path '/export' cannot be served by different address on same server")},
|
{":9000", [][]string{{"http://127.0.0.1:9000/export", "http://" + nonLoopBackIP + ":9000/export", "http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export"}}, "", Endpoints{}, -1, fmt.Errorf("path '/export' cannot be served by different address on same server")},
|
||||||
|
|
||||||
{":9000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://example.org/d3", "http://example.com/d4"}}, "", EndpointList{}, -1, fmt.Errorf("'localhost' resolves to loopback address is not allowed for distributed XL")},
|
{":9000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://example.org/d3", "http://example.com/d4"}}, "", Endpoints{}, -1, fmt.Errorf("'localhost' resolves to loopback address is not allowed for distributed XL")},
|
||||||
|
|
||||||
// DistXL type
|
// DistXL type
|
||||||
{"127.0.0.1:10000", [][]string{{case1Endpoint1, case1Endpoint2, "http://example.org/d3", "http://example.com/d4"}}, "127.0.0.1:10000", EndpointList{
|
{"127.0.0.1:10000", [][]string{{case1Endpoint1, case1Endpoint2, "http://example.org/d3", "http://example.com/d4"}}, "127.0.0.1:10000", Endpoints{
|
||||||
Endpoint{URL: case1URLs[0], IsLocal: case1LocalFlags[0], HostName: nonLoopBackIP},
|
Endpoint{URL: case1URLs[0], IsLocal: case1LocalFlags[0]},
|
||||||
Endpoint{URL: case1URLs[1], IsLocal: case1LocalFlags[1], HostName: nonLoopBackIP},
|
Endpoint{URL: case1URLs[1], IsLocal: case1LocalFlags[1]},
|
||||||
Endpoint{URL: case1URLs[2], IsLocal: case1LocalFlags[2], HostName: "example.org"},
|
Endpoint{URL: case1URLs[2], IsLocal: case1LocalFlags[2]},
|
||||||
Endpoint{URL: case1URLs[3], IsLocal: case1LocalFlags[3], HostName: "example.com"},
|
Endpoint{URL: case1URLs[3], IsLocal: case1LocalFlags[3]},
|
||||||
}, DistXLSetupType, nil},
|
}, DistXLSetupType, nil},
|
||||||
|
|
||||||
{"127.0.0.1:10000", [][]string{{case2Endpoint1, case2Endpoint2, "http://example.org/d3", "http://example.com/d4"}}, "127.0.0.1:10000", EndpointList{
|
{"127.0.0.1:10000", [][]string{{case2Endpoint1, case2Endpoint2, "http://example.org/d3", "http://example.com/d4"}}, "127.0.0.1:10000", Endpoints{
|
||||||
Endpoint{URL: case2URLs[0], IsLocal: case2LocalFlags[0], HostName: nonLoopBackIP},
|
Endpoint{URL: case2URLs[0], IsLocal: case2LocalFlags[0]},
|
||||||
Endpoint{URL: case2URLs[1], IsLocal: case2LocalFlags[1], HostName: nonLoopBackIP},
|
Endpoint{URL: case2URLs[1], IsLocal: case2LocalFlags[1]},
|
||||||
Endpoint{URL: case2URLs[2], IsLocal: case2LocalFlags[2], HostName: "example.org"},
|
Endpoint{URL: case2URLs[2], IsLocal: case2LocalFlags[2]},
|
||||||
Endpoint{URL: case2URLs[3], IsLocal: case2LocalFlags[3], HostName: "example.com"},
|
Endpoint{URL: case2URLs[3], IsLocal: case2LocalFlags[3]},
|
||||||
}, DistXLSetupType, nil},
|
}, DistXLSetupType, nil},
|
||||||
|
|
||||||
{":80", [][]string{{case3Endpoint1, "http://example.org:9000/d2", "http://example.com/d3", "http://example.net/d4"}}, ":80", EndpointList{
|
{":80", [][]string{{case3Endpoint1, "http://example.org:9000/d2", "http://example.com/d3", "http://example.net/d4"}}, ":80", Endpoints{
|
||||||
Endpoint{URL: case3URLs[0], IsLocal: case3LocalFlags[0], HostName: nonLoopBackIP},
|
Endpoint{URL: case3URLs[0], IsLocal: case3LocalFlags[0]},
|
||||||
Endpoint{URL: case3URLs[1], IsLocal: case3LocalFlags[1], HostName: "example.org"},
|
Endpoint{URL: case3URLs[1], IsLocal: case3LocalFlags[1]},
|
||||||
Endpoint{URL: case3URLs[2], IsLocal: case3LocalFlags[2], HostName: "example.com"},
|
Endpoint{URL: case3URLs[2], IsLocal: case3LocalFlags[2]},
|
||||||
Endpoint{URL: case3URLs[3], IsLocal: case3LocalFlags[3], HostName: "example.net"},
|
Endpoint{URL: case3URLs[3], IsLocal: case3LocalFlags[3]},
|
||||||
}, DistXLSetupType, nil},
|
}, DistXLSetupType, nil},
|
||||||
|
|
||||||
{":9000", [][]string{{case4Endpoint1, "http://example.org/d2", "http://example.com/d3", "http://example.net/d4"}}, ":9000", EndpointList{
|
{":9000", [][]string{{case4Endpoint1, "http://example.org/d2", "http://example.com/d3", "http://example.net/d4"}}, ":9000", Endpoints{
|
||||||
Endpoint{URL: case4URLs[0], IsLocal: case4LocalFlags[0], HostName: nonLoopBackIP},
|
Endpoint{URL: case4URLs[0], IsLocal: case4LocalFlags[0]},
|
||||||
Endpoint{URL: case4URLs[1], IsLocal: case4LocalFlags[1], HostName: "example.org"},
|
Endpoint{URL: case4URLs[1], IsLocal: case4LocalFlags[1]},
|
||||||
Endpoint{URL: case4URLs[2], IsLocal: case4LocalFlags[2], HostName: "example.com"},
|
Endpoint{URL: case4URLs[2], IsLocal: case4LocalFlags[2]},
|
||||||
Endpoint{URL: case4URLs[3], IsLocal: case4LocalFlags[3], HostName: "example.net"},
|
Endpoint{URL: case4URLs[3], IsLocal: case4LocalFlags[3]},
|
||||||
}, DistXLSetupType, nil},
|
}, DistXLSetupType, nil},
|
||||||
|
|
||||||
{":9000", [][]string{{case5Endpoint1, case5Endpoint2, case5Endpoint3, case5Endpoint4}}, ":9000", EndpointList{
|
{":9000", [][]string{{case5Endpoint1, case5Endpoint2, case5Endpoint3, case5Endpoint4}}, ":9000", Endpoints{
|
||||||
Endpoint{URL: case5URLs[0], IsLocal: case5LocalFlags[0], HostName: nonLoopBackIP},
|
Endpoint{URL: case5URLs[0], IsLocal: case5LocalFlags[0]},
|
||||||
Endpoint{URL: case5URLs[1], IsLocal: case5LocalFlags[1], HostName: nonLoopBackIP},
|
Endpoint{URL: case5URLs[1], IsLocal: case5LocalFlags[1]},
|
||||||
Endpoint{URL: case5URLs[2], IsLocal: case5LocalFlags[2], HostName: nonLoopBackIP},
|
Endpoint{URL: case5URLs[2], IsLocal: case5LocalFlags[2]},
|
||||||
Endpoint{URL: case5URLs[3], IsLocal: case5LocalFlags[3], HostName: nonLoopBackIP},
|
Endpoint{URL: case5URLs[3], IsLocal: case5LocalFlags[3]},
|
||||||
}, DistXLSetupType, nil},
|
}, DistXLSetupType, nil},
|
||||||
|
|
||||||
// DistXL Setup using only local host.
|
// DistXL Setup using only local host.
|
||||||
{":9003", [][]string{{"http://localhost:9000/d1", "http://localhost:9001/d2", "http://127.0.0.1:9002/d3", case6Endpoint}}, ":9003", EndpointList{
|
{":9003", [][]string{{"http://localhost:9000/d1", "http://localhost:9001/d2", "http://127.0.0.1:9002/d3", case6Endpoint}}, ":9003", Endpoints{
|
||||||
Endpoint{URL: case6URLs[0], IsLocal: case6LocalFlags[0], HostName: "localhost"},
|
Endpoint{URL: case6URLs[0], IsLocal: case6LocalFlags[0]},
|
||||||
Endpoint{URL: case6URLs[1], IsLocal: case6LocalFlags[1], HostName: "localhost"},
|
Endpoint{URL: case6URLs[1], IsLocal: case6LocalFlags[1]},
|
||||||
Endpoint{URL: case6URLs[2], IsLocal: case6LocalFlags[2], HostName: "127.0.0.1"},
|
Endpoint{URL: case6URLs[2], IsLocal: case6LocalFlags[2]},
|
||||||
Endpoint{URL: case6URLs[3], IsLocal: case6LocalFlags[3], HostName: nonLoopBackIP},
|
Endpoint{URL: case6URLs[3], IsLocal: case6LocalFlags[3]},
|
||||||
}, DistXLSetupType, nil},
|
}, DistXLSetupType, nil},
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, testCase := range testCases {
|
for _, testCase := range testCases {
|
||||||
testCase := testCase
|
testCase := testCase
|
||||||
t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) {
|
t.Run("", func(t *testing.T) {
|
||||||
serverAddr, endpoints, setupType, err := CreateEndpoints(testCase.serverAddr, testCase.args...)
|
endpoints, setupType, err := CreateEndpoints(testCase.serverAddr, testCase.args...)
|
||||||
if err == nil {
|
if err == nil && testCase.expectedErr != nil {
|
||||||
if testCase.expectedErr != nil {
|
t.Errorf("error: expected = %v, got = <nil>", testCase.expectedErr)
|
||||||
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
|
|
||||||
} else {
|
|
||||||
if serverAddr != testCase.expectedServerAddr {
|
|
||||||
t.Fatalf("serverAddr: expected = %v, got = %v", testCase.expectedServerAddr, serverAddr)
|
|
||||||
}
|
}
|
||||||
|
if err == nil {
|
||||||
if !reflect.DeepEqual(endpoints, testCase.expectedEndpoints) {
|
if !reflect.DeepEqual(endpoints, testCase.expectedEndpoints) {
|
||||||
t.Fatalf("endpoints: expected = %v, got = %v", testCase.expectedEndpoints, endpoints)
|
t.Errorf("endpoints: expected = %v, got = %v", testCase.expectedEndpoints, endpoints)
|
||||||
}
|
}
|
||||||
if setupType != testCase.expectedSetupType {
|
if setupType != testCase.expectedSetupType {
|
||||||
t.Fatalf("setupType: expected = %v, got = %v", testCase.expectedSetupType, setupType)
|
t.Errorf("setupType: expected = %v, got = %v", testCase.expectedSetupType, setupType)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else if testCase.expectedErr == nil {
|
if err != nil && testCase.expectedErr == nil {
|
||||||
t.Fatalf("error: expected = <nil>, got = %v", err)
|
t.Errorf("error: expected = <nil>, got = %v, testCase: %v", err, testCase)
|
||||||
} else if err.Error() != testCase.expectedErr.Error() {
|
|
||||||
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
|
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
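Reviewer note: as the reworked test loop shows, CreateEndpoints no longer returns the server address, so the address assertion disappears and failures inside subtests are reported with Errorf rather than aborting the run. Call-site shape, paraphrased from the hunk above:

    // Before this commit the tests destructured four return values:
    //   serverAddr, endpoints, setupType, err := CreateEndpoints(testCase.serverAddr, testCase.args...)
    // After this commit only three remain:
    endpoints, setupType, err := CreateEndpoints(testCase.serverAddr, testCase.args...)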
@ -403,13 +395,13 @@ func TestGetLocalPeer(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for i, testCase := range testCases {
|
for i, testCase := range testCases {
|
||||||
endpoints, _ := NewEndpointList(testCase.endpointArgs...)
|
zendpoints := mustGetZoneEndpoints(testCase.endpointArgs...)
|
||||||
if !endpoints[0].IsLocal {
|
if !zendpoints[0].Endpoints[0].IsLocal {
|
||||||
if err := endpoints.UpdateIsLocal(); err != nil {
|
if err := zendpoints[0].Endpoints.UpdateIsLocal(); err != nil {
|
||||||
t.Fatalf("error: expected = <nil>, got = %v", err)
|
t.Fatalf("error: expected = <nil>, got = %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
remotePeer := GetLocalPeer(endpoints)
|
remotePeer := GetLocalPeer(zendpoints)
|
||||||
if remotePeer != testCase.expectedResult {
|
if remotePeer != testCase.expectedResult {
|
||||||
t.Fatalf("Test %d: expected: %v, got: %v", i+1, testCase.expectedResult, remotePeer)
|
t.Fatalf("Test %d: expected: %v, got: %v", i+1, testCase.expectedResult, remotePeer)
|
||||||
}
|
}
|
||||||
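Reviewer note: the peer tests now build their input through a mustGetZoneEndpoints helper and reach into zendpoints[0].Endpoints for the single-zone case. The helper itself is not shown in this hunk; a plausible single-zone wrapper, offered purely as an assumption about its shape, might look like:

    // Hypothetical sketch only: wraps a flat argument list into a one-zone EndpointZones.
    // The real test helper may differ (for example it may also carry set/drive counts).
    func mustGetZoneEndpoints(args ...string) EndpointZones {
        endpoints := mustGetNewEndpoints(args...) // flat Endpoints, as used elsewhere in this diff
        return EndpointZones{{Endpoints: endpoints}}
    }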
@ -435,13 +427,13 @@ func TestGetRemotePeers(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, testCase := range testCases {
|
for _, testCase := range testCases {
|
||||||
endpoints, _ := NewEndpointList(testCase.endpointArgs...)
|
zendpoints := mustGetZoneEndpoints(testCase.endpointArgs...)
|
||||||
if !endpoints[0].IsLocal {
|
if !zendpoints[0].Endpoints[0].IsLocal {
|
||||||
if err := endpoints.UpdateIsLocal(); err != nil {
|
if err := zendpoints[0].Endpoints.UpdateIsLocal(); err != nil {
|
||||||
t.Fatalf("error: expected = <nil>, got = %v", err)
|
t.Fatalf("error: expected = <nil>, got = %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
remotePeers := GetRemotePeers(endpoints)
|
remotePeers := GetRemotePeers(zendpoints)
|
||||||
if !reflect.DeepEqual(remotePeers, testCase.expectedResult) {
|
if !reflect.DeepEqual(remotePeers, testCase.expectedResult) {
|
||||||
t.Fatalf("expected: %v, got: %v", testCase.expectedResult, remotePeers)
|
t.Fatalf("expected: %v, got: %v", testCase.expectedResult, remotePeers)
|
||||||
}
|
}
|
||||||
|
@ -471,7 +471,7 @@ func formatXLGetDeploymentID(refFormat *formatXLV3, formats []*formatXLV3) (stri
|
|||||||
}
|
}
|
||||||
|
|
||||||
// formatXLFixDeploymentID - Add deployment id if it is not present.
|
// formatXLFixDeploymentID - Add deployment id if it is not present.
|
||||||
func formatXLFixDeploymentID(endpoints EndpointList, storageDisks []StorageAPI, refFormat *formatXLV3) (err error) {
|
func formatXLFixDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, refFormat *formatXLV3) (err error) {
|
||||||
// Attempt to load all `format.json` from all disks.
|
// Attempt to load all `format.json` from all disks.
|
||||||
var sErrs []error
|
var sErrs []error
|
||||||
formats, sErrs := loadFormatXLAll(storageDisks)
|
formats, sErrs := loadFormatXLAll(storageDisks)
|
||||||
@ -515,7 +515,7 @@ func formatXLFixDeploymentID(endpoints EndpointList, storageDisks []StorageAPI,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Update only the valid local disks which have not been updated before.
|
// Update only the valid local disks which have not been updated before.
|
||||||
func formatXLFixLocalDeploymentID(endpoints EndpointList, storageDisks []StorageAPI, refFormat *formatXLV3) error {
|
func formatXLFixLocalDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, refFormat *formatXLV3) error {
|
||||||
// If this server was down when the deploymentID was updated
|
// If this server was down when the deploymentID was updated
|
||||||
// then we make sure that we update the local disks with the deploymentID.
|
// then we make sure that we update the local disks with the deploymentID.
|
||||||
for index, storageDisk := range storageDisks {
|
for index, storageDisk := range storageDisks {
|
||||||
@ -655,7 +655,7 @@ func closeStorageDisks(storageDisks []StorageAPI) {
|
|||||||
|
|
||||||
// Initialize storage disks for each endpoint.
|
// Initialize storage disks for each endpoint.
|
||||||
// Errors are returned for each endpoint with matching index.
|
// Errors are returned for each endpoint with matching index.
|
||||||
func initStorageDisksWithErrors(endpoints EndpointList) ([]StorageAPI, []error) {
|
func initStorageDisksWithErrors(endpoints Endpoints) ([]StorageAPI, []error) {
|
||||||
// Bootstrap disks.
|
// Bootstrap disks.
|
||||||
storageDisks := make([]StorageAPI, len(endpoints))
|
storageDisks := make([]StorageAPI, len(endpoints))
|
||||||
g := errgroup.WithNErrs(len(endpoints))
|
g := errgroup.WithNErrs(len(endpoints))
|
||||||
@ -695,7 +695,7 @@ func formatXLV3ThisEmpty(formats []*formatXLV3) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// fixFormatXLV3 - fix format XL configuration on all disks.
|
// fixFormatXLV3 - fix format XL configuration on all disks.
|
||||||
func fixFormatXLV3(storageDisks []StorageAPI, endpoints EndpointList, formats []*formatXLV3) error {
|
func fixFormatXLV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*formatXLV3) error {
|
||||||
for i, format := range formats {
|
for i, format := range formats {
|
||||||
if format == nil || !endpoints[i].IsLocal {
|
if format == nil || !endpoints[i].IsLocal {
|
||||||
continue
|
continue
|
||||||
|
@ -83,7 +83,7 @@ func TestFixFormatV3(t *testing.T) {
|
|||||||
for _, xlDir := range xlDirs {
|
for _, xlDir := range xlDirs {
|
||||||
defer os.RemoveAll(xlDir)
|
defer os.RemoveAll(xlDir)
|
||||||
}
|
}
|
||||||
endpoints := mustGetNewEndpointList(xlDirs...)
|
endpoints := mustGetNewEndpoints(xlDirs...)
|
||||||
|
|
||||||
storageDisks, errs := initStorageDisksWithErrors(endpoints)
|
storageDisks, errs := initStorageDisksWithErrors(endpoints)
|
||||||
for _, err := range errs {
|
for _, err := range errs {
|
||||||
@ -593,7 +593,7 @@ func benchmarkInitStorageDisksN(b *testing.B, nDisks int) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatal(err)
|
b.Fatal(err)
|
||||||
}
|
}
|
||||||
endpoints := mustGetNewEndpointList(fsDirs...)
|
endpoints := mustGetNewEndpoints(fsDirs...)
|
||||||
b.RunParallel(func(pb *testing.PB) {
|
b.RunParallel(func(pb *testing.PB) {
|
||||||
endpoints := endpoints
|
endpoints := endpoints
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
|
@ -80,16 +80,6 @@ func getLocalBackgroundHealStatus() madmin.BgHealState {
|
|||||||
|
|
||||||
// healErasureSet lists and heals all objects in a specific erasure set
|
// healErasureSet lists and heals all objects in a specific erasure set
|
||||||
func healErasureSet(ctx context.Context, setIndex int, xlObj *xlObjects) error {
|
func healErasureSet(ctx context.Context, setIndex int, xlObj *xlObjects) error {
|
||||||
// Hold a lock for healing the erasure set
|
|
||||||
zeroDuration := time.Millisecond
|
|
||||||
zeroDynamicTimeout := newDynamicTimeout(zeroDuration, zeroDuration)
|
|
||||||
erasureSetHealLock := xlObj.nsMutex.NewNSLock(ctx, xlObj.getLockers(),
|
|
||||||
"system", fmt.Sprintf("erasure-set-heal-%d", setIndex))
|
|
||||||
if err := erasureSetHealLock.GetLock(zeroDynamicTimeout); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer erasureSetHealLock.Unlock()
|
|
||||||
|
|
||||||
buckets, err := xlObj.ListBuckets(ctx)
|
buckets, err := xlObj.ListBuckets(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -123,11 +113,11 @@ func healErasureSet(ctx context.Context, setIndex int, xlObj *xlObjects) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Healing leader will take the charge of healing all erasure sets
|
// Healing leader will take the charge of healing all erasure sets
|
||||||
func execLeaderTasks(sets *xlSets) {
|
func execLeaderTasks(z *xlZones) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
// Hold a lock so only one server performs auto-healing
|
// Hold a lock so only one server performs auto-healing
|
||||||
leaderLock := sets.NewNSLock(ctx, minioMetaBucket, "leader")
|
leaderLock := z.NewNSLock(ctx, minioMetaBucket, "leader")
|
||||||
for {
|
for {
|
||||||
err := leaderLock.GetLock(leaderLockTimeout)
|
err := leaderLock.GetLock(leaderLockTimeout)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
@ -136,19 +126,31 @@ func execLeaderTasks(sets *xlSets) {
|
|||||||
time.Sleep(leaderTick)
|
time.Sleep(leaderTick)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Hold a lock for healing the erasure set
|
||||||
|
zeroDuration := time.Millisecond
|
||||||
|
zeroDynamicTimeout := newDynamicTimeout(zeroDuration, zeroDuration)
|
||||||
|
|
||||||
lastScanTime := time.Now() // So that we don't heal immediately, but after one month.
|
lastScanTime := time.Now() // So that we don't heal immediately, but after one month.
|
||||||
for {
|
for {
|
||||||
if time.Since(lastScanTime) < healInterval {
|
if time.Since(lastScanTime) < healInterval {
|
||||||
time.Sleep(healTick)
|
time.Sleep(healTick)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
for _, zone := range z.zones {
|
||||||
// Heal set by set
|
// Heal set by set
|
||||||
for i, set := range sets.sets {
|
for i, set := range zone.sets {
|
||||||
err := healErasureSet(ctx, i, set)
|
setLock := z.zones[0].NewNSLock(ctx, "system", fmt.Sprintf("erasure-set-heal-%d", i))
|
||||||
if err != nil {
|
if err := setLock.GetLock(zeroDynamicTimeout); err != nil {
|
||||||
logger.LogIf(ctx, err)
|
logger.LogIf(ctx, err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
if err := healErasureSet(ctx, i, set); err != nil {
|
||||||
|
setLock.Unlock()
|
||||||
|
logger.LogIf(ctx, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
setLock.Unlock()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
lastScanTime = time.Now()
|
lastScanTime = time.Now()
|
||||||
}
|
}
|
||||||
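Reviewer note: the erasure-set heal lock moves out of healErasureSet and into the leader loop, which now iterates every zone and takes a short-timeout named lock per set before healing it, so only one server heals a given set at a time. A condensed sketch of that control flow, with names as in the hunk above and error handling trimmed:

    for _, zone := range z.zones {
        for i, set := range zone.sets {
            // One named lock per erasure set, always taken in zone 0's namespace.
            setLock := z.zones[0].NewNSLock(ctx, "system", fmt.Sprintf("erasure-set-heal-%d", i))
            if err := setLock.GetLock(zeroDynamicTimeout); err != nil {
                logger.LogIf(ctx, err)
                continue // another server is presumably healing this set
            }
            err := healErasureSet(ctx, i, set)
            setLock.Unlock()
            logger.LogIf(ctx, err)
        }
    }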
@ -165,12 +167,12 @@ func startGlobalHeal() {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
sets, ok := objAPI.(*xlSets)
|
zones, ok := objAPI.(*xlZones)
|
||||||
if !ok {
|
if !ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
execLeaderTasks(sets)
|
execLeaderTasks(zones)
|
||||||
}
|
}
|
||||||
|
|
||||||
func initGlobalHeal() {
|
func initGlobalHeal() {
|
||||||
|
@ -103,9 +103,6 @@ var globalCLIContext = struct {
|
|||||||
}{}
|
}{}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// Indicates the total number of erasure coded sets configured.
|
|
||||||
globalXLSetCount int
|
|
||||||
|
|
||||||
// Indicates set drive count.
|
// Indicates set drive count.
|
||||||
globalXLSetDriveCount int
|
globalXLSetDriveCount int
|
||||||
|
|
||||||
@ -130,9 +127,6 @@ var (
|
|||||||
// This flag is set to 'us-east-1' by default
|
// This flag is set to 'us-east-1' by default
|
||||||
globalServerRegion = globalMinioDefaultRegion
|
globalServerRegion = globalMinioDefaultRegion
|
||||||
|
|
||||||
// Maximum size of internal objects parts
|
|
||||||
globalPutPartSize = int64(64 * 1024 * 1024)
|
|
||||||
|
|
||||||
// MinIO local server address (in `host:port` format)
|
// MinIO local server address (in `host:port` format)
|
||||||
globalMinioAddr = ""
|
globalMinioAddr = ""
|
||||||
// MinIO default port, can be changed through command line.
|
// MinIO default port, can be changed through command line.
|
||||||
@ -173,7 +167,7 @@ var (
|
|||||||
// registered listeners
|
// registered listeners
|
||||||
globalConsoleSys *HTTPConsoleLoggerSys
|
globalConsoleSys *HTTPConsoleLoggerSys
|
||||||
|
|
||||||
globalEndpoints EndpointList
|
globalEndpoints EndpointZones
|
||||||
|
|
||||||
// Global server's network statistics
|
// Global server's network statistics
|
||||||
globalConnStats = newConnStats()
|
globalConnStats = newConnStats()
|
||||||
|
@ -77,26 +77,26 @@ func LivenessCheckHandler(w http.ResponseWriter, r *http.Request) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// For FS and Erasure backend, check if local disks are up.
|
// For FS and Erasure backend, check if local disks are up.
|
||||||
var totalLocalDisks int
|
|
||||||
var erroredDisks int
|
var erroredDisks int
|
||||||
for _, endpoint := range globalEndpoints {
|
for _, ep := range globalEndpoints {
|
||||||
|
for _, endpoint := range ep.Endpoints {
|
||||||
// Check only if local disks are accessible, we do not have
|
// Check only if local disks are accessible, we do not have
|
||||||
// to reach to rest of the other servers in a distributed setup.
|
// to reach to rest of the other servers in a distributed setup.
|
||||||
if endpoint.IsLocal {
|
if !endpoint.IsLocal {
|
||||||
totalLocalDisks++
|
continue
|
||||||
|
}
|
||||||
// Attempt a stat to backend, any error resulting
|
// Attempt a stat to backend, any error resulting
|
||||||
// from this Stat() operation is considered as backend
|
// from this Stat() operation is considered as backend
|
||||||
// is not available, count them as errors.
|
// is not available, count them as errors.
|
||||||
if _, err := os.Stat(endpoint.Path); err != nil {
|
if _, err := os.Stat(endpoint.Path); err != nil && os.IsNotExist(err) {
|
||||||
logger.LogIf(ctx, err)
|
logger.LogIf(ctx, err)
|
||||||
erroredDisks++
|
erroredDisks++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// If all exported local disks have errored, we simply let kubernetes
|
// Any errored disks, we let orchestrators take us down.
|
||||||
// take us down.
|
if erroredDisks > 0 {
|
||||||
if totalLocalDisks == erroredDisks {
|
|
||||||
writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone)
|
writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
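Reviewer note: the liveness handler now skips remote endpoints explicitly, counts a disk as errored only when its path is missing (os.IsNotExist), and fails the probe as soon as any local disk has errored instead of waiting for all of them. A compact sketch of the new decision logic, using the same names as the hunk above:

    erroredDisks := 0
    for _, zone := range globalEndpoints {
        for _, endpoint := range zone.Endpoints {
            if !endpoint.IsLocal {
                continue // only local disks are probed
            }
            if _, err := os.Stat(endpoint.Path); err != nil && os.IsNotExist(err) {
                erroredDisks++
            }
        }
    }
    if erroredDisks > 0 {
        writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone)
        return
    }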
@ -65,6 +65,10 @@ func (d *errorLocker) Close() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *errorLocker) IsOnline() bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
// localLocker implements Dsync.NetLocker
|
// localLocker implements Dsync.NetLocker
|
||||||
type localLocker struct {
|
type localLocker struct {
|
||||||
mutex sync.Mutex
|
mutex sync.Mutex
|
||||||
@ -193,6 +197,11 @@ func (l *localLocker) Close() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Local locker is always online.
|
||||||
|
func (l *localLocker) IsOnline() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
func newLocker(endpoint Endpoint) *localLocker {
|
func newLocker(endpoint Endpoint) *localLocker {
|
||||||
return &localLocker{
|
return &localLocker{
|
||||||
endpoint: endpoint,
|
endpoint: endpoint,
|
||||||
|
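Reviewer note: lockers now have to report liveness; the local implementations are trivial (errorLocker is never online, localLocker always is), and a remote REST locker would presumably answer from its connection state. A hedged sketch of how a caller might consume the new method, assuming dsync.NetLocker exposes IsOnline() bool as added here:

    // countOnlineLockers returns how many lockers currently report themselves online.
    func countOnlineLockers(lockers []dsync.NetLocker) int {
        online := 0
        for _, l := range lockers {
            if l.IsOnline() {
                online++
            }
        }
        return online
    }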
@ -128,9 +128,10 @@ func (l *lockRESTServer) RUnlockHandler(w http.ResponseWriter, r *http.Request)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// registerLockRESTHandlers - register lock rest router.
|
// registerLockRESTHandlers - register lock rest router.
|
||||||
func registerLockRESTHandlers(router *mux.Router, endpoints EndpointList) {
|
func registerLockRESTHandlers(router *mux.Router, endpointZones EndpointZones) {
|
||||||
queries := restQueries(lockRESTUID, lockRESTSource, lockRESTResource)
|
queries := restQueries(lockRESTUID, lockRESTSource, lockRESTResource)
|
||||||
for _, endpoint := range endpoints {
|
for _, ep := range endpointZones {
|
||||||
|
for _, endpoint := range ep.Endpoints {
|
||||||
if !endpoint.IsLocal {
|
if !endpoint.IsLocal {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -147,6 +148,7 @@ func registerLockRESTHandlers(router *mux.Router, endpoints EndpointList) {
|
|||||||
|
|
||||||
globalLockServers[endpoint] = lockServer.ll
|
globalLockServers[endpoint] = lockServer.ll
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// If none of the routes match add default error handler routes
|
// If none of the routes match add default error handler routes
|
||||||
router.NotFoundHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
|
router.NotFoundHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
|
||||||
|
@ -90,13 +90,15 @@ func (c *minioCollector) Collect(ch chan<- prometheus.Metric) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
storageAPIs := []StorageAPI{}
|
storageAPIs := []StorageAPI{}
|
||||||
for _, endpoint := range globalEndpoints {
|
for _, ep := range globalEndpoints {
|
||||||
|
for _, endpoint := range ep.Endpoints {
|
||||||
if endpoint.IsLocal {
|
if endpoint.IsLocal {
|
||||||
// Construct storageAPIs.
|
// Construct storageAPIs.
|
||||||
sAPI, _ := newStorageAPI(endpoint)
|
sAPI, _ := newStorageAPI(endpoint)
|
||||||
storageAPIs = append(storageAPIs, sAPI)
|
storageAPIs = append(storageAPIs, sAPI)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
disksInfo, onlineDisks, offlineDisks := getDisksInfo(storageAPIs)
|
disksInfo, onlineDisks, offlineDisks := getDisksInfo(storageAPIs)
|
||||||
totalDisks := offlineDisks.Merge(onlineDisks)
|
totalDisks := offlineDisks.Merge(onlineDisks)
|
||||||
|
@ -223,14 +223,12 @@ type localLockInstance struct {
|
|||||||
// NewNSLock - returns a lock instance for a given volume and
|
// NewNSLock - returns a lock instance for a given volume and
|
||||||
// path. The returned lockInstance object encapsulates the nsLockMap,
|
// path. The returned lockInstance object encapsulates the nsLockMap,
|
||||||
// volume, path and operation ID.
|
// volume, path and operation ID.
|
||||||
func (n *nsLockMap) NewNSLock(ctx context.Context, lockers []dsync.NetLocker, volume, path string) RWLocker {
|
func (n *nsLockMap) NewNSLock(ctx context.Context, lockersFn func() []dsync.NetLocker, volume, path string) RWLocker {
|
||||||
opsID := mustGetUUID()
|
opsID := mustGetUUID()
|
||||||
if n.isDistXL {
|
if n.isDistXL {
|
||||||
sync, err := dsync.New(lockers)
|
return &distLockInstance{dsync.NewDRWMutex(ctx, pathJoin(volume, path), &dsync.Dsync{
|
||||||
if err != nil {
|
GetLockersFn: lockersFn,
|
||||||
logger.CriticalIf(ctx, err)
|
}), volume, path, opsID}
|
||||||
}
|
|
||||||
return &distLockInstance{dsync.NewDRWMutex(ctx, pathJoin(volume, path), sync), volume, path, opsID}
|
|
||||||
}
|
}
|
||||||
return &localLockInstance{ctx, n, volume, path, opsID}
|
return &localLockInstance{ctx, n, volume, path, opsID}
|
||||||
}
|
}
|
||||||
|
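Reviewer note: NewNSLock no longer receives a resolved []dsync.NetLocker; it receives a function, and dsync.NewDRWMutex is handed a Dsync value whose GetLockersFn is evaluated at lock time. That lets the locker set change (for example when zones are added) without rebuilding existing lock instances. A minimal sketch of the pattern; xl.getLockers() stands in for whatever produces the current lockers:

    // lockersFn is evaluated on every lock attempt, so it always sees the current topology.
    lockersFn := func() []dsync.NetLocker { return xl.getLockers() }

    mutex := dsync.NewDRWMutex(ctx, pathJoin(volume, path), &dsync.Dsync{
        GetLockersFn: lockersFn, // field introduced alongside this change
    })
    _ = mutex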
@ -120,10 +120,10 @@ func (n byLastOctetValue) Less(i, j int) bool {
|
|||||||
// This case is needed when all ips in the list
|
// This case is needed when all ips in the list
|
||||||
// have same last octets, Following just ensures that
|
// have same last octets, Following just ensures that
|
||||||
// 127.0.0.1 is moved to the end of the list.
|
// 127.0.0.1 is moved to the end of the list.
|
||||||
if n[i].String() == "127.0.0.1" {
|
if n[i].IsLoopback() {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if n[j].String() == "127.0.0.1" {
|
if n[j].IsLoopback() {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
return []byte(n[i].To4())[3] > []byte(n[j].To4())[3]
|
return []byte(n[i].To4())[3] > []byte(n[j].To4())[3]
|
||||||
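Reviewer note: byLastOctetValue now calls IsLoopback() instead of comparing against the literal "127.0.0.1", so ::1 and other 127.x.x.x addresses also sink to the end of the printed endpoint list. A self-contained sketch of that ordering (IPv4 addresses assumed for the last-octet comparison, as in the original):

    package main

    import (
        "fmt"
        "net"
        "sort"
    )

    // loopbackLast sorts IPs by descending last octet and pushes loopback addresses
    // to the end, mirroring the comparison in the hunk above.
    func loopbackLast(ips []net.IP) {
        sort.Slice(ips, func(i, j int) bool {
            if ips[i].IsLoopback() {
                return false
            }
            if ips[j].IsLoopback() {
                return true
            }
            return ips[i].To4()[3] > ips[j].To4()[3]
        })
    }

    func main() {
        ips := []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("10.0.0.5"), net.ParseIP("192.168.1.20")}
        loopbackLast(ips)
        fmt.Println(ips) // [192.168.1.20 10.0.0.5 127.0.0.1]
    }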
@ -171,7 +171,8 @@ func getAPIEndpoints() (apiEndpoints []string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, ip := range ipList {
|
for _, ip := range ipList {
|
||||||
apiEndpoints = append(apiEndpoints, fmt.Sprintf("%s://%s", getURLScheme(globalIsSSL), net.JoinHostPort(ip, globalMinioPort)))
|
endpoint := fmt.Sprintf("%s://%s", getURLScheme(globalIsSSL), net.JoinHostPort(ip, globalMinioPort))
|
||||||
|
apiEndpoints = append(apiEndpoints, endpoint)
|
||||||
}
|
}
|
||||||
|
|
||||||
return apiEndpoints
|
return apiEndpoints
|
||||||
|
@ -697,17 +697,6 @@ func (sys *NotificationSys) initListeners(ctx context.Context, objAPI ObjectLaye
|
|||||||
|
|
||||||
// Construct path to listener.json for the given bucket.
|
// Construct path to listener.json for the given bucket.
|
||||||
configFile := path.Join(bucketConfigPrefix, bucketName, bucketListenerConfig)
|
configFile := path.Join(bucketConfigPrefix, bucketName, bucketListenerConfig)
|
||||||
transactionConfigFile := configFile + "/transaction.lock"
|
|
||||||
|
|
||||||
// As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
|
|
||||||
// and configFile, take a transaction lock to avoid data race between readConfig()
|
|
||||||
// and saveConfig().
|
|
||||||
objLock := objAPI.NewNSLock(ctx, minioMetaBucket, transactionConfigFile)
|
|
||||||
if err := objLock.GetRLock(globalOperationTimeout); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer objLock.RUnlock()
|
|
||||||
|
|
||||||
configData, e := readConfig(ctx, objAPI, configFile)
|
configData, e := readConfig(ctx, objAPI, configFile)
|
||||||
if e != nil && !IsErrIgnored(e, errDiskNotFound, errConfigNotFound) {
|
if e != nil && !IsErrIgnored(e, errDiskNotFound, errConfigNotFound) {
|
||||||
return e
|
return e
|
||||||
@ -1180,7 +1169,7 @@ func (sys *NotificationSys) NetworkInfo() []madmin.ServerNetworkHardwareInfo {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewNotificationSys - creates new notification system object.
|
// NewNotificationSys - creates new notification system object.
|
||||||
func NewNotificationSys(endpoints EndpointList) *NotificationSys {
|
func NewNotificationSys(endpoints EndpointZones) *NotificationSys {
|
||||||
// bucketRulesMap/bucketRemoteTargetRulesMap are initialized by NotificationSys.Init()
|
// bucketRulesMap/bucketRemoteTargetRulesMap are initialized by NotificationSys.Init()
|
||||||
return &NotificationSys{
|
return &NotificationSys{
|
||||||
targetList: event.NewTargetList(),
|
targetList: event.NewTargetList(),
|
||||||
@ -1338,16 +1327,6 @@ func SaveListener(objAPI ObjectLayer, bucketName string, eventNames []event.Name
|
|||||||
|
|
||||||
// Construct path to listener.json for the given bucket.
|
// Construct path to listener.json for the given bucket.
|
||||||
configFile := path.Join(bucketConfigPrefix, bucketName, bucketListenerConfig)
|
configFile := path.Join(bucketConfigPrefix, bucketName, bucketListenerConfig)
|
||||||
transactionConfigFile := configFile + "/transaction.lock"
|
|
||||||
|
|
||||||
// As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
|
|
||||||
// and configFile, take a transaction lock to avoid data race between readConfig()
|
|
||||||
// and saveConfig().
|
|
||||||
objLock := objAPI.NewNSLock(ctx, minioMetaBucket, transactionConfigFile)
|
|
||||||
if err := objLock.GetLock(globalOperationTimeout); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer objLock.Unlock()
|
|
||||||
|
|
||||||
configData, err := readConfig(ctx, objAPI, configFile)
|
configData, err := readConfig(ctx, objAPI, configFile)
|
||||||
if err != nil && !IsErrIgnored(err, errDiskNotFound, errConfigNotFound) {
|
if err != nil && !IsErrIgnored(err, errDiskNotFound, errConfigNotFound) {
|
||||||
@ -1389,17 +1368,6 @@ func RemoveListener(objAPI ObjectLayer, bucketName string, targetID event.Target
|
|||||||
|
|
||||||
// Construct path to listener.json for the given bucket.
|
// Construct path to listener.json for the given bucket.
|
||||||
configFile := path.Join(bucketConfigPrefix, bucketName, bucketListenerConfig)
|
configFile := path.Join(bucketConfigPrefix, bucketName, bucketListenerConfig)
|
||||||
transactionConfigFile := configFile + "/transaction.lock"
|
|
||||||
|
|
||||||
// As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
|
|
||||||
// and configFile, take a transaction lock to avoid data race between readConfig()
|
|
||||||
// and saveConfig().
|
|
||||||
objLock := objAPI.NewNSLock(ctx, minioMetaBucket, transactionConfigFile)
|
|
||||||
if err := objLock.GetLock(globalOperationTimeout); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer objLock.Unlock()
|
|
||||||
|
|
||||||
configData, err := readConfig(ctx, objAPI, configFile)
|
configData, err := readConfig(ctx, objAPI, configFile)
|
||||||
if err != nil && !IsErrIgnored(err, errDiskNotFound, errConfigNotFound) {
|
if err != nil && !IsErrIgnored(err, errDiskNotFound, errConfigNotFound) {
|
||||||
return err
|
return err
|
||||||
|
@ -170,6 +170,16 @@ type ListPartsInfo struct {
|
|||||||
EncodingType string // Not supported yet.
|
EncodingType string // Not supported yet.
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Lookup - returns if uploadID is valid
|
||||||
|
func (lm ListMultipartsInfo) Lookup(uploadID string) bool {
|
||||||
|
for _, upload := range lm.Uploads {
|
||||||
|
if upload.UploadID == uploadID {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
// ListMultipartsInfo - represnets bucket resources for incomplete multipart uploads.
|
// ListMultipartsInfo - represnets bucket resources for incomplete multipart uploads.
|
||||||
type ListMultipartsInfo struct {
|
type ListMultipartsInfo struct {
|
||||||
// Together with upload-id-marker, this parameter specifies the multipart upload
|
// Together with upload-id-marker, this parameter specifies the multipart upload
|
||||||
|
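Reviewer note: the new Lookup helper lets a caller (presumably the zone-level multipart routing) check whether a bucket's in-progress uploads include a given uploadID before dispatching a part to a zone. Hypothetical usage, not taken from this commit:

    // pickZoneForUpload returns the index of the first zone whose multipart listing
    // already knows this uploadID; -1 means no zone claims it. Illustrative only.
    func pickZoneForUpload(listings []ListMultipartsInfo, uploadID string) int {
        for i, lm := range listings {
            if lm.Lookup(uploadID) {
                return i
            }
        }
        return -1
    }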
@ -427,6 +427,12 @@ func (e BackendDown) Error() string {
|
|||||||
return "Backend down"
|
return "Backend down"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// isErrBucketNotFound - Check if error type is BucketNotFound.
|
||||||
|
func isErrBucketNotFound(err error) bool {
|
||||||
|
_, ok := err.(BucketNotFound)
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
// isErrObjectNotFound - Check if error type is ObjectNotFound.
|
// isErrObjectNotFound - Check if error type is ObjectNotFound.
|
||||||
func isErrObjectNotFound(err error) bool {
|
func isErrObjectNotFound(err error) bool {
|
||||||
_, ok := err.(ObjectNotFound)
|
_, ok := err.(ObjectNotFound)
|
||||||
|
@ -74,15 +74,6 @@ func checkListObjsArgs(ctx context.Context, bucket, prefix, marker, delimiter st
|
|||||||
Object: prefix,
|
Object: prefix,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Verify if delimiter is anything other than '/', which we do not support.
|
|
||||||
if delimiter != "" && delimiter != SlashSeparator {
|
|
||||||
logger.LogIf(ctx, UnsupportedDelimiter{
|
|
||||||
Delimiter: delimiter,
|
|
||||||
})
|
|
||||||
return UnsupportedDelimiter{
|
|
||||||
Delimiter: delimiter,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Verify if marker has prefix.
|
// Verify if marker has prefix.
|
||||||
if marker != "" && !hasPrefix(marker, prefix) {
|
if marker != "" && !hasPrefix(marker, prefix) {
|
||||||
logger.LogIf(ctx, InvalidMarkerPrefixCombination{
|
logger.LogIf(ctx, InvalidMarkerPrefixCombination{
|
||||||
|
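Reviewer note: checkListObjsArgs stops rejecting delimiters other than "" and "/", so a listing call with an arbitrary delimiter now succeeds and simply echoes the delimiter back, as the reworked test table below expects. A sketch of the new expectation, with the object-layer call paraphrased:

    // Previously this returned UnsupportedDelimiter{Delimiter: "*"}; now it lists normally.
    result, err := obj.ListMultipartUploads(ctx, bucket, "", "", "", "*", 10)
    if err != nil {
        t.Fatal(err)
    }
    if result.Delimiter != "*" {
        t.Fatalf("expected delimiter to be echoed back, got %q", result.Delimiter)
    }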
@ -1128,10 +1128,9 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
|
|||||||
{"volatile-bucket-1", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
|
{"volatile-bucket-1", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
|
||||||
{"volatile-bucket-2", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
|
{"volatile-bucket-2", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
|
||||||
{"volatile-bucket-3", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
|
{"volatile-bucket-3", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
|
||||||
// Valid, existing bucket, but sending invalid delimeter values (Test number 8-9).
|
// Valid, existing bucket, delimiter not supported, returns empty values (Test number 8-9).
|
||||||
// Empty string < "" > and forward slash < / > are the ony two valid arguments for delimeter.
|
{bucketNames[0], "", "", "", "*", 0, ListMultipartsInfo{Delimiter: "*"}, nil, true},
|
||||||
{bucketNames[0], "", "", "", "*", 0, ListMultipartsInfo{}, fmt.Errorf("delimiter '%s' is not supported", "*"), false},
|
{bucketNames[0], "", "", "", "-", 0, ListMultipartsInfo{Delimiter: "-"}, nil, true},
|
||||||
{bucketNames[0], "", "", "", "-", 0, ListMultipartsInfo{}, fmt.Errorf("delimiter '%s' is not supported", "-"), false},
|
|
||||||
// Testing for failure cases with both perfix and marker (Test number 10).
|
// Testing for failure cases with both perfix and marker (Test number 10).
|
||||||
// The prefix and marker combination to be valid it should satisfy strings.HasPrefix(marker, prefix).
|
// The prefix and marker combination to be valid it should satisfy strings.HasPrefix(marker, prefix).
|
||||||
{bucketNames[0], "asia", "europe-object", "", "", 0, ListMultipartsInfo{},
|
{bucketNames[0], "asia", "europe-object", "", "", 0, ListMultipartsInfo{},
|
||||||
@ -1193,9 +1192,6 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
|
|||||||
{bucketNames[1], "Asia", "", "", "", 10, listMultipartResults[23], nil, true},
|
{bucketNames[1], "Asia", "", "", "", 10, listMultipartResults[23], nil, true},
|
||||||
// Test case with `Prefix` and `UploadIDMarker` (Test number 37).
|
// Test case with `Prefix` and `UploadIDMarker` (Test number 37).
|
||||||
{bucketNames[1], "min", "minio-object-1.txt", uploadIDs[1], "", 10, listMultipartResults[24], nil, true},
|
{bucketNames[1], "min", "minio-object-1.txt", uploadIDs[1], "", 10, listMultipartResults[24], nil, true},
|
||||||
// Test case with `KeyMarker` and `UploadIDMarker` (Test number 38).
|
|
||||||
// {bucketNames[1], "", "minio-object-1.txt", uploadIDs[1], "", 10, listMultipartResults[24], nil, true},
|
|
||||||
|
|
||||||
// Test case for bucket with multiple objects in it.
|
// Test case for bucket with multiple objects in it.
|
||||||
// Bucket used : `bucketNames[2]`.
|
// Bucket used : `bucketNames[2]`.
|
||||||
// Objects used: `objectNames[1-5]`.
|
// Objects used: `objectNames[1-5]`.
|
||||||
@ -1217,16 +1213,10 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
|
|||||||
// Since all available entries are listed, IsTruncated is expected to be false
|
// Since all available entries are listed, IsTruncated is expected to be false
|
||||||
// and NextMarkers are expected to empty.
|
// and NextMarkers are expected to empty.
|
||||||
{bucketNames[2], "", "", "", "", 6, listMultipartResults[31], nil, true},
|
{bucketNames[2], "", "", "", "", 6, listMultipartResults[31], nil, true},
|
||||||
// Test case with `uploadIDMarker` (Test number 46).
|
|
||||||
// {bucketNames[2], "", "", uploadIDs[6], "", 10, listMultipartResults[32], nil, true},
|
|
||||||
// Test case with `KeyMarker` (Test number 47).
|
// Test case with `KeyMarker` (Test number 47).
|
||||||
{bucketNames[2], "", objectNames[3], "", "", 10, listMultipartResults[33], nil, true},
|
{bucketNames[2], "", objectNames[3], "", "", 10, listMultipartResults[33], nil, true},
|
||||||
// Test case with `prefix` and `KeyMarker` (Test number 48).
|
// Test case with `prefix` and `KeyMarker` (Test number 48).
|
||||||
{bucketNames[2], "minio-object", objectNames[1], "", "", 10, listMultipartResults[34], nil, true},
|
{bucketNames[2], "minio-object", objectNames[1], "", "", 10, listMultipartResults[34], nil, true},
|
||||||
// Test case with `prefix` and `uploadIDMarker` (Test number 49).
|
|
||||||
// {bucketNames[2], globalMinioDefaultOwnerID, "", uploadIDs[4], "", 10, listMultipartResults[35], nil, true},
|
|
||||||
// Test case with `KeyMarker` and `uploadIDMarker` (Test number 50).
|
|
||||||
// {bucketNames[2], "minio-object.txt", "", uploadIDs[5], "", 10, listMultipartResults[36], nil, true},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, testCase := range testCases {
|
for i, testCase := range testCases {
|
||||||
|
@ -708,9 +708,9 @@ func (client *peerRESTClient) ConsoleLog(logCh chan interface{}, doneCh chan str
|
|||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
func getRemoteHosts(endpoints EndpointList) []*xnet.Host {
|
func getRemoteHosts(endpointZones EndpointZones) []*xnet.Host {
|
||||||
var remoteHosts []*xnet.Host
|
var remoteHosts []*xnet.Host
|
||||||
for _, hostStr := range GetRemotePeers(endpoints) {
|
for _, hostStr := range GetRemotePeers(endpointZones) {
|
||||||
host, err := xnet.ParseHost(hostStr)
|
host, err := xnet.ParseHost(hostStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.LogIf(context.Background(), err)
|
logger.LogIf(context.Background(), err)
|
||||||
@ -722,7 +722,7 @@ func getRemoteHosts(endpoints EndpointList) []*xnet.Host {
|
|||||||
return remoteHosts
|
return remoteHosts
|
||||||
}
|
}
|
||||||
|
|
||||||
func getRestClients(endpoints EndpointList) []*peerRESTClient {
|
func getRestClients(endpoints EndpointZones) []*peerRESTClient {
|
||||||
peerHosts := getRemoteHosts(endpoints)
|
peerHosts := getRemoteHosts(endpoints)
|
||||||
restClients := make([]*peerRESTClient, len(peerHosts))
|
restClients := make([]*peerRESTClient, len(peerHosts))
|
||||||
for i, host := range peerHosts {
|
for i, host := range peerHosts {
|
||||||
|
@@ -53,7 +53,7 @@ var printEndpointError = func() func(Endpoint, error) {
}()

// Migrates backend format of local disks.
-func formatXLMigrateLocalEndpoints(endpoints EndpointList) error {
+func formatXLMigrateLocalEndpoints(endpoints Endpoints) error {
g := errgroup.WithNErrs(len(endpoints))
for index, endpoint := range endpoints {
if !endpoint.IsLocal {
@@ -81,7 +81,7 @@ func formatXLMigrateLocalEndpoints(endpoints EndpointList) error {
}

// Cleans up tmp directory of local disks.
-func formatXLCleanupTmpLocalEndpoints(endpoints EndpointList) error {
+func formatXLCleanupTmpLocalEndpoints(endpoints Endpoints) error {
g := errgroup.WithNErrs(len(endpoints))
for index, endpoint := range endpoints {
if !endpoint.IsLocal {
@@ -145,7 +145,7 @@ func formatXLCleanupTmpLocalEndpoints(endpoints EndpointList) error {
}

// validate reference format against list of XL formats.
-func validateXLFormats(format *formatXLV3, formats []*formatXLV3, endpoints EndpointList, setCount, drivesPerSet int) error {
+func validateXLFormats(format *formatXLV3, formats []*formatXLV3, endpoints Endpoints, setCount, drivesPerSet int) error {
for i := range formats {
if formats[i] == nil {
continue
@@ -174,7 +174,7 @@ var errXLV3ThisEmpty = fmt.Errorf("XL format version 3 has This field empty")
// connect to list of endpoints and load all XL disk formats, validate the formats are correct
// and are in quorum, if no formats are found attempt to initialize all of them for the first
// time. additionally make sure to close all the disks used in this attempt.
-func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints EndpointList, setCount, drivesPerSet int) (*formatXLV3, error) {
+func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints, setCount, drivesPerSet int) (*formatXLV3, error) {
// Initialize all storage disks
storageDisks, errs := initStorageDisksWithErrors(endpoints)
defer closeStorageDisks(storageDisks)
@@ -286,7 +286,7 @@ func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints EndpointLi
}

// Format disks before initialization of object layer.
-func waitForFormatXL(firstDisk bool, endpoints EndpointList, setCount, disksPerSet int) (format *formatXLV3, err error) {
+func waitForFormatXL(firstDisk bool, endpoints Endpoints, setCount, disksPerSet int) (format *formatXLV3, err error) {
if len(endpoints) == 0 || setCount == 0 || disksPerSet == 0 {
return nil, errInvalidArgument
}
@@ -23,15 +23,15 @@ import (
)

// Composed function registering routers for only distributed XL setup.
-func registerDistXLRouters(router *mux.Router, endpoints EndpointList) {
+func registerDistXLRouters(router *mux.Router, endpointZones EndpointZones) {
// Register storage rpc router only if its a distributed setup.
-registerStorageRESTHandlers(router, endpoints)
+registerStorageRESTHandlers(router, endpointZones)

// Register peer REST router only if its a distributed setup.
registerPeerRESTHandlers(router)

// Register distributed namespace lock.
-registerLockRESTHandlers(router, endpoints)
+registerLockRESTHandlers(router, endpointZones)

}

@@ -79,14 +79,14 @@ var globalHandlers = []HandlerFunc{
}

// configureServer handler returns final handler for the http server.
-func configureServerHandler(endpoints EndpointList) (http.Handler, error) {
+func configureServerHandler(endpointZones EndpointZones) (http.Handler, error) {
// Initialize router. `SkipClean(true)` stops gorilla/mux from
// normalizing URL path minio/minio#3256
router := mux.NewRouter().SkipClean(true)

// Initialize distributed NS lock.
if globalIsDistXL {
-registerDistXLRouters(router, endpoints)
+registerDistXLRouters(router, endpointZones)
}

// Add STS router always.
@@ -146,12 +146,13 @@ func serverHandleCmdArgs(ctx *cli.Context) {

endpoints := strings.Fields(env.Get(config.EnvEndpoints, ""))
if len(endpoints) > 0 {
-globalMinioAddr, globalEndpoints, setupType, globalXLSetCount, globalXLSetDriveCount, err = createServerEndpoints(globalCLIContext.Addr, endpoints...)
+globalEndpoints, setupType, err = createServerEndpoints(globalCLIContext.Addr, endpoints...)
} else {
-globalMinioAddr, globalEndpoints, setupType, globalXLSetCount, globalXLSetDriveCount, err = createServerEndpoints(globalCLIContext.Addr, ctx.Args()...)
+globalEndpoints, setupType, err = createServerEndpoints(globalCLIContext.Addr, ctx.Args()...)
}
logger.FatalIf(err, "Invalid command line arguments")

+globalMinioAddr = globalCLIContext.Addr
logger.LogIf(context.Background(), checkEndpointsSubOptimal(ctx, setupType, globalEndpoints))

globalMinioHost, globalMinioPort = mustSplitHostPort(globalMinioAddr)
@@ -192,7 +193,24 @@ func newAllSubsystems() {
}

func initSafeModeInit(buckets []BucketInfo) (err error) {
+newObject := newObjectLayerWithoutSafeModeFn()
+
+// Construct path to config/transaction.lock for locking
+transactionConfigPrefix := minioConfigPrefix + "/transaction.lock"
+
+// Make sure to hold lock for entire migration to avoid
+// such that only one server should migrate the entire config
+// at a given time, this big transaction lock ensures this
+// appropriately. This is also true for rotation of encrypted
+// content.
+objLock := newObject.NewNSLock(context.Background(), minioMetaBucket, transactionConfigPrefix)
+if err = objLock.GetLock(globalOperationTimeout); err != nil {
+return err
+}
+
defer func() {
+objLock.Unlock()
+
if err != nil {
var cerr config.Err
if errors.As(err, &cerr) {
@@ -210,8 +228,6 @@ func initSafeModeInit(buckets []BucketInfo) (err error) {
}
}()

-newObject := newObjectLayerWithoutSafeModeFn()
-
// Calls New() for all sub-systems.
newAllSubsystems()

@@ -302,10 +318,10 @@ func serverMain(ctx *cli.Context) {

// Is distributed setup, error out if no certificates are found for HTTPS endpoints.
if globalIsDistXL {
-if globalEndpoints.IsHTTPS() && !globalIsSSL {
+if globalEndpoints.HTTPS() && !globalIsSSL {
logger.Fatal(config.ErrNoCertsAndHTTPSEndpoints(nil), "Unable to start the server")
}
-if !globalEndpoints.IsHTTPS() && globalIsSSL {
+if !globalEndpoints.HTTPS() && globalIsSSL {
logger.Fatal(config.ErrCertsAndHTTPEndpoints(nil), "Unable to start the server")
}
}
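The transaction lock added to initSafeModeInit above serializes config migration so that only one server in a distributed deployment performs it (and any re-encryption of config content) at a time. A minimal sketch of that lock-around-critical-section shape, using only calls that appear in this patch; the wrapper name withConfigTransaction and its callback are illustrative, not part of the patch:

func withConfigTransaction(obj ObjectLayer, critical func() error) error {
    // Cluster-wide lock on <minioConfigPrefix>/transaction.lock.
    txnLock := obj.NewNSLock(context.Background(), minioMetaBucket,
        minioConfigPrefix+"/transaction.lock")
    if err := txnLock.GetLock(globalOperationTimeout); err != nil {
        return err
    }
    defer txnLock.Unlock()
    // Everything inside runs while holding the transaction lock.
    return critical()
}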
@@ -413,19 +429,21 @@ func serverMain(ctx *cli.Context) {
}

// Initialize object layer with the supplied disks, objectLayer is nil upon any error.
-func newObjectLayer(endpoints EndpointList) (newObject ObjectLayer, err error) {
+func newObjectLayer(endpointZones EndpointZones) (newObject ObjectLayer, err error) {
// For FS only, directly use the disk.
-isFS := len(endpoints) == 1
-if isFS {
+if endpointZones.Nodes() == 1 {
// Initialize new FS object layer.
-return NewFSObjectLayer(endpoints[0].Path)
+return NewFSObjectLayer(endpointZones[0].Endpoints[0].Path)
}

-format, err := waitForFormatXL(endpoints[0].IsLocal, endpoints, globalXLSetCount, globalXLSetDriveCount)
+var formats []*formatXLV3
+for _, ep := range endpointZones {
+format, err := waitForFormatXL(ep.Endpoints[0].IsLocal, ep.Endpoints, ep.SetCount, ep.DrivesPerSet)
if err != nil {
return nil, err
}
+formats = append(formats, format)
-return newXLSets(endpoints, format, len(format.XL.Sets), len(format.XL.Sets[0]))
+}
+return newXLZones(endpointZones, formats)
}
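newObjectLayer above now formats each zone independently (one waitForFormatXL call per ZoneEndpoints entry) and hands the collected formats to newXLZones. For illustration, a hedged sketch of driving it with two zones; the drive paths are invented, the helper reuses mustGetNewEndpoints from this patch, and it assumes EndpointZones is the []ZoneEndpoints slice used elsewhere in the patch:

func exampleTwoZoneObjectLayer() (ObjectLayer, error) {
    // Each zone brings its own erasure-set layout (one set of 4 drives here).
    zone1 := mustGetNewEndpoints("/mnt/zone1/disk1", "/mnt/zone1/disk2", "/mnt/zone1/disk3", "/mnt/zone1/disk4")
    zone2 := mustGetNewEndpoints("/mnt/zone2/disk1", "/mnt/zone2/disk2", "/mnt/zone2/disk3", "/mnt/zone2/disk4")
    endpointZones := EndpointZones{
        {SetCount: 1, DrivesPerSet: 4, Endpoints: zone1},
        {SetCount: 1, DrivesPerSet: 4, Endpoints: zone2},
    }
    // Each zone is formatted on its own, then combined into one xlZones layer.
    return newObjectLayer(endpointZones)
}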
@@ -31,8 +31,7 @@ func TestNewObjectLayer(t *testing.T) {
}
defer removeRoots(disks)

-endpoints := mustGetNewEndpointList(disks...)
-obj, err := newObjectLayer(endpoints)
+obj, err := newObjectLayer(mustGetZoneEndpoints(disks...))
if err != nil {
t.Fatal("Unexpected object layer initialization error", err)
}
@@ -51,16 +50,12 @@ func TestNewObjectLayer(t *testing.T) {
}
defer removeRoots(disks)

-globalXLSetCount = 1
-globalXLSetDriveCount = 16
-
-endpoints = mustGetNewEndpointList(disks...)
-obj, err = newObjectLayer(endpoints)
+obj, err = newObjectLayer(mustGetZoneEndpoints(disks...))
if err != nil {
t.Fatal("Unexpected object layer initialization error", err)
}

-_, ok = obj.(*xlSets)
+_, ok = obj.(*xlZones)
if !ok {
t.Fatal("Unexpected object layer detected", reflect.TypeOf(obj))
}
@@ -560,8 +560,9 @@ func (s *storageRESTServer) VerifyFile(w http.ResponseWriter, r *http.Request) {
}

// registerStorageRPCRouter - register storage rpc router.
-func registerStorageRESTHandlers(router *mux.Router, endpoints EndpointList) {
-for _, endpoint := range endpoints {
+func registerStorageRESTHandlers(router *mux.Router, endpointZones EndpointZones) {
+for _, ep := range endpointZones {
+for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
@@ -610,6 +611,7 @@ func registerStorageRESTHandlers(router *mux.Router, endpoints EndpointList) {
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodVerifyFile).HandlerFunc(httpTraceHdrs(server.VerifyFile)).
Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTBitrotAlgo, storageRESTBitrotHash, storageRESTLength, storageRESTShardSize)...)
}
+}

// If none of the routes match add default error handler routes
router.NotFoundHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
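The zones-then-endpoints double loop introduced in registerStorageRESTHandlers above is the iteration shape this patch uses wherever code previously ranged over a flat EndpointList. A small sketch of the same shape written as a flattening helper (flattenZones is illustrative, not part of the patch):

func flattenZones(endpointZones EndpointZones) Endpoints {
    var all Endpoints
    for _, zone := range endpointZones {
        for _, endpoint := range zone.Endpoints {
            all = append(all, endpoint)
        }
    }
    return all
}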
@@ -513,7 +513,9 @@ func newStorageRESTHTTPServerClient(t *testing.T) (*httptest.Server, *storageRES
t.Fatalf("UpdateIsLocal failed %v", err)
}

-registerStorageRESTHandlers(router, EndpointList{endpoint})
+registerStorageRESTHandlers(router, []ZoneEndpoints{{
+Endpoints: Endpoints{endpoint},
+}})
restClient := newStorageRESTClient(endpoint)
prevGlobalServerConfig := globalServerConfig
globalServerConfig = newServerConfig()
@@ -59,8 +59,6 @@ import (
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
-"github.com/minio/minio/pkg/bpool"
-"github.com/minio/minio/pkg/dsync"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/policy"
)
@@ -181,13 +179,13 @@ func prepareXLSets32() (ObjectLayer, []string, error) {
return nil, nil, err
}

-endpoints1 := mustGetNewEndpointList(fsDirs1...)
+endpoints1 := mustGetNewEndpoints(fsDirs1...)
fsDirs2, err := getRandomDisks(16)
if err != nil {
removeRoots(fsDirs1)
return nil, nil, err
}
-endpoints2 := mustGetNewEndpointList(fsDirs2...)
+endpoints2 := mustGetNewEndpoints(fsDirs2...)

endpoints := append(endpoints1, endpoints2...)
fsDirs := append(fsDirs1, fsDirs2...)
@@ -210,7 +208,7 @@ func prepareXL(nDisks int) (ObjectLayer, []string, error) {
if err != nil {
return nil, nil, err
}
-obj, _, err := initObjectLayer(mustGetNewEndpointList(fsDirs...))
+obj, _, err := initObjectLayer(mustGetZoneEndpoints(fsDirs...))
if err != nil {
removeRoots(fsDirs)
return nil, nil, err
@@ -302,7 +300,7 @@ func isSameType(obj1, obj2 interface{}) bool {
// defer s.Stop()
type TestServer struct {
Root string
-Disks EndpointList
+Disks EndpointZones
AccessKey string
SecretKey string
Server *httptest.Server
@@ -331,9 +329,7 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer {
credentials := globalActiveCred

testServer.Obj = objLayer
-for _, disk := range disks {
-testServer.Disks = append(testServer.Disks, mustGetNewEndpointList(disk)...)
-}
+testServer.Disks = mustGetZoneEndpoints(disks...)
testServer.AccessKey = credentials.AccessKey
testServer.SecretKey = credentials.SecretKey

@@ -450,7 +446,7 @@ func resetGlobalConfig() {
}

func resetGlobalEndpoints() {
-globalEndpoints = EndpointList{}
+globalEndpoints = EndpointZones{}
}

func resetGlobalIsXL() {
@@ -525,9 +521,11 @@ func newTestConfig(bucketLocation string, obj ObjectLayer) (err error) {
// Deleting the temporary backend and stopping the server.
func (testServer TestServer) Stop() {
os.RemoveAll(testServer.Root)
-for _, disk := range testServer.Disks {
+for _, ep := range testServer.Disks {
+for _, disk := range ep.Endpoints {
os.RemoveAll(disk.Path)
}
+}
testServer.Server.Close()
}
@@ -1580,73 +1578,52 @@ func getRandomDisks(N int) ([]string, error) {
}

// Initialize object layer with the supplied disks, objectLayer is nil upon any error.
-func newTestObjectLayer(endpoints EndpointList) (newObject ObjectLayer, err error) {
+func newTestObjectLayer(endpointZones EndpointZones) (newObject ObjectLayer, err error) {
// For FS only, directly use the disk.
-isFS := len(endpoints) == 1
-if isFS {
+if endpointZones.Nodes() == 1 {
// Initialize new FS object layer.
-return NewFSObjectLayer(endpoints[0].Path)
+return NewFSObjectLayer(endpointZones[0].Endpoints[0].Path)
}

-format, err := waitForFormatXL(endpoints[0].IsLocal, endpoints, 1, 16)
+var formats []*formatXLV3
+for _, ep := range endpointZones {
+format, err := waitForFormatXL(ep.Endpoints[0].IsLocal, ep.Endpoints, ep.SetCount, ep.DrivesPerSet)
if err != nil {
return nil, err
}
+formats = append(formats, format)
+}
+
-storageDisks, errs := initStorageDisksWithErrors(endpoints)
+zones, err := newXLZones(endpointZones, formats)
-for _, err = range errs {
+if err != nil {
-if err != nil && err != errDiskNotFound {
return nil, err
}
-}
-
-for i, disk := range storageDisks {
-disk.SetDiskID(format.XL.Sets[0][i])
-}
-
-// Initialize list pool.
-listPool := NewTreeWalkPool(globalLookupTimeout)
-
-// Initialize xl objects.
-xl := &xlObjects{
-listPool: listPool,
-storageDisks: storageDisks,
-nsMutex: newNSLock(false),
-bp: bpool.NewBytePoolCap(4, blockSizeV1, blockSizeV1*2),
-}
-
-xl.getDisks = func() []StorageAPI {
-return xl.storageDisks
-}
-xl.getLockers = func() []dsync.NetLocker {
-return nil
-}

globalConfigSys = NewConfigSys()

globalIAMSys = NewIAMSys()
-globalIAMSys.Init(xl)
+globalIAMSys.Init(zones)

globalPolicySys = NewPolicySys()
-globalPolicySys.Init(nil, xl)
+globalPolicySys.Init(nil, zones)

-globalNotificationSys = NewNotificationSys(endpoints)
-globalNotificationSys.Init(nil, xl)
+globalNotificationSys = NewNotificationSys(endpointZones)
+globalNotificationSys.Init(nil, zones)

-return xl, nil
+return zones, nil
}

// initObjectLayer - Instantiates object layer and returns it.
-func initObjectLayer(endpoints EndpointList) (ObjectLayer, []StorageAPI, error) {
-objLayer, err := newTestObjectLayer(endpoints)
+func initObjectLayer(endpointZones EndpointZones) (ObjectLayer, []StorageAPI, error) {
+objLayer, err := newTestObjectLayer(endpointZones)
if err != nil {
return nil, nil, err
}

var formattedDisks []StorageAPI
// Should use the object layer tests for validating cache.
-if xl, ok := objLayer.(*xlObjects); ok {
-formattedDisks = xl.storageDisks
+if z, ok := objLayer.(*xlZones); ok {
+formattedDisks = z.zones[0].GetDisks(0)()
}

// Success.
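With newTestObjectLayer returning an *xlZones value, tests no longer reach a storageDisks field directly; they drill down through the zone and set accessors, as initObjectLayer above does with z.zones[0].GetDisks(0)(). A hedged sketch of that access path as a helper (firstSetDisks is an illustrative name; the accessor chain is the one used by the rewritten tests in this patch):

func firstSetDisks(t *testing.T, obj ObjectLayer) []StorageAPI {
    z, ok := obj.(*xlZones)
    if !ok {
        t.Fatal("expected an *xlZones object layer")
    }
    // First zone, first erasure set, then that set's current disks.
    return z.zones[0].sets[0].getDisks()
}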
@@ -2052,7 +2029,7 @@ func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType)
if err != nil {
t.Fatalf("Initialization of disks for XL setup: %s", err)
}
-objLayer, _, err := initObjectLayer(mustGetNewEndpointList(erasureDisks...))
+objLayer, _, err := initObjectLayer(mustGetZoneEndpoints(erasureDisks...))
if err != nil {
t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
}
@@ -2303,25 +2280,29 @@ func generateTLSCertKey(host string) ([]byte, []byte, error) {
return certOut.Bytes(), keyOut.Bytes(), nil
}

-func mustGetNewEndpointList(args ...string) (endpoints EndpointList) {
-if len(args) == 1 {
-endpoint, err := NewEndpoint(args[0])
-logger.FatalIf(err, "unable to create new endpoint")
-endpoints = append(endpoints, endpoint)
-} else {
-var err error
-endpoints, err = NewEndpointList(args...)
+func mustGetZoneEndpoints(args ...string) EndpointZones {
+endpoints := mustGetNewEndpoints(args...)
+return []ZoneEndpoints{{
+SetCount: 1,
+DrivesPerSet: len(args),
+Endpoints: endpoints,
+}}
+}
+
+func mustGetNewEndpoints(args ...string) (endpoints Endpoints) {
+endpoints, err := NewEndpoints(args...)
logger.FatalIf(err, "unable to create new endpoint list")
-}
return endpoints
}

-func getEndpointsLocalAddr(endpoints EndpointList) string {
-for _, endpoint := range endpoints {
+func getEndpointsLocalAddr(endpointZones EndpointZones) string {
+for _, endpoints := range endpointZones {
+for _, endpoint := range endpoints.Endpoints {
if endpoint.IsLocal && endpoint.Type() == URLEndpointType {
return endpoint.Host
}
}
+}

return net.JoinHostPort(globalMinioHost, globalMinioPort)
}
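mustGetZoneEndpoints above wraps whatever drives a test supplies into a single zone (SetCount 1, DrivesPerSet equal to the number of drives). Roughly how the updated tests put it together end to end; this sketch mirrors the test code in this patch and assumes it runs inside a *testing.T test in the cmd package:

fsDirs, err := getRandomDisks(16)
if err != nil {
    t.Fatal(err)
}
defer removeRoots(fsDirs)
// All 16 drives become one zone with a single 16-drive erasure set.
objLayer, _, err := initObjectLayer(mustGetZoneEndpoints(fsDirs...))
if err != nil {
    t.Fatal(err)
}
defer objLayer.Shutdown(context.Background())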
@@ -123,7 +123,7 @@ func TestTreeWalk(t *testing.T) {
if err != nil {
t.Fatalf("Unable to create tmp directory: %s", err)
}
-endpoints := mustGetNewEndpointList(fsDir)
+endpoints := mustGetNewEndpoints(fsDir)
disk, err := newStorageAPI(endpoints[0])
if err != nil {
t.Fatalf("Unable to create StorageAPI: %s", err)
@@ -160,7 +160,7 @@ func TestTreeWalkTimeout(t *testing.T) {
if err != nil {
t.Fatalf("Unable to create tmp directory: %s", err)
}
-endpoints := mustGetNewEndpointList(fsDir)
+endpoints := mustGetNewEndpoints(fsDir)
disk, err := newStorageAPI(endpoints[0])
if err != nil {
t.Fatalf("Unable to create StorageAPI: %s", err)
@@ -235,13 +235,13 @@ func TestListDir(t *testing.T) {
}

// Create two StorageAPIs disk1 and disk2.
-endpoints := mustGetNewEndpointList(fsDir1)
+endpoints := mustGetNewEndpoints(fsDir1)
disk1, err := newStorageAPI(endpoints[0])
if err != nil {
t.Errorf("Unable to create StorageAPI: %s", err)
}

-endpoints = mustGetNewEndpointList(fsDir2)
+endpoints = mustGetNewEndpoints(fsDir2)
disk2, err := newStorageAPI(endpoints[0])
if err != nil {
t.Errorf("Unable to create StorageAPI: %s", err)
@@ -300,7 +300,7 @@ func TestRecursiveTreeWalk(t *testing.T) {
t.Fatalf("Unable to create tmp directory: %s", err)
}

-endpoints := mustGetNewEndpointList(fsDir1)
+endpoints := mustGetNewEndpoints(fsDir1)
disk1, err := newStorageAPI(endpoints[0])
if err != nil {
t.Fatalf("Unable to create StorageAPI: %s", err)
@@ -405,7 +405,7 @@ func TestSortedness(t *testing.T) {
t.Errorf("Unable to create tmp directory: %s", err)
}

-endpoints := mustGetNewEndpointList(fsDir1)
+endpoints := mustGetNewEndpoints(fsDir1)
disk1, err := newStorageAPI(endpoints[0])
if err != nil {
t.Fatalf("Unable to create StorageAPI: %s", err)
@@ -476,7 +476,7 @@ func TestTreeWalkIsEnd(t *testing.T) {
t.Errorf("Unable to create tmp directory: %s", err)
}

-endpoints := mustGetNewEndpointList(fsDir1)
+endpoints := mustGetNewEndpoints(fsDir1)
disk1, err := newStorageAPI(endpoints[0])
if err != nil {
t.Fatalf("Unable to create StorageAPI: %s", err)
@@ -1587,9 +1587,14 @@ func TestWebObjectLayerFaultyDisks(t *testing.T) {
}

// Set faulty disks to XL backend
-xl := obj.(*xlObjects)
-for i, d := range xl.storageDisks {
-xl.storageDisks[i] = newNaughtyDisk(d, nil, errFaultyDisk)
+z := obj.(*xlZones)
+xl := z.zones[0].sets[0]
+xlDisks := xl.getDisks()
+xl.getDisks = func() []StorageAPI {
+for i, d := range xlDisks {
+xlDisks[i] = newNaughtyDisk(d, nil, errFaultyDisk)
+}
+return xlDisks
}

// Initialize web rpc endpoint.
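Because the erasure set now hands out its drives through the getDisks closure instead of a storageDisks field, fault-injection tests like the one above override that closure. A hedged sketch of the same pattern in isolation (wrapWithFaultyDisks is an illustrative name; newNaughtyDisk and errFaultyDisk are the helpers used in the hunk, and here the disks are wrapped once rather than on every call):

func wrapWithFaultyDisks(xl *xlObjects) {
    disks := xl.getDisks()
    // Wrap each disk with a fault-injecting implementation.
    for i, d := range disks {
        disks[i] = newNaughtyDisk(d, nil, errFaultyDisk)
    }
    // Serve the wrapped slice from the accessor the set uses internally.
    xl.getDisks = func() []StorageAPI {
        return disks
    }
}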
@@ -55,18 +55,6 @@ func (s setsStorageAPI) Close() error {
return nil
}

-func (s setsDsyncLockers) Close() error {
-for i := 0; i < len(s); i++ {
-for _, locker := range s[i] {
-if locker == nil {
-continue
-}
-locker.Close()
-}
-}
-return nil
-}
-
// xlSets implements ObjectLayer combining a static list of erasure coded
// object sets. NOTE: There is no dynamic scaling allowed or intended in
// current design.
@@ -89,7 +77,7 @@ type xlSets struct {
lockersMap map[Endpoint]dsync.NetLocker

// List of endpoints provided on the command line.
-endpoints EndpointList
+endpoints Endpoints

// Total number of sets and the number of disks per set.
setCount, drivesPerSet int
@@ -123,11 +111,10 @@ func (s *xlSets) isConnected(endpoint Endpoint) bool {
if s.xlDisks[i][j].String() != endpointStr {
continue
}
-if s.xlDisks[i][j].IsOnline() {
-return true
+if !s.xlLockers[i][j].IsOnline() {
+continue
}
-s.xlLockers[i][j].Close()
-return false
+return s.xlDisks[i][j].IsOnline()
}
}
return false
@@ -282,8 +269,7 @@ func (s *xlSets) GetDisks(setIndex int) func() []StorageAPI {
const defaultMonitorConnectEndpointInterval = time.Second * 10 // Set to 10 secs.

// Initialize new set of erasure coded sets.
-func newXLSets(endpoints EndpointList, format *formatXLV3, setCount int, drivesPerSet int) (ObjectLayer, error) {
-
+func newXLSets(endpoints Endpoints, format *formatXLV3, setCount int, drivesPerSet int) (*xlSets, error) {
lockersMap := make(map[Endpoint]dsync.NetLocker)
for _, endpoint := range endpoints {
lockersMap[endpoint] = newLockAPI(endpoint)
@@ -464,13 +450,6 @@ func (s *xlSets) Shutdown(ctx context.Context) error {
// even if one of the sets fail to create buckets, we proceed to undo a
// successful operation.
func (s *xlSets) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
-set := s.getHashedSet(bucket)
-bucketLock := set.nsMutex.NewNSLock(ctx, set.getLockers(), bucket, "")
-if err := bucketLock.GetLock(globalOperationTimeout); err != nil {
-return err
-}
-defer bucketLock.Unlock()
-
g := errgroup.WithNErrs(len(s.sets))

// Create buckets in parallel across all sets.
@@ -549,14 +528,7 @@ func (s *xlSets) getHashedSet(input string) (set *xlObjects) {

// GetBucketInfo - returns bucket info from one of the erasure coded set.
func (s *xlSets) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
-set := s.getHashedSet(bucket)
-bucketLock := set.nsMutex.NewNSLock(ctx, set.getLockers(), bucket, "")
-if err = bucketLock.GetRLock(globalOperationTimeout); err != nil {
-return bucketInfo, err
-}
-defer bucketLock.RUnlock()
-
-return s.getHashedSet(bucket).GetBucketInfo(ctx, bucket)
+return s.getHashedSet("").GetBucketInfo(ctx, bucket)
}

// ListObjectsV2 lists all objects in bucket filtered by prefix
@@ -635,13 +607,6 @@ func (s *xlSets) IsCompressionSupported() bool {
// even if one of the sets fail to delete buckets, we proceed to
// undo a successful operation.
func (s *xlSets) DeleteBucket(ctx context.Context, bucket string) error {
-set := s.getHashedSet(bucket)
-bucketLock := set.nsMutex.NewNSLock(ctx, set.getLockers(), bucket, "")
-if err := bucketLock.GetLock(globalOperationTimeout); err != nil {
-return err
-}
-defer bucketLock.Unlock()
-
g := errgroup.WithNErrs(len(s.sets))

// Delete buckets in parallel across all sets.
@@ -729,7 +694,6 @@ func (s *xlSets) DeleteObject(ctx context.Context, bucket string, object string)
// objects are group by set first, and then bulk delete is invoked
// for each set, the error response of each delete will be returned
func (s *xlSets) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) {
-
type delObj struct {
// Set index associated to this object
setIndex int
@@ -787,13 +751,6 @@ func (s *xlSets) CopyObject(ctx context.Context, srcBucket, srcObject, destBucke
return srcSet.CopyObject(ctx, srcBucket, srcObject, destBucket, destObject, srcInfo, srcOpts, dstOpts)
}

-if !cpSrcDstSame {
-objectDWLock := destSet.nsMutex.NewNSLock(ctx, destSet.getLockers(), destBucket, destObject)
-if err := objectDWLock.GetLock(globalObjectTimeout); err != nil {
-return objInfo, err
-}
-defer objectDWLock.Unlock()
-}
putOpts := ObjectOptions{ServerSideEncryption: dstOpts.ServerSideEncryption, UserDefined: srcInfo.UserDefined}
return destSet.putObject(ctx, destBucket, destObject, srcInfo.PutObjReader, putOpts)
}
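With the per-bucket nsMutex locking removed above, MakeBucketWithLocation and DeleteBucket reduce to the fan-out across erasure sets that was already there. A hedged sketch of that fan-out shape; it assumes the Go/Wait semantics of minio's internal errgroup package (g.Go takes the function plus an index), and the helper itself is illustrative, not part of the patch:

func forEachSet(sets []*xlObjects, op func(*xlObjects) error) []error {
    g := errgroup.WithNErrs(len(sets))
    for index := range sets {
        index := index
        g.Go(func() error {
            return op(sets[index])
        }, index)
    }
    // One error slot per set, in set order.
    return g.Wait()
}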
@@ -1078,11 +1035,6 @@ func (s *xlSets) listObjectsNonSlash(ctx context.Context, bucket, prefix, marker
// walked and merged at this layer. Resulting value through the merge process sends
// the data in lexically sorted order.
func (s *xlSets) listObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int, heal bool) (loi ListObjectsInfo, err error) {
-if delimiter != SlashSeparator && delimiter != "" {
-// "heal" option passed can be ignored as the heal-listing does not send non-standard delimiter.
-return s.listObjectsNonSlash(ctx, bucket, prefix, marker, delimiter, maxKeys)
-}
-
if err = checkListObjsArgs(ctx, bucket, prefix, marker, delimiter, s); err != nil {
return loi, err
}
@@ -1114,6 +1066,11 @@ func (s *xlSets) listObjects(ctx context.Context, bucket, prefix, marker, delimi
maxKeys = maxObjectList
}

+if delimiter != SlashSeparator && delimiter != "" {
+// "heal" option passed can be ignored as the heal-listing does not send non-standard delimiter.
+return s.listObjectsNonSlash(ctx, bucket, prefix, marker, delimiter, maxKeys)
+}
+
// Default is recursive, if delimiter is set then list non recursive.
recursive := true
if delimiter == SlashSeparator {
@@ -1284,7 +1241,7 @@
fi
*/

-func formatsToDrivesInfo(endpoints EndpointList, formats []*formatXLV3, sErrs []error) (beforeDrives []madmin.DriveInfo) {
+func formatsToDrivesInfo(endpoints Endpoints, formats []*formatXLV3, sErrs []error) (beforeDrives []madmin.DriveInfo) {
beforeDrives = make([]madmin.DriveInfo, len(endpoints))
// Existing formats are available (i.e. ok), so save it in
// result, also populate disks to be healed.
@@ -1317,14 +1274,6 @@ func formatsToDrivesInfo(endpoints EndpointList, formats []*formatXLV3, sErrs []
// Reloads the format from the disk, usually called by a remote peer notifier while
// healing in a distributed setup.
func (s *xlSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) {
-// Acquire lock on format.json
-set := s.getHashedSet(formatConfigFile)
-formatLock := set.nsMutex.NewNSLock(ctx, set.getLockers(), minioMetaBucket, formatConfigFile)
-if err = formatLock.GetRLock(globalHealingTimeout); err != nil {
-return err
-}
-defer formatLock.RUnlock()
-
storageDisks, errs := initStorageDisksWithErrors(s.endpoints)
for i, err := range errs {
if err != nil && err != errDiskNotFound {
@@ -1367,7 +1316,6 @@ func (s *xlSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) {

// Close all existing disks, lockers and reconnect all the disks/lockers.
s.xlDisks.Close()
-s.xlLockers.Close()
s.connectDisksAndLockers()

// Restart monitoring loop to monitor reformatted disks again.
@@ -1433,17 +1381,7 @@ func markRootDisksAsDown(storageDisks []StorageAPI) {
}

// HealFormat - heals missing `format.json` on fresh unformatted disks.
-// TODO: In future support corrupted disks missing format.json but has erasure
-// coded data in it.
func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealResultItem, err error) {
-// Acquire lock on format.json
-set := s.getHashedSet(formatConfigFile)
-formatLock := set.nsMutex.NewNSLock(ctx, set.getLockers(), minioMetaBucket, formatConfigFile)
-if err = formatLock.GetLock(globalHealingTimeout); err != nil {
-return madmin.HealResultItem{}, err
-}
-defer formatLock.Unlock()
-
storageDisks, errs := initStorageDisksWithErrors(s.endpoints)
for i, derr := range errs {
if derr != nil && derr != errDiskNotFound {
@@ -1576,7 +1514,6 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealRe

// Disconnect/relinquish all existing disks, lockers and reconnect the disks, lockers.
s.xlDisks.Close()
-s.xlLockers.Close()
s.connectDisksAndLockers()

// Restart our monitoring loop to start monitoring newly formatted disks.
@@ -1588,13 +1525,6 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealRe

// HealBucket - heals inconsistent buckets and bucket metadata on all sets.
func (s *xlSets) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (result madmin.HealResultItem, err error) {
-set := s.getHashedSet(bucket)
-bucketLock := set.nsMutex.NewNSLock(ctx, set.getLockers(), bucket, "")
-if err = bucketLock.GetLock(globalOperationTimeout); err != nil {
-return result, err
-}
-defer bucketLock.Unlock()
-
// Initialize heal result info
result = madmin.HealResultItem{
Type: madmin.HealItemBucket,
@@ -1697,7 +1627,7 @@ func (s *xlSets) HealObjects(ctx context.Context, bucket, prefix string, healObj
// Wait at max 10 minute for an inprogress request before proceeding to heal
waitCount := 600
// Any requests in progress, delay the heal.
-for (globalHTTPServer.GetRequestCount() >= int32(globalXLSetCount*globalXLSetDriveCount)) &&
+for (globalHTTPServer.GetRequestCount() >= int32(s.setCount*s.drivesPerSet)) &&
waitCount > 0 {
waitCount--
time.Sleep(1 * time.Second)
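The heal throttle in HealObjects above now keys off the topology of the set layer it belongs to rather than the removed globals. As a concrete reading of that check: with s.setCount = 1 and s.drivesPerSet = 16 (the layout most tests in this patch use), healing backs off while 16 or more requests are in flight, re-checking once per second for at most waitCount = 600 iterations, which is the 10-minute ceiling mentioned in the comment.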
@@ -75,7 +75,7 @@ func TestNewXLSets(t *testing.T) {
defer os.RemoveAll(disk)
}

-endpoints := mustGetNewEndpointList(erasureDisks...)
+endpoints := mustGetNewEndpoints(erasureDisks...)
_, err := waitForFormatXL(true, endpoints, 0, 16)
if err != errInvalidArgument {
t.Fatalf("Expecting error, got %s", err)
@@ -113,7 +113,8 @@ func TestHashedLayer(t *testing.T) {
defer os.RemoveAll(dir)
}

-objs = append(objs, obj.(*xlObjects))
+z := obj.(*xlZones)
+objs = append(objs, z.zones[0].sets[0])
}

sets := &xlSets{sets: objs, distributionAlgo: "CRCMOD"}
@@ -51,7 +51,8 @@ func TestXLParentDirIsObject(t *testing.T) {
t.Fatalf("Unexpected object name returned got %s, expected %s", objInfo.Name, objectName)
}

-fs := obj.(*xlObjects)
+z := obj.(*xlZones)
+xl := z.zones[0].sets[0]
testCases := []struct {
parentIsObject bool
objectName string
@@ -86,7 +87,7 @@ func TestXLParentDirIsObject(t *testing.T) {
}

for i, testCase := range testCases {
-gotValue := fs.parentDirIsObject(context.Background(), bucketName, testCase.objectName)
+gotValue := xl.parentDirIsObject(context.Background(), bucketName, testCase.objectName)
if testCase.parentIsObject != gotValue {
t.Errorf("Test %d: Unexpected value returned got %t, expected %t", i+1, gotValue, testCase.parentIsObject)
}
@@ -122,7 +122,7 @@ func listOnlineDisks(disks []StorageAPI, partsMetadata []xlMetaV1, errs []error)
func getLatestXLMeta(ctx context.Context, partsMetadata []xlMetaV1, errs []error) (xlMetaV1, error) {

// There should be atleast half correct entries, if not return failure
-if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, globalXLSetDriveCount/2); reducedErr != nil {
+if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, len(partsMetadata)/2); reducedErr != nil {
return xlMetaV1{}, reducedErr
}

@@ -168,7 +168,8 @@ func TestListOnlineDisks(t *testing.T) {
bucket := "bucket"
object := "object"
data := bytes.Repeat([]byte("a"), 1024)
-xlDisks := obj.(*xlObjects).storageDisks
+z := obj.(*xlZones)
+xlDisks := z.zones[0].sets[0].getDisks()
for i, test := range testCases {
// Prepare bucket/object backend for the tests below.

@@ -266,10 +267,10 @@ func TestDisksWithAllParts(t *testing.T) {
object := "object"
// make data with more than one part
partCount := 3
-data := bytes.Repeat([]byte("a"), int(globalPutPartSize)*partCount)
-xl := obj.(*xlObjects)
-xlDisks := xl.storageDisks
+data := bytes.Repeat([]byte("a"), 6*1024*1024*partCount)
+z := obj.(*xlZones)
+xl := z.zones[0].sets[0]
+xlDisks := xl.getDisks()
err = obj.MakeBucketWithLocation(ctx, "bucket", "")
if err != nil {
t.Fatalf("Failed to make a bucket %v", err)
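getLatestXLMeta above now derives its read quorum from the metadata slice it is handed instead of a global drive count, which presumably keeps the check tied to the actual erasure set being read when zones use different layouts. A one-line worked example of the rule, assuming a 16-drive set:

// 16 xl.json reads -> at least 8 must agree before picking the latest metadata.
readQuorum := len(partsMetadata) / 2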
@@ -281,7 +282,7 @@ func TestDisksWithAllParts(t *testing.T) {
}

_, errs := readAllXLMetadata(ctx, xlDisks, bucket, object)
-readQuorum := len(xl.storageDisks) / 2
+readQuorum := len(xlDisks) / 2
if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
t.Fatalf("Failed to read xl meta data %v", reducedErr)
}
@@ -679,14 +679,6 @@ func (xl xlObjects) HealObject(ctx context.Context, bucket, object string, dryRu
}
healCtx := logger.SetReqInfo(context.Background(), newReqInfo)

-// Lock the object before healing. Use read lock since healing
-// will only regenerate parts & xl.json of outdated disks.
-objectLock := xl.nsMutex.NewNSLock(ctx, xl.getLockers(), bucket, object)
-if lerr := objectLock.GetRLock(globalHealingTimeout); lerr != nil {
-return madmin.HealResultItem{}, lerr
-}
-defer objectLock.RUnlock()
-
// Healing directories handle it separately.
if hasSuffix(object, SlashSeparator) {
return xl.healObjectDir(healCtx, bucket, object, dryRun)
@@ -733,7 +725,7 @@ func (xl xlObjects) HealObject(ctx context.Context, bucket, object string, dryRu
writeQuorum = len(storageDisks)/2 + 1
}
if !dryRun && remove {
-err = xl.deleteObject(ctx, bucket, object, writeQuorum, false)
+xl.deleteObject(ctx, bucket, object, writeQuorum, false)
}
}
return defaultHealResult(latestXLMeta, storageDisks, errs, bucket, object), toObjectErr(reducedErr, bucket, object)
@ -35,7 +35,7 @@ func TestUndoMakeBucket(t *testing.T) {
|
|||||||
defer removeRoots(fsDirs)
|
defer removeRoots(fsDirs)
|
||||||
|
|
||||||
// Remove format.json on 16 disks.
|
// Remove format.json on 16 disks.
|
||||||
obj, _, err := initObjectLayer(mustGetNewEndpointList(fsDirs...))
|
obj, _, err := initObjectLayer(mustGetZoneEndpoints(fsDirs...))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -44,8 +44,9 @@ func TestUndoMakeBucket(t *testing.T) {
|
|||||||
if err = obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
|
if err = obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
xl := obj.(*xlObjects)
|
z := obj.(*xlZones)
|
||||||
undoMakeBucket(xl.storageDisks, bucketName)
|
xl := z.zones[0].sets[0]
|
||||||
|
undoMakeBucket(xl.getDisks(), bucketName)
|
||||||
|
|
||||||
// Validate if bucket was deleted properly.
|
// Validate if bucket was deleted properly.
|
||||||
_, err = obj.GetBucketInfo(context.Background(), bucketName)
|
_, err = obj.GetBucketInfo(context.Background(), bucketName)
|
||||||
@ -68,7 +69,7 @@ func TestHealObjectCorrupted(t *testing.T) {
|
|||||||
defer removeRoots(fsDirs)
|
defer removeRoots(fsDirs)
|
||||||
|
|
||||||
// Everything is fine, should return nil
|
// Everything is fine, should return nil
|
||||||
objLayer, _, err := initObjectLayer(mustGetNewEndpointList(fsDirs...))
|
objLayer, _, err := initObjectLayer(mustGetZoneEndpoints(fsDirs...))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -108,8 +109,9 @@ func TestHealObjectCorrupted(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test 1: Remove the object backend files from the first disk.
|
// Test 1: Remove the object backend files from the first disk.
|
||||||
xl := objLayer.(*xlObjects)
|
z := objLayer.(*xlZones)
|
||||||
firstDisk := xl.storageDisks[0]
|
xl := z.zones[0].sets[0]
|
||||||
|
firstDisk := xl.getDisks()[0]
|
||||||
err = firstDisk.DeleteFile(bucket, filepath.Join(object, xlMetaJSONFile))
|
err = firstDisk.DeleteFile(bucket, filepath.Join(object, xlMetaJSONFile))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to delete a file - %v", err)
|
t.Fatalf("Failed to delete a file - %v", err)
|
||||||
@ -179,8 +181,8 @@ func TestHealObjectCorrupted(t *testing.T) {
|
|||||||
// Test 4: checks if HealObject returns an error when xl.json is not found
|
// Test 4: checks if HealObject returns an error when xl.json is not found
|
||||||
// in more than read quorum number of disks, to create a corrupted situation.
|
// in more than read quorum number of disks, to create a corrupted situation.
|
||||||
|
|
||||||
for i := 0; i <= len(xl.storageDisks)/2; i++ {
|
for i := 0; i <= len(xl.getDisks())/2; i++ {
|
||||||
xl.storageDisks[i].DeleteFile(bucket, filepath.Join(object, xlMetaJSONFile))
|
xl.getDisks()[i].DeleteFile(bucket, filepath.Join(object, xlMetaJSONFile))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Try healing now, expect to receive errDiskNotFound.
|
// Try healing now, expect to receive errDiskNotFound.
|
||||||
@ -207,7 +209,7 @@ func TestHealObjectXL(t *testing.T) {
|
|||||||
defer removeRoots(fsDirs)
|
defer removeRoots(fsDirs)
|
||||||
|
|
||||||
// Everything is fine, should return nil
|
// Everything is fine, should return nil
|
||||||
obj, _, err := initObjectLayer(mustGetNewEndpointList(fsDirs...))
|
obj, _, err := initObjectLayer(mustGetZoneEndpoints(fsDirs...))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -247,8 +249,9 @@ func TestHealObjectXL(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Remove the object backend files from the first disk.
|
// Remove the object backend files from the first disk.
|
||||||
xl := obj.(*xlObjects)
|
z := obj.(*xlZones)
|
||||||
firstDisk := xl.storageDisks[0]
|
xl := z.zones[0].sets[0]
|
||||||
|
firstDisk := xl.getDisks()[0]
|
||||||
err = firstDisk.DeleteFile(bucket, filepath.Join(object, xlMetaJSONFile))
|
err = firstDisk.DeleteFile(bucket, filepath.Join(object, xlMetaJSONFile))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to delete a file - %v", err)
|
t.Fatalf("Failed to delete a file - %v", err)
|
||||||
@ -264,9 +267,13 @@ func TestHealObjectXL(t *testing.T) {
|
|||||||
t.Errorf("Expected xl.json file to be present but stat failed - %v", err)
|
t.Errorf("Expected xl.json file to be present but stat failed - %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
xlDisks := xl.getDisks()
|
||||||
|
xl.getDisks = func() []StorageAPI {
|
||||||
// Nil more than half the disks, to remove write quorum.
|
// Nil more than half the disks, to remove write quorum.
|
||||||
for i := 0; i <= len(xl.storageDisks)/2; i++ {
|
for i := 0; i <= len(xlDisks)/2; i++ {
|
||||||
xl.storageDisks[i] = nil
|
xlDisks[i] = nil
|
||||||
|
}
|
||||||
|
return xlDisks
|
||||||
}
|
}
|
||||||
|
|
||||||
// Try healing now, expect to receive errDiskNotFound.
|
// Try healing now, expect to receive errDiskNotFound.
|
||||||
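
Because the set now exposes its disks through a `getDisks` function rather than a plain slice field, the test above drops write quorum by installing a closure that returns a partially nil view. A rough, self-contained illustration of that technique (stand-in types, not the real `StorageAPI`):

```go
package main

import "fmt"

type disk struct{ name string }

type erasureSet struct {
	getDisks func() []*disk
}

// onlineCount reports how many entries in the disk view are non-nil.
func onlineCount(disks []*disk) int {
	n := 0
	for _, d := range disks {
		if d != nil {
			n++
		}
	}
	return n
}

func main() {
	all := []*disk{{"d0"}, {"d1"}, {"d2"}, {"d3"}, {"d4"}, {"d5"}, {"d6"}, {"d7"}}
	set := &erasureSet{getDisks: func() []*disk { return all }}

	// Override getDisks so that more than half the disks appear offline,
	// the same trick the heal tests use to force write-quorum failures.
	disks := set.getDisks()
	set.getDisks = func() []*disk {
		for i := 0; i <= len(disks)/2; i++ {
			disks[i] = nil
		}
		return disks
	}

	fmt.Println("online disks after override:", onlineCount(set.getDisks()))
}
```
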
@ -287,7 +294,7 @@ func TestHealEmptyDirectoryXL(t *testing.T) {
|
|||||||
defer removeRoots(fsDirs)
|
defer removeRoots(fsDirs)
|
||||||
|
|
||||||
// Everything is fine, should return nil
|
// Everything is fine, should return nil
|
||||||
obj, _, err := initObjectLayer(mustGetNewEndpointList(fsDirs...))
|
obj, _, err := initObjectLayer(mustGetZoneEndpoints(fsDirs...))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -302,14 +309,16 @@ func TestHealEmptyDirectoryXL(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Upload an empty directory
|
// Upload an empty directory
|
||||||
_, err = obj.PutObject(context.Background(), bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte{}), 0, "", ""), opts)
|
_, err = obj.PutObject(context.Background(), bucket, object, mustGetPutObjReader(t,
|
||||||
|
bytes.NewReader([]byte{}), 0, "", ""), opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove the object backend files from the first disk.
|
// Remove the object backend files from the first disk.
|
||||||
xl := obj.(*xlObjects)
|
z := obj.(*xlZones)
|
||||||
firstDisk := xl.storageDisks[0]
|
xl := z.zones[0].sets[0]
|
||||||
|
firstDisk := xl.getDisks()[0]
|
||||||
err = firstDisk.DeleteFile(bucket, object)
|
err = firstDisk.DeleteFile(bucket, object)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to delete a file - %v", err)
|
t.Fatalf("Failed to delete a file - %v", err)
|
||||||
|
@ -76,7 +76,9 @@ func testXLReadStat(obj ObjectLayer, instanceType string, disks []string, t *tes
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
_, _, err = obj.(*xlObjects).readXLMetaStat(context.Background(), bucketName, objectName)
|
z := obj.(*xlZones)
|
||||||
|
xl := z.zones[0].sets[0]
|
||||||
|
_, _, err = xl.readXLMetaStat(context.Background(), bucketName, objectName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -85,7 +87,7 @@ func testXLReadStat(obj ObjectLayer, instanceType string, disks []string, t *tes
|
|||||||
removeDiskN(disks, 7)
|
removeDiskN(disks, 7)
|
||||||
|
|
||||||
// Removing disk shouldn't affect reading object info.
|
// Removing disk shouldn't affect reading object info.
|
||||||
_, _, err = obj.(*xlObjects).readXLMetaStat(context.Background(), bucketName, objectName)
|
_, _, err = xl.readXLMetaStat(context.Background(), bucketName, objectName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -94,7 +96,7 @@ func testXLReadStat(obj ObjectLayer, instanceType string, disks []string, t *tes
|
|||||||
os.RemoveAll(path.Join(disk, bucketName))
|
os.RemoveAll(path.Join(disk, bucketName))
|
||||||
}
|
}
|
||||||
|
|
||||||
_, _, err = obj.(*xlObjects).readXLMetaStat(context.Background(), bucketName, objectName)
|
_, _, err = xl.readXLMetaStat(context.Background(), bucketName, objectName)
|
||||||
if err != errVolumeNotFound {
|
if err != errVolumeNotFound {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -159,9 +161,11 @@ func testXLReadMetaParts(obj ObjectLayer, instanceType string, disks []string, t
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
uploadIDPath := obj.(*xlObjects).getUploadIDDir(bucketNames[0], objectNames[0], uploadIDs[0])
|
z := obj.(*xlZones)
|
||||||
|
xl := z.zones[0].sets[0]
|
||||||
|
uploadIDPath := xl.getUploadIDDir(bucketNames[0], objectNames[0], uploadIDs[0])
|
||||||
|
|
||||||
_, _, err = obj.(*xlObjects).readXLMetaParts(context.Background(), minioMetaMultipartBucket, uploadIDPath)
|
_, _, err = xl.readXLMetaParts(context.Background(), minioMetaMultipartBucket, uploadIDPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -170,17 +174,17 @@ func testXLReadMetaParts(obj ObjectLayer, instanceType string, disks []string, t
|
|||||||
removeDiskN(disks, 7)
|
removeDiskN(disks, 7)
|
||||||
|
|
||||||
// Removing disk shouldn't affect reading object parts info.
|
// Removing disk shouldn't affect reading object parts info.
|
||||||
_, _, err = obj.(*xlObjects).readXLMetaParts(context.Background(), minioMetaMultipartBucket, uploadIDPath)
|
_, _, err = xl.readXLMetaParts(context.Background(), minioMetaMultipartBucket, uploadIDPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, disk := range disks {
|
for _, disk := range disks {
|
||||||
os.RemoveAll(path.Join(disk, bucketNames[0]))
|
os.RemoveAll(path.Join(disk, bucketNames[0]))
|
||||||
os.RemoveAll(path.Join(disk, minioMetaMultipartBucket, obj.(*xlObjects).getMultipartSHADir(bucketNames[0], objectNames[0])))
|
os.RemoveAll(path.Join(disk, minioMetaMultipartBucket, xl.getMultipartSHADir(bucketNames[0], objectNames[0])))
|
||||||
}
|
}
|
||||||
|
|
||||||
_, _, err = obj.(*xlObjects).readXLMetaParts(context.Background(), minioMetaMultipartBucket, uploadIDPath)
|
_, _, err = xl.readXLMetaParts(context.Background(), minioMetaMultipartBucket, uploadIDPath)
|
||||||
if err != errFileNotFound {
|
if err != errFileNotFound {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
@ -262,13 +262,6 @@ func (xl xlObjects) NewMultipartUpload(ctx context.Context, bucket, object strin
|
|||||||
//
|
//
|
||||||
// Implements S3 compatible Upload Part Copy API.
|
// Implements S3 compatible Upload Part Copy API.
|
||||||
func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {
|
func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {
|
||||||
// Hold read locks on source object only if we are
|
|
||||||
// going to read data from source object.
|
|
||||||
objectSRLock := xl.nsMutex.NewNSLock(ctx, xl.getLockers(), srcBucket, srcObject)
|
|
||||||
if err := objectSRLock.GetRLock(globalObjectTimeout); err != nil {
|
|
||||||
return pi, err
|
|
||||||
}
|
|
||||||
defer objectSRLock.RUnlock()
|
|
||||||
|
|
||||||
if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, xl); err != nil {
|
if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, xl); err != nil {
|
||||||
return pi, err
|
return pi, err
|
||||||
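
This hunk and the ones that follow strip the per-object and per-upload `nsMutex` locking out of the `xlObjects` multipart and object methods, while `xlObjects.NewNSLock` (later in this diff) still hands out lockers; presumably the new zones layer in `cmd/xl-zones.go` now acquires the lock before delegating to a set. A hedged sketch of that "lock at the wrapper, delegate to the set" shape, using invented placeholder types rather than MinIO's real ones:

```go
package main

import (
	"fmt"
	"sync"
)

// rwLocker is a stand-in for MinIO's RWLocker interface.
type rwLocker interface {
	Lock()
	Unlock()
}

// objectSet is a stand-in for an erasure set whose methods no longer lock.
type objectSet struct{}

func (s *objectSet) putObject(bucket, object string) error {
	fmt.Println("writing", bucket+"/"+object, "without taking its own lock")
	return nil
}

// zonesWrapper is a stand-in for the layer that now owns namespace locking.
type zonesWrapper struct {
	set   *objectSet
	locks sync.Map // "bucket/object" -> *sync.Mutex
}

func (z *zonesWrapper) newNSLock(bucket, object string) rwLocker {
	m, _ := z.locks.LoadOrStore(bucket+"/"+object, &sync.Mutex{})
	return m.(*sync.Mutex)
}

func (z *zonesWrapper) PutObject(bucket, object string) error {
	lk := z.newNSLock(bucket, object)
	lk.Lock() // lock once, up here, instead of inside the set method
	defer lk.Unlock()
	return z.set.putObject(bucket, object)
}

func main() {
	z := &zonesWrapper{set: &objectSet{}}
	_ = z.PutObject("bucket", "object")
}
```
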
@ -303,17 +296,9 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
|
|||||||
var partsMetadata []xlMetaV1
|
var partsMetadata []xlMetaV1
|
||||||
var errs []error
|
var errs []error
|
||||||
uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID)
|
uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID)
|
||||||
uploadIDLockPath := xl.getUploadIDLockPath(bucket, object, uploadID)
|
|
||||||
|
|
||||||
// pre-check upload id lock.
|
|
||||||
preUploadIDLock := xl.nsMutex.NewNSLock(ctx, xl.getLockers(), minioMetaMultipartBucket, uploadIDLockPath)
|
|
||||||
if err := preUploadIDLock.GetRLock(globalOperationTimeout); err != nil {
|
|
||||||
return pi, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validates if upload ID exists.
|
// Validates if upload ID exists.
|
||||||
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
|
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
|
||||||
preUploadIDLock.RUnlock()
|
|
||||||
return pi, toObjectErr(err, bucket, object, uploadID)
|
return pi, toObjectErr(err, bucket, object, uploadID)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -324,16 +309,13 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
|
|||||||
// get Quorum for this object
|
// get Quorum for this object
|
||||||
_, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs)
|
_, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
preUploadIDLock.RUnlock()
|
|
||||||
return pi, toObjectErr(err, bucket, object)
|
return pi, toObjectErr(err, bucket, object)
|
||||||
}
|
}
|
||||||
|
|
||||||
reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
|
reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
|
||||||
if reducedErr == errXLWriteQuorum {
|
if reducedErr == errXLWriteQuorum {
|
||||||
preUploadIDLock.RUnlock()
|
|
||||||
return pi, toObjectErr(reducedErr, bucket, object)
|
return pi, toObjectErr(reducedErr, bucket, object)
|
||||||
}
|
}
|
||||||
preUploadIDLock.RUnlock()
|
|
||||||
|
|
||||||
// List all online disks.
|
// List all online disks.
|
||||||
onlineDisks, modTime := listOnlineDisks(xl.getDisks(), partsMetadata, errs)
|
onlineDisks, modTime := listOnlineDisks(xl.getDisks(), partsMetadata, errs)
|
||||||
@ -403,13 +385,6 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// post-upload check (write) lock
|
|
||||||
postUploadIDLock := xl.nsMutex.NewNSLock(ctx, xl.getLockers(), minioMetaMultipartBucket, uploadIDLockPath)
|
|
||||||
if err = postUploadIDLock.GetLock(globalOperationTimeout); err != nil {
|
|
||||||
return pi, err
|
|
||||||
}
|
|
||||||
defer postUploadIDLock.Unlock()
|
|
||||||
|
|
||||||
// Validates if upload ID exists.
|
// Validates if upload ID exists.
|
||||||
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
|
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
|
||||||
return pi, toObjectErr(err, bucket, object, uploadID)
|
return pi, toObjectErr(err, bucket, object, uploadID)
|
||||||
@ -497,16 +472,6 @@ func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadI
|
|||||||
if err := checkListPartsArgs(ctx, bucket, object, xl); err != nil {
|
if err := checkListPartsArgs(ctx, bucket, object, xl); err != nil {
|
||||||
return result, err
|
return result, err
|
||||||
}
|
}
|
||||||
// Hold lock so that there is no competing
|
|
||||||
// abort-multipart-upload or complete-multipart-upload.
|
|
||||||
uploadIDLock := xl.nsMutex.NewNSLock(ctx, xl.getLockers(),
|
|
||||||
minioMetaMultipartBucket,
|
|
||||||
xl.getUploadIDLockPath(bucket, object, uploadID))
|
|
||||||
if err := uploadIDLock.GetLock(globalListingTimeout); err != nil {
|
|
||||||
return result, err
|
|
||||||
}
|
|
||||||
defer uploadIDLock.Unlock()
|
|
||||||
|
|
||||||
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
|
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
|
||||||
return result, toObjectErr(err, bucket, object, uploadID)
|
return result, toObjectErr(err, bucket, object, uploadID)
|
||||||
}
|
}
|
||||||
@ -603,27 +568,6 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
|
|||||||
if err := checkCompleteMultipartArgs(ctx, bucket, object, xl); err != nil {
|
if err := checkCompleteMultipartArgs(ctx, bucket, object, xl); err != nil {
|
||||||
return oi, err
|
return oi, err
|
||||||
}
|
}
|
||||||
// Hold write lock on the object.
|
|
||||||
destLock := xl.nsMutex.NewNSLock(ctx, xl.getLockers(), bucket, object)
|
|
||||||
if err := destLock.GetLock(globalObjectTimeout); err != nil {
|
|
||||||
return oi, err
|
|
||||||
}
|
|
||||||
defer destLock.Unlock()
|
|
||||||
|
|
||||||
uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID)
|
|
||||||
uploadIDLockPath := xl.getUploadIDLockPath(bucket, object, uploadID)
|
|
||||||
|
|
||||||
// Hold lock so that
|
|
||||||
//
|
|
||||||
// 1) no one aborts this multipart upload
|
|
||||||
//
|
|
||||||
// 2) no one does a parallel complete-multipart-upload on this
|
|
||||||
// multipart upload
|
|
||||||
uploadIDLock := xl.nsMutex.NewNSLock(ctx, xl.getLockers(), minioMetaMultipartBucket, uploadIDLockPath)
|
|
||||||
if err := uploadIDLock.GetLock(globalOperationTimeout); err != nil {
|
|
||||||
return oi, err
|
|
||||||
}
|
|
||||||
defer uploadIDLock.Unlock()
|
|
||||||
|
|
||||||
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
|
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
|
||||||
return oi, toObjectErr(err, bucket, object, uploadID)
|
return oi, toObjectErr(err, bucket, object, uploadID)
|
||||||
@ -638,6 +582,8 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
|
|||||||
// Calculate s3 compatible md5sum for complete multipart.
|
// Calculate s3 compatible md5sum for complete multipart.
|
||||||
s3MD5 := getCompleteMultipartMD5(parts)
|
s3MD5 := getCompleteMultipartMD5(parts)
|
||||||
|
|
||||||
|
uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID)
|
||||||
|
|
||||||
// Read metadata associated with the object from all disks.
|
// Read metadata associated with the object from all disks.
|
||||||
partsMetadata, errs := readAllXLMetadata(ctx, xl.getDisks(), minioMetaMultipartBucket, uploadIDPath)
|
partsMetadata, errs := readAllXLMetadata(ctx, xl.getDisks(), minioMetaMultipartBucket, uploadIDPath)
|
||||||
|
|
||||||
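
The CompleteMultipartUpload hunk above keeps `getCompleteMultipartMD5(parts)`, which produces the S3-compatible ETag for a completed multipart upload. By S3 convention that ETag is the MD5 of the concatenated binary MD5s of each part, suffixed with `-<part count>`; the snippet below demonstrates the convention only (the part ETags are made up, and MinIO's helper signature may differ):

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// completeMultipartMD5 follows the S3 convention: MD5 over the concatenation
// of the binary part MD5s, then "-<number of parts>" appended.
func completeMultipartMD5(partETags []string) (string, error) {
	h := md5.New()
	for _, etag := range partETags {
		b, err := hex.DecodeString(etag)
		if err != nil {
			return "", err
		}
		h.Write(b)
	}
	return hex.EncodeToString(h.Sum(nil)) + fmt.Sprintf("-%d", len(partETags)), nil
}

func main() {
	// Hypothetical per-part MD5s for a 3-part upload.
	parts := []string{
		"5d41402abc4b2a76b9719d911017c592",
		"7d793037a0760186574b0282f2f435e7",
		"2e1a56e6e7e6a6e0f6f3c7f5e5d4c3b2",
	}
	etag, err := completeMultipartMD5(parts)
	if err != nil {
		panic(err)
	}
	fmt.Println("multipart ETag:", etag)
}
```
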
@ -820,22 +766,13 @@ func (xl xlObjects) AbortMultipartUpload(ctx context.Context, bucket, object, up
|
|||||||
if err := checkAbortMultipartArgs(ctx, bucket, object, xl); err != nil {
|
if err := checkAbortMultipartArgs(ctx, bucket, object, xl); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// Construct uploadIDPath.
|
|
||||||
uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID)
|
|
||||||
uploadIDLockPath := xl.getUploadIDLockPath(bucket, object, uploadID)
|
|
||||||
// Hold lock so that there is no competing
|
|
||||||
// complete-multipart-upload or put-object-part.
|
|
||||||
uploadIDLock := xl.nsMutex.NewNSLock(ctx, xl.getLockers(), minioMetaMultipartBucket, uploadIDLockPath)
|
|
||||||
if err := uploadIDLock.GetLock(globalOperationTimeout); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer uploadIDLock.Unlock()
|
|
||||||
|
|
||||||
// Validates if upload ID exists.
|
// Validates if upload ID exists.
|
||||||
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
|
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
|
||||||
return toObjectErr(err, bucket, object, uploadID)
|
return toObjectErr(err, bucket, object, uploadID)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID)
|
||||||
|
|
||||||
// Read metadata associated with the object from all disks.
|
// Read metadata associated with the object from all disks.
|
||||||
partsMetadata, errs := readAllXLMetadata(ctx, xl.getDisks(), minioMetaMultipartBucket, uploadIDPath)
|
partsMetadata, errs := readAllXLMetadata(ctx, xl.getDisks(), minioMetaMultipartBucket, uploadIDPath)
|
||||||
|
|
||||||
|
@ -32,7 +32,8 @@ func TestXLCleanupStaleMultipartUploads(t *testing.T) {
|
|||||||
// Defer cleanup of backend directories
|
// Defer cleanup of backend directories
|
||||||
defer removeRoots(fsDirs)
|
defer removeRoots(fsDirs)
|
||||||
|
|
||||||
xl := obj.(*xlObjects)
|
z := obj.(*xlZones)
|
||||||
|
xl := z.zones[0].sets[0]
|
||||||
|
|
||||||
// Close the go-routine, we are going to
|
// Close the go-routine, we are going to
|
||||||
// manually start it and test in this test case.
|
// manually start it and test in this test case.
|
||||||
|
@ -123,27 +123,7 @@ func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBuc
|
|||||||
// GetObjectNInfo - returns object info and an object
|
// GetObjectNInfo - returns object info and an object
|
||||||
// Read(Closer). When err != nil, the returned reader is always nil.
|
// Read(Closer). When err != nil, the returned reader is always nil.
|
||||||
func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
|
func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
|
||||||
var nsUnlocker = func() {}
|
|
||||||
|
|
||||||
// Acquire lock
|
|
||||||
if lockType != noLock {
|
|
||||||
lock := xl.nsMutex.NewNSLock(ctx, xl.getLockers(), bucket, object)
|
|
||||||
switch lockType {
|
|
||||||
case writeLock:
|
|
||||||
if err = lock.GetLock(globalObjectTimeout); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
nsUnlocker = lock.Unlock
|
|
||||||
case readLock:
|
|
||||||
if err = lock.GetRLock(globalObjectTimeout); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
nsUnlocker = lock.RUnlock
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = checkGetObjArgs(ctx, bucket, object); err != nil {
|
if err = checkGetObjArgs(ctx, bucket, object); err != nil {
|
||||||
nsUnlocker()
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -152,20 +132,18 @@ func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, r
|
|||||||
if hasSuffix(object, SlashSeparator) {
|
if hasSuffix(object, SlashSeparator) {
|
||||||
var objInfo ObjectInfo
|
var objInfo ObjectInfo
|
||||||
if objInfo, err = xl.getObjectInfoDir(ctx, bucket, object); err != nil {
|
if objInfo, err = xl.getObjectInfoDir(ctx, bucket, object); err != nil {
|
||||||
nsUnlocker()
|
|
||||||
return nil, toObjectErr(err, bucket, object)
|
return nil, toObjectErr(err, bucket, object)
|
||||||
}
|
}
|
||||||
return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts.CheckCopyPrecondFn, nsUnlocker)
|
return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts.CheckCopyPrecondFn)
|
||||||
}
|
}
|
||||||
|
|
||||||
var objInfo ObjectInfo
|
var objInfo ObjectInfo
|
||||||
objInfo, err = xl.getObjectInfo(ctx, bucket, object)
|
objInfo, err = xl.getObjectInfo(ctx, bucket, object)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
nsUnlocker()
|
|
||||||
return nil, toObjectErr(err, bucket, object)
|
return nil, toObjectErr(err, bucket, object)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn, off, length, nErr := NewGetObjectReader(rs, objInfo, opts.CheckCopyPrecondFn, nsUnlocker)
|
fn, off, length, nErr := NewGetObjectReader(rs, objInfo, opts.CheckCopyPrecondFn)
|
||||||
if nErr != nil {
|
if nErr != nil {
|
||||||
return nil, nErr
|
return nil, nErr
|
||||||
}
|
}
|
||||||
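
In the old read path, `NewGetObjectReader` and `NewGetObjectReaderFromReader` received `nsUnlocker` so the namespace lock could be released when the returned reader was closed; with the lock gone from this layer, that argument disappears. For the general shape of that cleanup-on-close plumbing, here is a small generic illustration (not MinIO's actual `GetObjectReader` type):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// cleanupReadCloser runs the registered cleanup functions exactly once,
// when the wrapper is closed.
type cleanupReadCloser struct {
	io.Reader
	cleanups []func()
	closed   bool
}

func (c *cleanupReadCloser) Close() error {
	if c.closed {
		return nil
	}
	c.closed = true
	for _, fn := range c.cleanups {
		fn()
	}
	return nil
}

func main() {
	unlock := func() { fmt.Println("namespace lock released") }
	r := &cleanupReadCloser{Reader: strings.NewReader("object data"), cleanups: []func(){unlock}}

	data, _ := io.ReadAll(r)
	fmt.Println("read:", string(data))
	_ = r.Close()
}
```
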
@ -189,12 +167,6 @@ func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, r
|
|||||||
// startOffset indicates the starting read location of the object.
|
// startOffset indicates the starting read location of the object.
|
||||||
// length indicates the total length of the object.
|
// length indicates the total length of the object.
|
||||||
func (xl xlObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
|
func (xl xlObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
|
||||||
// Lock the object before reading.
|
|
||||||
objectLock := xl.nsMutex.NewNSLock(ctx, xl.getLockers(), bucket, object)
|
|
||||||
if err := objectLock.GetRLock(globalObjectTimeout); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer objectLock.RUnlock()
|
|
||||||
return xl.getObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
|
return xl.getObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -368,13 +340,6 @@ func (xl xlObjects) getObjectInfoDir(ctx context.Context, bucket, object string)
|
|||||||
|
|
||||||
// GetObjectInfo - reads object metadata and replies back ObjectInfo.
|
// GetObjectInfo - reads object metadata and replies back ObjectInfo.
|
||||||
func (xl xlObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (oi ObjectInfo, e error) {
|
func (xl xlObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (oi ObjectInfo, e error) {
|
||||||
// Lock the object before reading.
|
|
||||||
objectLock := xl.nsMutex.NewNSLock(ctx, xl.getLockers(), bucket, object)
|
|
||||||
if err := objectLock.GetRLock(globalObjectTimeout); err != nil {
|
|
||||||
return oi, err
|
|
||||||
}
|
|
||||||
defer objectLock.RUnlock()
|
|
||||||
|
|
||||||
if err := checkGetObjArgs(ctx, bucket, object); err != nil {
|
if err := checkGetObjArgs(ctx, bucket, object); err != nil {
|
||||||
return oi, err
|
return oi, err
|
||||||
}
|
}
|
||||||
@ -497,13 +462,6 @@ func (xl xlObjects) PutObject(ctx context.Context, bucket string, object string,
|
|||||||
return ObjectInfo{}, err
|
return ObjectInfo{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Lock the object.
|
|
||||||
objectLock := xl.nsMutex.NewNSLock(ctx, xl.getLockers(), bucket, object)
|
|
||||||
if err := objectLock.GetLock(globalObjectTimeout); err != nil {
|
|
||||||
return objInfo, err
|
|
||||||
}
|
|
||||||
defer objectLock.Unlock()
|
|
||||||
|
|
||||||
return xl.putObject(ctx, bucket, object, data, opts)
|
return xl.putObject(ctx, bucket, object, data, opts)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -844,20 +802,6 @@ func (xl xlObjects) deleteObjects(ctx context.Context, bucket string, objects []
|
|||||||
errs[i] = checkDelObjArgs(ctx, bucket, object)
|
errs[i] = checkDelObjArgs(ctx, bucket, object)
|
||||||
}
|
}
|
||||||
|
|
||||||
var objectLocks = make([]RWLocker, len(objects))
|
|
||||||
|
|
||||||
for i, object := range objects {
|
|
||||||
if errs[i] != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Acquire a write lock before deleting the object.
|
|
||||||
objectLocks[i] = xl.nsMutex.NewNSLock(ctx, xl.getLockers(), bucket, object)
|
|
||||||
if errs[i] = objectLocks[i].GetLock(globalOperationTimeout); errs[i] != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
defer objectLocks[i].Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, object := range objects {
|
for i, object := range objects {
|
||||||
isObjectDirs[i] = hasSuffix(object, SlashSeparator)
|
isObjectDirs[i] = hasSuffix(object, SlashSeparator)
|
||||||
}
|
}
|
||||||
@ -953,13 +897,6 @@ func (xl xlObjects) DeleteObjects(ctx context.Context, bucket string, objects []
|
|||||||
// any error as it is not necessary for the handler to reply back a
|
// any error as it is not necessary for the handler to reply back a
|
||||||
// response to the client request.
|
// response to the client request.
|
||||||
func (xl xlObjects) DeleteObject(ctx context.Context, bucket, object string) (err error) {
|
func (xl xlObjects) DeleteObject(ctx context.Context, bucket, object string) (err error) {
|
||||||
// Acquire a write lock before deleting the object.
|
|
||||||
objectLock := xl.nsMutex.NewNSLock(ctx, xl.getLockers(), bucket, object)
|
|
||||||
if perr := objectLock.GetLock(globalOperationTimeout); perr != nil {
|
|
||||||
return perr
|
|
||||||
}
|
|
||||||
defer objectLock.Unlock()
|
|
||||||
|
|
||||||
if err = checkDelObjArgs(ctx, bucket, object); err != nil {
|
if err = checkDelObjArgs(ctx, bucket, object); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -125,7 +125,9 @@ func TestXLDeleteObjectsXLSet(t *testing.T) {
|
|||||||
for _, dir := range fsDirs {
|
for _, dir := range fsDirs {
|
||||||
defer os.RemoveAll(dir)
|
defer os.RemoveAll(dir)
|
||||||
}
|
}
|
||||||
objs = append(objs, obj.(*xlObjects))
|
z := obj.(*xlZones)
|
||||||
|
xl := z.zones[0].sets[0]
|
||||||
|
objs = append(objs, xl)
|
||||||
}
|
}
|
||||||
|
|
||||||
xlSets := &xlSets{sets: objs, distributionAlgo: "CRCMOD"}
|
xlSets := &xlSets{sets: objs, distributionAlgo: "CRCMOD"}
|
||||||
@ -192,8 +194,11 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
// Cleanup backend directories
|
||||||
|
defer removeRoots(fsDirs)
|
||||||
|
|
||||||
xl := obj.(*xlObjects)
|
z := obj.(*xlZones)
|
||||||
|
xl := z.zones[0].sets[0]
|
||||||
|
|
||||||
// Create "bucket"
|
// Create "bucket"
|
||||||
err = obj.MakeBucketWithLocation(context.Background(), "bucket", "")
|
err = obj.MakeBucketWithLocation(context.Background(), "bucket", "")
|
||||||
@ -211,8 +216,12 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
|
|||||||
}
|
}
|
||||||
// for a 16 disk setup, quorum is 9. To simulate disks not found yet
|
// for a 16 disk setup, quorum is 9. To simulate disks not found yet
|
||||||
// quorum is available, we remove disks leaving quorum disks behind.
|
// quorum is available, we remove disks leaving quorum disks behind.
|
||||||
for i := range xl.storageDisks[:7] {
|
xlDisks := xl.getDisks()
|
||||||
xl.storageDisks[i] = newNaughtyDisk(xl.storageDisks[i], nil, errFaultyDisk)
|
xl.getDisks = func() []StorageAPI {
|
||||||
|
for i := range xlDisks[:7] {
|
||||||
|
xlDisks[i] = newNaughtyDisk(xlDisks[i], nil, errFaultyDisk)
|
||||||
|
}
|
||||||
|
return xlDisks
|
||||||
}
|
}
|
||||||
err = obj.DeleteObject(context.Background(), bucket, object)
|
err = obj.DeleteObject(context.Background(), bucket, object)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -226,15 +235,17 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Remove one more disk to 'lose' quorum, by setting it to nil.
|
// Remove one more disk to 'lose' quorum, by setting it to nil.
|
||||||
xl.storageDisks[7] = nil
|
xlDisks = xl.getDisks()
|
||||||
xl.storageDisks[8] = nil
|
xl.getDisks = func() []StorageAPI {
|
||||||
|
xlDisks[7] = nil
|
||||||
|
xlDisks[8] = nil
|
||||||
|
return xlDisks
|
||||||
|
}
|
||||||
err = obj.DeleteObject(context.Background(), bucket, object)
|
err = obj.DeleteObject(context.Background(), bucket, object)
|
||||||
// since majority of disks are not available, metaquorum is not achieved and hence errXLReadQuorum error
|
// since majority of disks are not available, metaquorum is not achieved and hence errXLReadQuorum error
|
||||||
if err != toObjectErr(errXLReadQuorum, bucket, object) {
|
if err != toObjectErr(errXLReadQuorum, bucket, object) {
|
||||||
t.Errorf("Expected deleteObject to fail with %v, but failed with %v", toObjectErr(errXLReadQuorum, bucket, object), err)
|
t.Errorf("Expected deleteObject to fail with %v, but failed with %v", toObjectErr(errXLReadQuorum, bucket, object), err)
|
||||||
}
|
}
|
||||||
// Cleanup backend directories
|
|
||||||
removeRoots(fsDirs)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetObjectNoQuorum(t *testing.T) {
|
func TestGetObjectNoQuorum(t *testing.T) {
|
||||||
@ -243,8 +254,11 @@ func TestGetObjectNoQuorum(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
// Cleanup backend directories.
|
||||||
|
defer removeRoots(fsDirs)
|
||||||
|
|
||||||
xl := obj.(*xlObjects)
|
z := obj.(*xlZones)
|
||||||
|
xl := z.zones[0].sets[0]
|
||||||
|
|
||||||
// Create "bucket"
|
// Create "bucket"
|
||||||
err = obj.MakeBucketWithLocation(context.Background(), "bucket", "")
|
err = obj.MakeBucketWithLocation(context.Background(), "bucket", "")
|
||||||
@ -270,22 +284,24 @@ func TestGetObjectNoQuorum(t *testing.T) {
|
|||||||
for i := 0; i <= f; i++ {
|
for i := 0; i <= f; i++ {
|
||||||
diskErrors[i] = nil
|
diskErrors[i] = nil
|
||||||
}
|
}
|
||||||
for i := range xl.storageDisks[:9] {
|
xlDisks := xl.getDisks()
|
||||||
switch diskType := xl.storageDisks[i].(type) {
|
for i := range xlDisks[:9] {
|
||||||
|
switch diskType := xlDisks[i].(type) {
|
||||||
case *naughtyDisk:
|
case *naughtyDisk:
|
||||||
xl.storageDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk)
|
xlDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk)
|
||||||
default:
|
default:
|
||||||
xl.storageDisks[i] = newNaughtyDisk(xl.storageDisks[i], diskErrors, errFaultyDisk)
|
xlDisks[i] = newNaughtyDisk(xlDisks[i], diskErrors, errFaultyDisk)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
xl.getDisks = func() []StorageAPI {
|
||||||
|
return xlDisks
|
||||||
|
}
|
||||||
// Fetch object from store.
|
// Fetch object from store.
|
||||||
err = xl.GetObject(context.Background(), bucket, object, 0, int64(len("abcd")), ioutil.Discard, "", opts)
|
err = xl.GetObject(context.Background(), bucket, object, 0, int64(len("abcd")), ioutil.Discard, "", opts)
|
||||||
if err != toObjectErr(errXLReadQuorum, bucket, object) {
|
if err != toObjectErr(errXLReadQuorum, bucket, object) {
|
||||||
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
|
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Cleanup backend directories.
|
|
||||||
removeRoots(fsDirs)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPutObjectNoQuorum(t *testing.T) {
|
func TestPutObjectNoQuorum(t *testing.T) {
|
||||||
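
These quorum tests wrap the first nine disks in `naughtyDisk` instances that return `errFaultyDisk`, then republish the slice through the `getDisks` closure so reads can no longer reach quorum. A self-contained sketch of the same fault-injection idea, with made-up types instead of MinIO's `StorageAPI` and `naughtyDisk`:

```go
package main

import (
	"errors"
	"fmt"
)

var errFaulty = errors.New("disk faulty")

// storage is a stand-in for the real disk API.
type storage interface {
	ReadMeta() (string, error)
}

type goodDisk struct{ id int }

func (d goodDisk) ReadMeta() (string, error) { return fmt.Sprintf("meta-from-%d", d.id), nil }

// faultyDisk wraps another disk and always fails, like a naughty disk.
type faultyDisk struct{ inner storage }

func (faultyDisk) ReadMeta() (string, error) { return "", errFaulty }

// readWithQuorum succeeds only if at least `quorum` disks answered.
func readWithQuorum(disks []storage, quorum int) error {
	ok := 0
	for _, d := range disks {
		if _, err := d.ReadMeta(); err == nil {
			ok++
		}
	}
	if ok < quorum {
		return errors.New("read quorum not met")
	}
	return nil
}

func main() {
	disks := make([]storage, 16)
	for i := range disks {
		disks[i] = goodDisk{id: i}
	}
	// Inject faults into 9 of 16 disks: a read quorum of 8 can no longer be met.
	for i := 0; i < 9; i++ {
		disks[i] = faultyDisk{inner: disks[i]}
	}
	fmt.Println(readWithQuorum(disks, 8)) // prints the quorum error
}
```
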
@ -295,7 +311,11 @@ func TestPutObjectNoQuorum(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
xl := obj.(*xlObjects)
|
// Cleanup backend directories.
|
||||||
|
defer removeRoots(fsDirs)
|
||||||
|
|
||||||
|
z := obj.(*xlZones)
|
||||||
|
xl := z.zones[0].sets[0]
|
||||||
|
|
||||||
// Create "bucket"
|
// Create "bucket"
|
||||||
err = obj.MakeBucketWithLocation(context.Background(), "bucket", "")
|
err = obj.MakeBucketWithLocation(context.Background(), "bucket", "")
|
||||||
@ -321,22 +341,24 @@ func TestPutObjectNoQuorum(t *testing.T) {
|
|||||||
for i := 0; i <= f; i++ {
|
for i := 0; i <= f; i++ {
|
||||||
diskErrors[i] = nil
|
diskErrors[i] = nil
|
||||||
}
|
}
|
||||||
for i := range xl.storageDisks[:9] {
|
xlDisks := xl.getDisks()
|
||||||
switch diskType := xl.storageDisks[i].(type) {
|
for i := range xlDisks[:9] {
|
||||||
|
switch diskType := xlDisks[i].(type) {
|
||||||
case *naughtyDisk:
|
case *naughtyDisk:
|
||||||
xl.storageDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk)
|
xlDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk)
|
||||||
default:
|
default:
|
||||||
xl.storageDisks[i] = newNaughtyDisk(xl.storageDisks[i], diskErrors, errFaultyDisk)
|
xlDisks[i] = newNaughtyDisk(xlDisks[i], diskErrors, errFaultyDisk)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
xl.getDisks = func() []StorageAPI {
|
||||||
|
return xlDisks
|
||||||
|
}
|
||||||
// Upload new content to same object "object"
|
// Upload new content to same object "object"
|
||||||
_, err = obj.PutObject(context.Background(), bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
|
_, err = obj.PutObject(context.Background(), bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
|
||||||
if err != toObjectErr(errXLWriteQuorum, bucket, object) {
|
if err != toObjectErr(errXLWriteQuorum, bucket, object) {
|
||||||
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
|
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Cleanup backend directories.
|
|
||||||
removeRoots(fsDirs)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Tests both object and bucket healing.
|
// Tests both object and bucket healing.
|
||||||
@ -346,7 +368,9 @@ func TestHealing(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer removeRoots(fsDirs)
|
defer removeRoots(fsDirs)
|
||||||
xl := obj.(*xlObjects)
|
|
||||||
|
z := obj.(*xlZones)
|
||||||
|
xl := z.zones[0].sets[0]
|
||||||
|
|
||||||
// Create "bucket"
|
// Create "bucket"
|
||||||
err = obj.MakeBucketWithLocation(context.Background(), "bucket", "")
|
err = obj.MakeBucketWithLocation(context.Background(), "bucket", "")
|
||||||
@ -369,7 +393,7 @@ func TestHealing(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
disk := xl.storageDisks[0]
|
disk := xl.getDisks()[0]
|
||||||
xlMetaPreHeal, err := readXLMeta(context.Background(), disk, bucket, object)
|
xlMetaPreHeal, err := readXLMeta(context.Background(), disk, bucket, object)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@ -438,7 +462,7 @@ func TestHealing(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
// Stat the bucket to make sure that it was created.
|
// Stat the bucket to make sure that it was created.
|
||||||
_, err = xl.storageDisks[0].StatVol(bucket)
|
_, err = xl.getDisks()[0].StatVol(bucket)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -454,9 +478,11 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
|
|||||||
var opts ObjectOptions
|
var opts ObjectOptions
|
||||||
// make data with more than one part
|
// make data with more than one part
|
||||||
partCount := 3
|
partCount := 3
|
||||||
data := bytes.Repeat([]byte("a"), int(globalPutPartSize)*partCount)
|
data := bytes.Repeat([]byte("a"), 6*1024*1024*partCount)
|
||||||
xl := obj.(*xlObjects)
|
|
||||||
xlDisks := xl.storageDisks
|
z := obj.(*xlZones)
|
||||||
|
xl := z.zones[0].sets[0]
|
||||||
|
xlDisks := xl.getDisks()
|
||||||
|
|
||||||
err := obj.MakeBucketWithLocation(context.Background(), bucket, globalMinioDefaultRegion)
|
err := obj.MakeBucketWithLocation(context.Background(), bucket, globalMinioDefaultRegion)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -399,19 +399,19 @@ func TestShuffleDisks(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
objLayer, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
|
objLayer, _, err := initObjectLayer(mustGetZoneEndpoints(disks...))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
removeRoots(disks)
|
removeRoots(disks)
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer removeRoots(disks)
|
defer removeRoots(disks)
|
||||||
xl := objLayer.(*xlObjects)
|
z := objLayer.(*xlZones)
|
||||||
testShuffleDisks(t, xl)
|
testShuffleDisks(t, z)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test shuffleDisks which returns shuffled slice of disks for their actual distribution.
|
// Test shuffleDisks which returns shuffled slice of disks for their actual distribution.
|
||||||
func testShuffleDisks(t *testing.T, xl *xlObjects) {
|
func testShuffleDisks(t *testing.T, z *xlZones) {
|
||||||
disks := xl.storageDisks
|
disks := z.zones[0].GetDisks(0)()
|
||||||
distribution := []int{16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15}
|
distribution := []int{16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15}
|
||||||
shuffledDisks := shuffleDisks(disks, distribution)
|
shuffledDisks := shuffleDisks(disks, distribution)
|
||||||
// From the "distribution" above you can notice that:
|
// From the "distribution" above you can notice that:
|
||||||
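
The shuffle tests now pull the first set's disks through `z.zones[0].GetDisks(0)()` before running them through `shuffleDisks` with a fixed distribution. As an illustration of what a distribution-driven shuffle does (a guess at the semantics for illustration only, not MinIO's exact implementation), a 1-based distribution can be read as "the disk at index i moves to slot distribution[i]":

```go
package main

import "fmt"

// shuffleByDistribution places input[i] at output[distribution[i]-1].
// distribution is assumed to be a 1-based permutation of 1..len(input).
func shuffleByDistribution(input []string, distribution []int) []string {
	out := make([]string, len(input))
	for i, pos := range distribution {
		out[pos-1] = input[i]
	}
	return out
}

func main() {
	disks := []string{"d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8"}
	distribution := []int{8, 6, 4, 2, 1, 3, 5, 7} // same shape as the 16-entry slice in the test
	fmt.Println(shuffleByDistribution(disks, distribution))
}
```
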
@ -444,12 +444,12 @@ func TestEvalDisks(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
objLayer, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
|
objLayer, _, err := initObjectLayer(mustGetZoneEndpoints(disks...))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
removeRoots(disks)
|
removeRoots(disks)
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer removeRoots(disks)
|
defer removeRoots(disks)
|
||||||
xl := objLayer.(*xlObjects)
|
z := objLayer.(*xlZones)
|
||||||
testShuffleDisks(t, xl)
|
testShuffleDisks(t, z)
|
||||||
}
|
}
|
||||||
|
@ -52,16 +52,13 @@ type xlObjects struct {
|
|||||||
// Byte pools used for temporary i/o buffers.
|
// Byte pools used for temporary i/o buffers.
|
||||||
bp *bpool.BytePoolCap
|
bp *bpool.BytePoolCap
|
||||||
|
|
||||||
// TODO: Deprecated only kept here for tests, should be removed in future.
|
|
||||||
storageDisks []StorageAPI
|
|
||||||
|
|
||||||
// TODO: ListObjects pool management, should be removed in future.
|
// TODO: ListObjects pool management, should be removed in future.
|
||||||
listPool *TreeWalkPool
|
listPool *TreeWalkPool
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewNSLock - initialize a new namespace RWLocker instance.
|
// NewNSLock - initialize a new namespace RWLocker instance.
|
||||||
func (xl xlObjects) NewNSLock(ctx context.Context, bucket string, object string) RWLocker {
|
func (xl xlObjects) NewNSLock(ctx context.Context, bucket string, object string) RWLocker {
|
||||||
return xl.nsMutex.NewNSLock(ctx, xl.getLockers(), bucket, object)
|
return xl.nsMutex.NewNSLock(ctx, xl.getLockers, bucket, object)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown function for object storage interface.
|
// Shutdown function for object storage interface.
|
||||||
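
`NewNSLock` now passes `xl.getLockers` as a function value instead of calling it, which pairs with the dsync change later in this diff where `restClnts` becomes `GetLockersFn func() []NetLocker`: the locker list is resolved at lock time rather than frozen at construction, so an expanded cluster is picked up automatically. A small sketch of the difference, with placeholder types rather than the real dsync API:

```go
package main

import "fmt"

// locker is a stand-in for dsync's NetLocker.
type locker struct{ addr string }

// mutexEager snapshots the lockers when it is constructed.
type mutexEager struct{ lockers []locker }

// mutexLazy asks a provider for the current lockers on every operation.
type mutexLazy struct{ getLockers func() []locker }

func (m mutexEager) count() int { return len(m.lockers) }
func (m mutexLazy) count() int  { return len(m.getLockers()) }

func main() {
	current := []locker{{"host1"}, {"host2"}}
	provider := func() []locker { return current }

	eager := mutexEager{lockers: provider()} // evaluated once, here
	lazy := mutexLazy{getLockers: provider}  // evaluated on each call

	// Simulate cluster expansion adding more lock servers.
	current = append(current, locker{"host3"}, locker{"host4"})

	fmt.Println("eager sees", eager.count(), "lockers; lazy sees", lazy.count())
}
```
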
|
1299
cmd/xl-zones.go
Normal file
1299
cmd/xl-zones.go
Normal file
File diff suppressed because it is too large
Load Diff
@ -1,5 +1,5 @@
|
|||||||
# Distributed Server Design Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io)
|
# Distributed Server Design Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io)
|
||||||
This document explains the design approach, advanced use cases and limits of the MinIO distributed server.
|
This document explains the design approach and advanced use cases of the MinIO distributed server.
|
||||||
|
|
||||||
## Command-line
|
## Command-line
|
||||||
```
|
```
|
||||||
@ -127,10 +127,6 @@ Distributed erasure coded configuration with rack level redundancy 32 sets in to
|
|||||||
minio server http://rack{1...4}-host{1...8}.example.net/export{1...16}
|
minio server http://rack{1...4}-host{1...8}.example.net/export{1...16}
|
||||||
```
|
```
|
||||||
|
|
||||||
Distributed erasure coded configuration with no rack level redundancy but redundancy with in the rack we split the arguments, 32 sets in total, 16 disks per set.
|
|
||||||
```
|
|
||||||
minio server http://rack1-host{1...8}.example.net/export{1...16} http://rack2-host{1...8}.example.net/export{1...16} http://rack3-host{1...8}.example.net/export{1...16} http://rack4-host{1...8}.example.net/export{1...16}
|
|
||||||
```
|
|
||||||
## Backend `format.json` changes
|
## Backend `format.json` changes
|
||||||
|
|
||||||
`format.json` has new fields
|
`format.json` has new fields
|
||||||
@ -208,8 +204,3 @@ type formatXLV2 struct {
|
|||||||
} `json:"xl"`
|
} `json:"xl"`
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
## Limits
|
|
||||||
|
|
||||||
- Minimum of 4 disks are needed for any erasure coded configuration.
|
|
||||||
- Maximum of 32 distinct nodes are supported in distributed configuration.
|
|
||||||
|
@ -12,16 +12,10 @@ Distributed MinIO provides protection against multiple node/drive failures and [
|
|||||||
|
|
||||||
### High availability
|
### High availability
|
||||||
|
|
||||||
A stand-alone MinIO server would go down if the server hosting the disks goes offline. In contrast, a distributed MinIO setup with _n_ disks will have your data safe as long as _n/2_ or more disks are online. You'll need a minimum of _(n/2 + 1)_ [Quorum](https://github.com/minio/dsync#lock-process) disks to create new objects though.
|
A stand-alone MinIO server would go down if the server hosting the disks goes offline. In contrast, a distributed MinIO setup with _n_ disks will have your data safe as long as _n/2_ or more disks are online. You'll need a minimum of _(n/2 + 1)_ disks to create new objects.
|
||||||
|
|
||||||
For example, a 16-node distributed MinIO setup with 16 disks per node would continue serving files, even if up to 8 servers are offline. But, you'll need at least 9 servers online to create new objects.
|
For example, a 16-node distributed MinIO setup with 16 disks per node would continue serving files, even if up to 8 servers are offline. But, you'll need at least 9 servers online to create new objects.
|
||||||
|
|
||||||
### Limits
|
|
||||||
|
|
||||||
As with MinIO in stand-alone mode, distributed MinIO has a per tenant limit of minimum of 2 and maximum of 32 servers. There are no limits on number of disks across these servers. If you need a multiple tenant setup, you can easily spin up multiple MinIO instances managed by orchestration tools like Kubernetes, Docker Swarm etc.
|
|
||||||
|
|
||||||
Note that with distributed MinIO you can play around with the number of nodes and drives as long as the limits are adhered to. For example, you can have 2 nodes with 4 drives each, 4 nodes with 4 drives each, 8 nodes with 2 drives each, 32 servers with 64 drives each and so on.
|
|
||||||
|
|
||||||
You can also use [storage classes](https://github.com/minio/minio/tree/master/docs/erasure/storage-class) to set custom data and parity distribution per object.
|
You can also use [storage classes](https://github.com/minio/minio/tree/master/docs/erasure/storage-class) to set custom data and parity distribution per object.
|
||||||
|
|
||||||
### Consistency Guarantees
|
### Consistency Guarantees
|
||||||
@ -61,7 +55,18 @@ export MINIO_SECRET_KEY=<SECRET_KEY>
|
|||||||
minio server http://host{1...32}/export{1...32}
|
minio server http://host{1...32}/export{1...32}
|
||||||
```
|
```
|
||||||
|
|
||||||
__NOTE:__ `{1...n}` shown above has 3 dots! Using only 2 dots `{1..32}` will be interpreted by your shell and won't be passed to minio server, affecting the erasure coding order, which may impact performance and high availability. __Always use ellipses syntax `{1...n}` (3 dots!) for optimal erasure-code distribution__
|
> __NOTE:__ `{1...n}` shown above has 3 dots! Using only 2 dots `{1..32}` will be interpreted by your shell and won't be passed to MinIO server, affecting the erasure coding order, which may impact performance and high availability. __Always use ellipses syntax `{1...n}` (3 dots!) for optimal erasure-code distribution__
|
||||||
|
|
||||||
|
#### Expanding existing distributed setup
|
||||||
|
MinIO supports expanding distributed erasure coded clusters by specifying a new set of clusters on the command-line, as shown below:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
export MINIO_ACCESS_KEY=<ACCESS_KEY>
|
||||||
|
export MINIO_SECRET_KEY=<SECRET_KEY>
|
||||||
|
minio server http://host{1...32}/export{1...32} http://host{33...64}/export{1...32}
|
||||||
|
```
|
||||||
|
|
||||||
|
Now the deployment has *1024* more disks, *2048* disks in total; new object upload requests automatically start using the least used cluster. This expansion strategy works endlessly, so you can perpetually expand your clusters as needed.
|
||||||
|
|
||||||
## 3. Test your setup
|
## 3. Test your setup
|
||||||
To test this setup, access the MinIO server via browser or [`mc`](https://docs.min.io/docs/minio-client-quickstart-guide).
|
To test this setup, access the MinIO server via browser or [`mc`](https://docs.min.io/docs/minio-client-quickstart-guide).
|
||||||
|
@ -4,10 +4,10 @@
|
|||||||
|
|
||||||
|Item|Specification|
|
|Item|Specification|
|
||||||
|:---|:---|
|
|:---|:---|
|
||||||
|Maximum number of servers per cluster| Unlimited|
|
|Maximum number of servers per cluster| no-limit|
|
||||||
|Maximum number of federated clusters | Unlimited|
|
|Maximum number of federated clusters | no-limit|
|
||||||
|Minimum number of servers| 02|
|
|Minimum number of servers| 02|
|
||||||
|Maximum number of drives per server| Unlimited|
|
|Maximum number of drives per server| no-limit|
|
||||||
|Read quorum| N/2|
|
|Read quorum| N/2|
|
||||||
|Write quorum| N/2+1|
|
|Write quorum| N/2+1|
|
||||||
|
|
||||||
|
go.sum (6 added lines)
@ -499,6 +499,8 @@ github.com/nats-io/go-nats-streaming v0.0.0-20161216191029-077898146bfb/go.mod h
|
|||||||
github.com/nats-io/go-nats-streaming v0.4.2/go.mod h1:gfq4R3c9sKAINOpelo0gn/b9QDMBZnmrttcsNF+lqyo=
|
github.com/nats-io/go-nats-streaming v0.4.2/go.mod h1:gfq4R3c9sKAINOpelo0gn/b9QDMBZnmrttcsNF+lqyo=
|
||||||
github.com/nats-io/go-nats-streaming v0.4.4 h1:1I3lkZDRdQYXb+holjdqZ2J6xyekrD06o9Fd8rWlgP4=
|
github.com/nats-io/go-nats-streaming v0.4.4 h1:1I3lkZDRdQYXb+holjdqZ2J6xyekrD06o9Fd8rWlgP4=
|
||||||
github.com/nats-io/go-nats-streaming v0.4.4/go.mod h1:gfq4R3c9sKAINOpelo0gn/b9QDMBZnmrttcsNF+lqyo=
|
github.com/nats-io/go-nats-streaming v0.4.4/go.mod h1:gfq4R3c9sKAINOpelo0gn/b9QDMBZnmrttcsNF+lqyo=
|
||||||
|
github.com/nats-io/jwt v0.3.0 h1:xdnzwFETV++jNc4W1mw//qFyJGb2ABOombmZJQS4+Qo=
|
||||||
|
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
|
||||||
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
|
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
|
||||||
github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI=
|
github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI=
|
||||||
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
|
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
|
||||||
@ -517,7 +519,11 @@ github.com/nats-io/nats.go v1.8.0/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ
|
|||||||
github.com/nats-io/nats.go v1.9.1 h1:ik3HbLhZ0YABLto7iX80pZLPw/6dx3T+++MZJwLnMrQ=
|
github.com/nats-io/nats.go v1.9.1 h1:ik3HbLhZ0YABLto7iX80pZLPw/6dx3T+++MZJwLnMrQ=
|
||||||
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
|
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
|
||||||
github.com/nats-io/nkeys v0.0.2 h1:+qM7QpgXnvDDixitZtQUBDY9w/s9mu1ghS+JIbsrx6M=
|
github.com/nats-io/nkeys v0.0.2 h1:+qM7QpgXnvDDixitZtQUBDY9w/s9mu1ghS+JIbsrx6M=
|
||||||
|
github.com/nats-io/nkeys v0.0.2 h1:+qM7QpgXnvDDixitZtQUBDY9w/s9mu1ghS+JIbsrx6M=
|
||||||
github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=
|
github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=
|
||||||
|
github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=
|
||||||
|
github.com/nats-io/nkeys v0.1.0 h1:qMd4+pRHgdr1nAClu+2h/2a5F2TmKcCzjCDazVgRoX4=
|
||||||
|
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||||
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||||
github.com/nats-io/nkeys v0.1.3 h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k=
|
github.com/nats-io/nkeys v0.1.3 h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k=
|
||||||
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||||
|
@ -20,6 +20,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
golog "log"
|
golog "log"
|
||||||
|
"math"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
@ -75,7 +76,7 @@ func isLocked(uid string) bool {
|
|||||||
func NewDRWMutex(ctx context.Context, name string, clnt *Dsync) *DRWMutex {
|
func NewDRWMutex(ctx context.Context, name string, clnt *Dsync) *DRWMutex {
|
||||||
return &DRWMutex{
|
return &DRWMutex{
|
||||||
Name: name,
|
Name: name,
|
||||||
writeLocks: make([]string, clnt.dNodeCount),
|
writeLocks: make([]string, len(clnt.GetLockersFn())),
|
||||||
clnt: clnt,
|
clnt: clnt,
|
||||||
ctx: ctx,
|
ctx: ctx,
|
||||||
}
|
}
|
||||||
@ -133,6 +134,8 @@ func (dm *DRWMutex) lockBlocking(timeout time.Duration, id, source string, isRea
|
|||||||
doneCh, start := make(chan struct{}), time.Now().UTC()
|
doneCh, start := make(chan struct{}), time.Now().UTC()
|
||||||
defer close(doneCh)
|
defer close(doneCh)
|
||||||
|
|
||||||
|
restClnts := dm.clnt.GetLockersFn()
|
||||||
|
|
||||||
// Use incremental back-off algorithm for repeated attempts to acquire the lock
|
// Use incremental back-off algorithm for repeated attempts to acquire the lock
|
||||||
for range newRetryTimerSimple(doneCh) {
|
for range newRetryTimerSimple(doneCh) {
|
||||||
select {
|
select {
|
||||||
@ -142,7 +145,7 @@ func (dm *DRWMutex) lockBlocking(timeout time.Duration, id, source string, isRea
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Create temp array on stack.
|
// Create temp array on stack.
|
||||||
locks := make([]string, dm.clnt.dNodeCount)
|
locks := make([]string, len(restClnts))
|
||||||
|
|
||||||
// Try to acquire the lock.
|
// Try to acquire the lock.
|
||||||
success := lock(dm.clnt, &locks, dm.Name, id, source, isReadLock)
|
success := lock(dm.clnt, &locks, dm.Name, id, source, isReadLock)
|
||||||
@ -152,7 +155,7 @@ func (dm *DRWMutex) lockBlocking(timeout time.Duration, id, source string, isRea
|
|||||||
// If success, copy array to object
|
// If success, copy array to object
|
||||||
if isReadLock {
|
if isReadLock {
|
||||||
// Append new array of strings at the end
|
// Append new array of strings at the end
|
||||||
dm.readersLocks = append(dm.readersLocks, make([]string, dm.clnt.dNodeCount))
|
dm.readersLocks = append(dm.readersLocks, make([]string, len(restClnts)))
|
||||||
// and copy stack array into last spot
|
// and copy stack array into last spot
|
||||||
copy(dm.readersLocks[len(dm.readersLocks)-1], locks[:])
|
copy(dm.readersLocks[len(dm.readersLocks)-1], locks[:])
|
||||||
} else {
|
} else {
|
||||||
@ -174,12 +177,14 @@ func (dm *DRWMutex) lockBlocking(timeout time.Duration, id, source string, isRea
|
|||||||
// lock tries to acquire the distributed lock, returning true or false.
|
// lock tries to acquire the distributed lock, returning true or false.
|
||||||
func lock(ds *Dsync, locks *[]string, lockName, id, source string, isReadLock bool) bool {
|
func lock(ds *Dsync, locks *[]string, lockName, id, source string, isReadLock bool) bool {
|
||||||
|
|
||||||
|
restClnts := ds.GetLockersFn()
|
||||||
|
|
||||||
// Create buffered channel of size equal to total number of nodes.
|
// Create buffered channel of size equal to total number of nodes.
|
||||||
ch := make(chan Granted, ds.dNodeCount)
|
ch := make(chan Granted, len(restClnts))
|
||||||
defer close(ch)
|
defer close(ch)
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
for index, c := range ds.restClnts {
|
for index, c := range restClnts {
|
||||||
|
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
// broadcast lock request to all nodes
|
// broadcast lock request to all nodes
|
||||||
@ -229,7 +234,10 @@ func lock(ds *Dsync, locks *[]string, lockName, id, source string, isReadLock bo
|
|||||||
done := false
|
done := false
|
||||||
timeout := time.After(DRWMutexAcquireTimeout)
|
timeout := time.After(DRWMutexAcquireTimeout)
|
||||||
|
|
||||||
for ; i < ds.dNodeCount; i++ { // Loop until we acquired all locks
|
dquorum := int(len(restClnts)/2) + 1
|
||||||
|
dquorumReads := int(math.Ceil(float64(len(restClnts)) / 2.0))
|
||||||
|
|
||||||
|
for ; i < len(restClnts); i++ { // Loop until we acquired all locks
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case grant := <-ch:
|
case grant := <-ch:
|
||||||
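
With the node count no longer stored on `Dsync`, the write and read quorums are recomputed from the current locker list on every attempt: a simple majority for writes, half (rounded up) for reads. The tiny program below just evaluates those two formulas from the diff for a few cluster sizes:

```go
package main

import (
	"fmt"
	"math"
)

// quorums mirrors the formulas in the diff: write quorum is a simple
// majority of the lockers, read quorum is half of them rounded up.
func quorums(lockers int) (write, read int) {
	write = lockers/2 + 1
	read = int(math.Ceil(float64(lockers) / 2.0))
	return write, read
}

func main() {
	for _, n := range []int{4, 8, 16, 32} {
		w, r := quorums(n)
		fmt.Printf("lockers=%2d  write-quorum=%2d  read-quorum=%2d\n", n, w, r)
	}
}
```
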
@ -238,22 +246,22 @@ func lock(ds *Dsync, locks *[]string, lockName, id, source string, isReadLock bo
|
|||||||
(*locks)[grant.index] = grant.lockUID
|
(*locks)[grant.index] = grant.lockUID
|
||||||
} else {
|
} else {
|
||||||
locksFailed++
|
locksFailed++
|
||||||
if !isReadLock && locksFailed > ds.dNodeCount-ds.dquorum ||
|
if !isReadLock && locksFailed > len(restClnts)-dquorum ||
|
||||||
isReadLock && locksFailed > ds.dNodeCount-ds.dquorumReads {
|
isReadLock && locksFailed > len(restClnts)-dquorumReads {
|
||||||
// We know that we are not going to get the lock anymore,
|
// We know that we are not going to get the lock anymore,
|
||||||
// so exit out and release any locks that did get acquired
|
// so exit out and release any locks that did get acquired
|
||||||
done = true
|
done = true
|
||||||
// Increment the number of grants received from the buffered channel.
|
// Increment the number of grants received from the buffered channel.
|
||||||
i++
|
i++
|
||||||
releaseAll(ds, locks, lockName, isReadLock)
|
releaseAll(ds, locks, lockName, isReadLock, restClnts)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case <-timeout:
|
case <-timeout:
|
||||||
done = true
|
done = true
|
||||||
                 // timeout happened, maybe one of the nodes is slow, count
                 // number of locks to check whether we have quorum or not
-                if !quorumMet(locks, isReadLock, ds.dquorum, ds.dquorumReads) {
-                    releaseAll(ds, locks, lockName, isReadLock)
+                if !quorumMet(locks, isReadLock, dquorum, dquorumReads) {
+                    releaseAll(ds, locks, lockName, isReadLock, restClnts)
                 }
             }

@@ -263,7 +271,7 @@ func lock(ds *Dsync, locks *[]string, lockName, id, source string, isReadLock bo
         }

         // Count locks in order to determine whether we have quorum or not
-        quorum = quorumMet(locks, isReadLock, ds.dquorum, ds.dquorumReads)
+        quorum = quorumMet(locks, isReadLock, dquorum, dquorumReads)

         // Signal that we have the quorum
         wg.Done()
@@ -271,11 +279,12 @@ func lock(ds *Dsync, locks *[]string, lockName, id, source string, isReadLock bo
         // Wait for the other responses and immediately release the locks
         // (do not add them to the locks array because the DRWMutex could
         // already has been unlocked again by the original calling thread)
-        for ; i < ds.dNodeCount; i++ {
+        for ; i < len(restClnts); i++ {
             grantToBeReleased := <-ch
             if grantToBeReleased.isLocked() {
                 // release lock
-                sendRelease(ds, ds.restClnts[grantToBeReleased.index], lockName, grantToBeReleased.lockUID, isReadLock)
+                sendRelease(ds, restClnts[grantToBeReleased.index], lockName,
+                    grantToBeReleased.lockUID, isReadLock)
             }
         }
     }(isReadLock)
@@ -306,10 +315,10 @@ func quorumMet(locks *[]string, isReadLock bool, quorum, quorumReads int) bool {
 }

 // releaseAll releases all locks that are marked as locked
-func releaseAll(ds *Dsync, locks *[]string, lockName string, isReadLock bool) {
-    for lock := 0; lock < ds.dNodeCount; lock++ {
+func releaseAll(ds *Dsync, locks *[]string, lockName string, isReadLock bool, restClnts []NetLocker) {
+    for lock := 0; lock < len(restClnts); lock++ {
         if isLocked((*locks)[lock]) {
-            sendRelease(ds, ds.restClnts[lock], lockName, (*locks)[lock], isReadLock)
+            sendRelease(ds, restClnts[lock], lockName, (*locks)[lock], isReadLock)
             (*locks)[lock] = ""
         }
     }
@@ -320,8 +329,9 @@ func releaseAll(ds *Dsync, locks *[]string, lockName string, isReadLock bool) {
 // It is a run-time error if dm is not locked on entry to Unlock.
 func (dm *DRWMutex) Unlock() {

+    restClnts := dm.clnt.GetLockersFn()
     // create temp array on stack
-    locks := make([]string, dm.clnt.dNodeCount)
+    locks := make([]string, len(restClnts))

     {
         dm.m.Lock()
@@ -342,11 +352,11 @@ func (dm *DRWMutex) Unlock() {
         // Copy write locks to stack array
         copy(locks, dm.writeLocks[:])
         // Clear write locks array
-        dm.writeLocks = make([]string, dm.clnt.dNodeCount)
+        dm.writeLocks = make([]string, len(restClnts))
     }

     isReadLock := false
-    unlock(dm.clnt, locks, dm.Name, isReadLock)
+    unlock(dm.clnt, locks, dm.Name, isReadLock, restClnts)
 }

 // RUnlock releases a read lock held on dm.
@@ -355,8 +365,9 @@ func (dm *DRWMutex) Unlock() {
 func (dm *DRWMutex) RUnlock() {

     // create temp array on stack
-    locks := make([]string, dm.clnt.dNodeCount)
+    restClnts := dm.clnt.GetLockersFn()

+    locks := make([]string, len(restClnts))
     {
         dm.m.Lock()
         defer dm.m.Unlock()
@@ -370,15 +381,15 @@ func (dm *DRWMutex) RUnlock() {
     }

     isReadLock := true
-    unlock(dm.clnt, locks, dm.Name, isReadLock)
+    unlock(dm.clnt, locks, dm.Name, isReadLock, restClnts)
 }

-func unlock(ds *Dsync, locks []string, name string, isReadLock bool) {
+func unlock(ds *Dsync, locks []string, name string, isReadLock bool, restClnts []NetLocker) {

     // We don't need to synchronously wait until we have released all the locks (or the quorum)
     // (a subsequent lock will retry automatically in case it would fail to get quorum)

-    for index, c := range ds.restClnts {
+    for index, c := range restClnts {

         if isLocked(locks[index]) {
             // broadcast lock release to all nodes that granted the lock
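Note: the hunks above stop reading the fixed ds.dquorum, ds.dquorumReads and ds.restClnts fields and instead operate on a locker list fetched per call. A minimal sketch of how those per-call values can be derived from the GetLockersFn callback, assuming the same quorum formulas the removed New() constructor used (the currentQuorums helper is illustrative and not part of this commit):

package dsync

import "math"

// currentQuorums is an illustrative helper: it fetches the current set of
// lock clients via the GetLockersFn callback and derives the write and read
// quorum counts from their number, mirroring the formulas that New() used
// to compute once at construction time.
func currentQuorums(ds *Dsync) (restClnts []NetLocker, dquorum, dquorumReads int) {
    restClnts = ds.GetLockersFn()
    // Simple majority for writes, half (rounded up) for reads; with an odd
    // number of nodes the two values coincide.
    dquorum = len(restClnts)/2 + 1
    dquorumReads = int(math.Ceil(float64(len(restClnts)) / 2.0))
    return restClnts, dquorum, dquorumReads
}

Recomputing these values on every call is what allows the locker set to change at runtime instead of being fixed at construction.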
@@ -16,45 +16,9 @@

 package dsync

-import (
-    "errors"
-    "math"
-)

 // Dsync represents dsync client object which is initialized with
 // authenticated clients, used to initiate lock REST calls.
 type Dsync struct {
-    // Number of nodes participating in the distributed locking.
-    dNodeCount int
-
     // List of rest client objects, one per lock server.
-    restClnts []NetLocker
+    GetLockersFn func() []NetLocker
-
-    // Simple majority based quorum, set to dNodeCount/2+1
-    dquorum int
-
-    // Simple quorum for read operations, set to dNodeCount/2
-    dquorumReads int
-}
-
-// New - initializes a new dsync object with input restClnts.
-func New(restClnts []NetLocker) (*Dsync, error) {
-    if len(restClnts) < 2 {
-        return nil, errors.New("Dsync is not designed for less than 2 nodes")
-    } else if len(restClnts) > 32 {
-        return nil, errors.New("Dsync is not designed for more than 32 nodes")
-    }
-
-    ds := &Dsync{}
-    ds.dNodeCount = len(restClnts)
-
-    // With odd number of nodes, write and read quorum is basically the same
-    ds.dquorum = int(ds.dNodeCount/2) + 1
-    ds.dquorumReads = int(math.Ceil(float64(ds.dNodeCount) / 2.0))
-
-    // Initialize node name and rest path for each NetLocker object.
-    ds.restClnts = make([]NetLocker, ds.dNodeCount)
-    copy(ds.restClnts, restClnts)
-
-    return ds, nil
 }
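Note: with New() removed, callers assemble the Dsync value directly and supply a callback that returns the current set of lock clients. A minimal construction sketch under that assumption (the helper names and the resource name are illustrative, and the 2-32 node validation that New() performed is not reproduced here):

package dsync

import "context"

// newCallbackDsync is an illustrative helper (not part of this commit) that
// wraps a static locker slice in the callback the refactored Dsync expects.
func newCallbackDsync(lockers []NetLocker) *Dsync {
    return &Dsync{
        GetLockersFn: func() []NetLocker { return lockers },
    }
}

// exampleMutex shows the sketch in use: a distributed RW mutex built on top
// of the callback-based Dsync, mirroring the NewDRWMutex call that appears
// in the updated benchmark further down.
func exampleMutex(lockers []NetLocker) *DRWMutex {
    return NewDRWMutex(context.Background(), "example-resource", newCallbackDsync(lockers))
}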
@@ -1,58 +0,0 @@
-/*
- * Minio Cloud Storage, (C) 2018 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// GOMAXPROCS=10 go test
-
-package dsync
-
-import "testing"
-
-// Tests dsync.New
-func TestNew(t *testing.T) {
-    nclnts := make([]NetLocker, 33)
-    if _, err := New(nclnts); err == nil {
-        t.Fatal("Should have failed")
-    }
-
-    nclnts = make([]NetLocker, 1)
-    if _, err := New(nclnts); err == nil {
-        t.Fatal("Should have failed")
-    }
-
-    nclnts = make([]NetLocker, 2)
-    nds, err := New(nclnts)
-    if err != nil {
-        t.Fatal("Should pass", err)
-    }
-
-    if nds.dquorumReads != 1 {
-        t.Fatalf("Unexpected read quorum values expected 1, got %d", nds.dquorumReads)
-    }
-
-    if nds.dquorum != 2 {
-        t.Fatalf("Unexpected quorum values expected 2, got %d", nds.dquorum)
-    }
-
-    nclnts = make([]NetLocker, 3)
-    nds, err = New(nclnts)
-    if err != nil {
-        t.Fatal("Should pass", err)
-    }
-
-    if nds.dquorumReads != nds.dquorum {
-        t.Fatalf("Unexpected quorum values for odd nodes we expect read %d and write %d quorum to be same", nds.dquorumReads, nds.dquorum)
-    }
-}
@@ -78,10 +78,8 @@ func TestMain(m *testing.M) {
         clnts = append(clnts, newClient(nodes[i], rpcPaths[i]))
     }

-    var err error
-    ds, err = New(clnts)
-    if err != nil {
-        log.Fatalf("set nodes failed with %v", err)
+    ds = &Dsync{
+        GetLockersFn: func() []NetLocker { return clnts },
     }

     startRPCServers(nodes)
@@ -256,11 +254,10 @@ func TestMutex(t *testing.T) {

 func BenchmarkMutexUncontended(b *testing.B) {
     type PaddedMutex struct {
-        DRWMutex
-        pad [128]uint8
+        *DRWMutex
     }
     b.RunParallel(func(pb *testing.PB) {
-        var mu PaddedMutex
+        var mu = PaddedMutex{NewDRWMutex(context.Background(), "", ds)}
         for pb.Next() {
             mu.Lock(id, source)
             mu.Unlock()
@@ -41,6 +41,14 @@ func newClient(addr, endpoint string) NetLocker {
     }
 }

+// Close closes the underlying socket file descriptor.
+func (rpcClient *ReconnectRPCClient) IsOnline() bool {
+    rpcClient.mutex.Lock()
+    defer rpcClient.mutex.Unlock()
+    // If rpc client has not connected yet there is nothing to close.
+    return rpcClient.rpc != nil
+}
+
 // Close closes the underlying socket file descriptor.
 func (rpcClient *ReconnectRPCClient) Close() error {
     rpcClient.mutex.Lock()
@@ -56,4 +56,7 @@ type NetLocker interface {

     // Close closes any underlying connection to the service endpoint
     Close() error
+
+    // Is the underlying connection online? (is always true for any local lockers)
+    IsOnline() bool
 }
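Note: NetLocker now requires IsOnline() in addition to Close(). A sketch of how just these two methods could look on a hypothetical in-process locker (the inProcessLocker type is illustrative only and omits the interface's remaining lock and unlock methods); as the new interface comment says, a purely local locker can always report true:

package dsync

// inProcessLocker is a hypothetical type used only to illustrate the two
// connection-related methods of the expanded NetLocker interface.
type inProcessLocker struct{}

// IsOnline always reports true: an in-process locker holds no network
// connection that could go down.
func (l *inProcessLocker) IsOnline() bool { return true }

// Close is a no-op because no socket or connection is held.
func (l *inProcessLocker) Close() error { return nil }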
|