mirror of https://github.com/minio/minio.git, synced 2025-04-20 02:27:50 -04:00
Bump response header timeout for proxying list request (#10420)
parent 746f1585eb
commit eb19c8af40
@@ -753,9 +753,17 @@ func GetProxyEndpoints(endpointZones EndpointZones) ([]ProxyEndpoint, error) {
 				RootCAs: globalRootCAs,
 			}
 		}
+
+		tr := newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout)()
+		// Allow more requests to be in flight with higher response header timeout.
+		tr.ResponseHeaderTimeout = 30 * time.Minute
+		tr.MaxConnsPerHost = 256
+		tr.MaxIdleConnsPerHost = 16
+		tr.MaxIdleConns = 256
+
 		proxyEps = append(proxyEps, ProxyEndpoint{
 			Endpoint:  endpoint,
-			Transport: newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout)(),
+			Transport: tr,
 		})
 	}
 }
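The hunk above is the heart of the commit: build one tuned transport per proxy endpoint and reuse it for every request, instead of constructing a default transport inline. A minimal standalone sketch of that tuning, assuming only the standard library — newCustomHTTPTransport and rest.DefaultRESTTimeout are MinIO-internal, so a plain net/http.Transport and the hypothetical newProxyTransport helper stand in for them; the field values are the ones from the diff:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// newProxyTransport returns a transport tuned the way the diff tunes it:
// a long response-header timeout so slow proxied list calls are not cut
// off, and larger connection pools so more of them can stay in flight.
func newProxyTransport(tlsConfig *tls.Config) *http.Transport {
	return &http.Transport{
		TLSClientConfig:       tlsConfig,
		ResponseHeaderTimeout: 30 * time.Minute, // value from the diff
		MaxConnsPerHost:       256,
		MaxIdleConnsPerHost:   16,
		MaxIdleConns:          256,
	}
}

func main() {
	tr := newProxyTransport(nil)
	client := &http.Client{Transport: tr}
	_ = client // would be used for proxied list requests
	fmt.Println("response header timeout:", tr.ResponseHeaderTimeout)
}

The 30-minute ResponseHeaderTimeout is the operative fix: a proxied list request can run for a long time before the remote server sends its first response header, and a shorter default would abort it early. The pool knobs then let many such slow requests be in flight at once without exhausting connections.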
@@ -34,6 +34,22 @@ func (er erasureObjects) getLoadBalancedLocalDisks() (newDisks []StorageAPI) {
 	return newDisks
 }
 
+// getLoadBalancedNDisks - fetches load balanced (sufficiently randomized) disk slice with N disks online
+func (er erasureObjects) getLoadBalancedNDisks(ndisks int) (newDisks []StorageAPI) {
+	disks := er.getLoadBalancedDisks()
+	for _, disk := range disks {
+		if disk == nil {
+			continue
+		}
+		newDisks = append(newDisks, disk)
+		ndisks--
+		if ndisks == 0 {
+			break
+		}
+	}
+	return
+}
+
 // getLoadBalancedDisks - fetches load balanced (sufficiently randomized) disk slice.
 func (er erasureObjects) getLoadBalancedDisks() (newDisks []StorageAPI) {
 	disks := er.getDisks()
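The new helper builds on the existing shuffle and simply stops after collecting ndisks online disks, which is what lets the walk functions in the later hunks spin up a bounded number of goroutines. A self-contained sketch of the same selection pattern, with a hypothetical Disk type standing in for StorageAPI and math/rand standing in for MinIO's own load-balancing shuffle:

package main

import (
	"fmt"
	"math/rand"
)

// Disk is a hypothetical stand-in for MinIO's StorageAPI; nil means offline.
type Disk struct{ name string }

// loadBalanced mimics getLoadBalancedDisks with a plain shuffle.
func loadBalanced(disks []*Disk) []*Disk {
	out := make([]*Disk, len(disks))
	copy(out, disks)
	rand.Shuffle(len(out), func(i, j int) { out[i], out[j] = out[j], out[i] })
	return out
}

// firstNOnline is the sketch equivalent of getLoadBalancedNDisks: skip
// offline (nil) disks and stop once ndisks online disks are collected.
func firstNOnline(disks []*Disk, ndisks int) (newDisks []*Disk) {
	for _, disk := range loadBalanced(disks) {
		if disk == nil {
			continue // offline disk
		}
		newDisks = append(newDisks, disk)
		ndisks--
		if ndisks == 0 {
			break
		}
	}
	return
}

func main() {
	disks := []*Disk{{"d1"}, nil, {"d3"}, {"d4"}, nil, {"d6"}}
	for _, disk := range firstNOnline(disks, 3) {
		fmt.Println(disk.name)
	}
}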
@@ -961,7 +961,7 @@ func lexicallySortedEntryVersions(entryChs []FileInfoVersionsCh, entries []FileI
 }
 
 func (s *erasureSets) startMergeWalks(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}) []FileInfoCh {
-	return s.startMergeWalksN(ctx, bucket, prefix, marker, recursive, endWalkCh, -1)
+	return s.startMergeWalksN(ctx, bucket, prefix, marker, recursive, endWalkCh, -1, false)
 }
 
 func (s *erasureSets) startMergeWalksVersions(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}) []FileInfoVersionsCh {
@@ -972,90 +972,64 @@ func (s *erasureSets) startMergeWalksVersionsN(ctx context.Context, bucket, prefi
 // FileInfoCh which can be read from.
 func (s *erasureSets) startMergeWalksVersionsN(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}, ndisks int) []FileInfoVersionsCh {
 	var entryChs []FileInfoVersionsCh
-	var success int
+	var wg sync.WaitGroup
+	var mutex sync.Mutex
 	for _, set := range s.sets {
 		// Reset for the next erasure set.
-		success = ndisks
-		for _, disk := range set.getLoadBalancedDisks() {
-			if disk == nil {
-				// Disk can be offline
-				continue
-			}
-			entryCh, err := disk.WalkVersions(GlobalContext, bucket, prefix, marker, recursive, endWalkCh)
-			if err != nil {
-				logger.LogIf(ctx, err)
-				// Disk walk returned error, ignore it.
-				continue
-			}
-			entryChs = append(entryChs, FileInfoVersionsCh{
-				Ch: entryCh,
-			})
-			success--
-			if success == 0 {
-				break
-			}
+		for _, disk := range set.getLoadBalancedNDisks(ndisks) {
+			wg.Add(1)
+			go func(disk StorageAPI) {
+				defer wg.Done()
+
+				entryCh, err := disk.WalkVersions(GlobalContext, bucket, prefix, marker, recursive, endWalkCh)
+				if err != nil {
+					return
+				}
+
+				mutex.Lock()
+				entryChs = append(entryChs, FileInfoVersionsCh{
+					Ch: entryCh,
+				})
+				mutex.Unlock()
+			}(disk)
 		}
 	}
+	wg.Wait()
 	return entryChs
 }
 
 // Starts a walk channel across n number of disks and returns a slice of
 // FileInfoCh which can be read from.
-func (s *erasureSets) startMergeWalksN(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}, ndisks int) []FileInfoCh {
+func (s *erasureSets) startMergeWalksN(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}, ndisks int, splunk bool) []FileInfoCh {
 	var entryChs []FileInfoCh
-	var success int
+	var wg sync.WaitGroup
+	var mutex sync.Mutex
 	for _, set := range s.sets {
 		// Reset for the next erasure set.
-		success = ndisks
-		for _, disk := range set.getLoadBalancedDisks() {
-			if disk == nil {
-				// Disk can be offline
-				continue
-			}
-			entryCh, err := disk.Walk(GlobalContext, bucket, prefix, marker, recursive, endWalkCh)
-			if err != nil {
-				// Disk walk returned error, ignore it.
-				continue
-			}
-			entryChs = append(entryChs, FileInfoCh{
-				Ch: entryCh,
-			})
-			success--
-			if success == 0 {
-				break
-			}
-		}
-	}
-	return entryChs
-}
-
-// Starts a walk channel across n number of disks and returns a slice of
-// FileInfo channels which can be read from.
-func (s *erasureSets) startSplunkMergeWalksN(ctx context.Context, bucket, prefix, marker string, endWalkCh <-chan struct{}, ndisks int) []FileInfoCh {
-	var entryChs []FileInfoCh
-	var success int
-	for _, set := range s.sets {
-		// Reset for the next erasure set.
-		success = ndisks
-		for _, disk := range set.getLoadBalancedDisks() {
-			if disk == nil {
-				// Disk can be offline
-				continue
-			}
-			entryCh, err := disk.WalkSplunk(GlobalContext, bucket, prefix, marker, endWalkCh)
-			if err != nil {
-				// Disk walk returned error, ignore it.
-				continue
-			}
-			entryChs = append(entryChs, FileInfoCh{
-				Ch: entryCh,
-			})
-			success--
-			if success == 0 {
-				break
-			}
+		for _, disk := range set.getLoadBalancedNDisks(ndisks) {
+			wg.Add(1)
+			go func(disk StorageAPI) {
+				defer wg.Done()
+
+				var entryCh chan FileInfo
+				var err error
+				if splunk {
+					entryCh, err = disk.WalkSplunk(GlobalContext, bucket, prefix, marker, endWalkCh)
+				} else {
+					entryCh, err = disk.Walk(GlobalContext, bucket, prefix, marker, recursive, endWalkCh)
+				}
+				if err != nil {
+					// Disk walk returned error, ignore it.
+					return
+				}
+
+				mutex.Lock()
+				entryChs = append(entryChs, FileInfoCh{
+					Ch: entryCh,
+				})
+				mutex.Unlock()
+			}(disk)
 		}
 	}
+	wg.Wait()
 	return entryChs
 }
 
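The structural change in this hunk is twofold: the sequential per-disk walk with its success countdown becomes a parallel fan-out over getLoadBalancedNDisks, and the separate startSplunkMergeWalksN is folded into startMergeWalksN behind a splunk flag that picks WalkSplunk over Walk. A minimal sketch of the fan-out pattern in isolation — walkDisk is a hypothetical stand-in for disk.Walk/disk.WalkSplunk:

package main

import (
	"fmt"
	"sync"
)

// walkDisk is a hypothetical stand-in for disk.Walk / disk.WalkSplunk:
// it returns a channel of entries for one disk, or an error.
func walkDisk(disk string) (<-chan string, error) {
	ch := make(chan string, 1)
	ch <- "entry-from-" + disk
	close(ch)
	return ch, nil
}

// startWalks mirrors the rewritten startMergeWalksN: one goroutine per
// disk, a mutex guarding the shared result slice, and a WaitGroup
// barrier so the slice is complete before it is returned.
func startWalks(disks []string) []<-chan string {
	var entryChs []<-chan string
	var wg sync.WaitGroup
	var mutex sync.Mutex
	for _, disk := range disks {
		wg.Add(1)
		go func(disk string) {
			defer wg.Done()
			entryCh, err := walkDisk(disk)
			if err != nil {
				// Disk walk returned error, ignore it.
				return
			}
			mutex.Lock()
			entryChs = append(entryChs, entryCh)
			mutex.Unlock()
		}(disk)
	}
	wg.Wait()
	return entryChs
}

func main() {
	for _, ch := range startWalks([]string{"disk1", "disk2", "disk3"}) {
		fmt.Println(<-ch)
	}
}

Appending to a shared slice from several goroutines is a data race without the mutex, and wg.Wait() guarantees no walker is still writing when the slice is returned — the same reason the diff introduces sync.WaitGroup and sync.Mutex together.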
@@ -697,7 +697,7 @@ func (z *erasureZones) listObjectsNonSlash(ctx context.Context, bucket, prefix,
 
 	for _, zone := range z.zones {
 		zonesEntryChs = append(zonesEntryChs,
-			zone.startMergeWalksN(ctx, bucket, prefix, "", true, endWalkCh, zone.listTolerancePerSet))
+			zone.startMergeWalksN(ctx, bucket, prefix, "", true, endWalkCh, zone.listTolerancePerSet, false))
 		zonesListTolerancePerSet = append(zonesListTolerancePerSet, zone.listTolerancePerSet)
 	}
 
@@ -816,7 +816,7 @@ func (z *erasureZones) listObjectsSplunk(ctx context.Context, bucket, prefix, ma
 		entryChs, endWalkCh := zone.poolSplunk.Release(listParams{bucket, recursive, marker, prefix})
 		if entryChs == nil {
 			endWalkCh = make(chan struct{})
-			entryChs = zone.startSplunkMergeWalksN(ctx, bucket, prefix, marker, endWalkCh, zone.listTolerancePerSet)
+			entryChs = zone.startMergeWalksN(ctx, bucket, prefix, marker, recursive, endWalkCh, zone.listTolerancePerSet, true)
 		}
 		zonesEntryChs = append(zonesEntryChs, entryChs)
 		zonesEndWalkCh = append(zonesEndWalkCh, endWalkCh)
@@ -908,7 +908,7 @@ func (z *erasureZones) listObjects(ctx context.Context, bucket, prefix, marker,
 		entryChs, endWalkCh := zone.pool.Release(listParams{bucket, recursive, marker, prefix})
 		if entryChs == nil {
 			endWalkCh = make(chan struct{})
-			entryChs = zone.startMergeWalksN(ctx, bucket, prefix, marker, recursive, endWalkCh, zone.listTolerancePerSet)
+			entryChs = zone.startMergeWalksN(ctx, bucket, prefix, marker, recursive, endWalkCh, zone.listTolerancePerSet, false)
 		}
 		zonesEntryChs = append(zonesEntryChs, entryChs)
 		zonesEndWalkCh = append(zonesEndWalkCh, endWalkCh)