2020-04-14 15:46:37 -04:00
|
|
|
/*
|
|
|
|
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
|
|
|
package cmd
|
|
|
|
|
|
|
|
import (
|
|
|
|
"net/http"
|
|
|
|
"sync"
|
|
|
|
"time"
|
2020-06-04 17:58:34 -04:00
|
|
|
|
|
|
|
"github.com/minio/minio/cmd/config/api"
|
2020-09-18 05:03:02 -04:00
|
|
|
"github.com/minio/minio/cmd/logger"
|
2020-09-04 22:37:37 -04:00
|
|
|
"github.com/minio/minio/pkg/sys"
|
2020-04-14 15:46:37 -04:00
|
|
|
)
|
|
|
|
|
2020-06-04 17:58:34 -04:00
|
|
|
// apiConfig holds dynamically configurable S3 API tunables.
// All fields are guarded by mu; read them via the getter methods
// and write them via init().
type apiConfig struct {
	mu sync.RWMutex

	// requestsDeadline is how long a request may wait for a slot
	// in requestsPool before being rejected.
	requestsDeadline time.Duration
	// requestsPool is a semaphore channel bounding the number of
	// concurrently served S3 API requests on this node.
	requestsPool chan struct{}
	// clusterDeadline is the timeout used for cluster health checks
	// (see getClusterDeadline for the fallback default).
	clusterDeadline time.Duration
	// listQuorum is the drive quorum required for listing operations.
	listQuorum int
	// extendListLife extends the lifetime of cached list results.
	extendListLife time.Duration
	// corsAllowOrigins are the origins allowed by the CORS policy.
	corsAllowOrigins []string
	// total drives per erasure set across pools.
	totalDriveCount int
	// replicationWorkers is the number of bucket replication workers.
	replicationWorkers int
}
|
|
|
|
|
2021-01-22 15:09:24 -05:00
|
|
|
func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
|
2020-06-04 17:58:34 -04:00
|
|
|
t.mu.Lock()
|
|
|
|
defer t.mu.Unlock()
|
|
|
|
|
2020-09-23 12:14:33 -04:00
|
|
|
t.clusterDeadline = cfg.ClusterDeadline
|
2020-09-12 02:03:08 -04:00
|
|
|
t.corsAllowOrigins = cfg.CorsAllowOrigin
|
2021-01-22 15:09:24 -05:00
|
|
|
for _, setDriveCount := range setDriveCounts {
|
|
|
|
t.totalDriveCount += setDriveCount
|
|
|
|
}
|
2020-09-12 02:03:08 -04:00
|
|
|
|
2020-09-04 22:37:37 -04:00
|
|
|
var apiRequestsMaxPerNode int
|
2020-09-12 02:03:08 -04:00
|
|
|
if cfg.RequestsMax <= 0 {
|
2020-09-04 22:37:37 -04:00
|
|
|
stats, err := sys.GetStats()
|
|
|
|
if err != nil {
|
2020-09-18 05:03:02 -04:00
|
|
|
logger.LogIf(GlobalContext, err)
|
[feat]: change erasure coding default block size from 10MiB to 1MiB (#11721)
major performance improvements in range GETs to avoid large
read amplification when ranges are tiny and random
```
-------------------
Operation: GET
Operations: 142014 -> 339421
Duration: 4m50s -> 4m56s
* Average: +139.41% (+1177.3 MiB/s) throughput, +139.11% (+658.4) obj/s
* Fastest: +125.24% (+1207.4 MiB/s) throughput, +132.32% (+612.9) obj/s
* 50% Median: +139.06% (+1175.7 MiB/s) throughput, +133.46% (+660.9) obj/s
* Slowest: +203.40% (+1267.9 MiB/s) throughput, +198.59% (+753.5) obj/s
```
TTFB from 10MiB BlockSize
```
* First Access TTFB: Avg: 81ms, Median: 61ms, Best: 20ms, Worst: 2.056s
```
TTFB from 1MiB BlockSize
```
* First Access TTFB: Avg: 22ms, Median: 21ms, Best: 8ms, Worst: 91ms
```
Full object reads however do see a slight change which won't be
noticeable in real world, so not doing any comparisons
TTFB still had improvements with full object reads with 1MiB
```
* First Access TTFB: Avg: 68ms, Median: 35ms, Best: 11ms, Worst: 1.16s
```
v/s
TTFB with 10MiB
```
* First Access TTFB: Avg: 388ms, Median: 98ms, Best: 20ms, Worst: 4.156s
```
This change should affect all new uploads, previous uploads should
continue to work with business as usual. But dramatic improvements can
be seen with these changes.
2021-03-06 17:09:34 -05:00
|
|
|
// Default to 8 GiB, not critical.
|
|
|
|
stats.TotalRAM = 8 << 30
|
2020-09-04 22:37:37 -04:00
|
|
|
}
|
|
|
|
// max requests per node is calculated as
|
|
|
|
// total_ram / ram_per_request
|
[feat]: change erasure coding default block size from 10MiB to 1MiB (#11721)
major performance improvements in range GETs to avoid large
read amplification when ranges are tiny and random
```
-------------------
Operation: GET
Operations: 142014 -> 339421
Duration: 4m50s -> 4m56s
* Average: +139.41% (+1177.3 MiB/s) throughput, +139.11% (+658.4) obj/s
* Fastest: +125.24% (+1207.4 MiB/s) throughput, +132.32% (+612.9) obj/s
* 50% Median: +139.06% (+1175.7 MiB/s) throughput, +133.46% (+660.9) obj/s
* Slowest: +203.40% (+1267.9 MiB/s) throughput, +198.59% (+753.5) obj/s
```
TTFB from 10MiB BlockSize
```
* First Access TTFB: Avg: 81ms, Median: 61ms, Best: 20ms, Worst: 2.056s
```
TTFB from 1MiB BlockSize
```
* First Access TTFB: Avg: 22ms, Median: 21ms, Best: 8ms, Worst: 91ms
```
Full object reads however do see a slight change which won't be
noticeable in real world, so not doing any comparisons
TTFB still had improvements with full object reads with 1MiB
```
* First Access TTFB: Avg: 68ms, Median: 35ms, Best: 11ms, Worst: 1.16s
```
v/s
TTFB with 10MiB
```
* First Access TTFB: Avg: 388ms, Median: 98ms, Best: 20ms, Worst: 4.156s
```
This change should affect all new uploads, previous uploads should
continue to work with business as usual. But dramatic improvements can
be seen with these changes.
2021-03-06 17:09:34 -05:00
|
|
|
// ram_per_request is (2MiB+128KiB) * driveCount \
|
|
|
|
// + 2 * 10MiB (default erasure block size v1) + 2 * 1MiB (default erasure block size v2)
|
|
|
|
apiRequestsMaxPerNode = int(stats.TotalRAM / uint64(t.totalDriveCount*(blockSizeLarge+blockSizeSmall)+int(blockSizeV1*2+blockSizeV2*2)))
|
2020-09-04 22:37:37 -04:00
|
|
|
} else {
|
2020-09-12 02:03:08 -04:00
|
|
|
apiRequestsMaxPerNode = cfg.RequestsMax
|
2020-09-04 22:37:37 -04:00
|
|
|
if len(globalEndpoints.Hostnames()) > 0 {
|
|
|
|
apiRequestsMaxPerNode /= len(globalEndpoints.Hostnames())
|
|
|
|
}
|
2020-04-14 15:46:37 -04:00
|
|
|
}
|
2020-12-04 12:32:35 -05:00
|
|
|
if cap(t.requestsPool) < apiRequestsMaxPerNode {
|
|
|
|
// Only replace if needed.
|
|
|
|
// Existing requests will use the previous limit,
|
|
|
|
// but new requests will use the new limit.
|
|
|
|
// There will be a short overlap window,
|
|
|
|
// but this shouldn't last long.
|
|
|
|
t.requestsPool = make(chan struct{}, apiRequestsMaxPerNode)
|
|
|
|
}
|
2020-09-12 02:03:08 -04:00
|
|
|
t.requestsDeadline = cfg.RequestsDeadline
|
2020-11-02 20:21:56 -05:00
|
|
|
t.listQuorum = cfg.GetListQuorum()
|
2020-11-05 14:49:56 -05:00
|
|
|
t.extendListLife = cfg.ExtendListLife
|
2021-02-02 06:15:06 -05:00
|
|
|
t.replicationWorkers = cfg.ReplicationWorkers
|
2020-11-02 20:21:56 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
func (t *apiConfig) getListQuorum() int {
|
|
|
|
t.mu.RLock()
|
|
|
|
defer t.mu.RUnlock()
|
|
|
|
|
|
|
|
return t.listQuorum
|
2020-06-04 17:58:34 -04:00
|
|
|
}
|
|
|
|
|
2020-11-05 14:49:56 -05:00
|
|
|
func (t *apiConfig) getExtendListLife() time.Duration {
|
|
|
|
t.mu.RLock()
|
|
|
|
defer t.mu.RUnlock()
|
|
|
|
|
|
|
|
return t.extendListLife
|
|
|
|
}
|
|
|
|
|
2020-06-04 17:58:34 -04:00
|
|
|
func (t *apiConfig) getCorsAllowOrigins() []string {
|
|
|
|
t.mu.RLock()
|
|
|
|
defer t.mu.RUnlock()
|
|
|
|
|
2020-09-12 02:03:08 -04:00
|
|
|
corsAllowOrigins := make([]string, len(t.corsAllowOrigins))
|
|
|
|
copy(corsAllowOrigins, t.corsAllowOrigins)
|
|
|
|
return corsAllowOrigins
|
2020-06-04 17:58:34 -04:00
|
|
|
}
|
|
|
|
|
2020-09-23 12:14:33 -04:00
|
|
|
func (t *apiConfig) getClusterDeadline() time.Duration {
|
2020-06-04 17:58:34 -04:00
|
|
|
t.mu.RLock()
|
|
|
|
defer t.mu.RUnlock()
|
|
|
|
|
2020-09-23 12:14:33 -04:00
|
|
|
if t.clusterDeadline == 0 {
|
2020-06-04 17:58:34 -04:00
|
|
|
return 10 * time.Second
|
|
|
|
}
|
2020-04-14 15:46:37 -04:00
|
|
|
|
2020-09-23 12:14:33 -04:00
|
|
|
return t.clusterDeadline
|
2020-04-14 15:46:37 -04:00
|
|
|
}
|
|
|
|
|
2020-12-03 22:23:19 -05:00
|
|
|
func (t *apiConfig) getRequestsPool() (chan struct{}, time.Duration) {
|
2020-04-14 15:46:37 -04:00
|
|
|
t.mu.RLock()
|
|
|
|
defer t.mu.RUnlock()
|
|
|
|
|
2020-06-04 17:58:34 -04:00
|
|
|
if t.requestsPool == nil {
|
2020-12-03 22:23:19 -05:00
|
|
|
return nil, time.Duration(0)
|
2020-11-04 11:25:42 -05:00
|
|
|
}
|
2020-04-14 15:46:37 -04:00
|
|
|
|
2020-12-03 22:23:19 -05:00
|
|
|
return t.requestsPool, t.requestsDeadline
|
2020-04-14 15:46:37 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// maxClients throttles the S3 API calls
|
|
|
|
func maxClients(f http.HandlerFunc) http.HandlerFunc {
|
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
2020-12-03 22:23:19 -05:00
|
|
|
pool, deadline := globalAPIConfig.getRequestsPool()
|
2020-04-14 15:46:37 -04:00
|
|
|
if pool == nil {
|
|
|
|
f.ServeHTTP(w, r)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-02-20 03:21:55 -05:00
|
|
|
globalHTTPStats.addRequestsInQueue(1)
|
|
|
|
|
2020-12-03 22:23:19 -05:00
|
|
|
deadlineTimer := time.NewTimer(deadline)
|
|
|
|
defer deadlineTimer.Stop()
|
|
|
|
|
2020-04-14 15:46:37 -04:00
|
|
|
select {
|
|
|
|
case pool <- struct{}{}:
|
|
|
|
defer func() { <-pool }()
|
2021-02-20 03:21:55 -05:00
|
|
|
globalHTTPStats.addRequestsInQueue(-1)
|
2020-04-14 15:46:37 -04:00
|
|
|
f.ServeHTTP(w, r)
|
2020-12-03 22:23:19 -05:00
|
|
|
case <-deadlineTimer.C:
|
2020-04-14 15:46:37 -04:00
|
|
|
// Send a http timeout message
|
|
|
|
writeErrorResponse(r.Context(), w,
|
|
|
|
errorCodes.ToAPIErr(ErrOperationMaxedOut),
|
|
|
|
r.URL, guessIsBrowserReq(r))
|
2021-02-20 03:21:55 -05:00
|
|
|
globalHTTPStats.addRequestsInQueue(-1)
|
2020-04-14 15:46:37 -04:00
|
|
|
return
|
|
|
|
case <-r.Context().Done():
|
2021-02-20 03:21:55 -05:00
|
|
|
globalHTTPStats.addRequestsInQueue(-1)
|
2020-04-14 15:46:37 -04:00
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-02-02 06:15:06 -05:00
|
|
|
|
|
|
|
func (t *apiConfig) getReplicationWorkers() int {
|
|
|
|
t.mu.RLock()
|
|
|
|
defer t.mu.RUnlock()
|
|
|
|
|
|
|
|
return t.replicationWorkers
|
|
|
|
}
|