ListObjects Metadata Caching (#10648)

Design: https://gist.github.com/klauspost/025c09b48ed4a1293c917cecfabdf21c

Gist of improvements:

* Cross-server caching: listings use the same cached data across servers and requests.
* Lists can be resumed from any point at constant speed.
* Metadata for all files scanned is stored for streaming retrieval.
* The existing bloom filters controlled by the crawler are used to validate caches.
* Concurrent requests for the same data (or parts of it) will not spawn additional
  walkers; see the sketch after this list.
* Listing a subdirectory of an existing recursive cache will use the cache.
* All listing operations are fully streamable, so the number of objects in a bucket
  no longer dictates memory usage.
* Listings can be handled by any server within the cluster.
* Caches are cleaned up when out of date or superseded by a more recent one.
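
The "no additional walkers" point is essentially a singleflight pattern over scan
results. Below is a minimal, self-contained Go sketch of the idea, assuming a
hypothetical walkCache keyed by bucket and prefix; the names are illustrative and
are not the identifiers used in this commit.

    package main

    import (
    	"fmt"
    	"sync"
    	"time"
    )

    // walkResult stands in for the metadata stream a bucket scan produces.
    type walkResult struct {
    	entries []string
    }

    type walkCall struct {
    	done chan struct{}
    	res  *walkResult
    	err  error
    }

    // walkCache deduplicates scans: at most one in-flight walk per cache key.
    type walkCache struct {
    	mu       sync.Mutex
    	inflight map[string]*walkCall
    }

    // listOnce runs walk for key at most once at a time; callers that arrive
    // while a walk is in flight block and share its result instead of
    // spawning another walker.
    func (c *walkCache) listOnce(key string, walk func() (*walkResult, error)) (*walkResult, error) {
    	c.mu.Lock()
    	if call, ok := c.inflight[key]; ok {
    		c.mu.Unlock()
    		<-call.done // piggyback on the walker that is already scanning
    		return call.res, call.err
    	}
    	call := &walkCall{done: make(chan struct{})}
    	c.inflight[key] = call
    	c.mu.Unlock()

    	call.res, call.err = walk()
    	close(call.done)

    	c.mu.Lock()
    	delete(c.inflight, key)
    	c.mu.Unlock()
    	return call.res, call.err
    }

    func main() {
    	cache := &walkCache{inflight: make(map[string]*walkCall)}
    	var wg sync.WaitGroup
    	for i := 0; i < 3; i++ {
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			res, _ := cache.listOnce("bucket/prefix", func() (*walkResult, error) {
    				time.Sleep(50 * time.Millisecond) // keep the walk in flight so callers overlap
    				fmt.Println("scanning")           // should print only once
    				return &walkResult{entries: []string{"a.txt", "b.txt"}}, nil
    			})
    			fmt.Println(len(res.entries), "entries")
    		}()
    	}
    	wg.Wait()
    }

Callers that find an in-flight entry block on its channel and reuse the result;
only a caller that finds no entry pays for a walk, which is what keeps concurrent
listings of the same prefix from multiplying disk scans.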
Author: Klaus Post
Committer: GitHub
Date: 2020-10-28 09:18:35 -07:00
Commit: a982baff27 (parent 51222cc664)
65 changed files with 6328 additions and 742 deletions

@@ -27,6 +27,8 @@ import (
 	"sync/atomic"
 	"time"
+	"github.com/minio/minio/cmd/logger"
+	xhttp "github.com/minio/minio/cmd/http"
 	xnet "github.com/minio/minio/pkg/net"
 )
@@ -113,6 +115,7 @@ func (c *Client) Call(ctx context.Context, method string, values url.Values, bod
 	resp, err := c.httpClient.Do(req)
 	if err != nil {
 		if xnet.IsNetworkOrHostDown(err) {
+			logger.LogIf(ctx, err, "marking disk offline")
 			c.MarkOffline()
 		}
 		return nil, &NetworkError{err}
@@ -142,6 +145,7 @@ func (c *Client) Call(ctx context.Context, method string, values url.Values, bod
 	b, err := ioutil.ReadAll(io.LimitReader(resp.Body, c.MaxErrResponseSize))
 	if err != nil {
 		if xnet.IsNetworkOrHostDown(err) {
+			logger.LogIf(ctx, err, "marking disk offline")
 			c.MarkOffline()
 		}
 		return nil, err
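
Both hunks add the same thing: a log line just before MarkOffline, so that a disk
taken offline after a network or host-down error can be traced back to the
triggering error from the logs alone. A condensed sketch of that flow follows;
Client, call, markOffline, and isNetworkOrHostDown here are simplified stand-ins,
not the actual rest-package API.

    package main

    import (
    	"errors"
    	"fmt"
    	"log"
    	"net"
    	"sync/atomic"
    )

    // NetworkError mirrors the typed error the hunks return.
    type NetworkError struct{ Err error }

    func (e *NetworkError) Error() string { return "network error: " + e.Err.Error() }

    // Client is a simplified stand-in for the rest-client type.
    type Client struct {
    	online atomic.Bool
    }

    func (c *Client) markOffline() { c.online.Store(false) }

    // isNetworkOrHostDown loosely approximates xnet.IsNetworkOrHostDown by
    // treating any net.Error as a connectivity failure; the real check
    // inspects the error more precisely.
    func isNetworkOrHostDown(err error) bool {
    	var nerr net.Error
    	return errors.As(err, &nerr)
    }

    // call wraps a request, logging before the state change so the reason a
    // disk went offline is visible in the logs, which is the gap the hunks fix.
    func (c *Client) call(req func() error) error {
    	if err := req(); err != nil {
    		if isNetworkOrHostDown(err) {
    			log.Printf("marking disk offline: %v", err)
    			c.markOffline()
    		}
    		return &NetworkError{Err: err}
    	}
    	return nil
    }

    func main() {
    	c := &Client{}
    	c.online.Store(true)
    	err := c.call(func() error {
    		return &net.OpError{Op: "dial", Err: errors.New("host unreachable")}
    	})
    	fmt.Println(err, "| online:", c.online.Load())
    }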