// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"bytes"
	"context"
	"net/http"
	"strings"

	jsoniter "github.com/json-iterator/go"

	"github.com/minio/minio/internal/hash"
	"github.com/minio/minio/internal/logger"
)

const (
	dataUsageRoot   = SlashSeparator
	dataUsageBucket = minioMetaBucket + SlashSeparator + bucketMetaPrefix

	dataUsageObjName   = ".usage.json"
	dataUsageCacheName = ".usage-cache.bin"
	dataUsageBloomName = ".bloomcycle.bin"
)
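
// For reference: assuming the usual definitions of minioMetaBucket
// (".minio.sys") and bucketMetaPrefix ("buckets") elsewhere in this
// package, the aggregated usage snapshot above resolves to the backend
// object ".minio.sys/buckets/.usage.json", while the per-bucket scanner
// caches read by loadPrefixUsageFromBackend below use names of the form
// "<bucket>/.usage-cache.bin".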

// storeDataUsageInBackend will store all objects sent on the dui channel until closed.
func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan DataUsageInfo) {
	for dataUsageInfo := range dui {
		var json = jsoniter.ConfigCompatibleWithStandardLibrary
		dataUsageJSON, err := json.Marshal(dataUsageInfo)
		if err != nil {
			logger.LogIf(ctx, err)
			continue
		}
		size := int64(len(dataUsageJSON))
		r, err := hash.NewReader(bytes.NewReader(dataUsageJSON), size, "", "", size)
		if err != nil {
			logger.LogIf(ctx, err)
			continue
		}
		_, err = objAPI.PutObject(ctx, dataUsageBucket, dataUsageObjName, NewPutObjReader(r), ObjectOptions{})
		if !isErrBucketNotFound(err) {
			logger.LogIf(ctx, err)
		}
	}
}
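
// A minimal wiring sketch (the channel owner and buffer size here are
// illustrative assumptions, not taken from this file): the caller owns
// the channel, publishes periodic snapshots, and closes it on shutdown,
// at which point the goroutine above drains and returns.
//
//	duiCh := make(chan DataUsageInfo, 1)
//	go storeDataUsageInBackend(ctx, objAPI, duiCh)
//	duiCh <- dataUsageInfo // publish a snapshot
//	close(duiCh)           // stop the storing goroutine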

// loadPrefixUsageFromBackend returns prefix usages found in the passed bucket,
// e.g.: /testbucket/prefix => 355601334
func loadPrefixUsageFromBackend(ctx context.Context, objAPI ObjectLayer, bucket string) (map[string]uint64, error) {
	z, ok := objAPI.(*erasureServerPools)
	if !ok {
		// Prefix usage is empty
		return map[string]uint64{}, nil
	}

	cache := dataUsageCache{}

	m := make(map[string]uint64)
	for _, pool := range z.serverPools {
		for _, er := range pool.sets {
			// Load bucket usage prefixes
			if err := cache.load(ctx, er, bucket+slashSeparator+dataUsageCacheName); err == nil {
				root := cache.find(bucket)
				if root == nil {
					// We don't have usage information for this bucket in this
					// set, go to the next set
					continue
				}

				for id, usageInfo := range cache.flattenChildrens(*root) {
					// decodeDirObject to avoid any __XL_DIR__ objects
					prefix := decodeDirObject(strings.TrimPrefix(id, bucket+slashSeparator))
					m[prefix] += uint64(usageInfo.Size)
				}
			}
		}
	}

	return m, nil
}
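
// Illustrative call (the value mirrors the example in the doc comment above):
//
//	m, err := loadPrefixUsageFromBackend(ctx, objAPI, "testbucket")
//	// m => map[string]uint64{"prefix": 355601334, ...}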

// loadDataUsageFromBackend attempts to load the latest data usage snapshot
// from the backend, returning an empty DataUsageInfo if none exists yet.
func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsageInfo, error) {
	r, err := objAPI.GetObjectNInfo(ctx, dataUsageBucket, dataUsageObjName, nil, http.Header{}, readLock, ObjectOptions{})
	if err != nil {
		if isErrObjectNotFound(err) || isErrBucketNotFound(err) {
			return DataUsageInfo{}, nil
		}
		return DataUsageInfo{}, toObjectErr(err, dataUsageBucket, dataUsageObjName)
	}
	defer r.Close()

	var dataUsageInfo DataUsageInfo
	var json = jsoniter.ConfigCompatibleWithStandardLibrary
	if err = json.NewDecoder(r).Decode(&dataUsageInfo); err != nil {
		return DataUsageInfo{}, err
	}

	// Back-fill the newer BucketsUsage map from the legacy BucketSizes map
	// so that snapshots written by older releases remain readable.
	if len(dataUsageInfo.BucketsUsage) == 0 {
		dataUsageInfo.BucketsUsage = make(map[string]BucketUsageInfo, len(dataUsageInfo.BucketSizes))
		for bucket, size := range dataUsageInfo.BucketSizes {
			dataUsageInfo.BucketsUsage[bucket] = BucketUsageInfo{Size: size}
		}
	}

	// Conversely, keep the legacy BucketSizes map populated so that older
	// consumers of this structure continue to work.
	if len(dataUsageInfo.BucketSizes) == 0 {
		dataUsageInfo.BucketSizes = make(map[string]uint64, len(dataUsageInfo.BucketsUsage))
		for bucket, bui := range dataUsageInfo.BucketsUsage {
			dataUsageInfo.BucketSizes[bucket] = bui.Size
		}
	}

	// Migrate the legacy per-bucket replication (V1) counters into the
	// per-target ReplicationInfo map, keyed by the configured role ARN.
	for bucket, bui := range dataUsageInfo.BucketsUsage {
		if bui.ReplicatedSizeV1 > 0 || bui.ReplicationFailedCountV1 > 0 ||
			bui.ReplicationFailedSizeV1 > 0 || bui.ReplicationPendingCountV1 > 0 {
			cfg, _ := getReplicationConfig(GlobalContext, bucket)
			if cfg != nil && cfg.RoleArn != "" {
				dataUsageInfo.ReplicationInfo = make(map[string]BucketTargetUsageInfo)
				dataUsageInfo.ReplicationInfo[cfg.RoleArn] = BucketTargetUsageInfo{
					ReplicationFailedSize:   bui.ReplicationFailedSizeV1,
					ReplicationFailedCount:  bui.ReplicationFailedCountV1,
					ReplicatedSize:          bui.ReplicatedSizeV1,
					ReplicationPendingCount: bui.ReplicationPendingCountV1,
					ReplicationPendingSize:  bui.ReplicationPendingSizeV1,
				}
			}
		}
	}
	return dataUsageInfo, nil
}
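
// A short consumption sketch (hypothetical caller, for illustration only;
// the fields used are the ones populated by loadDataUsageFromBackend above):
//
//	dui, err := loadDataUsageFromBackend(ctx, objAPI)
//	if err != nil {
//		return err
//	}
//	for bucket, usage := range dui.BucketsUsage {
//		fmt.Printf("%s: %d bytes\n", bucket, usage.Size)
//	}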