// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"bytes"
	"context"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"math/rand"
	"net"
	"net/http"
	"path"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"
	"unicode/utf8"

	"github.com/google/uuid"
	"github.com/klauspost/compress/s2"
	"github.com/klauspost/readahead"
	"github.com/minio/minio-go/v7/pkg/s3utils"
	"github.com/minio/minio/internal/config/compress"
	"github.com/minio/minio/internal/config/dns"
	"github.com/minio/minio/internal/config/storageclass"
	"github.com/minio/minio/internal/crypto"
	"github.com/minio/minio/internal/hash"
	xhttp "github.com/minio/minio/internal/http"
	"github.com/minio/minio/internal/ioutil"
	xioutil "github.com/minio/minio/internal/ioutil"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/pkg/v3/trie"
	"github.com/minio/pkg/v3/wildcard"
	"github.com/valyala/bytebufferpool"
	"golang.org/x/exp/slices"
)

const (
	// MinIO meta bucket.
	minioMetaBucket = ".minio.sys"

	// Multipart meta prefix.
	mpartMetaPrefix = "multipart"

	// MinIO Multipart meta prefix.
	minioMetaMultipartBucket = minioMetaBucket + SlashSeparator + mpartMetaPrefix

	// MinIO tmp meta prefix.
	minioMetaTmpBucket = minioMetaBucket + "/tmp"

	// MinIO tmp meta prefix for deleted objects.
	minioMetaTmpDeletedBucket = minioMetaTmpBucket + "/.trash"

	// DNS separator (period), used for bucket name validation.
	dnsDelimiter = "."

	// On compressed files bigger than this;
	compReadAheadSize = 100 << 20

	// Read this many buffers ahead.
	compReadAheadBuffers = 5

	// Size of each buffer.
	compReadAheadBufSize = 1 << 20

	// Pad Encrypted+Compressed files to a multiple of this.
	compPadEncrypted = 256

	// Disable compressed file indices below this size
	compMinIndexSize = 8 << 20
)

// isMinioMetaBucketName returns true if given bucket is a MinIO internal
// bucket and false otherwise.
func isMinioMetaBucketName(bucket string) bool {
	return strings.HasPrefix(bucket, minioMetaBucket)
}

// IsValidBucketName verifies that a bucket name is in accordance with
// Amazon's requirements (i.e. DNS naming conventions). It must be 3-63
// characters long, and it must be a sequence of one or more labels
// separated by periods. Each label can contain lowercase ascii
// letters, decimal digits and hyphens, but must not begin or end with
// a hyphen. See:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
func IsValidBucketName(bucket string) bool {
	// Special case when bucket is equal to one of the meta buckets.
	if isMinioMetaBucketName(bucket) {
		return true
	}
	if len(bucket) < 3 || len(bucket) > 63 {
		return false
	}

	// Split on dot and check each piece conforms to rules.
	allNumbers := true
	pieces := strings.Split(bucket, dnsDelimiter)
	for _, piece := range pieces {
		if len(piece) == 0 || piece[0] == '-' ||
			piece[len(piece)-1] == '-' {
			// Current piece has 0-length or starts or
			// ends with a hyphen.
			return false
		}
		// Now only need to check if each piece is a valid
		// 'label' in AWS terminology and if the bucket looks
		// like an IP address.
		isNotNumber := false
		for i := 0; i < len(piece); i++ {
			switch {
			case (piece[i] >= 'a' && piece[i] <= 'z' ||
				piece[i] == '-'):
				// Found a non-digit character, so
				// this piece is not a number.
				isNotNumber = true
			case piece[i] >= '0' && piece[i] <= '9':
				// Nothing to do.
			default:
				// Found invalid character.
				return false
			}
		}
		allNumbers = allNumbers && !isNotNumber
	}
	// Does the bucket name look like an IP address?
	return !(len(pieces) == 4 && allNumbers)
}
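
// Illustrative behavior of the checks above (derived from this
// implementation, not an exhaustive list):
//
//	IsValidBucketName("my-bucket.images") // true: valid DNS-style labels
//	IsValidBucketName("My-Bucket")        // false: uppercase is rejected
//	IsValidBucketName("ab")               // false: shorter than 3 characters
//	IsValidBucketName("192.168.1.10")     // false: looks like an IP address
//	IsValidBucketName(".minio.sys")       // true: internal meta bucket special case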

// IsValidObjectName verifies an object name in accordance with Amazon's
// requirements. It cannot exceed 1024 characters and must be a valid UTF8
// string.
//
// See:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
//
// You should avoid the following characters in a key name because of
// significant special handling for consistency across all
// applications.
//
// Rejects strings with following characters.
//
// - Backslash ("\")
//
// Additionally, MinIO does not support object names with a trailing SlashSeparator.
func IsValidObjectName(object string) bool {
	if len(object) == 0 {
		return false
	}
	if HasSuffix(object, SlashSeparator) {
		return false
	}
	return IsValidObjectPrefix(object)
}

// IsValidObjectPrefix verifies whether the prefix is a valid object name.
// It is valid to have an empty prefix.
func IsValidObjectPrefix(object string) bool {
	if hasBadPathComponent(object) {
		return false
	}
	if !utf8.ValidString(object) {
		return false
	}
	if strings.Contains(object, `//`) {
		return false
	}
	// This is valid for AWS S3 but it will never
	// work with file systems, we will reject here
	// to return object name invalid rather than
	// a cryptic error from the file system.
	return !strings.ContainsRune(object, 0)
}

// checkObjectNameForLengthAndSlash - checks the validity of the object name
// length and whether it has a slash prefix.
func checkObjectNameForLengthAndSlash(bucket, object string) error {
	// Check for the length of object name
	if len(object) > 1024 {
		return ObjectNameTooLong{
			Bucket: bucket,
			Object: object,
		}
	}
	// Check for slash as prefix in object name
	if HasPrefix(object, SlashSeparator) {
		return ObjectNamePrefixAsSlash{
			Bucket: bucket,
			Object: object,
		}
	}
	if runtime.GOOS == globalWindowsOSName {
		// Explicitly disallowed characters on windows.
		// Avoids most problematic names.
		if strings.ContainsAny(object, `\:*?"|<>`) {
			return ObjectNameInvalid{
				Bucket: bucket,
				Object: object,
			}
		}
	}
	return nil
}

// SlashSeparator - slash separator.
const SlashSeparator = "/"

// SlashSeparatorChar - slash separator.
const SlashSeparatorChar = '/'

// retainSlash - retains slash from a path.
func retainSlash(s string) string {
	if s == "" {
		return s
	}
	return strings.TrimSuffix(s, SlashSeparator) + SlashSeparator
}

// pathsJoinPrefix - like pathJoin retains trailing SlashSeparator
// for all elements, prepends them with 'prefix' respectively.
func pathsJoinPrefix(prefix string, elem ...string) (paths []string) {
	paths = make([]string, len(elem))
	for i, e := range elem {
		paths[i] = pathJoin(prefix, e)
	}
	return paths
}

// pathJoin - like path.Join() but retains trailing SlashSeparator of the last element
func pathJoin(elem ...string) string {
	sb := bytebufferpool.Get()
	defer func() {
		sb.Reset()
		bytebufferpool.Put(sb)
	}()

	return pathJoinBuf(sb, elem...)
}
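
// Illustrative results, traced from pathJoin/pathJoinBuf (unlike path.Join,
// a trailing slash on the last element is preserved):
//
//	pathJoin("a", "b/")                        // "a/b/"
//	pathJoin("a", "", "b")                     // "a/b"
//	pathJoin("bucket", "prefix", "object.txt") // "bucket/prefix/object.txt"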

// pathJoinBuf - like path.Join() but retains trailing SlashSeparator of the last element.
// Provide a string builder to reduce allocation.
func pathJoinBuf(dst *bytebufferpool.ByteBuffer, elem ...string) string {
	trailingSlash := len(elem) > 0 && hasSuffixByte(elem[len(elem)-1], SlashSeparatorChar)
	dst.Reset()
	added := 0
	for _, e := range elem {
		if added > 0 || e != "" {
			if added > 0 {
				dst.WriteByte(SlashSeparatorChar)
			}
			dst.WriteString(e)
			added += len(e)
		}
	}

	if pathNeedsClean(dst.Bytes()) {
		s := path.Clean(dst.String())
		if trailingSlash {
			return s + SlashSeparator
		}
		return s
	}
	if trailingSlash {
		dst.WriteByte(SlashSeparatorChar)
	}
	return dst.String()
}

// hasSuffixByte returns true if the last byte of s is 'suffix'
func hasSuffixByte(s string, suffix byte) bool {
	return len(s) > 0 && s[len(s)-1] == suffix
}

// pathNeedsClean returns whether path.Clean may change the path.
// Will detect all cases that will be cleaned,
// but may produce false positives on non-trivial paths.
func pathNeedsClean(path []byte) bool {
	if len(path) == 0 {
		return true
	}

	rooted := path[0] == '/'
	n := len(path)

	r, w := 0, 0
	if rooted {
		r, w = 1, 1
	}

	for r < n {
		switch {
		case path[r] > 127:
			// Non ascii.
			return true
		case path[r] == '/':
			// multiple / elements
			return true
		case path[r] == '.' && (r+1 == n || path[r+1] == '/'):
			// . element - assume it has to be cleaned.
			return true
		case path[r] == '.' && path[r+1] == '.' && (r+2 == n || path[r+2] == '/'):
			// .. element: remove to last / - assume it has to be cleaned.
			return true
		default:
			// real path element.
			// add slash if needed
			if rooted && w != 1 || !rooted && w != 0 {
				w++
			}
			// copy element
			for ; r < n && path[r] != '/'; r++ {
				w++
			}
			// allow one slash, not at end
			if r < n-1 && path[r] == '/' {
				r++
			}
		}
	}

	// Turn empty string into "."
	if w == 0 {
		return true
	}

	return false
}

// mustGetUUID - get a random UUID.
func mustGetUUID() string {
	u, err := uuid.NewRandom()
	if err != nil {
		logger.CriticalIf(GlobalContext, err)
	}

	return u.String()
}

// mustGetUUIDBytes - get a random UUID as 16 bytes unencoded.
func mustGetUUIDBytes() []byte {
	u, err := uuid.NewRandom()
	if err != nil {
		logger.CriticalIf(GlobalContext, err)
	}
	return u[:]
}

// Create an s3 compatible MD5sum for complete multipart transaction.
func getCompleteMultipartMD5(parts []CompletePart) string {
	var finalMD5Bytes []byte
	for _, part := range parts {
		md5Bytes, err := hex.DecodeString(canonicalizeETag(part.ETag))
		if err != nil {
			finalMD5Bytes = append(finalMD5Bytes, []byte(part.ETag)...)
		} else {
			finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
		}
	}
	s3MD5 := fmt.Sprintf("%s-%d", getMD5Hash(finalMD5Bytes), len(parts))
	return s3MD5
}
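
// The resulting ETag follows the S3 multipart convention: the hex MD5 of the
// concatenated binary part digests, followed by "-" and the part count. For
// example, an upload completed with two parts yields an ETag shaped like
// "669fdad9e309b552f1e9cf7b489c1f73-2" (the hex value here is illustrative;
// the actual digest depends on the part ETags).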

// Clean unwanted fields from metadata
func cleanMetadata(metadata map[string]string) map[string]string {
	// Remove STANDARD StorageClass
	metadata = removeStandardStorageClass(metadata)
	// Clean meta etag keys 'md5Sum', 'etag', "expires", "x-amz-tagging".
	return cleanMetadataKeys(metadata, "md5Sum", "etag", "expires", xhttp.AmzObjectTagging, "last-modified", VersionPurgeStatusKey)
}

// Filter X-Amz-Storage-Class field only if it is set to STANDARD.
// This is done since AWS S3 doesn't return STANDARD Storage class as response header.
func removeStandardStorageClass(metadata map[string]string) map[string]string {
	if metadata[xhttp.AmzStorageClass] == storageclass.STANDARD {
		delete(metadata, xhttp.AmzStorageClass)
	}
	return metadata
}

// cleanMetadataKeys takes keyNames to be filtered
// and returns a new map with all the entries with keyNames removed.
func cleanMetadataKeys(metadata map[string]string, keyNames ...string) map[string]string {
	newMeta := make(map[string]string, len(metadata))
	for k, v := range metadata {
		if slices.Contains(keyNames, k) {
			continue
		}
		newMeta[k] = v
	}
	return newMeta
}

// Extracts etag value from the metadata.
func extractETag(metadata map[string]string) string {
	etag, ok := metadata["etag"]
	if !ok {
		// md5Sum tag is kept for backward compatibility.
		etag = metadata["md5Sum"]
	}
	// Success.
	return etag
}

// HasPrefix - Prefix matcher string matches prefix in a platform specific way.
// For example, on Windows the filesystem is case insensitive, so we are
// supposed to do case insensitive checks.
func HasPrefix(s string, prefix string) bool {
	if runtime.GOOS == globalWindowsOSName {
		return stringsHasPrefixFold(s, prefix)
	}
	return strings.HasPrefix(s, prefix)
}

// HasSuffix - Suffix matcher string matches suffix in a platform specific way.
// For example, on Windows the filesystem is case insensitive, so we are
// supposed to do case insensitive checks.
func HasSuffix(s string, suffix string) bool {
	if runtime.GOOS == globalWindowsOSName {
		return strings.HasSuffix(strings.ToLower(s), strings.ToLower(suffix))
	}
	return strings.HasSuffix(s, suffix)
}

// Validates if two strings are equal.
func isStringEqual(s1 string, s2 string) bool {
	if runtime.GOOS == globalWindowsOSName {
		return strings.EqualFold(s1, s2)
	}
	return s1 == s2
}

// Ignores all reserved bucket names or invalid bucket names.
func isReservedOrInvalidBucket(bucketEntry string, strict bool) bool {
	if bucketEntry == "" {
		return true
	}

	bucketEntry = strings.TrimSuffix(bucketEntry, SlashSeparator)
	if strict {
		if err := s3utils.CheckValidBucketNameStrict(bucketEntry); err != nil {
			return true
		}
	} else {
		if err := s3utils.CheckValidBucketName(bucketEntry); err != nil {
			return true
		}
	}
	return isMinioMetaBucket(bucketEntry) || isMinioReservedBucket(bucketEntry)
}

// Returns true if input bucket is a reserved minio meta bucket '.minio.sys'.
func isMinioMetaBucket(bucketName string) bool {
	return bucketName == minioMetaBucket
}

// Returns true if input bucket is a reserved minio bucket 'minio'.
func isMinioReservedBucket(bucketName string) bool {
	return bucketName == minioReservedBucket
}

// returns a slice of hosts by reading a slice of DNS records
func getHostsSlice(records []dns.SrvRecord) []string {
	hosts := make([]string, len(records))
	for i, r := range records {
		hosts[i] = net.JoinHostPort(r.Host, string(r.Port))
	}
	return hosts
}

// returns an online host (and corresponding port) from a slice of DNS records
func getHostFromSrv(records []dns.SrvRecord) (host string) {
	hosts := getHostsSlice(records)
	rng := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
	var d net.Dialer
	var retry int
	for retry < len(hosts) {
		ctx, cancel := context.WithTimeout(GlobalContext, 300*time.Millisecond)

		host = hosts[rng.Intn(len(hosts))]
		conn, err := d.DialContext(ctx, "tcp", host)
		cancel()
		if err != nil {
			retry++
			continue
		}
		conn.Close()
		break
	}

	return host
}

// IsCompressed returns true if the object is marked as compressed.
func (o *ObjectInfo) IsCompressed() bool {
	_, ok := o.UserDefined[ReservedMetadataPrefix+"compression"]
	return ok
}

// IsCompressedOK returns whether the object is compressed and can be decompressed.
func (o *ObjectInfo) IsCompressedOK() (bool, error) {
	scheme, ok := o.UserDefined[ReservedMetadataPrefix+"compression"]
	if !ok {
		return false, nil
	}
	switch scheme {
	case compressionAlgorithmV1, compressionAlgorithmV2:
		return true, nil
	}
	return true, fmt.Errorf("unknown compression scheme: %s", scheme)
}

// GetActualSize - returns the actual size of the stored object
func (o ObjectInfo) GetActualSize() (int64, error) {
	if o.ActualSize != nil {
		return *o.ActualSize, nil
	}
	if o.IsCompressed() {
		sizeStr, ok := o.UserDefined[ReservedMetadataPrefix+"actual-size"]
		if !ok {
			return -1, errInvalidDecompressedSize
		}
		size, err := strconv.ParseInt(sizeStr, 10, 64)
		if err != nil {
			return -1, errInvalidDecompressedSize
		}
		return size, nil
	}
	if _, ok := crypto.IsEncrypted(o.UserDefined); ok {
		sizeStr, ok := o.UserDefined[ReservedMetadataPrefix+"actual-size"]
		if ok {
			size, err := strconv.ParseInt(sizeStr, 10, 64)
			if err != nil {
				return -1, errObjectTampered
			}
			return size, nil
		}
		return o.DecryptedSize()
	}

	return o.Size, nil
}

// Disabling compression for encryption-enabled requests.
// Using compression and encryption together leaves room for side channel attacks.
// Eliminate non-compressible objects by extensions/content-types.
func isCompressible(header http.Header, object string) bool {
	globalCompressConfigMu.Lock()
	cfg := globalCompressConfig
	globalCompressConfigMu.Unlock()

	return !excludeForCompression(header, object, cfg)
}

// Eliminate the non-compressible objects.
func excludeForCompression(header http.Header, object string, cfg compress.Config) bool {
	objStr := object
	contentType := header.Get(xhttp.ContentType)
	if !cfg.Enabled {
		return true
	}

	if crypto.Requested(header) && !cfg.AllowEncrypted {
		return true
	}

	// We strictly disable compression for standard extensions/content-types (`compressed`).
	if hasStringSuffixInSlice(objStr, standardExcludeCompressExtensions) || hasPattern(standardExcludeCompressContentTypes, contentType) {
		return true
	}

	// Filter compression includes.
	if len(cfg.Extensions) == 0 && len(cfg.MimeTypes) == 0 {
		// Nothing to filter, include everything.
		return false
	}

	if len(cfg.Extensions) > 0 && hasStringSuffixInSlice(objStr, cfg.Extensions) {
		// Matched an extension to compress, do not exclude.
		return false
	}

	if len(cfg.MimeTypes) > 0 && hasPattern(cfg.MimeTypes, contentType) {
		// Matched a MIME type to compress, do not exclude.
		return false
	}

	// Did not match any inclusion filters, exclude from compression.
	return true
}
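
// To illustrate the decision flow above with a hypothetical configuration
// (cfg.Enabled=true, cfg.AllowEncrypted=false, cfg.Extensions=[".txt", ".log"],
// cfg.MimeTypes empty):
//
//	- an SSE-encrypted PUT is excluded, because encryption was requested
//	  and AllowEncrypted is off;
//	- "server.log" is compressed, since ".log" matches the extension filter;
//	- "photo.raw" is excluded, since it matches no inclusion filter.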

// Utility which returns if a string is present in the list.
// Comparison is case insensitive. Explicit short-circuit if
// the list contains the wildcard "*".
func hasStringSuffixInSlice(str string, list []string) bool {
	str = strings.ToLower(str)
	for _, v := range list {
		if v == "*" {
			return true
		}

		if strings.HasSuffix(str, strings.ToLower(v)) {
			return true
		}
	}
	return false
}

// Returns true if any of the given wildcard patterns match the matchStr.
func hasPattern(patterns []string, matchStr string) bool {
	for _, pattern := range patterns {
		if ok := wildcard.MatchSimple(pattern, matchStr); ok {
			return true
		}
	}
	return false
}

// Returns the part file name which matches the partNumber and etag.
func getPartFile(entriesTrie *trie.Trie, partNumber int, etag string) (partFile string) {
	for _, match := range entriesTrie.PrefixMatch(fmt.Sprintf("%.5d.%s.", partNumber, etag)) {
		partFile = match
		break
	}
	return partFile
}

func partNumberToRangeSpec(oi ObjectInfo, partNumber int) *HTTPRangeSpec {
	if oi.Size == 0 || len(oi.Parts) == 0 {
		return nil
	}

	var start int64
	end := int64(-1)
	for i := 0; i < len(oi.Parts) && i < partNumber; i++ {
		start = end + 1
		end = start + oi.Parts[i].ActualSize - 1
	}

	return &HTTPRangeSpec{Start: start, End: end}
}
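
// Worked example for the mapping above: with three parts whose ActualSize
// values are 100, 50 and 25 bytes, partNumber=2 accumulates part 1 (0-99) and
// part 2 (100-149), so the returned range is {Start: 100, End: 149}.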

// Returns the compressed offset which should be skipped.
// If encrypted, offsets are adjusted for encrypted block headers/trailers.
// Since decompression happens after decryption, the encryption overhead is only added to compressedOffset.
func getCompressedOffsets(oi ObjectInfo, offset int64, decrypt func([]byte) ([]byte, error)) (compressedOffset int64, partSkip int64, firstPart int, decryptSkip int64, seqNum uint32) {
	var skipLength int64
	var cumulativeActualSize int64
	var firstPartIdx int
	for i, part := range oi.Parts {
		cumulativeActualSize += part.ActualSize
		if cumulativeActualSize <= offset {
			compressedOffset += part.Size
		} else {
			firstPartIdx = i
			skipLength = cumulativeActualSize - part.ActualSize
			break
		}
	}
	partSkip = offset - skipLength

	// Load index and skip more if feasible.
	if partSkip > 0 && len(oi.Parts) > firstPartIdx && len(oi.Parts[firstPartIdx].Index) > 0 {
		_, isEncrypted := crypto.IsEncrypted(oi.UserDefined)
		if isEncrypted {
			dec, err := decrypt(oi.Parts[firstPartIdx].Index)
			if err == nil {
				// Load Index
				var idx s2.Index
				_, err := idx.Load(s2.RestoreIndexHeaders(dec))

				// Find compressed/uncompressed offsets of our partskip
				compOff, uCompOff, err2 := idx.Find(partSkip)

				if err == nil && err2 == nil && compOff > 0 {
					// Encrypted.
					const sseDAREEncPackageBlockSize = SSEDAREPackageBlockSize + SSEDAREPackageMetaSize
					// Number of full blocks in skipped area
					seqNum = uint32(compOff / SSEDAREPackageBlockSize)
					// Skip this many inside a decrypted block to get to compression block start
					decryptSkip = compOff % SSEDAREPackageBlockSize
					// Skip this number of full blocks.
					skipEnc := compOff / SSEDAREPackageBlockSize
					skipEnc *= sseDAREEncPackageBlockSize
					compressedOffset += skipEnc
					// Skip this number of uncompressed bytes.
					partSkip -= uCompOff
				}
			}
		} else {
			// Not encrypted
			var idx s2.Index
			_, err := idx.Load(s2.RestoreIndexHeaders(oi.Parts[firstPartIdx].Index))

			// Find compressed/uncompressed offsets of our partskip
			compOff, uCompOff, err2 := idx.Find(partSkip)

			if err == nil && err2 == nil && compOff > 0 {
				compressedOffset += compOff
				partSkip -= uCompOff
			}
		}
	}

	return compressedOffset, partSkip, firstPartIdx, decryptSkip, seqNum
}
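
// A sketch of the arithmetic above, ignoring the index fast-path: for an
// object with two parts of ActualSize 10 MiB each, compressed to Size 4 MiB
// each, a request offset of 15 MiB skips all of part 1 (compressedOffset
// becomes 4 MiB), lands in part 2 (firstPart=1), and still has to discard
// partSkip = 5 MiB of decompressed data from the start of that part.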

// GetObjectReader is a type that wraps a reader with a lock to
// provide a ReadCloser interface that unlocks on Close()
type GetObjectReader struct {
	io.Reader
	ObjInfo    ObjectInfo
	cleanUpFns []func()
	once       sync.Once
}

// WithCleanupFuncs sets additional cleanup functions to be called when closing
// the GetObjectReader.
func (g *GetObjectReader) WithCleanupFuncs(fns ...func()) *GetObjectReader {
	g.cleanUpFns = append(g.cleanUpFns, fns...)
	return g
}

// NewGetObjectReaderFromReader sets up a GetObjectReader with a given
// reader. This ignores any object properties.
func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, opts ObjectOptions, cleanupFns ...func()) (*GetObjectReader, error) {
	if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
		// Call the cleanup funcs
		for i := len(cleanupFns) - 1; i >= 0; i-- {
			cleanupFns[i]()
		}
		return nil, PreConditionFailed{}
	}
	return &GetObjectReader{
		ObjInfo:    oi,
		Reader:     r,
		cleanUpFns: cleanupFns,
	}, nil
}

// ObjReaderFn is a function type that takes a reader and returns
// GetObjectReader and an error. Request headers are passed to provide
// encryption parameters. cleanupFns allow cleanup funcs to be
// registered for calling after usage of the reader.
type ObjReaderFn func(inputReader io.Reader, h http.Header, cleanupFns ...func()) (r *GetObjectReader, err error)

// NewGetObjectReader creates a new GetObjectReader. The cleanUpFns
// are called on Close() in FIFO order as passed in ObjReadFn(). NOTE: It is
// assumed that clean up functions do not panic (otherwise, they may
// not all run!).
func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
	fn ObjReaderFn, off, length int64, err error,
) {
	if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
		return nil, 0, 0, PreConditionFailed{}
	}

	if rs == nil && opts.PartNumber > 0 {
		rs = partNumberToRangeSpec(oi, opts.PartNumber)
	}

	_, isEncrypted := crypto.IsEncrypted(oi.UserDefined)
	isCompressed, err := oi.IsCompressedOK()
	if err != nil {
		return nil, 0, 0, err
	}

	// if object is encrypted and it is a restore request or if NoDecryption
	// was requested, fetch content without decrypting.
	if opts.Transition.RestoreRequest != nil || opts.NoDecryption {
		isEncrypted = false
		isCompressed = false
	}

	// Calculate range to read (different for encrypted/compressed objects)
	switch {
	case isCompressed:
		var firstPart int
		if opts.PartNumber > 0 {
			// firstPart is an index to Parts slice,
			// make sure that PartNumber uses the
			// index value properly.
			firstPart = opts.PartNumber - 1
		}

		// If compressed, we start from the beginning of the part.
		// Read the decompressed size from the meta.json.
		actualSize, err := oi.GetActualSize()
		if err != nil {
			return nil, 0, 0, err
		}
		var decryptSkip int64
		var seqNum uint32

		off, length = int64(0), oi.Size
		decOff, decLength := int64(0), actualSize
		if rs != nil {
			off, length, err = rs.GetOffsetLength(actualSize)
			if err != nil {
				return nil, 0, 0, err
			}
			decrypt := func(b []byte) ([]byte, error) {
				return b, nil
			}
			if isEncrypted {
				decrypt = oi.compressionIndexDecrypt
			}
			// In case of range based queries on multiparts, the offset and length are reduced.
			off, decOff, firstPart, decryptSkip, seqNum = getCompressedOffsets(oi, off, decrypt)
			decLength = length
			length = oi.Size - off
			// For negative length we read everything.
			if decLength < 0 {
				decLength = actualSize - decOff
			}

			// Reply back invalid range if the input offset and length fall out of range.
			if decOff > actualSize || decOff+decLength > actualSize {
				return nil, 0, 0, errInvalidRange
			}
		}
		fn = func(inputReader io.Reader, h http.Header, cFns ...func()) (r *GetObjectReader, err error) {
			if isEncrypted {
				copySource := h.Get(xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm) != ""
				// Attach decrypter on inputReader
				inputReader, err = DecryptBlocksRequestR(inputReader, h, seqNum, firstPart, oi, copySource)
				if err != nil {
					// Call the cleanup funcs
					for i := len(cFns) - 1; i >= 0; i-- {
						cFns[i]()
					}
					return nil, err
				}
				if decryptSkip > 0 {
					inputReader = ioutil.NewSkipReader(inputReader, decryptSkip)
				}
				oi.Size = decLength
			}
			// Decompression reader.
			var dopts []s2.ReaderOption
			if off > 0 || decOff > 0 {
				// We are not starting at the beginning, so ignore stream identifiers.
				dopts = append(dopts, s2.ReaderIgnoreStreamIdentifier())
			}
			s2Reader := s2.NewReader(inputReader, dopts...)
			// Apply the skipLen and limit on the decompressed stream.
			if decOff > 0 {
				if err = s2Reader.Skip(decOff); err != nil {
					// Call the cleanup funcs
					for i := len(cFns) - 1; i >= 0; i-- {
						cFns[i]()
					}
					return nil, err
				}
			}

			decReader := io.LimitReader(s2Reader, decLength)
			if decLength > compReadAheadSize {
				rah, err := readahead.NewReaderSize(decReader, compReadAheadBuffers, compReadAheadBufSize)
				if err == nil {
					decReader = rah
					cFns = append([]func(){func() {
						rah.Close()
					}}, cFns...)
				}
			}
			oi.Size = decLength

			// Assemble the GetObjectReader
			r = &GetObjectReader{
				ObjInfo:    oi,
				Reader:     decReader,
				cleanUpFns: cFns,
			}
			return r, nil
		}

	case isEncrypted:
		var seqNumber uint32
		var partStart int
		var skipLen int64

		off, length, skipLen, seqNumber, partStart, err = oi.GetDecryptedRange(rs)
		if err != nil {
			return nil, 0, 0, err
		}
		var decSize int64
		decSize, err = oi.DecryptedSize()
		if err != nil {
			return nil, 0, 0, err
		}
		var decRangeLength int64
		decRangeLength, err = rs.GetLength(decSize)
		if err != nil {
			return nil, 0, 0, err
		}

		// We define a closure that performs decryption given
		// a reader that returns the desired range of
		// encrypted bytes. The header parameter is used to
		// provide encryption parameters.
		fn = func(inputReader io.Reader, h http.Header, cFns ...func()) (r *GetObjectReader, err error) {
			copySource := h.Get(xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm) != ""

			// Attach decrypter on inputReader
			var decReader io.Reader
			decReader, err = DecryptBlocksRequestR(inputReader, h, seqNumber, partStart, oi, copySource)
			if err != nil {
				// Call the cleanup funcs
				for i := len(cFns) - 1; i >= 0; i-- {
					cFns[i]()
				}
				return nil, err
			}

			oi.ETag = getDecryptedETag(h, oi, false)

			// Apply the skipLen and limit on the
			// decrypted stream
			decReader = io.LimitReader(ioutil.NewSkipReader(decReader, skipLen), decRangeLength)

			// Assemble the GetObjectReader
			r = &GetObjectReader{
				ObjInfo:    oi,
				Reader:     decReader,
				cleanUpFns: cFns,
			}
			return r, nil
		}

	default:
		off, length, err = rs.GetOffsetLength(oi.Size)
		if err != nil {
			return nil, 0, 0, err
		}
		fn = func(inputReader io.Reader, _ http.Header, cFns ...func()) (r *GetObjectReader, err error) {
			r = &GetObjectReader{
				ObjInfo:    oi,
				Reader:     inputReader,
				cleanUpFns: cFns,
			}
			return r, nil
		}
	}
	return fn, off, length, nil
}
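
// A rough caller-side sketch of the contract above (illustrative, not a real
// call site): the returned off/length describe the stored byte range to fetch,
// and fn wraps that raw stream with decryption/decompression as needed.
//
//	fn, off, length, err := NewGetObjectReader(rs, oi, opts)
//	if err != nil { ... }
//	rawReader := readStoredRange(off, length) // hypothetical storage read
//	gr, err := fn(rawReader, req.Header)
//	if err != nil { ... }
//	defer gr.Close()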

// Close - calls the cleanup actions in reverse order
func (g *GetObjectReader) Close() error {
	if g == nil {
		return nil
	}
	// sync.Once is used here to ensure that Close() is
	// idempotent.
	g.once.Do(func() {
		for i := len(g.cleanUpFns) - 1; i >= 0; i-- {
			g.cleanUpFns[i]()
		}
	})
	return nil
}
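
// Typical usage (illustrative): wherever a GetObjectReader is obtained, the
// caller is expected to drain it and then release resources via Close, e.g.
//
//	gr, err := NewGetObjectReaderFromReader(body, oi, opts)
//	if err != nil { ... }
//	defer gr.Close()
//	_, err = io.Copy(dst, gr) // dst is any io.Writer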

// compressionIndexEncrypter returns a function that will read data from input,
// encrypt it using the provided key and return the result.
func compressionIndexEncrypter(key crypto.ObjectKey, input func() []byte) func() []byte {
	var data []byte
	var fetched bool
	return func() []byte {
		if !fetched {
			data = input()
			fetched = true
		}
		return metadataEncrypter(key)("compression-index", data)
	}
}

// compressionIndexDecrypt reverses compressionIndexEncrypter.
func (o *ObjectInfo) compressionIndexDecrypt(input []byte) ([]byte, error) {
	return o.metadataDecrypter()("compression-index", input)
}

// SealMD5CurrFn seals md5sum with object encryption key and returns sealed
// md5sum
type SealMD5CurrFn func([]byte) []byte

// PutObjReader is a type that wraps sio.EncryptReader and
// underlying hash.Reader in a struct
type PutObjReader struct {
	*hash.Reader              // actual data stream
	rawReader    *hash.Reader // original data stream
	sealMD5Fn    SealMD5CurrFn
}

// Size returns the absolute number of bytes the Reader
// will return during reading. It returns -1 for unlimited
// data.
func (p *PutObjReader) Size() int64 {
	return p.Reader.Size()
}

// MD5CurrentHexString returns the current MD5Sum or encrypted MD5Sum
// as a hex encoded string
func (p *PutObjReader) MD5CurrentHexString() string {
	md5sumCurr := p.rawReader.MD5Current()
	var appendHyphen bool
	// md5sumCurr is not empty in two scenarios
	// - server is running in strict compatibility mode
	// - client set Content-Md5 during PUT operation
	if len(md5sumCurr) == 0 {
		// md5sumCurr is only empty when we are running
		// in non-compatibility mode.
		md5sumCurr = make([]byte, 16)
		rand.Read(md5sumCurr)
		appendHyphen = true
	}
	if p.sealMD5Fn != nil {
		md5sumCurr = p.sealMD5Fn(md5sumCurr)
	}
	if appendHyphen {
		// Make sure to return an etag string of up to 32 characters; for SSE
		// requests the ETag might be longer and the code decrypting the
		// ETag ignores ETags in multipart form, i.e. <hex>-N
		return hex.EncodeToString(md5sumCurr)[:32] + "-1"
	}
	return hex.EncodeToString(md5sumCurr)
}

// WithEncryption sets up encrypted reader and the sealing for content md5sum
// using objEncKey. Unsealed md5sum is computed from the rawReader setup when
// NewPutObjReader was called. It returns an error if called on an uninitialized
// PutObjReader.
func (p *PutObjReader) WithEncryption(encReader *hash.Reader, objEncKey *crypto.ObjectKey) (*PutObjReader, error) {
	if p.Reader == nil {
		return nil, errors.New("put-object reader uninitialized")
	}
	p.Reader = encReader
	p.sealMD5Fn = sealETagFn(*objEncKey)
	return p, nil
}

// NewPutObjReader returns a new PutObjReader. It uses given hash.Reader's
// MD5Current method to construct md5sum when requested downstream.
func NewPutObjReader(rawReader *hash.Reader) *PutObjReader {
	return &PutObjReader{Reader: rawReader, rawReader: rawReader}
}

func sealETag(encKey crypto.ObjectKey, md5CurrSum []byte) []byte {
	var emptyKey [32]byte
	if bytes.Equal(encKey[:], emptyKey[:]) {
		return md5CurrSum
	}
	return encKey.SealETag(md5CurrSum)
}

func sealETagFn(key crypto.ObjectKey) SealMD5CurrFn {
	fn := func(md5sumcurr []byte) []byte {
		return sealETag(key, md5sumcurr)
	}
	return fn
}

// compressOpts are the options for writing compressed data.
var compressOpts []s2.WriterOption

func init() {
	if runtime.GOARCH == "amd64" {
		// On amd64 we have assembly and can use stronger compression.
		compressOpts = append(compressOpts, s2.WriterBetterCompression())
	}
}

// newS2CompressReader will read data from r, compress it and return the compressed data as a Reader.
// Use Close to ensure resources are released on incomplete streams.
//
// Passing the expected input size 'on' is recommended so that this function
// can detect a truncated stream; we do not wish to create an object if the
// client closed the stream prematurely.
func newS2CompressReader(r io.Reader, on int64, encrypted bool) (rc io.ReadCloser, idx func() []byte) {
	pr, pw := io.Pipe()
	// Copy input to compressor
	opts := compressOpts
	if encrypted {
		// The values used for padding are not a security concern,
		// but we choose pseudo-random numbers instead of just zeros.
		rng := rand.New(rand.NewSource(time.Now().UnixNano()))
		opts = append([]s2.WriterOption{s2.WriterPadding(compPadEncrypted), s2.WriterPaddingSrc(rng)}, compressOpts...)
	}
	comp := s2.NewWriter(pw, opts...)
	indexCh := make(chan []byte, 1)
	go func() {
		defer xioutil.SafeClose(indexCh)
		cn, err := io.Copy(comp, r)
		if err != nil {
			comp.Close()
			pw.CloseWithError(err)
			return
		}
		if on > 0 && on != cn {
			// The client did not send all the data it
			// announced, report an incomplete body.
			comp.Close()
			pw.CloseWithError(IncompleteBody{})
			return
		}
		// Close the stream.
		// If more than compMinIndexSize was written, generate index.
		if cn > compMinIndexSize {
			idx, err := comp.CloseIndex()
			idx = s2.RemoveIndexHeaders(idx)
			indexCh <- idx
			pw.CloseWithError(err)
			return
		}
		pw.CloseWithError(comp.Close())
	}()
	var gotIdx []byte
	return pr, func() []byte {
		if gotIdx != nil {
			return gotIdx
		}
		// Will get index or nil if closed.
		gotIdx = <-indexCh
		return gotIdx
	}
}
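
// Sketch of how the pair of return values is meant to be consumed
// (illustrative; compressSelfTest below exercises the same path):
//
//	rc, idxFn := newS2CompressReader(src, srcSize, false)
//	defer rc.Close()
//	// ... stream rc to storage ...
//	index := idxFn() // blocks until compression finishes; nil for small streams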

// compressSelfTest performs a self-test to ensure that the compression
// algorithms complete a roundtrip. If any algorithm
// produces an incorrect checksum it fails with a hard error.
//
// compressSelfTest tries to catch any issue in the compression implementation
// early instead of silently corrupting data.
func compressSelfTest() {
	// 4 MB block.
	// Approx runtime ~30ms
	data := make([]byte, 4<<20)
	rng := rand.New(rand.NewSource(0))
	for i := range data {
		// Generate compressible stream...
		data[i] = byte(rng.Int63() & 3)
	}
	failOnErr := func(err error) {
		if err != nil {
			logger.Fatal(errSelfTestFailure, "compress: error on self-test: %v", err)
		}
	}
	const skip = 2<<20 + 511
	r, _ := newS2CompressReader(bytes.NewBuffer(data), int64(len(data)), true)
	b, err := io.ReadAll(r)
	failOnErr(err)
	failOnErr(r.Close())
	// Decompression reader.
	s2Reader := s2.NewReader(bytes.NewBuffer(b))
	// Apply the skipLen on the decompressed stream.
	failOnErr(s2Reader.Skip(skip))
	got, err := io.ReadAll(s2Reader)
	failOnErr(err)
	if !bytes.Equal(got, data[skip:]) {
		logger.Fatal(errSelfTestFailure, "compress: self-test roundtrip mismatch.")
	}
}

// getDiskInfos returns the disk information for the provided disks.
// If a disk is nil or an error is returned the result will be nil as well.
func getDiskInfos(ctx context.Context, disks ...StorageAPI) []*DiskInfo {
	res := make([]*DiskInfo, len(disks))
	opts := DiskInfoOptions{}
	for i, disk := range disks {
		if disk == nil {
			continue
		}
		if di, err := disk.DiskInfo(ctx, opts); err == nil {
			res[i] = &di
		}
	}
	return res
}

// hasSpaceFor returns whether the disks in `di` have space for an object of a given size.
func hasSpaceFor(di []*DiskInfo, size int64) (bool, error) {
	// We multiply the size by 2 to account for erasure coding.
	size *= 2
	if size < 0 {
		// If no size, assume diskAssumeUnknownSize.
		size = diskAssumeUnknownSize
	}

	var available uint64
	var total uint64
	var nDisks int
	for _, disk := range di {
		if disk == nil || disk.Total == 0 {
			// Disk offline, no inodes or something else is wrong.
			continue
		}
		nDisks++
		total += disk.Total
		available += disk.Total - disk.Used
	}

	if nDisks < len(di)/2 || nDisks <= 0 {
		return false, fmt.Errorf("not enough online disks to calculate the available space, expected (%d)/(%d)", (len(di)/2)+1, nDisks)
	}

	// Check we have enough on each disk, ignoring diskFillFraction.
	perDisk := size / int64(nDisks)
	for _, disk := range di {
		if disk == nil || disk.Total == 0 {
			continue
		}
		if !globalIsErasureSD && disk.FreeInodes < diskMinInodes && disk.UsedInodes > 0 {
			// We have an inode count, but not enough inodes.
			return false, nil
		}
		if int64(disk.Free) <= perDisk {
			return false, nil
		}
	}

	// Make sure we can fit "size" on to the disk without getting above the diskFillFraction
	if available < uint64(size) {
		return false, nil
	}

	// How much will be left after adding the file.
	available -= uint64(size)

	// wantLeft is how much space there at least must be left.
	wantLeft := uint64(float64(total) * (1.0 - diskFillFraction))
	return available > wantLeft, nil
}
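
// Worked example for the checks above: with 4 online disks and a 100 MiB
// object, size is doubled to 200 MiB for erasure coding and spread as 50 MiB
// per disk, so every disk must report more than 50 MiB free; the write is then
// only allowed if the aggregate free space left afterwards stays above the
// (1 - diskFillFraction) reserve of the total capacity.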