Mirror of https://github.com/minio/minio.git (synced 2025-11-09 13:39:46 -05:00)
upgrade golang-lint to the latest (#15600)
@@ -390,8 +390,9 @@ func (lc Lifecycle) ComputeAction(obj ObjectOpts) Action {
// ExpectedExpiryTime calculates the expiry, transition or restore date/time based on an object modtime.
// The expected transition or restore time is always a midnight time following the object
// modification time plus the number of transition/restore days.
// e.g. If the object modtime is `Thu May 21 13:42:50 GMT 2020` and the object should
// transition in 1 day, then the expected transition time is `Fri, 23 May 2020 00:00:00 GMT`
//
// e.g. If the object modtime is `Thu May 21 13:42:50 GMT 2020` and the object should
// transition in 1 day, then the expected transition time is `Fri, 23 May 2020 00:00:00 GMT`
func ExpectedExpiryTime(modTime time.Time, days int) time.Time {
	if days == 0 {
		return modTime
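The midnight-rounding rule described in that comment can be sketched as follows; this is a standalone illustration with assumed names, not necessarily MinIO's exact implementation:

package main

import (
	"fmt"
	"time"
)

// expectedExpiryTime returns the midnight (UTC) following modTime plus the
// given number of days, per the rule in the comment above.
func expectedExpiryTime(modTime time.Time, days int) time.Time {
	if days == 0 {
		return modTime
	}
	t := modTime.UTC().Add(time.Duration(days) * 24 * time.Hour)
	// Round up to the next midnight so the result is always a midnight time.
	return t.Truncate(24 * time.Hour).Add(24 * time.Hour)
}

func main() {
	modTime := time.Date(2020, time.May, 21, 13, 42, 50, 0, time.UTC)
	fmt.Println(expectedExpiryTime(modTime, 1)) // 2020-05-23 00:00:00 +0000 UTC
}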
@@ -60,7 +60,9 @@ type Config struct {

// BitrotScanCycle returns the configured cycle for the scanner healing
// -1 for not enabled
// 0 for continuous bitrot scanning
//
// 0 for continuous bitrot scanning
//
// >0 interval duration between cycles
func (opts Config) BitrotScanCycle() (d time.Duration) {
	configMutex.RLock()
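For illustration, a minimal sketch of how a setting with the semantics above (-1 disabled, 0 continuous, >0 interval between cycles) might be interpreted; the names and accepted values are assumptions, not MinIO's actual config keys:

package main

import (
	"fmt"
	"time"
)

const (
	bitrotDisabled   time.Duration = -1 // sentinel: scanning turned off
	bitrotContinuous time.Duration = 0  // scan continuously, no pause between cycles
)

// parseBitrotCycle maps a (hypothetical) config value onto the semantics above.
func parseBitrotCycle(s string) (time.Duration, error) {
	switch s {
	case "disable":
		return bitrotDisabled, nil
	case "continuous":
		return bitrotContinuous, nil
	default:
		return time.ParseDuration(s) // e.g. "168h" for a weekly cycle
	}
}

func main() {
	for _, v := range []string{"disable", "continuous", "168h"} {
		d, err := parseBitrotCycle(v)
		fmt.Println(v, d, err)
	}
}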
@@ -216,10 +216,13 @@ func validateParity(ssParity, rrsParity, setDriveCount int) (err error) {
//
// -- if input storage class is empty then standard is assumed
// -- if input is RRS but RRS is not configured default '2' parity
// for RRS is assumed
//
// for RRS is assumed
//
// -- if input is STANDARD but STANDARD is not configured '0' parity
// is returned, the caller is expected to choose the right parity
// at that point.
//
// is returned, the caller is expected to choose the right parity
// at that point.
func (sCfg Config) GetParityForSC(sc string) (parity int) {
	ConfigLock.RLock()
	defer ConfigLock.RUnlock()
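A simplified, self-contained sketch of the storage-class-to-parity fallback rules listed above; the type names and the default of 2 for RRS are taken from the comment, everything else is illustrative:

package main

import "fmt"

const (
	classRRS         = "REDUCED_REDUNDANCY"
	defaultRRSParity = 2 // assumed default when RRS parity is not configured
)

type storageClassConfig struct {
	standardParity int // 0 means "not configured"
	rrsParity      int // 0 means "not configured"
}

// parityFor mirrors the fallback rules listed above.
func (c storageClassConfig) parityFor(sc string) int {
	switch sc {
	case classRRS:
		if c.rrsParity == 0 {
			return defaultRRSParity
		}
		return c.rrsParity
	default: // empty input falls back to STANDARD
		return c.standardParity // may be 0; the caller then picks a suitable parity
	}
}

func main() {
	cfg := storageClassConfig{standardParity: 4}
	fmt.Println(cfg.parityFor(""), cfg.parityFor(classRRS)) // 4 2
}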
@@ -25,32 +25,30 @@
// with a unique key-encryption-key. Given the correct key-encryption-key the
// sealed 'ObjectKey' can be unsealed and the object can be decrypted.
//
//
// ## SSE-C
//
// SSE-C computes the key-encryption-key from the client-provided key, an
// initialization vector (IV) and the bucket/object path.
//
// 1. Encrypt:
// Input: ClientKey, bucket, object, metadata, object_data
// - IV := Random({0,1}²⁵⁶)
// - ObjectKey := SHA256(ClientKey || Random({0,1}²⁵⁶))
// - KeyEncKey := HMAC-SHA256(ClientKey, IV || 'SSE-C' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
// - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
// - metadata <- IV
// - metadata <- SealedKey
// Output: enc_object_data, metadata
//
// 2. Decrypt:
// Input: ClientKey, bucket, object, metadata, enc_object_data
// - IV <- metadata
// - SealedKey <- metadata
// - KeyEncKey := HMAC-SHA256(ClientKey, IV || 'SSE-C' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
// - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
// 1. Encrypt:
// Input: ClientKey, bucket, object, metadata, object_data
// - IV := Random({0,1}²⁵⁶)
// - ObjectKey := SHA256(ClientKey || Random({0,1}²⁵⁶))
// - KeyEncKey := HMAC-SHA256(ClientKey, IV || 'SSE-C' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
// - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
// - metadata <- IV
// - metadata <- SealedKey
// Output: enc_object_data, metadata
//
// 2. Decrypt:
// Input: ClientKey, bucket, object, metadata, enc_object_data
// - IV <- metadata
// - SealedKey <- metadata
// - KeyEncKey := HMAC-SHA256(ClientKey, IV || 'SSE-C' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
// - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
//
// ## SSE-S3
//
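The key-encryption-key derivation in the SSE-C scheme above can be sketched directly with Go's standard library; the DAREv2 sealing and stream-encryption steps are elided here (MinIO uses its DARE/sio implementation for those), and the bucket/object names are placeholders:

package main

import (
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// deriveKeyEncKey computes HMAC-SHA256(ClientKey, IV || 'SSE-C' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object).
func deriveKeyEncKey(clientKey, iv []byte, bucket, object string) []byte {
	mac := hmac.New(sha256.New, clientKey)
	mac.Write(iv)
	mac.Write([]byte("SSE-C"))
	mac.Write([]byte("DAREv2-HMAC-SHA256"))
	mac.Write([]byte(bucket + "/" + object))
	return mac.Sum(nil)
}

func main() {
	clientKey := make([]byte, 32)
	iv := make([]byte, 32)
	random := make([]byte, 32)
	rand.Read(clientKey)
	rand.Read(iv)     // IV := Random({0,1}²⁵⁶)
	rand.Read(random) // fresh randomness for the object key

	// ObjectKey := SHA256(ClientKey || Random({0,1}²⁵⁶))
	objectKey := sha256.Sum256(append(append([]byte{}, clientKey...), random...))

	keyEncKey := deriveKeyEncKey(clientKey, iv, "mybucket", "myobject")
	fmt.Println("ObjectKey:", hex.EncodeToString(objectKey[:]))
	fmt.Println("KeyEncKey:", hex.EncodeToString(keyEncKey))
	// SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey) would wrap ObjectKey with KeyEncKey;
	// enc_object_data := DAREv2_Enc(ObjectKey, object_data) encrypts the payload.
}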
@@ -63,57 +61,57 @@
// SSE-S3 with a single master key works as SSE-C where the master key is
// used as the client-provided key.
//
// 1. Encrypt:
// Input: MasterKey, bucket, object, metadata, object_data
// - IV := Random({0,1}²⁵⁶)
// - ObjectKey := SHA256(MasterKey || Random({0,1}²⁵⁶))
// - KeyEncKey := HMAC-SHA256(MasterKey, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
// - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
// - metadata <- IV
// - metadata <- SealedKey
// Output: enc_object_data, metadata
//
// 2. Decrypt:
// Input: MasterKey, bucket, object, metadata, enc_object_data
// - IV <- metadata
// - SealedKey <- metadata
// - KeyEncKey := HMAC-SHA256(MasterKey, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
// - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
// 1. Encrypt:
// Input: MasterKey, bucket, object, metadata, object_data
// - IV := Random({0,1}²⁵⁶)
// - ObjectKey := SHA256(MasterKey || Random({0,1}²⁵⁶))
// - KeyEncKey := HMAC-SHA256(MasterKey, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
// - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
// - metadata <- IV
// - metadata <- SealedKey
// Output: enc_object_data, metadata
//
// 2. Decrypt:
// Input: MasterKey, bucket, object, metadata, enc_object_data
// - IV <- metadata
// - SealedKey <- metadata
// - KeyEncKey := HMAC-SHA256(MasterKey, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
// - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
//
// ### SSE-S3 and KMS
//
// SSE-S3 requires that the KMS provides two functions:
// 1. Generate(KeyID) -> (Key, EncKey)
// 2. Unseal(KeyID, EncKey) -> Key
//
// 1. Encrypt:
// Input: KeyID, bucket, object, metadata, object_data
// - Key, EncKey := Generate(KeyID)
// - IV := Random({0,1}²⁵⁶)
// - ObjectKey := SHA256(Key, Random({0,1}²⁵⁶))
// - KeyEncKey := HMAC-SHA256(Key, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
// - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
// - metadata <- IV
// - metadata <- KeyID
// - metadata <- EncKey
// - metadata <- SealedKey
// Output: enc_object_data, metadata
// 1. Generate(KeyID) -> (Key, EncKey)
//
// 2. Decrypt:
// Input: bucket, object, metadata, enc_object_data
// - KeyID <- metadata
// - EncKey <- metadata
// - IV <- metadata
// - SealedKey <- metadata
// - Key := Unseal(KeyID, EncKey)
// - KeyEncKey := HMAC-SHA256(Key, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
// - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
// 2. Unseal(KeyID, EncKey) -> Key
//
// 1. Encrypt:
// Input: KeyID, bucket, object, metadata, object_data
// - Key, EncKey := Generate(KeyID)
// - IV := Random({0,1}²⁵⁶)
// - ObjectKey := SHA256(Key, Random({0,1}²⁵⁶))
// - KeyEncKey := HMAC-SHA256(Key, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
// - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
// - metadata <- IV
// - metadata <- KeyID
// - metadata <- EncKey
// - metadata <- SealedKey
// Output: enc_object_data, metadata
//
// 2. Decrypt:
// Input: bucket, object, metadata, enc_object_data
// - KeyID <- metadata
// - EncKey <- metadata
// - IV <- metadata
// - SealedKey <- metadata
// - Key := Unseal(KeyID, EncKey)
// - KeyEncKey := HMAC-SHA256(Key, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
// - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
package crypto
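To make the Generate/Unseal contract above concrete, here is a toy in-memory stand-in; a real deployment calls an external KMS, and the XOR "sealing" below is purely illustrative (a real KMS uses authenticated encryption). The point is that only KeyID and EncKey travel in the object metadata, never Key:

package main

import (
	"bytes"
	"crypto/rand"
	"errors"
	"fmt"
)

type toyKMS struct{ masterKeys map[string][]byte }

// Generate returns a fresh data key plus that key "sealed" for storage.
func (k toyKMS) Generate(keyID string) (key, encKey []byte, err error) {
	master, ok := k.masterKeys[keyID]
	if !ok {
		return nil, nil, errors.New("unknown key ID")
	}
	key = make([]byte, 32)
	if _, err = rand.Read(key); err != nil {
		return nil, nil, err
	}
	encKey = make([]byte, 32)
	for i := range key {
		encKey[i] = key[i] ^ master[i]
	}
	return key, encKey, nil
}

// Unseal recovers the data key from its sealed form.
func (k toyKMS) Unseal(keyID string, encKey []byte) ([]byte, error) {
	master, ok := k.masterKeys[keyID]
	if !ok {
		return nil, errors.New("unknown key ID")
	}
	key := make([]byte, len(encKey))
	for i := range encKey {
		key[i] = encKey[i] ^ master[i]
	}
	return key, nil
}

func main() {
	kms := toyKMS{masterKeys: map[string][]byte{"my-key": make([]byte, 32)}}
	key, encKey, _ := kms.Generate("my-key")
	unsealed, _ := kms.Unseal("my-key", encKey) // EncKey is what gets stored in metadata
	fmt.Println(bytes.Equal(key, unsealed))     // true
}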
@@ -43,9 +43,9 @@ const (
)

// Type represents an AWS SSE type:
// • SSE-C
// • SSE-S3
// • SSE-KMS
// - SSE-C
// - SSE-S3
// - SSE-KMS
type Type interface {
	fmt.Stringer
@@ -206,7 +206,6 @@ func TestTwoSimultaneousLocksForDifferentResources(t *testing.T) {
}

// Test refreshing lock - refresh should always return true
//
func TestSuccessfulLockRefresh(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
@@ -24,35 +24,35 @@
// In general, an S3 ETag is an MD5 checksum of the object
// content. However, there are many exceptions to this rule.
//
//
// Single-part Upload
// # Single-part Upload
//
// In case of a basic single-part PUT operation - without server
// side encryption or object compression - the ETag of an object
// is its content MD5.
//
//
// Multi-part Upload
// # Multi-part Upload
//
// The ETag of an object does not correspond to its content MD5
// when the object is uploaded in multiple parts via the S3
// multipart API. Instead, S3 first computes an MD5 of each part:
// e1 := MD5(part-1)
// e2 := MD5(part-2)
// ...
// eN := MD5(part-N)
//
// e1 := MD5(part-1)
// e2 := MD5(part-2)
// ...
// eN := MD5(part-N)
//
// Then, the ETag of the object is computed as MD5 of all individual
// part checksums. S3 also encodes the number of parts into the ETag
// by appending a -<number-of-parts> at the end:
// ETag := MD5(e1 || e2 || e3 ... || eN) || -N
//
// For example: ceb8853ddc5086cc4ab9e149f8f09c88-5
// ETag := MD5(e1 || e2 || e3 ... || eN) || -N
//
// For example: ceb8853ddc5086cc4ab9e149f8f09c88-5
//
// However, this scheme is only used for multipart objects that are
// not encrypted.
//
// Server-side Encryption
// # Server-side Encryption
//
// S3 specifies three types of server-side-encryption - SSE-C, SSE-S3
// and SSE-KMS - with different semantics w.r.t. ETags.
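The multipart ETag construction described above (MD5 over the concatenated per-part MD5s, suffixed with -<number-of-parts>) in a short, runnable sketch:

package main

import (
	"crypto/md5"
	"fmt"
)

// multipartETag returns MD5(e1 || e2 || ... || eN) followed by "-N".
func multipartETag(parts [][]byte) string {
	h := md5.New()
	for _, part := range parts {
		sum := md5.Sum(part) // e_i := MD5(part-i)
		h.Write(sum[:])
	}
	return fmt.Sprintf("%x-%d", h.Sum(nil), len(parts))
}

func main() {
	parts := [][]byte{[]byte("part one"), []byte("part two")}
	fmt.Println(multipartETag(parts)) // note: not the MD5 of the concatenated content
}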
@@ -75,12 +75,12 @@
// in case of SSE-C or SSE-KMS except that the ETag is well-formed.
//
// To put all of this into a simple rule:
// SSE-S3 : ETag == MD5
// SSE-C  : ETag != MD5
// SSE-KMS: ETag != MD5
//
// SSE-S3 : ETag == MD5
// SSE-C  : ETag != MD5
// SSE-KMS: ETag != MD5
//
// Encrypted ETags
// # Encrypted ETags
//
// An S3 implementation has to remember the content MD5 of objects
// in case of SSE-S3. However, storing the ETag of an encrypted
@@ -94,8 +94,7 @@
// encryption schemes. Such an ETag must be decrypted before being sent to an
// S3 client.
//
//
// S3 Clients
// # S3 Clients
//
// There are many different S3 client implementations. Most of them
// access the ETag by looking for the HTTP response header key "Etag".
@@ -206,27 +205,28 @@ func (e ETag) Parts() int {
// ETag.
//
// In general, a caller has to distinguish the following cases:
// - The object is a multipart object. In this case,
// Format returns the ETag unmodified.
// - The object is a SSE-KMS or SSE-C encrypted single-
// part object. In this case, Format returns the last
// 16 bytes of the encrypted ETag which will be a random
// value.
// - The object is a SSE-S3 encrypted single-part object.
// In this case, the caller has to decrypt the ETag first
// before calling Format.
// S3 clients expect that the ETag of an SSE-S3 encrypted
// single-part object is equal to the object's content MD5.
// Formatting the SSE-S3 ETag before decryption will result
// in a random-looking ETag which an S3 client will not accept.
// - The object is a multipart object. In this case,
// Format returns the ETag unmodified.
// - The object is a SSE-KMS or SSE-C encrypted single-
// part object. In this case, Format returns the last
// 16 bytes of the encrypted ETag which will be a random
// value.
// - The object is a SSE-S3 encrypted single-part object.
// In this case, the caller has to decrypt the ETag first
// before calling Format.
// S3 clients expect that the ETag of an SSE-S3 encrypted
// single-part object is equal to the object's content MD5.
// Formatting the SSE-S3 ETag before decryption will result
// in a random-looking ETag which an S3 client will not accept.
//
// Hence, a caller has to check:
// if method == SSE-S3 {
// ETag, err := Decrypt(key, ETag)
// if err != nil {
// }
// }
// ETag = ETag.Format()
//
// if method == SSE-S3 {
// ETag, err := Decrypt(key, ETag)
// if err != nil {
// }
// }
// ETag = ETag.Format()
func (e ETag) Format() ETag {
	if !e.IsEncrypted() {
		return e
@@ -359,8 +359,8 @@ func Parse(s string) (ETag, error) {

// parse parses s as an S3 ETag, returning the result.
// It operates in one of two modes:
// - strict
// - non-strict
// - strict
// - non-strict
//
// In strict mode, parse only accepts ETags that
// are AWS S3 compatible. In particular, an AWS
@@ -56,15 +56,14 @@ func (r wrapReader) ETag() ETag {
// It is mainly used to provide a high-level io.Reader
// access to the ETag computed by a low-level io.Reader:
//
// content := etag.NewReader(r.Body, nil)
// content := etag.NewReader(r.Body, nil)
//
// compressedContent := Compress(content)
// encryptedContent := Encrypt(compressedContent)
//
// // Now, we need an io.Reader that can access
// // the ETag computed over the content.
// reader := etag.Wrap(encryptedContent, content)
// compressedContent := Compress(content)
// encryptedContent := Encrypt(compressedContent)
//
// // Now, we need an io.Reader that can access
// // the ETag computed over the content.
// reader := etag.Wrap(encryptedContent, content)
func Wrap(wrapped, content io.Reader) io.Reader {
	if t, ok := content.(Tagger); ok {
		return wrapReader{
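A runnable variant of the usage sketched in that comment, with base64 encoding standing in for the Compress/Encrypt steps; it assumes the NewReader/Wrap/Tagger API exactly as shown in this diff and, because the package is internal, only builds inside the MinIO source tree:

package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"io"
	"strings"

	"github.com/minio/minio/internal/etag"
)

func main() {
	// Low-level reader that computes the ETag of the raw content.
	content := etag.NewReader(strings.NewReader("hello world"), nil)

	// Stand-in for Compress/Encrypt: any transformation that consumes `content`.
	var transformed bytes.Buffer
	enc := base64.NewEncoder(base64.StdEncoding, &transformed)
	io.Copy(enc, content)
	enc.Close()

	// High-level reader serving the transformed bytes while reporting the
	// ETag computed over the original content.
	reader := etag.Wrap(&transformed, content)
	io.Copy(io.Discard, reader)

	fmt.Printf("%x\n", reader.(etag.Tagger).ETag())
}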
@@ -72,7 +72,8 @@ func (f *Forwarder) ServeHTTP(w http.ResponseWriter, inReq *http.Request) {
}

// customErrHandler was originally implemented to avoid having the following error
// `http: proxy error: context canceled` printed by Golang
//
// `http: proxy error: context canceled` printed by Golang
func (f *Forwarder) customErrHandler(w http.ResponseWriter, r *http.Request, err error) {
	if f.Logger != nil && err != context.Canceled {
		f.Logger(err)
@@ -39,7 +39,8 @@ import (

// Parse parses s as a single-key KMS. The given string
// is expected to have the following format:
// <key-id>:<base64-key>
//
// <key-id>:<base64-key>
//
// The returned KMS implementation uses the parsed
// key ID and key to derive new DEKs and decrypt ciphertext.
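A minimal sketch of parsing the <key-id>:<base64-key> format described above; MinIO's actual Parse performs additional validation, and the key value below is only an example:

package main

import (
	"encoding/base64"
	"errors"
	"fmt"
	"strings"
)

// parseSingleKey splits "<key-id>:<base64-key>" and decodes the key.
func parseSingleKey(s string) (keyID string, key []byte, err error) {
	keyID, b64, found := strings.Cut(s, ":")
	if !found || keyID == "" {
		return "", nil, errors.New("kms: invalid single-key format")
	}
	key, err = base64.StdEncoding.DecodeString(b64)
	if err != nil {
		return "", nil, err
	}
	if len(key) != 32 {
		return "", nil, errors.New("kms: key must be 256 bits")
	}
	return keyID, key, nil
}

func main() {
	id, key, err := parseSingleKey("my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw=")
	fmt.Println(id, len(key), err) // my-minio-key 32 <nil>
}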
@@ -27,7 +27,8 @@ import (

// Target is the entity that will receive
// a single log entry and Send it to the log target
// e.g. Send the log to an HTTP server
//
// e.g. Send the log to an HTTP server
type Target interface {
	String() string
	Endpoint() string
@@ -126,8 +127,9 @@ func initKafkaTargets(cfgMap map[string]kafka.Config) (tgts []Target, err error)
}

// Split targets into two groups:
// group1 contains all targets of type t
// group2 contains the remaining targets
//
// group1 contains all targets of type t
// group2 contains the remaining targets
func splitTargets(targets []Target, t types.TargetType) (group1 []Target, group2 []Target) {
	for _, target := range targets {
		if target.Type() == t {
@@ -128,9 +128,9 @@ var progressHeader = []byte{
//
// Payload specification:
// Progress message payload is an XML document containing information about the progress of a request.
// * BytesScanned => Number of bytes that have been processed before being uncompressed (if the file is compressed).
// * BytesProcessed => Number of bytes that have been processed after being uncompressed (if the file is compressed).
// * BytesReturned => Current number of bytes of records payload data returned by S3.
// - BytesScanned => Number of bytes that have been processed before being uncompressed (if the file is compressed).
// - BytesProcessed => Number of bytes that have been processed after being uncompressed (if the file is compressed).
// - BytesReturned => Current number of bytes of records payload data returned by S3.
//
// For uncompressed files, BytesScanned and BytesProcessed are equal.
//
@@ -138,11 +138,12 @@ var progressHeader = []byte{
//
// <?xml version="1.0" encoding="UTF-8"?>
// <Progress>
// <BytesScanned>512</BytesScanned>
// <BytesProcessed>1024</BytesProcessed>
// <BytesReturned>1024</BytesReturned>
// </Progress>
//
// <BytesScanned>512</BytesScanned>
// <BytesProcessed>1024</BytesProcessed>
// <BytesReturned>1024</BytesReturned>
//
// </Progress>
func newProgressMessage(bytesScanned, bytesProcessed, bytesReturned int64) []byte {
	payload := []byte(`<?xml version="1.0" encoding="UTF-8"?><Progress><BytesScanned>` +
		strconv.FormatInt(bytesScanned, 10) + `</BytesScanned><BytesProcessed>` +
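The same progress document can also be produced with encoding/xml instead of string concatenation; this is an illustrative alternative, not the code used above:

package main

import (
	"encoding/xml"
	"fmt"
)

type progress struct {
	XMLName        xml.Name `xml:"Progress"`
	BytesScanned   int64    `xml:"BytesScanned"`
	BytesProcessed int64    `xml:"BytesProcessed"`
	BytesReturned  int64    `xml:"BytesReturned"`
}

func main() {
	p := progress{BytesScanned: 512, BytesProcessed: 1024, BytesReturned: 1024}
	out, _ := xml.Marshal(p)
	fmt.Println(xml.Header + string(out))
}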
@@ -167,9 +168,9 @@ var statsHeader = []byte{
//
// Payload specification:
// Stats message payload is an XML document containing information about a request's stats when processing is complete.
// * BytesScanned => Number of bytes that have been processed before being uncompressed (if the file is compressed).
// * BytesProcessed => Number of bytes that have been processed after being uncompressed (if the file is compressed).
// * BytesReturned => Total number of bytes of records payload data returned by S3.
// - BytesScanned => Number of bytes that have been processed before being uncompressed (if the file is compressed).
// - BytesProcessed => Number of bytes that have been processed after being uncompressed (if the file is compressed).
// - BytesReturned => Total number of bytes of records payload data returned by S3.
//
// For uncompressed files, BytesScanned and BytesProcessed are equal.
//
@@ -177,9 +178,11 @@ var statsHeader = []byte{
//
// <?xml version="1.0" encoding="UTF-8"?>
// <Stats>
// <BytesScanned>512</BytesScanned>
// <BytesProcessed>1024</BytesProcessed>
// <BytesReturned>1024</BytesReturned>
//
// <BytesScanned>512</BytesScanned>
// <BytesProcessed>1024</BytesProcessed>
// <BytesReturned>1024</BytesReturned>
//
// </Stats>
func newStatsMessage(bytesScanned, bytesProcessed, bytesReturned int64) []byte {
	payload := []byte(`<?xml version="1.0" encoding="UTF-8"?><Stats><BytesScanned>` +
@@ -18,6 +18,7 @@
package smart

// Defined in <linux/nvme_ioctl.h>
//
//nolint:structcheck,deadcode
type nvmePassthruCommand struct {
	opcode uint8
@@ -138,6 +139,7 @@ type nvmeSMARTLog struct {
} // 512 bytes

// NVMeDevice represents drive data about NVMe drives
//
//nolint:structcheck
type NVMeDevice struct {
	Name string