Add erasure and compression self-tests (#11918)

Ensure that we don't use potentially broken algorithms for critical functions, whether the breakage stems from a runtime problem or from a platform-specific implementation problem.
This commit is contained in:
Klaus Post 2021-03-31 18:11:37 +02:00 committed by GitHub
parent 6b484f45c6
commit 0d8c74358d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 116 additions and 0 deletions

View File

@ -17,9 +17,15 @@
package cmd package cmd
import ( import (
"bytes"
"context" "context"
"encoding/hex"
"fmt"
"os"
"reflect"
"sync" "sync"
"github.com/cespare/xxhash/v2"
"github.com/klauspost/reedsolomon" "github.com/klauspost/reedsolomon"
"github.com/minio/minio/cmd/logger" "github.com/minio/minio/cmd/logger"
) )
@ -141,3 +147,69 @@ func (e *Erasure) ShardFileOffset(startOffset, length, totalLength int64) int64
} }
return tillOffset return tillOffset
} }
// erasureSelfTest performs a self-test to ensure that erasure
// algorithms compute expected erasure codes. If any algorithm
// produces an incorrect value it fails with a hard error.
//
// erasureSelfTest tries to catch any issue in the erasure implementation
// early instead of silently corrupting data.
func erasureSelfTest() {
	// Approx runtime ~1ms
	var testConfigs [][2]uint8
	for total := uint8(4); total < 16; total++ {
		for data := total / 2; data < total; data++ {
			parity := total - data
			testConfigs = append(testConfigs, [2]uint8{data, parity})
		}
	}
	got := make(map[[2]uint8]map[ErasureAlgo]uint64, len(testConfigs))
	// Copied from output of fmt.Printf("%#v", got) at the end.
	want := map[[2]uint8]map[ErasureAlgo]uint64{{0x2, 0x2}: {0x1: 0x23fb21be2496f5d3}, {0x2, 0x3}: {0x1: 0xa5cd5600ba0d8e7c}, {0x3, 0x1}: {0x1: 0x60ab052148b010b4}, {0x3, 0x2}: {0x1: 0xe64927daef76435a}, {0x3, 0x3}: {0x1: 0x672f6f242b227b21}, {0x3, 0x4}: {0x1: 0x571e41ba23a6dc6}, {0x4, 0x1}: {0x1: 0x524eaa814d5d86e2}, {0x4, 0x2}: {0x1: 0x62b9552945504fef}, {0x4, 0x3}: {0x1: 0xcbf9065ee053e518}, {0x4, 0x4}: {0x1: 0x9a07581dcd03da8}, {0x4, 0x5}: {0x1: 0xbf2d27b55370113f}, {0x5, 0x1}: {0x1: 0xf71031a01d70daf}, {0x5, 0x2}: {0x1: 0x8e5845859939d0f4}, {0x5, 0x3}: {0x1: 0x7ad9161acbb4c325}, {0x5, 0x4}: {0x1: 0xc446b88830b4f800}, {0x5, 0x5}: {0x1: 0xabf1573cc6f76165}, {0x5, 0x6}: {0x1: 0x7b5598a85045bfb8}, {0x6, 0x1}: {0x1: 0xe2fc1e677cc7d872}, {0x6, 0x2}: {0x1: 0x7ed133de5ca6a58e}, {0x6, 0x3}: {0x1: 0x39ef92d0a74cc3c0}, {0x6, 0x4}: {0x1: 0xcfc90052bc25d20}, {0x6, 0x5}: {0x1: 0x71c96f6baeef9c58}, {0x6, 0x6}: {0x1: 0x4b79056484883e4c}, {0x6, 0x7}: {0x1: 0xb1a0e2427ac2dc1a}, {0x7, 0x1}: {0x1: 0x937ba2b7af467a22}, {0x7, 0x2}: {0x1: 0x5fd13a734d27d37a}, {0x7, 0x3}: {0x1: 0x3be2722d9b66912f}, {0x7, 0x4}: {0x1: 0x14c628e59011be3d}, {0x7, 0x5}: {0x1: 0xcc3b39ad4c083b9f}, {0x7, 0x6}: {0x1: 0x45af361b7de7a4ff}, {0x7, 0x7}: {0x1: 0x456cc320cec8a6e6}, {0x7, 0x8}: {0x1: 0x1867a9f4db315b5c}, {0x8, 0x1}: {0x1: 0xbc5756b9a9ade030}, {0x8, 0x2}: {0x1: 0xdfd7d9d0b3e36503}, {0x8, 0x3}: {0x1: 0x72bb72c2cdbcf99d}, {0x8, 0x4}: {0x1: 0x3ba5e9b41bf07f0}, {0x8, 0x5}: {0x1: 0xd7dabc15800f9d41}, {0x8, 0x6}: {0x1: 0xb482a6169fd270f}, {0x8, 0x7}: {0x1: 0x50748e0099d657e8}, {0x9, 0x1}: {0x1: 0xc77ae0144fcaeb6e}, {0x9, 0x2}: {0x1: 0x8a86c7dbebf27b68}, {0x9, 0x3}: {0x1: 0xa64e3be6d6fe7e92}, {0x9, 0x4}: {0x1: 0x239b71c41745d207}, {0x9, 0x5}: {0x1: 0x2d0803094c5a86ce}, {0x9, 0x6}: {0x1: 0xa3c2539b3af84874}, {0xa, 0x1}: {0x1: 0x7d30d91b89fcec21}, {0xa, 0x2}: {0x1: 0xfa5af9aa9f1857a3}, {0xa, 0x3}: {0x1: 0x84bc4bda8af81f90}, {0xa, 0x4}: {0x1: 0x6c1cba8631de994a}, {0xa, 0x5}: {0x1: 0x4383e58a086cc1ac}, {0xb, 0x1}: {0x1: 0x4ed2929a2df690b}, {0xb, 0x2}: {0x1: 0xecd6f1b1399775c0}, {0xb, 0x3}: {0x1: 0xc78cfbfc0dc64d01}, {0xb, 0x4}: {0x1: 0xb2643390973702d6}, {0xc, 0x1}: {0x1: 0x3b2a88686122d082}, {0xc, 0x2}: {0x1: 0xfd2f30a48a8e2e9}, {0xc, 0x3}: {0x1: 0xd5ce58368ae90b13}, {0xd, 0x1}: {0x1: 0x9c88e2a9d1b8fff8}, {0xd, 0x2}: {0x1: 0xcb8460aa4cf6613}, {0xe, 0x1}: {0x1: 0x78a28bbaec57996e}}
	var testData [256]byte
	for i := range testData {
		testData[i] = byte(i)
	}
	ok := true
	for algo := invalidErasureAlgo + 1; algo < lastErasureAlgo; algo++ {
		for _, conf := range testConfigs {
			failOnErr := func(err error) {
				if err != nil {
					logger.Fatal(errSelfTestFailure, "%v: error on self-test [d:%d,p:%d]: %v. Unsafe to start server.\n", algo, conf[0], conf[1], err)
				}
			}
			e, err := NewErasure(context.Background(), int(conf[0]), int(conf[1]), blockSizeV2)
			failOnErr(err)
			encoded, err := e.EncodeData(GlobalContext, testData[:])
			failOnErr(err)
			hash := xxhash.New()
			for i, data := range encoded {
				// Write index to keep track of sizes of each.
				_, err = hash.Write([]byte{byte(i)})
				failOnErr(err)
				_, err = hash.Write(data)
				failOnErr(err)
			}
			sum := hash.Sum64()
			// Merge the checksum into 'got' instead of replacing the inner
			// map, so results for previously tested algorithms of the same
			// config are preserved once more than one algorithm exists.
			if got[conf] == nil {
				got[conf] = make(map[ErasureAlgo]uint64, 1)
			}
			got[conf][algo] = sum
			// Compare only the current algorithm's checksum; comparing the
			// whole per-config map would spuriously fail for all but the
			// last algorithm when several algorithms are registered.
			if a, b := want[conf][algo], sum; !reflect.DeepEqual(a, b) {
				fmt.Fprintf(os.Stderr, "%v: error on self-test [d:%d,p:%d]: want %#v, got %#v\n", algo, conf[0], conf[1], a, b)
				ok = false
				continue
			}
			// Delete first shard and reconstruct...
			first := encoded[0]
			encoded[0] = nil
			failOnErr(e.DecodeDataBlocks(encoded))
			if a, b := first, encoded[0]; !bytes.Equal(a, b) {
				fmt.Fprintf(os.Stderr, "%v: error on self-test [d:%d,p:%d]: want %#v, got %#v\n", algo, conf[0], conf[1], hex.EncodeToString(a), hex.EncodeToString(b))
				ok = false
				continue
			}
		}
	}
	if !ok {
		logger.Fatal(errSelfTestFailure, "Erasure Coding self test failed")
	}
}

View File

@ -18,6 +18,7 @@ package cmd
import ( import (
"crypto/x509" "crypto/x509"
"errors"
"net/http" "net/http"
"os" "os"
"sync" "sync"
@ -295,6 +296,8 @@ var (
// Add new variable global values here. // Add new variable global values here.
) )
// errSelfTestFailure is passed to logger.Fatal when a startup self-test
// (erasure coding or compression roundtrip) fails, aborting server start.
var errSelfTestFailure = errors.New("self test failed. unsafe to start server")
// Returns minio global information, as a key value map. // Returns minio global information, as a key value map.
// returned list of global values is not an exhaustive // returned list of global values is not an exhaustive
// list. Feel free to add new relevant fields. // list. Feel free to add new relevant fields.

View File

@ -954,3 +954,40 @@ func newS2CompressReader(r io.Reader, on int64) io.ReadCloser {
}() }()
return pr return pr
} }
// compressSelfTest performs a self-test to ensure that compression
// algorithms completes a roundtrip. If any algorithm
// produces an incorrect checksum it fails with a hard error.
//
// compressSelfTest tries to catch any issue in the compression implementation
// early instead of silently corrupting data.
func compressSelfTest() {
	// 4 MB block.
	// Approx runtime ~30ms
	payload := make([]byte, 4<<20)
	// Deterministic source so every run exercises the same stream.
	src := rand.New(rand.NewSource(0))
	for idx := range payload {
		// Generate compressible stream...
		payload[idx] = byte(src.Int63() & 3)
	}
	abortOnErr := func(err error) {
		if err == nil {
			return
		}
		logger.Fatal(errSelfTestFailure, "compress: error on self-test: %v", err)
	}
	const skip = 2<<20 + 511
	cr := newS2CompressReader(bytes.NewBuffer(payload), int64(len(payload)))
	compressed, err := io.ReadAll(cr)
	abortOnErr(err)
	abortOnErr(cr.Close())
	// Decompression reader.
	dec := s2.NewReader(bytes.NewBuffer(compressed))
	// Apply the skipLen on the decompressed stream.
	abortOnErr(dec.Skip(skip))
	roundtripped, err := io.ReadAll(dec)
	abortOnErr(err)
	if !bytes.Equal(roundtripped, payload[skip:]) {
		logger.Fatal(errSelfTestFailure, "compress: self-test roundtrip mismatch.")
	}
}

View File

@ -412,6 +412,10 @@ func serverMain(ctx *cli.Context) {
globalConsoleSys = NewConsoleLogger(GlobalContext) globalConsoleSys = NewConsoleLogger(GlobalContext)
logger.AddTarget(globalConsoleSys) logger.AddTarget(globalConsoleSys)
// Perform any self-tests
erasureSelfTest()
compressSelfTest()
// Handle all server command args. // Handle all server command args.
serverHandleCmdArgs(ctx) serverHandleCmdArgs(ctx)