Rename of structs and methods (#6230)

Rename of ErasureStorage to Erasure (and rename of related variables and methods)
This commit is contained in:
Krishna Srinivas 2018-08-23 23:35:37 -07:00 committed by kannappanr
parent 2211a5f1b8
commit 52f6d5aafc
20 changed files with 171 additions and 173 deletions

View File

@ -23,7 +23,7 @@ import (
"github.com/minio/highwayhash" "github.com/minio/highwayhash"
"github.com/minio/minio/cmd/logger" "github.com/minio/minio/cmd/logger"
"github.com/minio/sha256-simd" sha256 "github.com/minio/sha256-simd"
"golang.org/x/crypto/blake2b" "golang.org/x/crypto/blake2b"
) )
@ -70,7 +70,7 @@ func (a BitrotAlgorithm) New() hash.Hash {
} }
} }
// Available reports whether the given algorihm is available. // Available reports whether the given algorithm is available.
func (a BitrotAlgorithm) Available() bool { func (a BitrotAlgorithm) Available() bool {
_, ok := bitrotAlgorithms[a] _, ok := bitrotAlgorithms[a]
return ok return ok

View File

@ -127,8 +127,8 @@ func (p *parallelReader) Read() ([][]byte, error) {
return nil, errXLReadQuorum return nil, errXLReadQuorum
} }
// ReadFile reads from readers, reconstructs data if needed and writes the data to the writer. // Decode reads from readers, reconstructs data if needed and writes the data to the writer.
func (s ErasureStorage) ReadFile(ctx context.Context, writer io.Writer, readers []*bitrotReader, offset, length, totalLength int64) error { func (e Erasure) Decode(ctx context.Context, writer io.Writer, readers []*bitrotReader, offset, length, totalLength int64) error {
if offset < 0 || length < 0 { if offset < 0 || length < 0 {
logger.LogIf(ctx, errInvalidArgument) logger.LogIf(ctx, errInvalidArgument)
return errInvalidArgument return errInvalidArgument
@ -141,27 +141,27 @@ func (s ErasureStorage) ReadFile(ctx context.Context, writer io.Writer, readers
return nil return nil
} }
reader := newParallelReader(readers, s.dataBlocks, offset, totalLength, s.blockSize) reader := newParallelReader(readers, e.dataBlocks, offset, totalLength, e.blockSize)
startBlock := offset / s.blockSize startBlock := offset / e.blockSize
endBlock := (offset + length) / s.blockSize endBlock := (offset + length) / e.blockSize
var bytesWritten int64 var bytesWritten int64
for block := startBlock; block <= endBlock; block++ { for block := startBlock; block <= endBlock; block++ {
var blockOffset, blockLength int64 var blockOffset, blockLength int64
switch { switch {
case startBlock == endBlock: case startBlock == endBlock:
blockOffset = offset % s.blockSize blockOffset = offset % e.blockSize
blockLength = length blockLength = length
case block == startBlock: case block == startBlock:
blockOffset = offset % s.blockSize blockOffset = offset % e.blockSize
blockLength = s.blockSize - blockOffset blockLength = e.blockSize - blockOffset
case block == endBlock: case block == endBlock:
blockOffset = 0 blockOffset = 0
blockLength = (offset + length) % s.blockSize blockLength = (offset + length) % e.blockSize
default: default:
blockOffset = 0 blockOffset = 0
blockLength = s.blockSize blockLength = e.blockSize
} }
if blockLength == 0 { if blockLength == 0 {
break break
@ -170,11 +170,11 @@ func (s ErasureStorage) ReadFile(ctx context.Context, writer io.Writer, readers
if err != nil { if err != nil {
return err return err
} }
if err = s.ErasureDecodeDataBlocks(bufs); err != nil { if err = e.DecodeDataBlocks(bufs); err != nil {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
return err return err
} }
n, err := writeDataBlocks(ctx, writer, bufs, s.dataBlocks, blockOffset, blockLength) n, err := writeDataBlocks(ctx, writer, bufs, e.dataBlocks, blockOffset, blockLength)
if err != nil { if err != nil {
return err return err
} }

View File

@ -32,7 +32,7 @@ func (d badDisk) ReadFile(volume string, path string, offset int64, buf []byte,
return 0, errFaultyDisk return 0, errFaultyDisk
} }
var erasureReadFileTests = []struct { var erasureDecodeTests = []struct {
dataBlocks int dataBlocks int
onDisks, offDisks int onDisks, offDisks int
blocksize, data int64 blocksize, data int64
@ -81,13 +81,13 @@ var erasureReadFileTests = []struct {
{dataBlocks: 8, onDisks: 12, offDisks: 4, blocksize: int64(blockSizeV1), data: int64(2 * blockSizeV1), offset: 11, length: int64(blockSizeV1) + 2*1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 37 {dataBlocks: 8, onDisks: 12, offDisks: 4, blocksize: int64(blockSizeV1), data: int64(2 * blockSizeV1), offset: 11, length: int64(blockSizeV1) + 2*1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 37
} }
func TestErasureReadFile(t *testing.T) { func TestErasureDecode(t *testing.T) {
for i, test := range erasureReadFileTests { for i, test := range erasureDecodeTests {
setup, err := newErasureTestSetup(test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize) setup, err := newErasureTestSetup(test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
if err != nil { if err != nil {
t.Fatalf("Test %d: failed to create test setup: %v", i, err) t.Fatalf("Test %d: failed to create test setup: %v", i, err)
} }
storage, err := NewErasureStorage(context.Background(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize) erasure, err := NewErasure(context.Background(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
if err != nil { if err != nil {
setup.Remove() setup.Remove()
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err) t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
@ -108,7 +108,7 @@ func TestErasureReadFile(t *testing.T) {
for i, disk := range disks { for i, disk := range disks {
writers[i] = newBitrotWriter(disk, "testbucket", "object", writeAlgorithm) writers[i] = newBitrotWriter(disk, "testbucket", "object", writeAlgorithm)
} }
n, err := storage.CreateFile(context.Background(), bytes.NewReader(data[:]), writers, buffer, storage.dataBlocks+1) n, err := erasure.Encode(context.Background(), bytes.NewReader(data[:]), writers, buffer, erasure.dataBlocks+1)
if err != nil { if err != nil {
setup.Remove() setup.Remove()
t.Fatalf("Test %d: failed to create erasure test file: %v", i, err) t.Fatalf("Test %d: failed to create erasure test file: %v", i, err)
@ -129,12 +129,12 @@ func TestErasureReadFile(t *testing.T) {
if disk == OfflineDisk { if disk == OfflineDisk {
continue continue
} }
endOffset := getErasureShardFileEndOffset(test.offset, test.length, test.data, test.blocksize, storage.dataBlocks) endOffset := getErasureShardFileEndOffset(test.offset, test.length, test.data, test.blocksize, erasure.dataBlocks)
bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", writeAlgorithm, endOffset, writers[index].Sum()) bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", writeAlgorithm, endOffset, writers[index].Sum())
} }
writer := bytes.NewBuffer(nil) writer := bytes.NewBuffer(nil)
err = storage.ReadFile(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data) err = erasure.Decode(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data)
if err != nil && !test.shouldFail { if err != nil && !test.shouldFail {
t.Errorf("Test %d: should pass but failed with: %v", i, err) t.Errorf("Test %d: should pass but failed with: %v", i, err)
} }
@ -157,7 +157,7 @@ func TestErasureReadFile(t *testing.T) {
if disk == OfflineDisk { if disk == OfflineDisk {
continue continue
} }
endOffset := getErasureShardFileEndOffset(test.offset, test.length, test.data, test.blocksize, storage.dataBlocks) endOffset := getErasureShardFileEndOffset(test.offset, test.length, test.data, test.blocksize, erasure.dataBlocks)
bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", writeAlgorithm, endOffset, writers[index].Sum()) bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", writeAlgorithm, endOffset, writers[index].Sum())
} }
for j := range disks[:test.offDisks] { for j := range disks[:test.offDisks] {
@ -167,7 +167,7 @@ func TestErasureReadFile(t *testing.T) {
bitrotReaders[0] = nil bitrotReaders[0] = nil
} }
writer.Reset() writer.Reset()
err = storage.ReadFile(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data) err = erasure.Decode(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data)
if err != nil && !test.shouldFailQuorum { if err != nil && !test.shouldFailQuorum {
t.Errorf("Test %d: should pass but failed with: %v", i, err) t.Errorf("Test %d: should pass but failed with: %v", i, err)
} }
@ -184,10 +184,10 @@ func TestErasureReadFile(t *testing.T) {
} }
} }
// Test erasureReadFile with random offset and lengths. // Test erasureDecode with random offset and lengths.
// This test is t.Skip()ed as it a long time to run, hence should be run // This test is t.Skip()ed as it a long time to run, hence should be run
// explicitly after commenting out t.Skip() // explicitly after commenting out t.Skip()
func TestErasureReadFileRandomOffsetLength(t *testing.T) { func TestErasureDecodeRandomOffsetLength(t *testing.T) {
// Comment the following line to run this test. // Comment the following line to run this test.
t.SkipNow() t.SkipNow()
// Initialize environment needed for the test. // Initialize environment needed for the test.
@ -201,7 +201,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
} }
defer setup.Remove() defer setup.Remove()
disks := setup.disks disks := setup.disks
storage, err := NewErasureStorage(context.Background(), dataBlocks, parityBlocks, blockSize) erasure, err := NewErasure(context.Background(), dataBlocks, parityBlocks, blockSize)
if err != nil { if err != nil {
t.Fatalf("failed to create ErasureStorage: %v", err) t.Fatalf("failed to create ErasureStorage: %v", err)
} }
@ -226,7 +226,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
// Create a test file to read from. // Create a test file to read from.
buffer := make([]byte, blockSize, 2*blockSize) buffer := make([]byte, blockSize, 2*blockSize)
n, err := storage.CreateFile(context.Background(), bytes.NewReader(data), writers, buffer, storage.dataBlocks+1) n, err := erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -239,7 +239,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
buf := &bytes.Buffer{} buf := &bytes.Buffer{}
// Verify erasureReadFile() for random offsets and lengths. // Verify erasure.Decode() for random offsets and lengths.
for i := 0; i < iterations; i++ { for i := 0; i < iterations; i++ {
offset := r.Int63n(length) offset := r.Int63n(length)
readLen := r.Int63n(length - offset) readLen := r.Int63n(length - offset)
@ -252,10 +252,10 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
if disk == OfflineDisk { if disk == OfflineDisk {
continue continue
} }
endOffset := getErasureShardFileEndOffset(offset, readLen, length, blockSize, storage.dataBlocks) endOffset := getErasureShardFileEndOffset(offset, readLen, length, blockSize, erasure.dataBlocks)
bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", DefaultBitrotAlgorithm, endOffset, writers[index].Sum()) bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", DefaultBitrotAlgorithm, endOffset, writers[index].Sum())
} }
err = storage.ReadFile(context.Background(), buf, bitrotReaders, offset, readLen, length) err = erasure.Decode(context.Background(), buf, bitrotReaders, offset, readLen, length)
if err != nil { if err != nil {
t.Fatal(err, offset, readLen) t.Fatal(err, offset, readLen)
} }
@ -269,14 +269,14 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
// Benchmarks // Benchmarks
func benchmarkErasureRead(data, parity, dataDown, parityDown int, size int64, b *testing.B) { func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64, b *testing.B) {
setup, err := newErasureTestSetup(data, parity, blockSizeV1) setup, err := newErasureTestSetup(data, parity, blockSizeV1)
if err != nil { if err != nil {
b.Fatalf("failed to create test setup: %v", err) b.Fatalf("failed to create test setup: %v", err)
} }
defer setup.Remove() defer setup.Remove()
disks := setup.disks disks := setup.disks
storage, err := NewErasureStorage(context.Background(), data, parity, blockSizeV1) erasure, err := NewErasure(context.Background(), data, parity, blockSizeV1)
if err != nil { if err != nil {
b.Fatalf("failed to create ErasureStorage: %v", err) b.Fatalf("failed to create ErasureStorage: %v", err)
} }
@ -291,7 +291,7 @@ func benchmarkErasureRead(data, parity, dataDown, parityDown int, size int64, b
content := make([]byte, size) content := make([]byte, size)
buffer := make([]byte, blockSizeV1, 2*blockSizeV1) buffer := make([]byte, blockSizeV1, 2*blockSizeV1)
_, err = storage.CreateFile(context.Background(), bytes.NewReader(content), writers, buffer, storage.dataBlocks+1) _, err = erasure.Encode(context.Background(), bytes.NewReader(content), writers, buffer, erasure.dataBlocks+1)
if err != nil { if err != nil {
b.Fatalf("failed to create erasure test file: %v", err) b.Fatalf("failed to create erasure test file: %v", err)
} }
@ -312,62 +312,62 @@ func benchmarkErasureRead(data, parity, dataDown, parityDown int, size int64, b
if writers[index] == nil { if writers[index] == nil {
continue continue
} }
endOffset := getErasureShardFileEndOffset(0, size, size, storage.blockSize, storage.dataBlocks) endOffset := getErasureShardFileEndOffset(0, size, size, erasure.blockSize, erasure.dataBlocks)
bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", DefaultBitrotAlgorithm, endOffset, writers[index].Sum()) bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", DefaultBitrotAlgorithm, endOffset, writers[index].Sum())
} }
if err = storage.ReadFile(context.Background(), bytes.NewBuffer(content[:0]), bitrotReaders, 0, size, size); err != nil { if err = erasure.Decode(context.Background(), bytes.NewBuffer(content[:0]), bitrotReaders, 0, size, size); err != nil {
panic(err) panic(err)
} }
} }
} }
func BenchmarkErasureReadQuick(b *testing.B) { func BenchmarkErasureDecodeQuick(b *testing.B) {
const size = 12 * 1024 * 1024 const size = 12 * 1024 * 1024
b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureRead(2, 2, 0, 0, size, b) }) b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 0, 0, size, b) })
b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureRead(2, 2, 0, 1, size, b) }) b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 0, 1, size, b) })
b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureRead(2, 2, 1, 0, size, b) }) b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 1, 0, size, b) })
b.Run(" X0|X0 ", func(b *testing.B) { benchmarkErasureRead(2, 2, 1, 1, size, b) }) b.Run(" X0|X0 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 1, 1, size, b) })
} }
func BenchmarkErasureRead_4_64KB(b *testing.B) { func BenchmarkErasureDecode_4_64KB(b *testing.B) {
const size = 64 * 1024 const size = 64 * 1024
b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureRead(2, 2, 0, 0, size, b) }) b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 0, 0, size, b) })
b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureRead(2, 2, 0, 1, size, b) }) b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 0, 1, size, b) })
b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureRead(2, 2, 1, 0, size, b) }) b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 1, 0, size, b) })
b.Run(" X0|X0 ", func(b *testing.B) { benchmarkErasureRead(2, 2, 1, 1, size, b) }) b.Run(" X0|X0 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 1, 1, size, b) })
b.Run(" 00|XX ", func(b *testing.B) { benchmarkErasureRead(2, 2, 0, 2, size, b) }) b.Run(" 00|XX ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 0, 2, size, b) })
b.Run(" XX|00 ", func(b *testing.B) { benchmarkErasureRead(2, 2, 2, 0, size, b) }) b.Run(" XX|00 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 2, 0, size, b) })
} }
func BenchmarkErasureRead_8_20MB(b *testing.B) { func BenchmarkErasureDecode_8_20MB(b *testing.B) {
const size = 20 * 1024 * 1024 const size = 20 * 1024 * 1024
b.Run(" 0000|0000 ", func(b *testing.B) { benchmarkErasureRead(4, 4, 0, 0, size, b) }) b.Run(" 0000|0000 ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 0, 0, size, b) })
b.Run(" 0000|X000 ", func(b *testing.B) { benchmarkErasureRead(4, 4, 0, 1, size, b) }) b.Run(" 0000|X000 ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 0, 1, size, b) })
b.Run(" X000|0000 ", func(b *testing.B) { benchmarkErasureRead(4, 4, 1, 0, size, b) }) b.Run(" X000|0000 ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 1, 0, size, b) })
b.Run(" X000|X000 ", func(b *testing.B) { benchmarkErasureRead(4, 4, 1, 1, size, b) }) b.Run(" X000|X000 ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 1, 1, size, b) })
b.Run(" 0000|XXXX ", func(b *testing.B) { benchmarkErasureRead(4, 4, 0, 4, size, b) }) b.Run(" 0000|XXXX ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 0, 4, size, b) })
b.Run(" XX00|XX00 ", func(b *testing.B) { benchmarkErasureRead(4, 4, 2, 2, size, b) }) b.Run(" XX00|XX00 ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 2, 2, size, b) })
b.Run(" XXXX|0000 ", func(b *testing.B) { benchmarkErasureRead(4, 4, 4, 0, size, b) }) b.Run(" XXXX|0000 ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 4, 0, size, b) })
} }
func BenchmarkErasureRead_12_30MB(b *testing.B) { func BenchmarkErasureDecode_12_30MB(b *testing.B) {
const size = 30 * 1024 * 1024 const size = 30 * 1024 * 1024
b.Run(" 000000|000000 ", func(b *testing.B) { benchmarkErasureRead(6, 6, 0, 0, size, b) }) b.Run(" 000000|000000 ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 0, 0, size, b) })
b.Run(" 000000|X00000 ", func(b *testing.B) { benchmarkErasureRead(6, 6, 0, 1, size, b) }) b.Run(" 000000|X00000 ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 0, 1, size, b) })
b.Run(" X00000|000000 ", func(b *testing.B) { benchmarkErasureRead(6, 6, 1, 0, size, b) }) b.Run(" X00000|000000 ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 1, 0, size, b) })
b.Run(" X00000|X00000 ", func(b *testing.B) { benchmarkErasureRead(6, 6, 1, 1, size, b) }) b.Run(" X00000|X00000 ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 1, 1, size, b) })
b.Run(" 000000|XXXXXX ", func(b *testing.B) { benchmarkErasureRead(6, 6, 0, 6, size, b) }) b.Run(" 000000|XXXXXX ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 0, 6, size, b) })
b.Run(" XXX000|XXX000 ", func(b *testing.B) { benchmarkErasureRead(6, 6, 3, 3, size, b) }) b.Run(" XXX000|XXX000 ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 3, 3, size, b) })
b.Run(" XXXXXX|000000 ", func(b *testing.B) { benchmarkErasureRead(6, 6, 6, 0, size, b) }) b.Run(" XXXXXX|000000 ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 6, 0, size, b) })
} }
func BenchmarkErasureRead_16_40MB(b *testing.B) { func BenchmarkErasureDecode_16_40MB(b *testing.B) {
const size = 40 * 1024 * 1024 const size = 40 * 1024 * 1024
b.Run(" 00000000|00000000 ", func(b *testing.B) { benchmarkErasureRead(8, 8, 0, 0, size, b) }) b.Run(" 00000000|00000000 ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 0, 0, size, b) })
b.Run(" 00000000|X0000000 ", func(b *testing.B) { benchmarkErasureRead(8, 8, 0, 1, size, b) }) b.Run(" 00000000|X0000000 ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 0, 1, size, b) })
b.Run(" X0000000|00000000 ", func(b *testing.B) { benchmarkErasureRead(8, 8, 1, 0, size, b) }) b.Run(" X0000000|00000000 ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 1, 0, size, b) })
b.Run(" X0000000|X0000000 ", func(b *testing.B) { benchmarkErasureRead(8, 8, 1, 1, size, b) }) b.Run(" X0000000|X0000000 ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 1, 1, size, b) })
b.Run(" 00000000|XXXXXXXX ", func(b *testing.B) { benchmarkErasureRead(8, 8, 0, 8, size, b) }) b.Run(" 00000000|XXXXXXXX ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 0, 8, size, b) })
b.Run(" XXXX0000|XXXX0000 ", func(b *testing.B) { benchmarkErasureRead(8, 8, 4, 4, size, b) }) b.Run(" XXXX0000|XXXX0000 ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 4, 4, size, b) })
b.Run(" XXXXXXXX|00000000 ", func(b *testing.B) { benchmarkErasureRead(8, 8, 8, 0, size, b) }) b.Run(" XXXXXXXX|00000000 ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 8, 0, size, b) })
} }

View File

@ -69,8 +69,8 @@ func (p *parallelWriter) Append(ctx context.Context, blocks [][]byte) error {
return reduceWriteQuorumErrs(ctx, p.errs, objectOpIgnoredErrs, p.writeQuorum) return reduceWriteQuorumErrs(ctx, p.errs, objectOpIgnoredErrs, p.writeQuorum)
} }
// CreateFile reads from the reader, erasure-encodes the data and writes to the writers. // Encode reads from the reader, erasure-encodes the data and writes to the writers.
func (s *ErasureStorage) CreateFile(ctx context.Context, src io.Reader, writers []*bitrotWriter, buf []byte, quorum int) (total int64, err error) { func (e *Erasure) Encode(ctx context.Context, src io.Reader, writers []*bitrotWriter, buf []byte, quorum int) (total int64, err error) {
writer := &parallelWriter{ writer := &parallelWriter{
writers: writers, writers: writers,
writeQuorum: quorum, writeQuorum: quorum,
@ -90,7 +90,7 @@ func (s *ErasureStorage) CreateFile(ctx context.Context, src io.Reader, writers
break break
} }
// We take care of the situation where if n == 0 and total == 0 by creating empty data and parity files. // We take care of the situation where if n == 0 and total == 0 by creating empty data and parity files.
blocks, err = s.ErasureEncode(ctx, buf[:n]) blocks, err = e.EncodeData(ctx, buf[:n])
if err != nil { if err != nil {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
return 0, err return 0, err

View File

@ -34,7 +34,7 @@ func (a badDisk) AppendFile(volume string, path string, buf []byte) error {
const oneMiByte = 1 * humanize.MiByte const oneMiByte = 1 * humanize.MiByte
var erasureCreateFileTests = []struct { var erasureEncodeTests = []struct {
dataBlocks int dataBlocks int
onDisks, offDisks int onDisks, offDisks int
blocksize, data int64 blocksize, data int64
@ -64,14 +64,14 @@ var erasureCreateFileTests = []struct {
{dataBlocks: 10, onDisks: 16, offDisks: 8, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 19 {dataBlocks: 10, onDisks: 16, offDisks: 8, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 19
} }
func TestErasureCreateFile(t *testing.T) { func TestErasureEncode(t *testing.T) {
for i, test := range erasureCreateFileTests { for i, test := range erasureEncodeTests {
setup, err := newErasureTestSetup(test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize) setup, err := newErasureTestSetup(test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
if err != nil { if err != nil {
t.Fatalf("Test %d: failed to create test setup: %v", i, err) t.Fatalf("Test %d: failed to create test setup: %v", i, err)
} }
disks := setup.disks disks := setup.disks
storage, err := NewErasureStorage(context.Background(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize) erasure, err := NewErasure(context.Background(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
if err != nil { if err != nil {
setup.Remove() setup.Remove()
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err) t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
@ -90,7 +90,7 @@ func TestErasureCreateFile(t *testing.T) {
} }
writers[i] = newBitrotWriter(disk, "testbucket", "object", test.algorithm) writers[i] = newBitrotWriter(disk, "testbucket", "object", test.algorithm)
} }
n, err := storage.CreateFile(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, storage.dataBlocks+1) n, err := erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
if err != nil && !test.shouldFail { if err != nil && !test.shouldFail {
t.Errorf("Test %d: should pass but failed with: %v", i, err) t.Errorf("Test %d: should pass but failed with: %v", i, err)
} }
@ -119,7 +119,7 @@ func TestErasureCreateFile(t *testing.T) {
if test.offDisks > 0 { if test.offDisks > 0 {
writers[0] = nil writers[0] = nil
} }
n, err = storage.CreateFile(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, storage.dataBlocks+1) n, err = erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
if err != nil && !test.shouldFailQuorum { if err != nil && !test.shouldFailQuorum {
t.Errorf("Test %d: should pass but failed with: %v", i, err) t.Errorf("Test %d: should pass but failed with: %v", i, err)
} }
@ -138,13 +138,13 @@ func TestErasureCreateFile(t *testing.T) {
// Benchmarks // Benchmarks
func benchmarkErasureWrite(data, parity, dataDown, parityDown int, size int64, b *testing.B) { func benchmarkErasureEncode(data, parity, dataDown, parityDown int, size int64, b *testing.B) {
setup, err := newErasureTestSetup(data, parity, blockSizeV1) setup, err := newErasureTestSetup(data, parity, blockSizeV1)
if err != nil { if err != nil {
b.Fatalf("failed to create test setup: %v", err) b.Fatalf("failed to create test setup: %v", err)
} }
defer setup.Remove() defer setup.Remove()
storage, err := NewErasureStorage(context.Background(), data, parity, blockSizeV1) erasure, err := NewErasure(context.Background(), data, parity, blockSizeV1)
if err != nil { if err != nil {
b.Fatalf("failed to create ErasureStorage: %v", err) b.Fatalf("failed to create ErasureStorage: %v", err)
} }
@ -170,50 +170,50 @@ func benchmarkErasureWrite(data, parity, dataDown, parityDown int, size int64, b
} }
writers[i] = newBitrotWriter(disk, "testbucket", "object", DefaultBitrotAlgorithm) writers[i] = newBitrotWriter(disk, "testbucket", "object", DefaultBitrotAlgorithm)
} }
_, err := storage.CreateFile(context.Background(), bytes.NewReader(content), writers, buffer, storage.dataBlocks+1) _, err := erasure.Encode(context.Background(), bytes.NewReader(content), writers, buffer, erasure.dataBlocks+1)
if err != nil { if err != nil {
panic(err) panic(err)
} }
} }
} }
func BenchmarkErasureWriteQuick(b *testing.B) { func BenchmarkErasureEncodeQuick(b *testing.B) {
const size = 12 * 1024 * 1024 const size = 12 * 1024 * 1024
b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureWrite(2, 2, 0, 0, size, b) }) b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 0, 0, size, b) })
b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureWrite(2, 2, 0, 1, size, b) }) b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 0, 1, size, b) })
b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureWrite(2, 2, 1, 0, size, b) }) b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 1, 0, size, b) })
} }
func BenchmarkErasureWrite_4_64KB(b *testing.B) { func BenchmarkErasureEncode_4_64KB(b *testing.B) {
const size = 64 * 1024 const size = 64 * 1024
b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureWrite(2, 2, 0, 0, size, b) }) b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 0, 0, size, b) })
b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureWrite(2, 2, 0, 1, size, b) }) b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 0, 1, size, b) })
b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureWrite(2, 2, 1, 0, size, b) }) b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 1, 0, size, b) })
} }
func BenchmarkErasureWrite_8_20MB(b *testing.B) { func BenchmarkErasureEncode_8_20MB(b *testing.B) {
const size = 20 * 1024 * 1024 const size = 20 * 1024 * 1024
b.Run(" 0000|0000 ", func(b *testing.B) { benchmarkErasureWrite(4, 4, 0, 0, size, b) }) b.Run(" 0000|0000 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 0, 0, size, b) })
b.Run(" 0000|X000 ", func(b *testing.B) { benchmarkErasureWrite(4, 4, 0, 1, size, b) }) b.Run(" 0000|X000 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 0, 1, size, b) })
b.Run(" X000|0000 ", func(b *testing.B) { benchmarkErasureWrite(4, 4, 1, 0, size, b) }) b.Run(" X000|0000 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 1, 0, size, b) })
b.Run(" 0000|XXX0 ", func(b *testing.B) { benchmarkErasureWrite(4, 4, 0, 3, size, b) }) b.Run(" 0000|XXX0 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 0, 3, size, b) })
b.Run(" XXX0|0000 ", func(b *testing.B) { benchmarkErasureWrite(4, 4, 3, 0, size, b) }) b.Run(" XXX0|0000 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 3, 0, size, b) })
} }
func BenchmarkErasureWrite_12_30MB(b *testing.B) { func BenchmarkErasureEncode_12_30MB(b *testing.B) {
const size = 30 * 1024 * 1024 const size = 30 * 1024 * 1024
b.Run(" 000000|000000 ", func(b *testing.B) { benchmarkErasureWrite(6, 6, 0, 0, size, b) }) b.Run(" 000000|000000 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 0, 0, size, b) })
b.Run(" 000000|X00000 ", func(b *testing.B) { benchmarkErasureWrite(6, 6, 0, 1, size, b) }) b.Run(" 000000|X00000 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 0, 1, size, b) })
b.Run(" X00000|000000 ", func(b *testing.B) { benchmarkErasureWrite(6, 6, 1, 0, size, b) }) b.Run(" X00000|000000 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 1, 0, size, b) })
b.Run(" 000000|XXXXX0 ", func(b *testing.B) { benchmarkErasureWrite(6, 6, 0, 5, size, b) }) b.Run(" 000000|XXXXX0 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 0, 5, size, b) })
b.Run(" XXXXX0|000000 ", func(b *testing.B) { benchmarkErasureWrite(6, 6, 5, 0, size, b) }) b.Run(" XXXXX0|000000 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 5, 0, size, b) })
} }
func BenchmarkErasureWrite_16_40MB(b *testing.B) { func BenchmarkErasureEncode_16_40MB(b *testing.B) {
const size = 40 * 1024 * 1024 const size = 40 * 1024 * 1024
b.Run(" 00000000|00000000 ", func(b *testing.B) { benchmarkErasureWrite(8, 8, 0, 0, size, b) }) b.Run(" 00000000|00000000 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 0, 0, size, b) })
b.Run(" 00000000|X0000000 ", func(b *testing.B) { benchmarkErasureWrite(8, 8, 0, 1, size, b) }) b.Run(" 00000000|X0000000 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 0, 1, size, b) })
b.Run(" X0000000|00000000 ", func(b *testing.B) { benchmarkErasureWrite(8, 8, 1, 0, size, b) }) b.Run(" X0000000|00000000 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 1, 0, size, b) })
b.Run(" 00000000|XXXXXXX0 ", func(b *testing.B) { benchmarkErasureWrite(8, 8, 0, 7, size, b) }) b.Run(" 00000000|XXXXXXX0 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 0, 7, size, b) })
b.Run(" XXXXXXX0|00000000 ", func(b *testing.B) { benchmarkErasureWrite(8, 8, 7, 0, size, b) }) b.Run(" XXXXXXX0|00000000 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 7, 0, size, b) })
} }

View File

@ -23,20 +23,20 @@ import (
"github.com/minio/minio/cmd/logger" "github.com/minio/minio/cmd/logger"
) )
// HealFile heals the shard files on non-nil writers. Note that the quorum passed is 1 // Heal heals the shard files on non-nil writers. Note that the quorum passed is 1
// as healing should continue even if it has been successful healing only one shard file. // as healing should continue even if it has been successful healing only one shard file.
func (s ErasureStorage) HealFile(ctx context.Context, readers []*bitrotReader, writers []*bitrotWriter, size int64) error { func (e Erasure) Heal(ctx context.Context, readers []*bitrotReader, writers []*bitrotWriter, size int64) error {
r, w := io.Pipe() r, w := io.Pipe()
go func() { go func() {
if err := s.ReadFile(ctx, w, readers, 0, size, size); err != nil { if err := e.Decode(ctx, w, readers, 0, size, size); err != nil {
w.CloseWithError(err) w.CloseWithError(err)
return return
} }
w.Close() w.Close()
}() }()
buf := make([]byte, s.blockSize) buf := make([]byte, e.blockSize)
	// quorum is 1 because CreateFile should continue writing as long as we are writing to even 1 disk. 	// quorum is 1 because Encode should continue writing as long as we are writing to even 1 disk.
n, err := s.CreateFile(ctx, r, writers, buf, 1) n, err := e.Encode(ctx, r, writers, buf, 1)
if err != nil { if err != nil {
return err return err
} }

View File

@ -24,7 +24,7 @@ import (
"testing" "testing"
) )
var erasureHealFileTests = []struct { var erasureHealTests = []struct {
dataBlocks, disks int dataBlocks, disks int
// number of offline disks is also number of staleDisks for // number of offline disks is also number of staleDisks for
@ -60,8 +60,8 @@ var erasureHealFileTests = []struct {
{dataBlocks: 2, disks: 4, offDisks: 1, badDisks: 0, badStaleDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte * 64, algorithm: SHA256, shouldFail: false}, // 19 {dataBlocks: 2, disks: 4, offDisks: 1, badDisks: 0, badStaleDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte * 64, algorithm: SHA256, shouldFail: false}, // 19
} }
func TestErasureHealFile(t *testing.T) { func TestErasureHeal(t *testing.T) {
for i, test := range erasureHealFileTests { for i, test := range erasureHealTests {
if test.offDisks < test.badStaleDisks { if test.offDisks < test.badStaleDisks {
// test case sanity check // test case sanity check
t.Fatalf("Test %d: Bad test case - number of stale disks cannot be less than number of badstale disks", i) t.Fatalf("Test %d: Bad test case - number of stale disks cannot be less than number of badstale disks", i)
@ -73,7 +73,7 @@ func TestErasureHealFile(t *testing.T) {
t.Fatalf("Test %d: failed to setup XL environment: %v", i, err) t.Fatalf("Test %d: failed to setup XL environment: %v", i, err)
} }
disks := setup.disks disks := setup.disks
storage, err := NewErasureStorage(context.Background(), test.dataBlocks, test.disks-test.dataBlocks, test.blocksize) erasure, err := NewErasure(context.Background(), test.dataBlocks, test.disks-test.dataBlocks, test.blocksize)
if err != nil { if err != nil {
setup.Remove() setup.Remove()
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err) t.Fatalf("Test %d: failed to create Erasure: %v", i, err)
@ -88,7 +88,7 @@ func TestErasureHealFile(t *testing.T) {
for i, disk := range disks { for i, disk := range disks {
writers[i] = newBitrotWriter(disk, "testbucket", "testobject", test.algorithm) writers[i] = newBitrotWriter(disk, "testbucket", "testobject", test.algorithm)
} }
_, err = storage.CreateFile(context.Background(), bytes.NewReader(data), writers, buffer, storage.dataBlocks+1) _, err = erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
if err != nil { if err != nil {
setup.Remove() setup.Remove()
t.Fatalf("Test %d: failed to create random test data: %v", i, err) t.Fatalf("Test %d: failed to create random test data: %v", i, err)
@ -96,7 +96,7 @@ func TestErasureHealFile(t *testing.T) {
readers := make([]*bitrotReader, len(disks)) readers := make([]*bitrotReader, len(disks))
for i, disk := range disks { for i, disk := range disks {
shardFilesize := getErasureShardFileSize(test.blocksize, test.size, storage.dataBlocks) shardFilesize := getErasureShardFileSize(test.blocksize, test.size, erasure.dataBlocks)
readers[i] = newBitrotReader(disk, "testbucket", "testobject", test.algorithm, shardFilesize, writers[i].Sum()) readers[i] = newBitrotReader(disk, "testbucket", "testobject", test.algorithm, shardFilesize, writers[i].Sum())
} }
@ -126,7 +126,7 @@ func TestErasureHealFile(t *testing.T) {
} }
// test case setup is complete - now call Healfile() // test case setup is complete - now call Heal()
err = storage.HealFile(context.Background(), readers, staleWriters, test.size) err = erasure.Heal(context.Background(), readers, staleWriters, test.size)
if err != nil && !test.shouldFail { if err != nil && !test.shouldFail {
t.Errorf("Test %d: should pass but it failed with: %v", i, err) t.Errorf("Test %d: should pass but it failed with: %v", i, err)
} }

View File

@ -23,26 +23,23 @@ import (
"github.com/minio/minio/cmd/logger" "github.com/minio/minio/cmd/logger"
) )
// OfflineDisk represents an unavailable disk. // Erasure - erasure encoding details.
var OfflineDisk StorageAPI // zero value is nil type Erasure struct {
encoder reedsolomon.Encoder
// ErasureStorage - erasure encoding details.
type ErasureStorage struct {
erasure reedsolomon.Encoder
dataBlocks, parityBlocks int dataBlocks, parityBlocks int
blockSize int64 blockSize int64
} }
// NewErasureStorage creates a new ErasureStorage. // NewErasure creates a new Erasure.
func NewErasureStorage(ctx context.Context, dataBlocks, parityBlocks int, blockSize int64) (s ErasureStorage, err error) { func NewErasure(ctx context.Context, dataBlocks, parityBlocks int, blockSize int64) (e Erasure, err error) {
shardsize := int(ceilFrac(blockSize, int64(dataBlocks))) shardsize := int(ceilFrac(blockSize, int64(dataBlocks)))
erasure, err := reedsolomon.New(dataBlocks, parityBlocks, reedsolomon.WithAutoGoroutines(shardsize)) erasure, err := reedsolomon.New(dataBlocks, parityBlocks, reedsolomon.WithAutoGoroutines(shardsize))
if err != nil { if err != nil {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
return s, err return e, err
} }
s = ErasureStorage{ e = Erasure{
erasure: erasure, encoder: erasure,
dataBlocks: dataBlocks, dataBlocks: dataBlocks,
parityBlocks: parityBlocks, parityBlocks: parityBlocks,
blockSize: blockSize, blockSize: blockSize,
@ -50,30 +47,30 @@ func NewErasureStorage(ctx context.Context, dataBlocks, parityBlocks int, blockS
return return
} }
// ErasureEncode encodes the given data and returns the erasure-coded data. // EncodeData encodes the given data and returns the erasure-coded data.
// It returns an error if the erasure coding failed. // It returns an error if the erasure coding failed.
func (s *ErasureStorage) ErasureEncode(ctx context.Context, data []byte) ([][]byte, error) { func (e *Erasure) EncodeData(ctx context.Context, data []byte) ([][]byte, error) {
if len(data) == 0 { if len(data) == 0 {
return make([][]byte, s.dataBlocks+s.parityBlocks), nil return make([][]byte, e.dataBlocks+e.parityBlocks), nil
} }
encoded, err := s.erasure.Split(data) encoded, err := e.encoder.Split(data)
if err != nil { if err != nil {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
return nil, err return nil, err
} }
if err = s.erasure.Encode(encoded); err != nil { if err = e.encoder.Encode(encoded); err != nil {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
return nil, err return nil, err
} }
return encoded, nil return encoded, nil
} }
// ErasureDecodeDataBlocks decodes the given erasure-coded data. // DecodeDataBlocks decodes the given erasure-coded data.
// It only decodes the data blocks but does not verify them. // It only decodes the data blocks but does not verify them.
// It returns an error if the decoding failed. // It returns an error if the decoding failed.
func (s *ErasureStorage) ErasureDecodeDataBlocks(data [][]byte) error { func (e *Erasure) DecodeDataBlocks(data [][]byte) error {
needsReconstruction := false needsReconstruction := false
for _, b := range data[:s.dataBlocks] { for _, b := range data[:e.dataBlocks] {
if b == nil { if b == nil {
needsReconstruction = true needsReconstruction = true
break break
@ -82,16 +79,16 @@ func (s *ErasureStorage) ErasureDecodeDataBlocks(data [][]byte) error {
if !needsReconstruction { if !needsReconstruction {
return nil return nil
} }
if err := s.erasure.ReconstructData(data); err != nil { if err := e.encoder.ReconstructData(data); err != nil {
return err return err
} }
return nil return nil
} }
// ErasureDecodeDataAndParityBlocks decodes the given erasure-coded data and verifies it. // DecodeDataAndParityBlocks decodes the given erasure-coded data and verifies it.
// It returns an error if the decoding failed. // It returns an error if the decoding failed.
func (s *ErasureStorage) ErasureDecodeDataAndParityBlocks(ctx context.Context, data [][]byte) error { func (e *Erasure) DecodeDataAndParityBlocks(ctx context.Context, data [][]byte) error {
if err := s.erasure.Reconstruct(data); err != nil { if err := e.encoder.Reconstruct(data); err != nil {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
return err return err
} }

View File

@ -25,7 +25,7 @@ import (
"testing" "testing"
) )
var erasureDecodeTests = []struct { var erasureEncodeDecodeTests = []struct {
dataBlocks, parityBlocks int dataBlocks, parityBlocks int
missingData, missingParity int missingData, missingParity int
reconstructParity bool reconstructParity bool
@ -43,20 +43,20 @@ var erasureDecodeTests = []struct {
{dataBlocks: 8, parityBlocks: 4, missingData: 2, missingParity: 2, reconstructParity: false, shouldFail: false}, {dataBlocks: 8, parityBlocks: 4, missingData: 2, missingParity: 2, reconstructParity: false, shouldFail: false},
} }
func TestErasureDecode(t *testing.T) { func TestErasureEncodeDecode(t *testing.T) {
data := make([]byte, 256) data := make([]byte, 256)
if _, err := io.ReadFull(rand.Reader, data); err != nil { if _, err := io.ReadFull(rand.Reader, data); err != nil {
t.Fatalf("Failed to read random data: %v", err) t.Fatalf("Failed to read random data: %v", err)
} }
for i, test := range erasureDecodeTests { for i, test := range erasureEncodeDecodeTests {
buffer := make([]byte, len(data), 2*len(data)) buffer := make([]byte, len(data), 2*len(data))
copy(buffer, data) copy(buffer, data)
storage, err := NewErasureStorage(context.Background(), test.dataBlocks, test.parityBlocks, blockSizeV1) erasure, err := NewErasure(context.Background(), test.dataBlocks, test.parityBlocks, blockSizeV1)
if err != nil { if err != nil {
t.Fatalf("Test %d: failed to create erasure storage: %v", i, err) t.Fatalf("Test %d: failed to create erasure: %v", i, err)
} }
encoded, err := storage.ErasureEncode(context.Background(), buffer) encoded, err := erasure.EncodeData(context.Background(), buffer)
if err != nil { if err != nil {
t.Fatalf("Test %d: failed to encode data: %v", i, err) t.Fatalf("Test %d: failed to encode data: %v", i, err)
} }
@ -69,9 +69,9 @@ func TestErasureDecode(t *testing.T) {
} }
if test.reconstructParity { if test.reconstructParity {
err = storage.ErasureDecodeDataAndParityBlocks(context.Background(), encoded) err = erasure.DecodeDataAndParityBlocks(context.Background(), encoded)
} else { } else {
err = storage.ErasureDecodeDataBlocks(encoded) err = erasure.DecodeDataBlocks(encoded)
} }
if err == nil && test.shouldFail { if err == nil && test.shouldFail {

View File

@ -261,7 +261,7 @@ func (fs *FSObjects) StorageInfo(ctx context.Context) StorageInfo {
storageInfo := StorageInfo{ storageInfo := StorageInfo{
Used: used, Used: used,
} }
storageInfo.Backend.Type = FS storageInfo.Backend.Type = BackendFS
return storageInfo return storageInfo
} }

View File

@ -124,7 +124,7 @@ func (c *minioCollector) Collect(ch chan<- prometheus.Metric) {
var totalDisks, offlineDisks int var totalDisks, offlineDisks int
// Setting totalDisks to 1 and offlineDisks to 0 in FS mode // Setting totalDisks to 1 and offlineDisks to 0 in FS mode
if s.Backend.Type == FS { if s.Backend.Type == BackendFS {
totalDisks = 1 totalDisks = 1
offlineDisks = 0 offlineDisks = 0
} else { } else {

View File

@ -31,9 +31,9 @@ type BackendType int
const ( const (
Unknown BackendType = iota Unknown BackendType = iota
// Filesystem backend. // Filesystem backend.
FS BackendFS
// Multi disk Erasure (single, distributed) backend. // Multi disk Erasure (single, distributed) backend.
Erasure BackendErasure
// Add your own backend. // Add your own backend.
) )

View File

@ -174,7 +174,7 @@ func printObjectAPIMsg() {
// Get formatted disk/storage info message. // Get formatted disk/storage info message.
func getStorageInfoMsg(storageInfo StorageInfo) string { func getStorageInfoMsg(storageInfo StorageInfo) string {
var msg string var msg string
if storageInfo.Backend.Type == Erasure { if storageInfo.Backend.Type == BackendErasure {
diskInfo := fmt.Sprintf(" %d Online, %d Offline. ", storageInfo.Backend.OnlineDisks, storageInfo.Backend.OfflineDisks) diskInfo := fmt.Sprintf(" %d Online, %d Offline. ", storageInfo.Backend.OnlineDisks, storageInfo.Backend.OfflineDisks)
msg += colorBlue("Status:") + fmt.Sprintf(getFormatStr(len(diskInfo), 8), diskInfo) msg += colorBlue("Status:") + fmt.Sprintf(getFormatStr(len(diskInfo), 8), diskInfo)
} }

View File

@ -30,7 +30,7 @@ import (
// Tests if we generate storage info. // Tests if we generate storage info.
func TestStorageInfoMsg(t *testing.T) { func TestStorageInfoMsg(t *testing.T) {
infoStorage := StorageInfo{} infoStorage := StorageInfo{}
infoStorage.Backend.Type = Erasure infoStorage.Backend.Type = BackendErasure
infoStorage.Backend.OnlineDisks = 7 infoStorage.Backend.OnlineDisks = 7
infoStorage.Backend.OfflineDisks = 1 infoStorage.Backend.OfflineDisks = 1

View File

@ -17,7 +17,6 @@
package cmd package cmd
import ( import (
"fmt"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -355,7 +354,6 @@ func testStorageAPIReadFile(t *testing.T, storage StorageAPI) {
expectErr := (err != nil) expectErr := (err != nil)
if expectErr != testCase.expectErr { if expectErr != testCase.expectErr {
fmt.Println(err)
t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
} }

View File

@ -272,7 +272,7 @@ func newXLSets(endpoints EndpointList, format *formatXLV3, setCount int, drivesP
// StorageInfo - combines output of StorageInfo across all erasure coded object sets. // StorageInfo - combines output of StorageInfo across all erasure coded object sets.
func (s *xlSets) StorageInfo(ctx context.Context) StorageInfo { func (s *xlSets) StorageInfo(ctx context.Context) StorageInfo {
var storageInfo StorageInfo var storageInfo StorageInfo
storageInfo.Backend.Type = Erasure storageInfo.Backend.Type = BackendErasure
for _, set := range s.sets { for _, set := range s.sets {
lstorageInfo := set.StorageInfo(ctx) lstorageInfo := set.StorageInfo(ctx)
storageInfo.Used = storageInfo.Used + lstorageInfo.Used storageInfo.Used = storageInfo.Used + lstorageInfo.Used

View File

@ -446,7 +446,7 @@ func healObject(ctx context.Context, storageDisks []StorageAPI, bucket string, o
// Heal each part. erasureHealFile() will write the healed // Heal each part. erasure.Heal() will write the healed
// part to .minio/tmp/uuid/ which needs to be renamed later to // part to .minio/tmp/uuid/ which needs to be renamed later to
// the final location. // the final location.
storage, err := NewErasureStorage(ctx, latestMeta.Erasure.DataBlocks, erasure, err := NewErasure(ctx, latestMeta.Erasure.DataBlocks,
latestMeta.Erasure.ParityBlocks, latestMeta.Erasure.BlockSize) latestMeta.Erasure.ParityBlocks, latestMeta.Erasure.BlockSize)
if err != nil { if err != nil {
return result, toObjectErr(err, bucket, object) return result, toObjectErr(err, bucket, object)
@ -455,7 +455,7 @@ func healObject(ctx context.Context, storageDisks []StorageAPI, bucket string, o
for partIndex := 0; partIndex < len(latestMeta.Parts); partIndex++ { for partIndex := 0; partIndex < len(latestMeta.Parts); partIndex++ {
partName := latestMeta.Parts[partIndex].Name partName := latestMeta.Parts[partIndex].Name
partSize := latestMeta.Parts[partIndex].Size partSize := latestMeta.Parts[partIndex].Size
erasure := latestMeta.Erasure erasureInfo := latestMeta.Erasure
var algorithm BitrotAlgorithm var algorithm BitrotAlgorithm
bitrotReaders := make([]*bitrotReader, len(latestDisks)) bitrotReaders := make([]*bitrotReader, len(latestDisks))
for i, disk := range latestDisks { for i, disk := range latestDisks {
@ -464,7 +464,7 @@ func healObject(ctx context.Context, storageDisks []StorageAPI, bucket string, o
} }
info := partsMetadata[i].Erasure.GetChecksumInfo(partName) info := partsMetadata[i].Erasure.GetChecksumInfo(partName)
algorithm = info.Algorithm algorithm = info.Algorithm
endOffset := getErasureShardFileEndOffset(0, partSize, partSize, erasure.BlockSize, storage.dataBlocks) endOffset := getErasureShardFileEndOffset(0, partSize, partSize, erasureInfo.BlockSize, erasure.dataBlocks)
bitrotReaders[i] = newBitrotReader(disk, bucket, pathJoin(object, partName), algorithm, endOffset, info.Hash) bitrotReaders[i] = newBitrotReader(disk, bucket, pathJoin(object, partName), algorithm, endOffset, info.Hash)
} }
bitrotWriters := make([]*bitrotWriter, len(outDatedDisks)) bitrotWriters := make([]*bitrotWriter, len(outDatedDisks))
@ -474,7 +474,7 @@ func healObject(ctx context.Context, storageDisks []StorageAPI, bucket string, o
} }
bitrotWriters[i] = newBitrotWriter(disk, minioMetaTmpBucket, pathJoin(tmpID, partName), algorithm) bitrotWriters[i] = newBitrotWriter(disk, minioMetaTmpBucket, pathJoin(tmpID, partName), algorithm)
} }
hErr := storage.HealFile(ctx, bitrotReaders, bitrotWriters, partSize) hErr := erasure.Heal(ctx, bitrotReaders, bitrotWriters, partSize)
if hErr != nil { if hErr != nil {
return result, toObjectErr(hErr, bucket, object) return result, toObjectErr(hErr, bucket, object)
} }

View File

@ -369,7 +369,7 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
} }
} }
storage, err := NewErasureStorage(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize) erasure, err := NewErasure(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
if err != nil { if err != nil {
return pi, toObjectErr(err, bucket, object) return pi, toObjectErr(err, bucket, object)
} }
@ -397,7 +397,7 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
} }
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tmpPartPath, DefaultBitrotAlgorithm) writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tmpPartPath, DefaultBitrotAlgorithm)
} }
n, err := storage.CreateFile(ctx, data, writers, buffer, storage.dataBlocks+1) n, err := erasure.Encode(ctx, data, writers, buffer, erasure.dataBlocks+1)
if err != nil { if err != nil {
return pi, toObjectErr(err, bucket, object) return pi, toObjectErr(err, bucket, object)
} }

View File

@ -262,7 +262,7 @@ func (xl xlObjects) getObject(ctx context.Context, bucket, object string, startO
} }
var totalBytesRead int64 var totalBytesRead int64
storage, err := NewErasureStorage(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize) erasure, err := NewErasure(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
if err != nil { if err != nil {
return toObjectErr(err, bucket, object) return toObjectErr(err, bucket, object)
} }
@ -292,7 +292,7 @@ func (xl xlObjects) getObject(ctx context.Context, bucket, object string, startO
bitrotReaders[index] = newBitrotReader(disk, bucket, pathJoin(object, partName), checksumInfo.Algorithm, endOffset, checksumInfo.Hash) bitrotReaders[index] = newBitrotReader(disk, bucket, pathJoin(object, partName), checksumInfo.Algorithm, endOffset, checksumInfo.Hash)
} }
err := storage.ReadFile(ctx, writer, bitrotReaders, partOffset, partLength, partSize) err := erasure.Decode(ctx, writer, bitrotReaders, partOffset, partLength, partSize)
if err != nil { if err != nil {
return toObjectErr(err, bucket, object) return toObjectErr(err, bucket, object)
} }
@ -608,7 +608,7 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string,
// Total size of the written object // Total size of the written object
var sizeWritten int64 var sizeWritten int64
storage, err := NewErasureStorage(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize) erasure, err := NewErasure(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
if err != nil { if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object) return ObjectInfo{}, toObjectErr(err, bucket, object)
} }
@ -667,7 +667,7 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string,
} }
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj, DefaultBitrotAlgorithm) writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj, DefaultBitrotAlgorithm)
} }
n, erasureErr := storage.CreateFile(ctx, curPartReader, writers, buffer, storage.dataBlocks+1) n, erasureErr := erasure.Encode(ctx, curPartReader, writers, buffer, erasure.dataBlocks+1)
if erasureErr != nil { if erasureErr != nil {
return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj) return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj)
} }

View File

@ -30,6 +30,9 @@ const (
xlMetaJSONFile = "xl.json" xlMetaJSONFile = "xl.json"
) )
// OfflineDisk represents an unavailable disk.
var OfflineDisk StorageAPI // zero value is nil
// xlObjects - Implements XL object layer. // xlObjects - Implements XL object layer.
type xlObjects struct { type xlObjects struct {
// name space mutex for object layer. // name space mutex for object layer.
@ -137,7 +140,7 @@ func getStorageInfo(disks []StorageAPI) StorageInfo {
} }
storageInfo.Used = used storageInfo.Used = used
storageInfo.Backend.Type = Erasure storageInfo.Backend.Type = BackendErasure
storageInfo.Backend.OnlineDisks = onlineDisks storageInfo.Backend.OnlineDisks = onlineDisks
storageInfo.Backend.OfflineDisks = offlineDisks storageInfo.Backend.OfflineDisks = offlineDisks