Merge pull request #294 from fkautz/pr_out_renaming_singledisk_to_encoded

Frederick F. Kautz IV 2015-03-14 11:39:03 -07:00
commit b441a27c67
11 changed files with 21 additions and 839 deletions

View File

@@ -1,38 +0,0 @@
package erasure

import (
    "errors"
    "io"

    "github.com/minio-io/minio/pkg/storage/donut/erasure/erasure1"
)

// EncoderTechnique - encoder matrix type
type EncoderTechnique int

const (
    // Vandermonde matrix type
    Vandermonde EncoderTechnique = iota
    // Cauchy matrix type
    Cauchy
)

// Write latest donut format
func Write(target io.Writer, length uint32, k, m uint8, technique EncoderTechnique, data io.Reader) error {
    var versionedTechnique erasure1.EncoderTechnique
    switch {
    case technique == Vandermonde:
        versionedTechnique = erasure1.Vandermonde
    case technique == Cauchy:
        versionedTechnique = erasure1.Cauchy
    default:
        return errors.New("Unknown encoder technique")
    }
    return erasure1.Write(target, length, k, m, versionedTechnique, data)
}

// Read any donut format
func Read(reader io.Reader) (io.Reader, error) {
    // when version2 is created, create a method in version2 that can transform version1 structure to version2
    return nil, errors.New("Not Implemented")
}

View File

@@ -1,108 +0,0 @@
/*
* Mini Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package erasure1

import (
    "bytes"
    "encoding/binary"
    "encoding/gob"
    "errors"
    "io"
    "strconv"
)

// Version - format version
const (
    Version = uint32(1)
)

// DataHeader represents the structure serialized to gob.
type DataHeader struct {
    OriginalLength uint32
    // Data Blocks
    EncoderK uint8
    // Parity Blocks
    EncoderM uint8
    // Matrix Technique
    EncoderTechnique EncoderTechnique
}

// EncoderTechnique specifies the matrix type used in encoding
type EncoderTechnique int

const (
    // Vandermonde matrix type
    Vandermonde EncoderTechnique = iota
    // Cauchy matrix type
    Cauchy
)

// validate populated header
func validateHeader(header DataHeader) error {
    if header.EncoderTechnique > 1 {
        return errors.New("Invalid encoder technique")
    }
    return nil
}

// Write returns error upon any failure
func Write(target io.Writer, length uint32, k, m uint8, technique EncoderTechnique, data io.Reader) error {
    header := DataHeader{
        OriginalLength:   length,
        EncoderK:         k,
        EncoderM:         m,
        EncoderTechnique: technique,
    }
    if err := validateHeader(header); err != nil {
        return err
    }
    var headerBuffer bytes.Buffer
    // encode header
    encoder := gob.NewEncoder(&headerBuffer)
    if err := encoder.Encode(header); err != nil {
        return err
    }
    // write version
    if err := binary.Write(target, binary.LittleEndian, Version); err != nil {
        return err
    }
    // write encoded header
    if _, err := io.Copy(target, &headerBuffer); err != nil {
        return err
    }
    // write data
    if _, err := io.Copy(target, data); err != nil {
        return err
    }
    return nil
}

// ReadHeader - read an erasure block header
func ReadHeader(reader io.Reader) (dataHeader DataHeader, err error) {
    versionArray := make([]byte, 4)
    if err := binary.Read(reader, binary.LittleEndian, versionArray); err != nil {
        return dataHeader, err
    }
    version := binary.LittleEndian.Uint32(versionArray)
    if version > Version || version == 0 {
        return dataHeader, errors.New("Unknown version: " + strconv.FormatUint(uint64(version), 10))
    }
    decoder := gob.NewDecoder(reader)
    err = decoder.Decode(&dataHeader)
    return dataHeader, err
}
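
For reference, the erasure1 stream removed above is a 4-byte little-endian version, a gob-encoded DataHeader, and then the raw encoded data; ReadHeader consumes the first two and leaves the data in the reader (with a bytes.Buffer the gob decoder does not read past the header, which is what the deleted test below relies on). A minimal round-trip sketch, not part of this commit, written as if it sat in the erasure1 package so no import path has to be assumed:

package erasure1

import (
    "bytes"
    "fmt"
)

// headerRoundTripSketch is hypothetical illustration code, not part of the commit.
func headerRoundTripSketch() {
    payload := []byte("encoded block bytes")
    var frame bytes.Buffer
    // version (4 bytes, little-endian) + gob(DataHeader) + payload
    if err := Write(&frame, uint32(len(payload)), 8, 8, Cauchy, bytes.NewReader(payload)); err != nil {
        panic(err)
    }
    // ReadHeader consumes the version and gob header, leaving the payload behind
    header, err := ReadHeader(&frame)
    if err != nil {
        panic(err)
    }
    fmt.Println(header.OriginalLength, header.EncoderK, header.EncoderM) // 19 8 8
    fmt.Println(frame.String())                                          // encoded block bytes
}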

View File

@@ -1,86 +0,0 @@
/*
* Mini Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package erasure1

import (
    "bytes"
    "encoding/binary"
    "encoding/gob"
    "io"
    "testing"

    . "gopkg.in/check.v1"
)

func Test(t *testing.T) { TestingT(t) }

type MySuite struct{}

var _ = Suite(&MySuite{})

func (s *MySuite) TestSingleWrite(c *C) {
    var testBuffer bytes.Buffer
    testData := "Hello, World"
    testHeader := DataHeader{
        OriginalLength:   uint32(len(testData)),
        EncoderK:         8,
        EncoderM:         8,
        EncoderTechnique: Cauchy,
    }
    err := Write(&testBuffer, testHeader.OriginalLength, testHeader.EncoderK, testHeader.EncoderM, testHeader.EncoderTechnique, bytes.NewBufferString(testData))
    c.Assert(err, IsNil)
    actualVersion := make([]byte, 4)
    _, err = testBuffer.Read(actualVersion)
    c.Assert(err, IsNil)
    c.Assert(binary.LittleEndian.Uint32(actualVersion), DeepEquals, uint32(1))
    actualHeader := DataHeader{}
    decoder := gob.NewDecoder(&testBuffer)
    decoder.Decode(&actualHeader)
    c.Assert(actualHeader, DeepEquals, testHeader)
    var actualData bytes.Buffer
    dataLength, err := io.Copy(&actualData, &testBuffer)
    c.Assert(dataLength, Equals, int64(len(testData)))
    c.Assert(actualData.Bytes(), DeepEquals, []byte(testData))
    c.Assert(err, IsNil)
}

func (s *MySuite) TestReadWrite(c *C) {
    var testBuffer bytes.Buffer
    testData := "Hello, World"
    testHeader := DataHeader{
        OriginalLength:   uint32(len(testData)),
        EncoderK:         8,
        EncoderM:         8,
        EncoderTechnique: Cauchy,
    }
    err := Write(&testBuffer, testHeader.OriginalLength, testHeader.EncoderK, testHeader.EncoderM, testHeader.EncoderTechnique, bytes.NewBufferString(testData))
    c.Assert(err, IsNil)
    header, err := ReadHeader(&testBuffer)
    c.Assert(err, IsNil)
    c.Assert(header, DeepEquals, testHeader)
    c.Assert(testBuffer.Len(), Equals, len(testData))
    c.Assert(testBuffer.Bytes(), DeepEquals, []byte(testData))
}

View File

@@ -1,222 +0,0 @@
/*
* Mini Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package fragment

import (
    "bytes"
    "encoding/binary"
    "errors"
    "io"

    "github.com/minio-io/minio/pkg/utils/checksum/crc32c"
    "github.com/minio-io/minio/pkg/utils/crypto/sha512"
)

/*
DONUT v1 Spec
**********************
BlockStart uint32 // Magic="MINI"=1229867341
VersionMajor uint32
Reserved uint64
DataLen uint64
HeaderCrc32c uint32
BlockData uint32 // Magic="DATA"=1096040772
Data io.Reader // matches length
FooterCrc32c uint32
DataSha512 [64]byte
BlockLen uint64 // length of entire frame, inclusive of MINI and INIM
BlockEnd uint32 // Magic="INIM"=1296649801
*/

// Magic list
var (
    MagicMINI = binary.LittleEndian.Uint32([]byte{'M', 'I', 'N', 'I'})
    MagicDATA = binary.LittleEndian.Uint32([]byte{'D', 'A', 'T', 'A'})
    MagicINIM = binary.LittleEndian.Uint32([]byte{'I', 'N', 'I', 'M'})
)

// DonutFrameHeader -
// --------------
// BlockStart uint32
// VersionMajor uint32
// Reserved uint64
// DataLen uint64
// --------------
type DonutFrameHeader struct {
    MagicMINI  uint32
    Version    uint32
    Reserved   uint64
    DataLength uint64
}

// Crc32c checksum
type Crc32c uint32

// Sha512 checksum
type Sha512 [sha512.Size]byte

// DonutFrameFooter -
// --------------
// DataSha512 [64]byte
// BlockLen uint64
// BlockEnd uint32
// --------------
type DonutFrameFooter struct {
    DataSha512   Sha512
    OffsetToMINI uint64
    MagicINIM    uint32
}

// Data buffer
type Data bytes.Buffer

// Write - write donut format to input io.Writer, returns error upon any failure
func Write(target io.Writer, reader io.Reader, length uint64) error {
    // build header
    header := DonutFrameHeader{
        MagicMINI:  MagicMINI,
        Version:    1,
        Reserved:   0,
        DataLength: length,
    }
    var headerBytes bytes.Buffer
    binary.Write(&headerBytes, binary.LittleEndian, header)
    headerCrc := crc32c.Sum32(headerBytes.Bytes())
    binary.Write(&headerBytes, binary.LittleEndian, headerCrc)
    binary.Write(&headerBytes, binary.LittleEndian, MagicDATA)
    // write header
    headerLen, err := io.Copy(target, &headerBytes)
    if err != nil {
        return err
    }
    // write DATA
    // create sha512 tee
    sumReader, sumWriter := io.Pipe()
    defer sumWriter.Close()
    checksumChannel := make(chan checksumValue)
    go generateChecksum(sumReader, checksumChannel)
    teeReader := io.TeeReader(reader, sumWriter)
    dataLength, err := io.Copy(target, teeReader)
    if err != nil {
        return err
    }
    if uint64(dataLength) != length {
        return errors.New("Specified data length and amount written mismatched")
    }
    sumWriter.Close()
    dataChecksum := <-checksumChannel
    if dataChecksum.err != nil {
        return dataChecksum.err
    }
    // generate footer
    frameFooter := DonutFrameFooter{
        DataSha512:   dataChecksum.checksum,
        OffsetToMINI: length + uint64(headerLen) + uint64(80), /*footer size*/
        MagicINIM:    MagicINIM,
    }
    var frameFooterBytes bytes.Buffer
    binary.Write(&frameFooterBytes, binary.LittleEndian, frameFooter)
    // write footer crc
    footerChecksum := crc32c.Sum32(frameFooterBytes.Bytes())
    if err := binary.Write(target, binary.LittleEndian, footerChecksum); err != nil {
        return err
    }
    // write footer
    _, err = io.Copy(target, &frameFooterBytes)
    if err != nil {
        return err
    }
    return nil
}

// Read - reads a donut fragment
func Read(reader io.Reader) (io.Reader, error) {
    header, err := ReadHeader(reader)
    if err != nil {
        return nil, err
    }
    sumReader, sumWriter := io.Pipe()
    teeReader := io.TeeReader(reader, sumWriter)
    defer sumWriter.Close()
    checksumChannel := make(chan checksumValue)
    go generateChecksum(sumReader, checksumChannel)
    data := make([]byte, header.DataLength)
    if _, err := io.ReadFull(teeReader, data); err != nil {
        return nil, err
    }
    sumWriter.Close()
    dataChecksum := <-checksumChannel
    if dataChecksum.err != nil {
        return nil, dataChecksum.err
    }
    // read footer crc and footer
    footerBuffer := make([]byte, 80)
    if _, err := io.ReadFull(reader, footerBuffer); err != nil {
        return nil, err
    }
    expectedCrc := binary.LittleEndian.Uint32(footerBuffer[:4])
    actualCrc := crc32c.Sum32(footerBuffer[4:])
    if expectedCrc != actualCrc {
        // TODO perhaps we should return data and still report error?
        return nil, errors.New("Expected CRC doesn't match for footer")
    }
    footer := DonutFrameFooter{}
    err = binary.Read(bytes.NewBuffer(footerBuffer[4:]), binary.LittleEndian, &footer)
    if err != nil {
        return nil, err
    }
    if footer.DataSha512 != dataChecksum.checksum {
        return nil, errors.New("Data sha512 doesn't match footer")
    }
    return bytes.NewBuffer(data), nil
}

// ReadHeader - reads the header of a donut
func ReadHeader(reader io.Reader) (header DonutFrameHeader, err error) {
    headerSlice := make([]byte, 32)
    headerLength, err := reader.Read(headerSlice)
    if err != nil {
        return header, err
    }
    if headerLength != 32 {
        return header, errors.New("EOF found while reading donut header")
    }
    actualCrc := crc32c.Sum32(headerSlice[:24])
    expectedCrc := binary.LittleEndian.Uint32(headerSlice[24:28])
    if actualCrc != expectedCrc {
        return header, errors.New("CRC for donut did not match")
    }
    err = binary.Read(bytes.NewBuffer(headerSlice[0:24]), binary.LittleEndian, &header)
    return header, err
}

type checksumValue struct {
    checksum Sha512
    err      error
}

// calculate sha512 over channel
func generateChecksum(reader io.Reader, c chan<- checksumValue) {
    checksum, err := sha512.SumStream(reader)
    result := checksumValue{
        checksum: checksum,
        err:      err,
    }
    c <- result
}
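
Taken together, the spec and code removed above give each fragment a fixed 112 bytes of framing: a 32-byte header (24 bytes of fields, a 4-byte CRC32C over them, and the 4-byte "DATA" magic) plus an 80-byte footer (a 4-byte CRC32C, the 64-byte SHA-512 of the data, the 8-byte frame length, and the 4-byte "INIM" magic). A minimal round-trip sketch, not part of this commit, written as if it sat in the removed fragment package:

package fragment

import (
    "bytes"
    "fmt"
)

// frameRoundTripSketch is hypothetical illustration code, not part of the commit.
func frameRoundTripSketch() {
    payload := []byte("Hello, World")
    var frame bytes.Buffer
    if err := Write(&frame, bytes.NewReader(payload), uint64(len(payload))); err != nil {
        panic(err)
    }
    // 32-byte header + payload + 80-byte footer
    fmt.Println(frame.Len() == len(payload)+112) // true
    body, err := Read(&frame)
    if err != nil {
        panic(err)
    }
    var out bytes.Buffer
    out.ReadFrom(body)
    fmt.Println(out.String()) // Hello, World
}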

View File

@@ -1,160 +0,0 @@
/*
* Mini Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package fragment

import (
    "bytes"
    "crypto/sha512"
    "encoding/binary"
    "io"
    "testing"

    "github.com/minio-io/minio/pkg/utils/checksum/crc32c"
    . "gopkg.in/check.v1"
)

func Test(t *testing.T) { TestingT(t) }

type MySuite struct{}

var _ = Suite(&MySuite{})

func (s *MySuite) TestSingleWrite(c *C) {
    //var b io.ReadWriteSeeker
    var testBuffer bytes.Buffer
    testData := "Hello, World"
    testLength := uint64(len(testData))
    err := Write(&testBuffer, bytes.NewBufferString(testData), testLength)
    c.Assert(err, IsNil)
    testBufferLength := uint64(testBuffer.Len())
    // we test our crc here too
    headerBytes := testBuffer.Bytes()[:24]
    expectedCrc := crc32c.Sum32(headerBytes)
    // magic mini
    magicMini := make([]byte, 4)
    testBuffer.Read(magicMini)
    c.Assert(magicMini, DeepEquals, []byte{'M', 'I', 'N', 'I'})
    // major version
    version := make([]byte, 4)
    testBuffer.Read(version)
    c.Assert(binary.LittleEndian.Uint32(version), DeepEquals, uint32(1))
    // reserved
    reserved := make([]byte, 8)
    testBuffer.Read(reserved)
    c.Assert(binary.LittleEndian.Uint64(reserved), DeepEquals, uint64(0))
    // data length
    length := make([]byte, 8)
    testBuffer.Read(length)
    c.Assert(binary.LittleEndian.Uint64(length), DeepEquals, testLength)
    // test crc
    bufCrc := make([]byte, 4)
    testBuffer.Read(bufCrc)
    c.Assert(binary.LittleEndian.Uint32(bufCrc), DeepEquals, expectedCrc)
    // magic DATA
    magicData := make([]byte, 4)
    testBuffer.Read(magicData)
    c.Assert(magicData, DeepEquals, []byte{'D', 'A', 'T', 'A'})
    // data
    actualData := make([]byte, int32(testLength))
    testBuffer.Read(actualData)
    c.Assert(string(actualData), DeepEquals, testData)
    // extract footer crc32c
    actualFooterCrc := make([]byte, 4)
    testBuffer.Read(actualFooterCrc)
    remainingBytes := testBuffer.Bytes()
    remainingSum := crc32c.Sum32(remainingBytes)
    c.Assert(binary.LittleEndian.Uint32(actualFooterCrc), DeepEquals, remainingSum)
    // sha512
    expectedSha512 := sha512.Sum512([]byte(testData))
    actualSha512 := make([]byte, 64)
    testBuffer.Read(actualSha512)
    c.Assert(actualSha512, DeepEquals, expectedSha512[:])
    // length
    actualLength := make([]byte, 8)
    testBuffer.Read(actualLength)
    c.Assert(testBufferLength, DeepEquals, binary.LittleEndian.Uint64(actualLength))
    // magic INIM
    magicInim := make([]byte, 4)
    testBuffer.Read(magicInim)
    c.Assert(magicInim, DeepEquals, []byte{'I', 'N', 'I', 'M'})
    // ensure no extra data is in the file
    c.Assert(testBuffer.Len(), Equals, 0)
}

func (s *MySuite) TestLengthMismatchInWrite(c *C) {
    var testData bytes.Buffer
    err := Write(&testData, bytes.NewBufferString("hello, world"), 5)
    c.Assert(err, Not(IsNil))
}

func (s *MySuite) TestWriteAndRead(c *C) {
    testData := "Hello, World"
    testLength := uint64(len(testData))
    var testBuffer bytes.Buffer
    err := Write(&testBuffer, bytes.NewBufferString(testData), testLength)
    c.Assert(err, IsNil)
    testReader, err := Read(&testBuffer)
    c.Assert(err, IsNil)
    var actualData bytes.Buffer
    length, err := io.Copy(&actualData, testReader)
    c.Assert(int64(len(testData)), Equals, length)
    c.Assert([]byte(testData), DeepEquals, actualData.Bytes())
}

var buf = make([]byte, 1024*1024*8)

func benchmarkSize(b *testing.B, size int) {
    b.SetBytes(int64(size))
    target := new(bytes.Buffer)
    for i := 0; i < b.N; i++ {
        Write(target, bytes.NewReader(buf[:size]), uint64(size))
    }
}

func BenchmarkDonut1M(b *testing.B) {
    benchmarkSize(b, 1024*1024)
}

func BenchmarkDonut2M(b *testing.B) {
    benchmarkSize(b, 1024*1024*2)
}

func BenchmarkDonut4M(b *testing.B) {
    benchmarkSize(b, 1024*1024*4)
}

func BenchmarkDonut8M(b *testing.B) {
    benchmarkSize(b, 1024*1024*8)
}

View File

@@ -1,83 +0,0 @@
/*
* Mini Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package objectv1

import (
    "bytes"
    "encoding/binary"
    "encoding/gob"
    "errors"
    "io"
    "strconv"
    "time"
)

// Package Version
const Version = uint32(1)

// ObjectType is the type of object stored. It is either an Object or Multipart Object.
type ObjectType uint8

const (
    // Object is a full object
    Object ObjectType = iota
    // MultipartObject is a collection of Objects uploaded separately that represent a large object.
    MultipartObject
)

// ObjectMetadata contains information necessary to reconstruct the object and basic object metadata.
type ObjectMetadata struct {
    Bucket      string
    Key         string
    ErasurePart uint16
    EncodedPart uint8
    ContentType string
    Created     time.Time
    Length      uint64
    Md5         []byte
    ObjectType  ObjectType
}

// Write an encoded part to a writer
func Write(target io.Writer, metadata ObjectMetadata, reader io.Reader) error {
    buffer := new(bytes.Buffer)
    binary.Write(buffer, binary.LittleEndian, uint32(Version))
    encoder := gob.NewEncoder(buffer)
    if err := encoder.Encode(metadata); err != nil {
        return err
    }
    reader = io.MultiReader(buffer, reader)
    _, err := io.Copy(target, reader)
    return err
}

// ReadMetadata reads the first elements from the stream and returns the object metadata
func ReadMetadata(reader io.Reader) (metadata ObjectMetadata, err error) {
    versionBytes := make([]byte, 4)
    if err := binary.Read(reader, binary.LittleEndian, versionBytes); err != nil {
        return metadata, err
    }
    version := binary.LittleEndian.Uint32(versionBytes)
    if version != 1 {
        return metadata, errors.New("Unknown Version: " + strconv.FormatUint(uint64(version), 10))
    }
    decoder := gob.NewDecoder(reader)
    err = decoder.Decode(&metadata)
    return metadata, err
}
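
The objectv1 layout removed above mirrors erasure1: a 4-byte little-endian version, a gob-encoded ObjectMetadata, then the raw part data; ReadMetadata consumes the version and metadata and leaves the data on the stream (exact for a bytes.Buffer, since gob reads only whole length-prefixed messages, as the deleted test below also assumes). A minimal round-trip sketch, not part of this commit, written as if it sat in the objectv1 package:

package objectv1

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "time"
)

// partRoundTripSketch is hypothetical illustration code, not part of the commit.
func partRoundTripSketch() {
    part := []byte("encoded part bytes")
    metadata := ObjectMetadata{
        Bucket:      "bucket",
        Key:         "key",
        ErasurePart: 1,
        EncodedPart: 0,
        ContentType: "application/octet-stream",
        Created:     time.Now(),
        Length:      uint64(len(part)),
        ObjectType:  Object,
    }
    var stream bytes.Buffer
    if err := Write(&stream, metadata, bytes.NewReader(part)); err != nil {
        panic(err)
    }
    got, err := ReadMetadata(&stream)
    if err != nil {
        panic(err)
    }
    rest, _ := ioutil.ReadAll(&stream) // whatever ReadMetadata did not consume is the part data
    fmt.Println(got.Bucket, got.Key, bytes.Equal(rest, part)) // bucket key true
}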

View File

@@ -1,76 +0,0 @@
/*
* Mini Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package objectv1

import (
    "bytes"
    "crypto/md5"
    "encoding/binary"
    "encoding/gob"
    "io"
    "testing"
    "time"

    . "gopkg.in/check.v1"
)

func Test(t *testing.T) { TestingT(t) }

type MySuite struct{}

var _ = Suite(&MySuite{})

func (s *MySuite) TestObjectV1ReadWrite(c *C) {
    var buffer bytes.Buffer
    data := []byte("Hello, World")
    hash := md5.New()
    hash.Write(data)
    sum := hash.Sum(nil)
    objectMetadata := ObjectMetadata{
        Bucket:      "bucket",
        Key:         "key",
        ErasurePart: 1,
        EncodedPart: 2,
        ObjectType:  Object,
        Created:     time.Now(),
        ContentType: "application/text",
        Md5:         sum,
        Length:      uint64(len(sum)),
    }
    err := Write(&buffer, objectMetadata, bytes.NewBuffer(data))
    c.Assert(err, IsNil)
    versionBuffer := make([]byte, 4)
    buffer.Read(versionBuffer)
    c.Assert(binary.LittleEndian.Uint32(versionBuffer), Equals, uint32(1))
    actualMetadata := ObjectMetadata{}
    decoder := gob.NewDecoder(&buffer)
    decoder.Decode(&actualMetadata)
    c.Assert(actualMetadata, DeepEquals, objectMetadata)
    var actualData bytes.Buffer
    _, err = io.Copy(&actualData, &buffer)
    c.Assert(err, IsNil)
    c.Assert(actualData.Bytes(), DeepEquals, data)
}

View File

@@ -14,46 +14,37 @@
* limitations under the License.
*/
package singledisk
package encoded

import (
    "bytes"
    "errors"
    "github.com/minio-io/minio/pkg/encoding/erasure"
    "github.com/minio-io/minio/pkg/storage"
    "github.com/minio-io/minio/pkg/storage/donut/erasure/erasure1"
    "github.com/minio-io/minio/pkg/storage/donut/object/objectv1"
    "github.com/minio-io/minio/pkg/utils/split"
    "io"
    "os"
    "path"
)

// StorageDriver creates a new single disk storage driver using donut without encoding.
type StorageDriver struct {
    root     string
    donutBox DonutBox
}

// DonutBox is an interface specifying how the storage driver should interact with its underlying system.
type DonutBox interface {
    Store(objectv1.ObjectMetadata, erasure1.DataHeader, io.Reader)
    Store() error
    Get() (io.Reader, error)
    ListObjects(bucket string) ([]string, error)
    ListBuckets() ([]string, error)
}

// Start a single disk subsystem
func Start(root string, donutBox DonutBox) (chan<- string, <-chan error, storage.Storage) {
func Start() (chan<- string, <-chan error, storage.Storage) {
    ctrlChannel := make(chan string)
    errorChannel := make(chan error)
    s := new(StorageDriver)
    s.root = root
    s.donutBox = donutBox
    go start(ctrlChannel, errorChannel, s)
    return ctrlChannel, errorChannel, s
}

func start(ctrlChannel <-chan string, errorChannel chan<- error, s *StorageDriver) {
    err := os.MkdirAll(s.root, 0700)
    errorChannel <- err
    close(errorChannel)
}
@@ -64,8 +55,7 @@ func (diskStorage StorageDriver) ListBuckets() ([]storage.BucketMetadata, error)

// CreateBucket creates a new bucket
func (diskStorage StorageDriver) CreateBucket(bucket string) error {
    bucketPath := path.Join(diskStorage.root, bucket)
    return os.MkdirAll(bucketPath, 0600)
    return errors.New("Not Implemented")
}
// CreateBucketPolicy sets a bucket's access policy
@@ -79,7 +69,7 @@ func (diskStorage StorageDriver) GetBucketPolicy(bucket string) (storage.BucketP
}

// GetObject retrieves an object and writes it to a writer
func (diskStorage StorageDriver) GetObject(w io.Writer, bucket, object string) (int64, error) {
func (diskStorage StorageDriver) GetObject(target io.Writer, bucket, key string) (int64, error) {
    return 0, errors.New("Not Implemented")
}
@@ -89,8 +79,8 @@ func (diskStorage StorageDriver) GetPartialObject(w io.Writer, bucket, object st
}

// GetObjectMetadata retrieves an object's metadata
func (diskStorage StorageDriver) GetObjectMetadata(bucket, object string, prefix string) (storage.ObjectMetadata, error) {
    return storage.ObjectMetadata{}, errors.New("Not Implemented")
func (diskStorage StorageDriver) GetObjectMetadata(bucket, key string, prefix string) (metadata storage.ObjectMetadata, err error) {
    return metadata, errors.New("Not Implemented")
}

// ListObjects lists objects
@@ -100,42 +90,5 @@ func (diskStorage StorageDriver) ListObjects(bucket string, resources storage.Bu

// CreateObject creates a new object
func (diskStorage StorageDriver) CreateObject(bucket string, key string, contentType string, data io.Reader) error {
    // test if object exists
    // split object into erasure parts
    erasureParts := split.Stream(data, 10*1024*1024)
    // set up encoder
    params, err := erasure.ParseEncoderParams(8, 8, erasure.Cauchy)
    if err != nil {
        return err
    }
    encoder := erasure.NewEncoder(params)
    // for each erasure part
    erasurePartIndex := 1
    for erasurePart := range erasureParts {
        if erasurePart.Err != nil {
            return erasurePart.Err
        }
        // encode each erasure part into encoded parts
        encodedParts, length := encoder.Encode(erasurePart.Data)
        // for each encoded part
        for encodedPartIndex, encodedPart := range encodedParts {
            objectMetadata := objectv1.ObjectMetadata{
                Bucket:      bucket,
                Key:         key,
                ErasurePart: uint16(erasurePartIndex),
                EncodedPart: uint8(encodedPartIndex),
                ContentType: contentType,
            }
            erasureMetadata := erasure1.DataHeader{
                OriginalLength:   uint32(length),
                EncoderK:         8,
                EncoderM:         8,
                EncoderTechnique: erasure1.Cauchy,
            }
            // store encoded part
            diskStorage.donutBox.Store(objectMetadata, erasureMetadata, bytes.NewBuffer(encodedPart))
            erasurePartIndex = erasurePartIndex + 1
        }
    }
    return errors.New("Not Implemented")
}

View File

@@ -14,7 +14,7 @@
* limitations under the License.
*/
package singledisk
package encoded

import (
    "io/ioutil"
@@ -39,7 +39,7 @@ func (s *MySuite) TestAPISuite(c *C) {
        path, err := ioutil.TempDir(os.TempDir(), "minio-fs-")
        c.Check(err, IsNil)
        storageList = append(storageList, path)
        _, _, store := Start(path, nil) // TODO Make InMemory driver
        _, _, store := Start() // TODO Make InMemory driver
        return store
    }
    mstorage.APITestSuite(c, create)

View File

@@ -227,16 +227,17 @@ func (storage *Storage) ListObjects(bucket string, resources mstorage.BucketReso
    return results, resources, nil
}

type byBucketName []mstorage.BucketMetadata

// ByBucketName is a type for sorting bucket metadata by bucket name
type ByBucketName []mstorage.BucketMetadata

// Len of bucket name
func (b byBucketName) Len() int { return len(b) }
func (b ByBucketName) Len() int { return len(b) }

// Swap bucket i, j
func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b ByBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }

// Less
func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }
func (b ByBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }

// ListBuckets - List buckets from memory
func (storage *Storage) ListBuckets() ([]mstorage.BucketMetadata, error) {
@@ -244,7 +245,7 @@ func (storage *Storage) ListBuckets() ([]mstorage.BucketMetadata, error) {
    for _, bucket := range storage.bucketdata {
        results = append(results, bucket.metadata)
    }
    sort.Sort(byBucketName(results))
    sort.Sort(ByBucketName(results))
    return results, nil
}

View File

@@ -66,8 +66,9 @@ func testMultipleObjectCreation(c *check.C, create func() Storage) {
    etags := make(map[string]string)
    for key, value := range objects {
        var byteBuffer bytes.Buffer
        storage.GetObject(&byteBuffer, "bucket", key)
        c.Assert(bytes.Equal(value, byteBuffer.Bytes()), check.Equals, true)
        _, err := storage.GetObject(&byteBuffer, "bucket", key)
        c.Assert(err, check.IsNil)
        c.Assert(byteBuffer.Bytes(), check.DeepEquals, value)
        metadata, err := storage.GetObjectMetadata("bucket", key, "")
        c.Assert(err, check.IsNil)