mirror of
https://github.com/minio/minio.git
synced 2025-11-07 04:42:56 -05:00
Removing old server
Removing storage drivers
This commit is contained in:
@@ -1,11 +0,0 @@
|
||||
all: build test

# BUG FIX: .PHONY previously declared only "all"; a file named "build",
# "test", or "clean" would have silently shadowed those targets.
.PHONY: all build test clean

build:
	@godep go build

test: build
	@godep go test -race -coverprofile=cover.out

clean:
	@rm -v cover.out
|
||||
@@ -1,152 +0,0 @@
|
||||
/*
|
||||
* Mini Object Storage, (C) 2014 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package appendstorage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/minio-io/minio/pkg/storage"
|
||||
"github.com/minio-io/minio/pkg/utils/checksum/crc32c"
|
||||
)
|
||||
|
||||
// appendStorage is an append-only object store backed by a single data
// file plus a gob-encoded map file that indexes objects by path.
type appendStorage struct {
	RootDir     string            // root directory containing the slice files
	file        *os.File          // append-only data file holding all object bytes
	objects     map[string]Header // object path -> location/checksum metadata
	objectsFile string            // path of the persisted gob index ("<slice>.map")
}
|
||||
|
||||
// Header records where an object lives inside the append-only data file
// and the CRC32C checksum used to verify it on read.
type Header struct {
	Path   string // object path as supplied by the caller
	Offset int64  // byte offset of the object within the data file
	Length int    // object length in bytes
	Crc    uint32 // CRC32C of the stored bytes
}
|
||||
|
||||
func NewStorage(rootDir string, slice int) (storage.ObjectStorage, error) {
|
||||
rootPath := path.Join(rootDir, strconv.Itoa(slice))
|
||||
// TODO verify and fix partial writes
|
||||
file, err := os.OpenFile(rootPath, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0600)
|
||||
if err != nil {
|
||||
return &appendStorage{}, err
|
||||
}
|
||||
objectsFile := path.Join(rootDir, strconv.Itoa(slice)+".map")
|
||||
objects := make(map[string]Header)
|
||||
if _, err := os.Stat(objectsFile); err == nil {
|
||||
mapFile, err := os.Open(objectsFile)
|
||||
defer mapFile.Close()
|
||||
if err != nil {
|
||||
return &appendStorage{}, nil
|
||||
}
|
||||
dec := gob.NewDecoder(mapFile)
|
||||
err = dec.Decode(&objects)
|
||||
if err != nil && err != io.EOF {
|
||||
return &appendStorage{}, nil
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return &appendStorage{}, err
|
||||
}
|
||||
return &appendStorage{
|
||||
RootDir: rootDir,
|
||||
file: file,
|
||||
objects: objects,
|
||||
objectsFile: objectsFile,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (storage *appendStorage) Get(objectPath string) (io.Reader, error) {
|
||||
header, ok := storage.objects[objectPath]
|
||||
if ok == false {
|
||||
return nil, errors.New("Object not found")
|
||||
}
|
||||
|
||||
offset := header.Offset
|
||||
length := header.Length
|
||||
crc := header.Crc
|
||||
|
||||
object := make([]byte, length)
|
||||
_, err := storage.file.ReadAt(object, offset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newcrc, err := crc32c.Crc32c(object)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if newcrc != crc {
|
||||
return nil, err
|
||||
}
|
||||
return bytes.NewBuffer(object), nil
|
||||
}
|
||||
|
||||
func (aStorage *appendStorage) Put(objectPath string, object io.Reader) error {
|
||||
header := Header{
|
||||
Path: objectPath,
|
||||
Offset: 0,
|
||||
Length: 0,
|
||||
Crc: 0,
|
||||
}
|
||||
offset, err := aStorage.file.Seek(0, os.SEEK_END)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
objectBytes, err := ioutil.ReadAll(object)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := aStorage.file.Write(objectBytes); err != nil {
|
||||
return err
|
||||
}
|
||||
header.Offset = offset
|
||||
header.Length = len(objectBytes)
|
||||
header.Crc, err = crc32c.Crc32c(objectBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
aStorage.objects[objectPath] = header
|
||||
var mapBuffer bytes.Buffer
|
||||
encoder := gob.NewEncoder(&mapBuffer)
|
||||
encoder.Encode(aStorage.objects)
|
||||
ioutil.WriteFile(aStorage.objectsFile, mapBuffer.Bytes(), 0600)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (aStorage *appendStorage) List(objectPath string) ([]storage.ObjectDescription, error) {
|
||||
var objectDescList []storage.ObjectDescription
|
||||
for objectName, _ := range aStorage.objects {
|
||||
if strings.HasPrefix(objectName, objectPath) {
|
||||
var objectDescription storage.ObjectDescription
|
||||
objectDescription.Name = objectName
|
||||
objectDescription.Md5sum = ""
|
||||
objectDescription.Murmur3 = ""
|
||||
objectDescList = append(objectDescList, objectDescription)
|
||||
}
|
||||
}
|
||||
if len(objectDescList) == 0 {
|
||||
return nil, errors.New("No objects found")
|
||||
}
|
||||
return objectDescList, nil
|
||||
}
|
||||
@@ -1,116 +0,0 @@
|
||||
package appendstorage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/minio-io/minio/pkg/storage"
|
||||
"github.com/minio-io/minio/pkg/utils"
|
||||
. "gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
// AppendStorageSuite wires these tests into the gocheck runner.
type AppendStorageSuite struct{}

var _ = Suite(&AppendStorageSuite{})

// Test hooks gocheck into the standard "go test" entry point.
func Test(t *testing.T) { TestingT(t) }
|
||||
|
||||
// TestAppendStoragePutAtRootPath verifies that top-level objects
// round-trip through Put/Get, including re-reading the first object
// after a second one has been appended to the same data file.
func (s *AppendStorageSuite) TestAppendStoragePutAtRootPath(c *C) {
	rootDir, err := utils.MakeTempTestDir()
	c.Assert(err, IsNil)
	defer os.RemoveAll(rootDir)

	var objectStorage storage.ObjectStorage
	objectStorage, err = NewStorage(rootDir, 0)
	c.Assert(err, IsNil)

	err = objectStorage.Put("path1", bytes.NewBuffer([]byte("object1")))
	c.Assert(err, IsNil)

	// assert object1 was created in correct path
	objectResult1, err := objectStorage.Get("path1")
	c.Assert(err, IsNil)
	object1, _ := ioutil.ReadAll(objectResult1)
	c.Assert(string(object1), Equals, "object1")

	err = objectStorage.Put("path2", bytes.NewBuffer([]byte("object2")))
	c.Assert(err, IsNil)

	// assert object2 was created in correct path
	objectResult2, err := objectStorage.Get("path2")
	c.Assert(err, IsNil)
	object2, _ := ioutil.ReadAll(objectResult2)
	c.Assert(string(object2), Equals, "object2")

	// object1 must still be readable after object2 was appended
	objectResult1, err = objectStorage.Get("path1")
	c.Assert(err, IsNil)
	object1, _ = ioutil.ReadAll(objectResult1)
	c.Assert(string(object1), Equals, "object1")
}
|
||||
|
||||
func (s *AppendStorageSuite) TestAppendStoragePutDirPath(c *C) {
|
||||
rootDir, err := utils.MakeTempTestDir()
|
||||
c.Assert(err, IsNil)
|
||||
defer os.RemoveAll(rootDir)
|
||||
|
||||
var objectStorage storage.ObjectStorage
|
||||
objectStorage, err = NewStorage(rootDir, 0)
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
// add object 1
|
||||
objectStorage.Put("path1/path2/path3", bytes.NewBuffer([]byte("object")))
|
||||
|
||||
// assert object1 was created in correct path
|
||||
objectResult1, err := objectStorage.Get("path1/path2/path3")
|
||||
c.Assert(err, IsNil)
|
||||
object1, _ := ioutil.ReadAll(objectResult1)
|
||||
c.Assert(string(object1), Equals, "object")
|
||||
|
||||
// add object 2
|
||||
objectStorage.Put("path1/path1/path1", bytes.NewBuffer([]byte("object2")))
|
||||
|
||||
// assert object1 was created in correct path
|
||||
objectResult2, err := objectStorage.Get("path1/path1/path1")
|
||||
c.Assert(err, IsNil)
|
||||
object2, _ := ioutil.ReadAll(objectResult2)
|
||||
c.Assert(string(object2), Equals, "object2")
|
||||
}
|
||||
|
||||
// TestSerialization verifies that objects written by one storage
// instance are readable from a freshly constructed instance, i.e. the
// gob-persisted index round-trips through disk.
func (s *AppendStorageSuite) TestSerialization(c *C) {
	rootDir, err := utils.MakeTempTestDir()
	c.Assert(err, IsNil)
	defer os.RemoveAll(rootDir)

	objectStorage, err := NewStorage(rootDir, 0)
	c.Assert(err, IsNil)

	err = objectStorage.Put("path1", bytes.NewBuffer([]byte("object1")))
	c.Assert(err, IsNil)
	err = objectStorage.Put("path2", bytes.NewBuffer([]byte("object2")))
	c.Assert(err, IsNil)
	err = objectStorage.Put("path3/obj3", bytes.NewBuffer([]byte("object3")))
	c.Assert(err, IsNil)

	// close the data file so the second instance reopens it from disk
	es := objectStorage.(*appendStorage)
	es.file.Close()

	objectStorage2, err := NewStorage(rootDir, 0)
	c.Assert(err, IsNil)

	objectResult1, err := objectStorage2.Get("path1")
	c.Assert(err, IsNil)
	object1, _ := ioutil.ReadAll(objectResult1)
	c.Assert(string(object1), Equals, "object1")

	objectResult2, err := objectStorage2.Get("path2")
	c.Assert(err, IsNil)
	object2, _ := ioutil.ReadAll(objectResult2)
	c.Assert(string(object2), Equals, "object2")

	objectResult3, err := objectStorage2.Get("path3/obj3")
	c.Assert(err, IsNil)
	object3, _ := ioutil.ReadAll(objectResult3)
	c.Assert(string(object3), Equals, "object3")
}
|
||||
@@ -1,288 +0,0 @@
|
||||
/*
|
||||
* Mini Object Storage, (C) 2014 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package encodedstorage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/gob"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/minio-io/minio/pkg/storage"
|
||||
"github.com/minio-io/minio/pkg/storage/appendstorage"
|
||||
"github.com/minio-io/minio/pkg/storage/erasure"
|
||||
"github.com/minio-io/minio/pkg/utils/split"
|
||||
"github.com/spaolacci/murmur3"
|
||||
)
|
||||
|
||||
// encodedStorage erasure-codes each object across k+m appendstorage
// slices, keeping an in-memory (and gob-persisted) index of entries.
type encodedStorage struct {
	RootDir     string                  // root directory holding slice files and the "index" file
	K           int                     // data blocks per chunk
	M           int                     // parity blocks per chunk
	BlockSize   uint64                  // chunk size used when splitting incoming objects
	objects     map[string]StorageEntry // object path -> erasure metadata
	diskStorage []storage.ObjectStorage // one append store per slice (k+m total)
}
|
||||
|
||||
// StorageEntry is the persisted per-object metadata: content/path
// hashes plus the list of encoded chunks and the encoder parameters
// required to decode them.
type StorageEntry struct {
	Path          string                // object path as supplied by the caller
	Md5sum        []byte                // md5 of the plaintext content (see Put)
	Murmurhash    uint64                // murmur3 computed over the object path (see Put)
	Blocks        []StorageBlockEntry   // one entry per encoded chunk, in order
	Encoderparams erasure.EncoderParams // parameters needed to decode the blocks
}
|
||||
|
||||
// StorageBlockEntry identifies one encoded chunk of an object and the
// plaintext length needed to decode it.
type StorageBlockEntry struct {
	Index  int // chunk ordinal within the object
	Length int // plaintext length of the chunk in bytes
}

// storeRequest pairs a slice path with data destined for it.
// NOTE(review): not referenced by any code visible in this file —
// confirm whether it is still needed.
type storeRequest struct {
	path string
	data []byte
}

// storeResponse carries one slice read result back over a channel.
type storeResponse struct {
	data []byte
	err  error
}
|
||||
|
||||
func NewStorage(rootDir string, k, m int, blockSize uint64) (storage.ObjectStorage, error) {
|
||||
// create storage files
|
||||
if k == 0 || m == 0 {
|
||||
return nil, errors.New("Invalid protection level")
|
||||
}
|
||||
|
||||
storageNodes := make([]storage.ObjectStorage, k+m)
|
||||
for i := 0; i < k+m; i++ {
|
||||
storageNode, err := appendstorage.NewStorage(rootDir, i)
|
||||
storageNodes[i] = storageNode
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
objects := make(map[string]StorageEntry)
|
||||
indexPath := path.Join(rootDir, "index")
|
||||
if _, err := os.Stat(indexPath); err == nil {
|
||||
indexFile, err := os.Open(indexPath)
|
||||
defer indexFile.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
encoder := gob.NewDecoder(indexFile)
|
||||
err = encoder.Decode(&objects)
|
||||
if err != nil && err != io.EOF {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
newStorage := encodedStorage{
|
||||
RootDir: rootDir,
|
||||
K: k,
|
||||
M: m,
|
||||
BlockSize: blockSize,
|
||||
objects: objects,
|
||||
diskStorage: storageNodes,
|
||||
}
|
||||
return &newStorage, nil
|
||||
}
|
||||
|
||||
func (eStorage *encodedStorage) Get(objectPath string) (io.Reader, error) {
|
||||
entry, ok := eStorage.objects[objectPath]
|
||||
if ok == false {
|
||||
return nil, errors.New("Object not found")
|
||||
}
|
||||
reader, writer := io.Pipe()
|
||||
go eStorage.readObject(objectPath, entry, writer)
|
||||
return reader, nil
|
||||
}
|
||||
|
||||
// List returns a description (name, hex md5, hex murmur3) of every
// stored object whose name starts with objectPath; it errors when
// nothing matches.
func (eStorage *encodedStorage) List(objectPath string) ([]storage.ObjectDescription, error) {
	var objectDescList []storage.ObjectDescription
	for objectName, objectEntry := range eStorage.objects {
		if strings.HasPrefix(objectName, objectPath) {
			var objectDescription storage.ObjectDescription
			objectDescription.Name = objectName
			// Hashes are stored raw; render them as hex strings.
			objectDescription.Md5sum = hex.EncodeToString(objectEntry.Md5sum)
			objectDescription.Murmur3 = strconv.FormatUint(objectEntry.Murmurhash, 16)
			objectDescList = append(objectDescList, objectDescription)
		}
	}
	if len(objectDescList) == 0 {
		return nil, errors.New("No objects found")
	}
	return objectDescList, nil
}
|
||||
|
||||
func (eStorage *encodedStorage) Put(objectPath string, object io.Reader) error {
|
||||
// split
|
||||
chunks := split.SplitStream(object, eStorage.BlockSize)
|
||||
|
||||
// for each chunk
|
||||
encoderParameters, err := erasure.ParseEncoderParams(eStorage.K, eStorage.M, erasure.CAUCHY)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encoder := erasure.NewEncoder(encoderParameters)
|
||||
entry := StorageEntry{
|
||||
Path: objectPath,
|
||||
Md5sum: nil,
|
||||
Murmurhash: 0,
|
||||
Blocks: make([]StorageBlockEntry, 0),
|
||||
Encoderparams: erasure.EncoderParams{
|
||||
K: eStorage.K,
|
||||
M: eStorage.M,
|
||||
Technique: erasure.CAUCHY,
|
||||
},
|
||||
}
|
||||
// Hash
|
||||
murmur := murmur3.Sum64([]byte(objectPath))
|
||||
// allocate md5
|
||||
hash := md5.New()
|
||||
i := 0
|
||||
// encode
|
||||
for chunk := range chunks {
|
||||
if chunk.Err == nil {
|
||||
// encode each
|
||||
blocks, length := encoder.Encode(chunk.Data)
|
||||
// store each
|
||||
storeErrors := eStorage.storeBlocks(objectPath+"$"+strconv.Itoa(i), blocks)
|
||||
for _, err := range storeErrors {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// md5sum only after chunk is committed to disk
|
||||
hash.Write(chunk.Data)
|
||||
blockEntry := StorageBlockEntry{
|
||||
Index: i,
|
||||
Length: length,
|
||||
}
|
||||
entry.Blocks = append(entry.Blocks, blockEntry)
|
||||
} else {
|
||||
return chunk.Err
|
||||
}
|
||||
i++
|
||||
}
|
||||
entry.Md5sum = hash.Sum(nil)
|
||||
entry.Murmurhash = murmur
|
||||
eStorage.objects[objectPath] = entry
|
||||
var gobBuffer bytes.Buffer
|
||||
gobEncoder := gob.NewEncoder(&gobBuffer)
|
||||
gobEncoder.Encode(eStorage.objects)
|
||||
ioutil.WriteFile(path.Join(eStorage.RootDir, "index"), gobBuffer.Bytes(), 0600)
|
||||
return nil
|
||||
}
|
||||
|
||||
// storeBlocks writes block i to slice store i concurrently and collects
// any per-slice errors; an empty result means every write succeeded.
func (eStorage *encodedStorage) storeBlocks(path string, blocks [][]byte) []error {
	returnChannels := make([]<-chan error, len(eStorage.diskStorage))
	for i, store := range eStorage.diskStorage {
		returnChannels[i] = storageRoutine(store, path, bytes.NewBuffer(blocks[i]))
	}
	returnErrors := make([]error, 0)
	// Draining every channel also waits for every goroutine to finish.
	for _, returnChannel := range returnChannels {
		for returnValue := range returnChannel {
			if returnValue != nil {
				returnErrors = append(returnErrors, returnValue)
			}
		}
	}
	return returnErrors
}
|
||||
|
||||
func (eStorage *encodedStorage) readObject(objectPath string, entry StorageEntry, writer *io.PipeWriter) {
|
||||
ep, err := erasure.ParseEncoderParams(entry.Encoderparams.K, entry.Encoderparams.M, entry.Encoderparams.Technique)
|
||||
if err != nil {
|
||||
writer.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
encoder := erasure.NewEncoder(ep)
|
||||
for i, chunk := range entry.Blocks {
|
||||
blockSlices := eStorage.getBlockSlices(objectPath + "$" + strconv.Itoa(i))
|
||||
if len(blockSlices) == 0 {
|
||||
writer.CloseWithError(errors.New("slices missing!!"))
|
||||
return
|
||||
}
|
||||
var blocks [][]byte
|
||||
for _, slice := range blockSlices {
|
||||
if slice.err != nil {
|
||||
writer.CloseWithError(slice.err)
|
||||
return
|
||||
}
|
||||
blocks = append(blocks, slice.data)
|
||||
}
|
||||
data, err := encoder.Decode(blocks, chunk.Length)
|
||||
if err != nil {
|
||||
writer.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
bytesWritten := 0
|
||||
for bytesWritten != len(data) {
|
||||
written, err := writer.Write(data[bytesWritten:len(data)])
|
||||
if err != nil {
|
||||
writer.CloseWithError(err)
|
||||
}
|
||||
bytesWritten += written
|
||||
}
|
||||
}
|
||||
writer.Close()
|
||||
}
|
||||
|
||||
// getBlockSlices fetches the block stored under objectPath from every
// slice store in parallel, returning one response per store.
func (eStorage *encodedStorage) getBlockSlices(objectPath string) []storeResponse {
	responses := make([]<-chan storeResponse, 0)
	for i := 0; i < len(eStorage.diskStorage); i++ {
		response := getSlice(eStorage.diskStorage[i], objectPath)
		responses = append(responses, response)
	}
	results := make([]storeResponse, 0)
	// Receiving in launch order keeps results aligned with slice index.
	for _, response := range responses {
		results = append(results, <-response)
	}
	return results
}
|
||||
|
||||
func getSlice(store storage.ObjectStorage, path string) <-chan storeResponse {
|
||||
out := make(chan storeResponse)
|
||||
go func() {
|
||||
obj, err := store.Get(path)
|
||||
if err != nil {
|
||||
out <- storeResponse{data: nil, err: err}
|
||||
} else {
|
||||
data, err := ioutil.ReadAll(obj)
|
||||
out <- storeResponse{data: data, err: err}
|
||||
}
|
||||
close(out)
|
||||
}()
|
||||
return out
|
||||
}
|
||||
|
||||
func storageRoutine(store storage.ObjectStorage, path string, data io.Reader) <-chan error {
|
||||
out := make(chan error)
|
||||
go func() {
|
||||
if err := store.Put(path, data); err != nil {
|
||||
out <- err
|
||||
}
|
||||
close(out)
|
||||
}()
|
||||
return out
|
||||
}
|
||||
@@ -1,99 +0,0 @@
|
||||
package encodedstorage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/minio-io/minio/pkg/storage"
|
||||
"github.com/minio-io/minio/pkg/utils"
|
||||
. "gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
// EncodedStorageSuite wires these tests into the gocheck runner.
type EncodedStorageSuite struct{}

var _ = Suite(&EncodedStorageSuite{})

// Test hooks gocheck into the standard "go test" entry point.
func Test(t *testing.T) { TestingT(t) }
|
||||
|
||||
func (s *EncodedStorageSuite) TestFileStoragePutAtRootPath(c *C) {
|
||||
rootDir, err := utils.MakeTempTestDir()
|
||||
c.Assert(err, IsNil)
|
||||
defer os.RemoveAll(rootDir)
|
||||
|
||||
var objectStorage storage.ObjectStorage
|
||||
objectStorage, err = NewStorage(rootDir, 10, 6, 1024)
|
||||
c.Assert(err, IsNil)
|
||||
objectBuffer := bytes.NewBuffer([]byte("object1"))
|
||||
objectStorage.Put("path1", objectBuffer)
|
||||
|
||||
// assert object1 was created in correct path
|
||||
objectResult1, err := objectStorage.Get("path1")
|
||||
c.Assert(err, IsNil)
|
||||
object1, _ := ioutil.ReadAll(objectResult1)
|
||||
c.Assert(string(object1), Equals, "object1")
|
||||
|
||||
objectList, err := objectStorage.List("")
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(objectList[0].Name, Equals, "path1")
|
||||
}
|
||||
|
||||
func (s *EncodedStorageSuite) TestFileStoragePutDirPath(c *C) {
|
||||
rootDir, err := utils.MakeTempTestDir()
|
||||
c.Assert(err, IsNil)
|
||||
defer os.RemoveAll(rootDir)
|
||||
|
||||
var objectStorage storage.ObjectStorage
|
||||
objectStorage, err = NewStorage(rootDir, 10, 6, 1024)
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
objectBuffer1 := bytes.NewBuffer([]byte("object1"))
|
||||
objectStorage.Put("path1/path2/path3", objectBuffer1)
|
||||
|
||||
// assert object1 was created in correct path
|
||||
objectResult1, err := objectStorage.Get("path1/path2/path3")
|
||||
c.Assert(err, IsNil)
|
||||
object1, _ := ioutil.ReadAll(objectResult1)
|
||||
c.Assert(string(object1), Equals, "object1")
|
||||
|
||||
// add second object
|
||||
objectBuffer2 := bytes.NewBuffer([]byte("object2"))
|
||||
err = objectStorage.Put("path2/path2/path2", objectBuffer2)
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
// add third object
|
||||
objectBuffer3 := bytes.NewBuffer([]byte("object3"))
|
||||
err = objectStorage.Put("object3", objectBuffer3)
|
||||
c.Assert(err, IsNil)
|
||||
}
|
||||
|
||||
// TestObjectWithChunking stores an object larger than the 1024-byte
// block size, reopens the store with a fresh instance, and verifies the
// reassembled bytes match the original exactly.
func (s *EncodedStorageSuite) TestObjectWithChunking(c *C) {
	rootDir, err := utils.MakeTempTestDir()
	c.Assert(err, IsNil)
	defer os.RemoveAll(rootDir)

	var objectStorage storage.ObjectStorage
	objectStorage, err = NewStorage(rootDir, 10, 6, 1024)
	c.Assert(err, IsNil)

	// build a payload that spans several 1024-byte blocks
	var buffer bytes.Buffer
	for i := 0; i <= 2048; i++ {
		buffer.Write([]byte(strconv.Itoa(i)))
	}

	reader := bytes.NewReader(buffer.Bytes())

	err = objectStorage.Put("object", reader)
	c.Assert(err, IsNil)

	// a fresh instance must reassemble the object from disk
	objectStorage2, err := NewStorage(rootDir, 10, 6, 1024)
	c.Assert(err, IsNil)
	objectResult, err := objectStorage2.Get("object")
	c.Assert(err, IsNil)
	result, err := ioutil.ReadAll(objectResult)
	c.Assert(err, IsNil)
	c.Assert(bytes.Compare(result, buffer.Bytes()), Equals, 0)
}
|
||||
@@ -1,15 +1 @@
|
||||
package storage
|
||||
|
||||
import "io"
|
||||
|
||||
// ObjectStorage is the common contract implemented by every backing
// store: list objects by prefix, read one, and write one.
type ObjectStorage interface {
	// List describes every stored object whose name starts with objectPath.
	List(objectPath string) ([]ObjectDescription, error)
	// Get returns a reader over the object stored at path.
	Get(path string) (io.Reader, error)
	// Put stores the bytes read from object under path.
	Put(path string, object io.Reader) error
}
|
||||
|
||||
// ObjectDescription is the listing record returned by List.
type ObjectDescription struct {
	Name    string // object path
	Md5sum  string // hex-encoded md5 of the content; "" when the store does not track it
	Murmur3 string // hex-encoded murmur3; "" when the store does not track it
}
|
||||
|
||||
Reference in New Issue
Block a user