Repository: https://github.com/minio/minio.git
fsStorage should use appendStorage with offsets

- also takes in 'blockSize' cli option
- robustness fixes

parent b570b486de
commit fc50291cac

Makefile (2 lines changed)
@@ -32,7 +32,7 @@ build-split: build-strbyteconv
 build-strbyteconv:
     @godep go test -race -coverprofile=cover.out github.com/minio-io/minio/pkgs/strbyteconv
 
-build-storage: build-storage-fs build-storage-append build-storage-encoded
+build-storage: build-storage-append build-storage-encoded build-storage-fs
 
 build-storage-fs:
     @godep go test -race -coverprofile=cover.out github.com/minio-io/minio/pkgs/storage/fsstorage
@@ -14,7 +14,7 @@ import (
 func fsGetList(config inputConfig) (io.Reader, error) {
     var objectStorage storage.ObjectStorage
     rootDir := path.Join(config.rootDir, config.storageDriver)
-    objectStorage, _ = fsstorage.NewStorage(rootDir)
+    objectStorage, _ = fsstorage.NewStorage(rootDir, config.blockSize)
     objectList, err := objectStorage.List()
     if err != nil {
         return nil, err
@@ -30,7 +30,7 @@ func fsGetList(config inputConfig) (io.Reader, error) {
 func fsGet(config inputConfig, objectPath string) (io.Reader, error) {
     var objectStorage storage.ObjectStorage
     rootDir := path.Join(config.rootDir, config.storageDriver)
-    objectStorage, _ = fsstorage.NewStorage(rootDir)
+    objectStorage, _ = fsstorage.NewStorage(rootDir, config.blockSize)
     object, err := objectStorage.Get(objectPath)
     if err != nil {
         return nil, err
@@ -45,7 +45,7 @@ func fsPut(config inputConfig, objectPath string, reader io.Reader) error {
         return err
     }
     var objectStorage storage.ObjectStorage
-    objectStorage, _ = fsstorage.NewStorage(rootDir)
+    objectStorage, _ = fsstorage.NewStorage(rootDir, config.blockSize)
     if err = objectStorage.Put(objectPath, reader); err != nil {
         return err
     }
@@ -46,5 +46,8 @@ func get(c *cli.Context) {
         log.Fatal("Unknown driver")
     }
     }
+    if objectReader == nil {
+        log.Fatal("Object not found")
+    }
     io.Copy(os.Stdout, objectReader)
 }
@@ -72,6 +72,16 @@ func main() {
                Value: "erasure",
                Usage: "erasure",
            },
+           cli.StringFlag{
+               Name:  "protection-level",
+               Value: "10,6",
+               Usage: "data,parity",
+           },
+           cli.StringFlag{
+               Name:  "block-size",
+               Value: "1M",
+               Usage: "Size of blocks. Examples: 1K, 1M, full",
+           },
        },
    },
    {
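The new "block-size" flag is a human-readable size string. The repository already carries a strbyteconv package (exercised in the Makefile hunk above), which presumably performs this conversion; its exact API is not shown in this diff, so the listing below uses a hypothetical, self-contained parseBlockSize helper purely to illustrate how values such as "1K", "1M", or "full" might map to the uint64 block size that fsstorage.NewStorage now takes. Treating 0 as "full" (no splitting) is an assumption of this sketch, not something the commit states.

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parseBlockSize is a hypothetical helper, not part of the minio codebase.
// It converts CLI values such as "1K", "1M", or "full" into a byte count.
// In this sketch, 0 is used to mean "full" (do not split the object).
func parseBlockSize(value string) (uint64, error) {
    value = strings.TrimSpace(value)
    if strings.EqualFold(value, "full") {
        return 0, nil // no splitting
    }
    multiplier := uint64(1)
    switch {
    case strings.HasSuffix(value, "K"), strings.HasSuffix(value, "k"):
        multiplier = 1024
        value = value[:len(value)-1]
    case strings.HasSuffix(value, "M"), strings.HasSuffix(value, "m"):
        multiplier = 1024 * 1024
        value = value[:len(value)-1]
    }
    n, err := strconv.ParseUint(value, 10, 64)
    if err != nil {
        return 0, fmt.Errorf("invalid block size %q: %v", value, err)
    }
    return n * multiplier, nil
}

func main() {
    size, err := parseBlockSize("1M")
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(size) // 1048576
}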
@@ -228,7 +228,7 @@ func SimpleEncodedStorageDriver(bucket string, input chan ObjectRequest, config
 }
 
 func SimpleFileStorageDriver(bucket string, input chan ObjectRequest, config GatewayConfig) {
-    fileStorage, _ := fsstorage.NewStorage(config.DataDir)
+    fileStorage, _ := fsstorage.NewStorage(config.DataDir, config.BlockSize)
     for request := range input {
        switch request.requestType {
        case "GET":
@@ -51,6 +51,10 @@ type storeResponse struct {
 
 func NewStorage(rootDir string, k, m int, blockSize uint64) (storage.ObjectStorage, error) {
     // create storage files
+    if k == 0 || m == 0 {
+        return nil, errors.New("Invalid protection level")
+    }
+
     storageNodes := make([]storage.ObjectStorage, k+m)
     for i := 0; i < k+m; i++ {
        storageNode, err := appendstorage.NewStorage(rootDir, i)
@@ -192,6 +196,10 @@ func (eStorage *encodedStorage) readObject(objectPath string, entry StorageEntry
     encoder := erasure.NewEncoder(ep)
     for i, chunk := range entry.Blocks {
        blockSlices := eStorage.getBlockSlices(objectPath + "$" + strconv.Itoa(i))
+       if len(blockSlices) == 0 {
+           writer.CloseWithError(errors.New("slices missing!!"))
+           return
+       }
        var blocks [][]byte
        for _, slice := range blockSlices {
            if slice.err != nil {
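For context, the new "protection-level" flag carries the "data,parity" pair (default "10,6") that ends up as the k and m arguments validated above in encodedstorage.NewStorage. As an illustration only, splitting that flag value could look like the hypothetical helper below; parseProtectionLevel is not part of this commit.

package main

import (
    "errors"
    "fmt"
    "strconv"
    "strings"
)

// parseProtectionLevel is a hypothetical helper: it splits a "data,parity"
// string such as "10,6" into the k (data) and m (parity) block counts.
func parseProtectionLevel(value string) (k, m int, err error) {
    parts := strings.Split(value, ",")
    if len(parts) != 2 {
        return 0, 0, errors.New("protection level must be of the form data,parity")
    }
    if k, err = strconv.Atoi(strings.TrimSpace(parts[0])); err != nil {
        return 0, 0, err
    }
    if m, err = strconv.Atoi(strings.TrimSpace(parts[1])); err != nil {
        return 0, 0, err
    }
    if k == 0 || m == 0 {
        return 0, 0, errors.New("Invalid protection level")
    }
    return k, m, nil
}

func main() {
    k, m, err := parseProtectionLevel("10,6")
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(k, m) // 10 6
}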
@@ -1,57 +1,156 @@
 package fsstorage
 
 import (
+    "bytes"
+    "crypto/md5"
+    "encoding/gob"
+    "encoding/hex"
+    "errors"
     "io"
     "io/ioutil"
     "os"
     "path"
-    "path/filepath"
+    "strconv"
 
+    "github.com/minio-io/minio/pkgs/split"
     "github.com/minio-io/minio/pkgs/storage"
+    "github.com/minio-io/minio/pkgs/storage/appendstorage"
 )
 
 type fileSystemStorage struct {
     RootDir string
+    BlockSize uint64
+    diskStorage []storage.ObjectStorage
+    objects map[string]StorageEntry
 }
 
-func NewStorage(rootDir string) (storage.ObjectStorage, error) {
+type StorageEntry struct {
+    Path string
+    Md5sum []byte
+    ChunkLength int
+}
+
+func NewStorage(rootDir string, blockSize uint64) (storage.ObjectStorage, error) {
+    var storageNodes []storage.ObjectStorage
+    storageNode, err := appendstorage.NewStorage(rootDir, 0)
+    if err != nil {
+        return nil, err
+    }
+    storageNodes = append(storageNodes, storageNode)
+    objects := make(map[string]StorageEntry)
+    indexPath := path.Join(rootDir, "index")
+    if _, err := os.Stat(indexPath); err == nil {
+        indexFile, err := os.Open(indexPath)
+        defer indexFile.Close()
+        if err != nil {
+            return nil, err
+        }
+        encoder := gob.NewDecoder(indexFile)
+        err = encoder.Decode(&objects)
+        if err != nil && err != io.EOF {
+            return nil, err
+        }
+    }
     newStorage := fileSystemStorage{
        RootDir: rootDir,
+       diskStorage: storageNodes,
+       BlockSize: blockSize,
+       objects: objects,
     }
     return &newStorage, nil
 }
 
 func (fsStorage *fileSystemStorage) List() ([]storage.ObjectDescription, error) {
-    fileInfos, err := ioutil.ReadDir(fsStorage.RootDir)
-    if err != nil {
-        return nil, err
+    var objectDescList []storage.ObjectDescription
+    for objectName, objectEntry := range fsStorage.objects {
+        var objectDescription storage.ObjectDescription
+        objectDescription.Name = objectName
+        objectDescription.Md5sum = hex.EncodeToString(objectEntry.Md5sum)
+        objectDescription.Protectionlevel = ""
+        objectDescList = append(objectDescList, objectDescription)
     }
+    if len(objectDescList) == 0 {
+        return nil, errors.New("No objects found")
+    }
+    return objectDescList, nil
+}
 
-    var descriptions []storage.ObjectDescription
+func (fsStorage *fileSystemStorage) Get(objectPath string) (io.Reader, error) {
+    entry, ok := fsStorage.objects[objectPath]
+    if ok == false {
+        return nil, nil
+    }
+    reader, writer := io.Pipe()
+    go fsStorage.readObject(objectPath, entry, writer)
+    return reader, nil
+}
 
-    for _, fi := range fileInfos {
-        description := storage.ObjectDescription{
-            Name: fi.Name(),
-            Md5sum: "",
-            Protectionlevel: "",
+func (fsStorage *fileSystemStorage) readObject(objectPath string, entry StorageEntry, writer *io.PipeWriter) {
+    appendStorage := fsStorage.diskStorage[0]
+    for i := 0; i < entry.ChunkLength; i++ {
+        chunkObjectPath := objectPath + "$" + strconv.Itoa(i)
+        chunkObject, err := appendStorage.Get(chunkObjectPath)
+
+        if err != nil {
+            writer.CloseWithError(err)
+        }
+        data, readErr := ioutil.ReadAll(chunkObject)
+
+        if readErr != nil {
+            writer.CloseWithError(readErr)
+        }
+        bytesWritten := 0
+        for bytesWritten != len(data) {
+            written, err := writer.Write(data[bytesWritten:len(data)])
+            if err != nil {
+                writer.CloseWithError(err)
+            }
+            bytesWritten += written
         }
-        descriptions = append(descriptions, description)
     }
-    return descriptions, nil
+    writer.Close()
 }
 
-func (storage *fileSystemStorage) Get(objectPath string) (io.Reader, error) {
-    return os.Open(path.Join(storage.RootDir, objectPath))
+func (fsStorage *fileSystemStorage) Put(objectPath string, object io.Reader) error {
+    // split
+    chunks := make(chan split.SplitMessage)
+    go split.SplitStream(object, fsStorage.BlockSize, chunks)
+
+    entry := StorageEntry{
+        Path: objectPath,
+        Md5sum: nil,
+        ChunkLength: 0,
+    }
+
+    hash := md5.New()
+    i := 0
+    for chunk := range chunks {
+        if chunk.Err != nil {
+            return chunk.Err
+        }
+        err := fsStorage.storeBlocks(objectPath, i, chunk.Data)
+        if err != nil {
+            return err
+        }
+        // md5sum only after chunk is committed to disk
+        hash.Write(chunk.Data)
+        i++
+    }
+    entry.Md5sum = hash.Sum(nil)
+    entry.ChunkLength = i
+    fsStorage.objects[objectPath] = entry
+    var gobBuffer bytes.Buffer
+    gobEncoder := gob.NewEncoder(&gobBuffer)
+    gobEncoder.Encode(fsStorage.objects)
+    ioutil.WriteFile(path.Join(fsStorage.RootDir, "index"), gobBuffer.Bytes(), 0600)
+    return nil
 }
 
-func (storage *fileSystemStorage) Put(objectPath string, object io.Reader) error {
-    err := os.MkdirAll(filepath.Dir(path.Join(storage.RootDir, objectPath)), 0700)
-    if err != nil {
+func (fsStorage *fileSystemStorage) storeBlocks(objectPath string, index int, chunk []byte) error {
+    appendStorage := fsStorage.diskStorage[0]
+    path := objectPath + "$" + strconv.Itoa(index)
+    if err := appendStorage.Put(path, bytes.NewBuffer(chunk)); err != nil {
        return err
     }
-    objectBytes, err := ioutil.ReadAll(object)
-    if err != nil {
-        return err
-    }
-    return ioutil.WriteFile(path.Join(storage.RootDir, objectPath), objectBytes, 0600)
+    return nil
 }
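Based only on the signatures in the rewritten fsstorage above, a caller now drives the chunked file-system backend roughly as follows. This is a sketch, not code from the repository: the data directory and the 1 MB block size are arbitrary example values, the directory is assumed to already exist, and error handling is collapsed to panic for brevity.

package main

import (
    "bytes"
    "fmt"
    "io/ioutil"

    "github.com/minio-io/minio/pkgs/storage/fsstorage"
)

func main() {
    // NewStorage now takes a block size; Put splits the object into
    // blockSize chunks and appends them to a single appendstorage node
    // under keys of the form "<objectPath>$<chunkIndex>".
    objectStorage, err := fsstorage.NewStorage("/tmp/minio-fs", 1024*1024)
    if err != nil {
        panic(err)
    }

    if err := objectStorage.Put("path1", bytes.NewBufferString("object1")); err != nil {
        panic(err)
    }

    // Get streams the stored chunks back in order through an io.Pipe.
    reader, err := objectStorage.Get("path1")
    if err != nil {
        panic(err)
    }
    data, err := ioutil.ReadAll(reader)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(data)) // object1

    // List is served from the gob-encoded index and reports each object's md5sum.
    descriptions, err := objectStorage.List()
    if err != nil {
        panic(err)
    }
    for _, description := range descriptions {
        fmt.Println(description.Name, description.Md5sum)
    }
}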
@@ -26,15 +26,19 @@ func (s *fileSystemStorageSuite) TestfileStoragePutAtRootPath(c *C) {
     defer os.RemoveAll(rootDir)
 
     var objectStorage storage.ObjectStorage
-    objectStorage, _ = NewStorage(rootDir)
+    objectStorage, _ = NewStorage(rootDir, 1024)
 
     objectBuffer := bytes.NewBuffer([]byte("object1"))
-    objectStorage.Put("path1", objectBuffer)
+    err = objectStorage.Put("path1", objectBuffer)
+    c.Assert(err, IsNil)
 
     // assert object1 was created in correct path
     objectResult1, err := objectStorage.Get("path1")
     c.Assert(err, IsNil)
-    object1, _ := ioutil.ReadAll(objectResult1)
+
+    object1, err := ioutil.ReadAll(objectResult1)
+    c.Assert(err, IsNil)
+
     c.Assert(string(object1), Equals, "object1")
 
     objectList, err := objectStorage.List()