minio/pkgs/storage/encodedstorage/encoded_storage.go

289 lines
7.4 KiB
Go
Raw Normal View History

2014-12-13 17:27:06 -05:00
/*
* Mini Object Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
2014-12-11 03:32:32 -05:00
package encodedstorage
import (
"bytes"
2014-12-11 22:43:58 -05:00
"crypto/md5"
2014-12-11 03:32:32 -05:00
"encoding/gob"
"encoding/hex"
2014-12-11 03:32:32 -05:00
"errors"
"io"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
2014-12-11 03:32:32 -05:00
"github.com/minio-io/minio/pkgs/erasure"
"github.com/minio-io/minio/pkgs/split"
"github.com/minio-io/minio/pkgs/storage"
"github.com/minio-io/minio/pkgs/storage/appendstorage"
"github.com/spaolacci/murmur3"
2014-12-11 03:32:32 -05:00
)
// encodedStorage stores objects erasure-coded across K+M underlying
// append-only disk stores, keeping an in-memory object index that is
// persisted to a gob-encoded "index" file under RootDir.
type encodedStorage struct {
	RootDir     string                  // root directory holding the per-disk stores and the "index" file
	K           int                     // number of data blocks per chunk
	M           int                     // number of parity blocks per chunk
	BlockSize   uint64                  // chunk size used when splitting incoming objects
	objects     map[string]StorageEntry // index: object path -> stored-object metadata
	diskStorage []storage.ObjectStorage // K+M underlying stores, one per block slot
}
2014-12-11 04:45:58 -05:00
type StorageEntry struct {
Path string
2014-12-11 22:43:58 -05:00
Md5sum []byte
Murmurhash uint64
2014-12-11 04:45:58 -05:00
Blocks []StorageBlockEntry
Encoderparams erasure.EncoderParams
2014-12-11 04:45:58 -05:00
}
// StorageBlockEntry describes one erasure-coded chunk of an object.
type StorageBlockEntry struct {
	Index  int // chunk sequence number within the object (suffix of the on-disk name)
	Length int // original (pre-encoding) chunk length in bytes, needed for decode
}
// storeRequest pairs a destination path with the data to write there.
// NOTE(review): not referenced anywhere in this file — possibly left over
// from a refactor; confirm it is unused before removing.
type storeRequest struct {
	path string
	data []byte
}
// storeResponse carries the result of reading one block slice from a disk store.
type storeResponse struct {
	data []byte // block contents on success, nil on failure
	err  error  // non-nil if the read failed
}
2014-12-11 03:32:32 -05:00
func NewStorage(rootDir string, k, m int, blockSize uint64) (storage.ObjectStorage, error) {
// create storage files
if k == 0 || m == 0 {
return nil, errors.New("Invalid protection level")
}
2014-12-11 04:45:58 -05:00
storageNodes := make([]storage.ObjectStorage, k+m)
for i := 0; i < k+m; i++ {
2014-12-11 03:32:32 -05:00
storageNode, err := appendstorage.NewStorage(rootDir, i)
storageNodes[i] = storageNode
if err != nil {
return nil, err
}
}
objects := make(map[string]StorageEntry)
indexPath := path.Join(rootDir, "index")
if _, err := os.Stat(indexPath); err == nil {
indexFile, err := os.Open(indexPath)
defer indexFile.Close()
if err != nil {
return nil, err
}
encoder := gob.NewDecoder(indexFile)
err = encoder.Decode(&objects)
2014-12-11 04:45:58 -05:00
if err != nil && err != io.EOF {
2014-12-11 03:32:32 -05:00
return nil, err
}
}
newStorage := encodedStorage{
RootDir: rootDir,
K: k,
M: m,
BlockSize: blockSize,
objects: objects,
diskStorage: storageNodes,
}
return &newStorage, nil
}
func (eStorage *encodedStorage) Get(objectPath string) (io.Reader, error) {
entry, ok := eStorage.objects[objectPath]
if ok == false {
return nil, errors.New("Object not found")
2014-12-11 03:32:32 -05:00
}
reader, writer := io.Pipe()
go eStorage.readObject(objectPath, entry, writer)
return reader, nil
}
func (eStorage *encodedStorage) List(objectPath string) ([]storage.ObjectDescription, error) {
var objectDescList []storage.ObjectDescription
for objectName, objectEntry := range eStorage.objects {
if strings.HasPrefix(objectName, objectPath) {
var objectDescription storage.ObjectDescription
objectDescription.Name = objectName
objectDescription.Md5sum = hex.EncodeToString(objectEntry.Md5sum)
objectDescription.Murmur3 = strconv.FormatUint(objectEntry.Murmurhash, 16)
objectDescList = append(objectDescList, objectDescription)
}
}
if len(objectDescList) == 0 {
return nil, errors.New("No objects found")
}
return objectDescList, nil
2014-12-11 03:32:32 -05:00
}
func (eStorage *encodedStorage) Put(objectPath string, object io.Reader) error {
// split
chunks := split.SplitStream(object, eStorage.BlockSize)
2014-12-11 03:32:32 -05:00
// for each chunk
encoderParameters, err := erasure.ParseEncoderParams(eStorage.K, eStorage.M, erasure.CAUCHY)
if err != nil {
return err
}
encoder := erasure.NewEncoder(encoderParameters)
entry := StorageEntry{
Path: objectPath,
Md5sum: nil,
Murmurhash: 0,
Blocks: make([]StorageBlockEntry, 0),
Encoderparams: erasure.EncoderParams{
K: eStorage.K,
M: eStorage.M,
Technique: erasure.CAUCHY,
},
2014-12-11 03:32:32 -05:00
}
// Hash
murmur := murmur3.Sum64([]byte(objectPath))
2014-12-11 22:43:58 -05:00
// allocate md5
hash := md5.New()
2014-12-11 03:32:32 -05:00
i := 0
// encode
for chunk := range chunks {
if chunk.Err == nil {
// encode each
blocks, length := encoder.Encode(chunk.Data)
// store each
storeErrors := eStorage.storeBlocks(objectPath+"$"+strconv.Itoa(i), blocks)
for _, err := range storeErrors {
if err != nil {
return err
}
}
// md5sum only after chunk is committed to disk
hash.Write(chunk.Data)
2014-12-11 03:32:32 -05:00
blockEntry := StorageBlockEntry{
Index: i,
Length: length,
}
entry.Blocks = append(entry.Blocks, blockEntry)
} else {
return chunk.Err
}
i++
}
2014-12-11 22:43:58 -05:00
entry.Md5sum = hash.Sum(nil)
entry.Murmurhash = murmur
2014-12-11 03:32:32 -05:00
eStorage.objects[objectPath] = entry
var gobBuffer bytes.Buffer
gobEncoder := gob.NewEncoder(&gobBuffer)
gobEncoder.Encode(eStorage.objects)
ioutil.WriteFile(path.Join(eStorage.RootDir, "index"), gobBuffer.Bytes(), 0600)
return nil
}
// storeBlocks writes blocks[i] to disk store i under the given path, all
// stores concurrently, and returns every error encountered (empty slice
// when all writes succeed).
func (eStorage *encodedStorage) storeBlocks(path string, blocks [][]byte) []error {
	// fan out: one background writer per disk store
	channels := make([]<-chan error, len(eStorage.diskStorage))
	for i, store := range eStorage.diskStorage {
		channels[i] = storageRoutine(store, path, bytes.NewBuffer(blocks[i]))
	}
	// fan in: a successful store's channel closes without sending anything
	errs := make([]error, 0)
	for _, ch := range channels {
		for err := range ch {
			if err != nil {
				errs = append(errs, err)
			}
		}
	}
	return errs
}
func (eStorage *encodedStorage) readObject(objectPath string, entry StorageEntry, writer *io.PipeWriter) {
ep, err := erasure.ParseEncoderParams(entry.Encoderparams.K, entry.Encoderparams.M, entry.Encoderparams.Technique)
if err != nil {
writer.CloseWithError(err)
return
}
encoder := erasure.NewEncoder(ep)
2014-12-11 03:32:32 -05:00
for i, chunk := range entry.Blocks {
blockSlices := eStorage.getBlockSlices(objectPath + "$" + strconv.Itoa(i))
if len(blockSlices) == 0 {
writer.CloseWithError(errors.New("slices missing!!"))
return
}
2014-12-11 03:32:32 -05:00
var blocks [][]byte
for _, slice := range blockSlices {
if slice.err != nil {
2014-12-11 04:45:58 -05:00
writer.CloseWithError(slice.err)
2014-12-11 03:32:32 -05:00
return
}
blocks = append(blocks, slice.data)
}
data, err := encoder.Decode(blocks, chunk.Length)
if err != nil {
writer.CloseWithError(err)
return
}
bytesWritten := 0
for bytesWritten != len(data) {
written, err := writer.Write(data[bytesWritten:len(data)])
if err != nil {
writer.CloseWithError(err)
}
bytesWritten += written
}
}
writer.Close()
}
// getBlockSlices reads the block stored under objectPath from every disk
// store in parallel and returns one response per store, in store order.
func (eStorage *encodedStorage) getBlockSlices(objectPath string) []storeResponse {
	storeCount := len(eStorage.diskStorage)
	// launch all reads before collecting any result
	pending := make([]<-chan storeResponse, 0, storeCount)
	for _, store := range eStorage.diskStorage {
		pending = append(pending, getSlice(store, objectPath))
	}
	results := make([]storeResponse, 0, storeCount)
	for _, ch := range pending {
		results = append(results, <-ch)
	}
	return results
}
// getSlice asynchronously reads the object at path from store. The returned
// channel delivers exactly one storeResponse (data or error) and then closes.
func getSlice(store storage.ObjectStorage, path string) <-chan storeResponse {
	out := make(chan storeResponse)
	go func() {
		defer close(out)
		reader, err := store.Get(path)
		if err != nil {
			out <- storeResponse{data: nil, err: err}
			return
		}
		data, err := ioutil.ReadAll(reader)
		out <- storeResponse{data: data, err: err}
	}()
	return out
}
// storageRoutine asynchronously writes data to store under path. The
// returned channel yields the error if the write fails, then closes; on
// success it closes without sending anything.
func storageRoutine(store storage.ObjectStorage, path string, data io.Reader) <-chan error {
	out := make(chan error)
	go func() {
		defer close(out)
		err := store.Put(path, data)
		if err != nil {
			out <- err
		}
	}()
	return out
}