/*
 * Mini Object Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package encoded

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"errors"
	"io"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/minio-io/minio/pkg/donutbox"
	"github.com/minio-io/minio/pkg/encoding/erasure"
	"github.com/minio-io/minio/pkg/storage"
	"github.com/minio-io/minio/pkg/utils/split"
)

// StorageDriver is a single disk storage driver using donut with erasure encoding.
type StorageDriver struct {
	donutBox donutbox.DonutBox
}

const (
	// blockSize is the size of each chunk split off the input stream before encoding
	blockSize = 10 * 1024 * 1024
)

// Start starts a single disk subsystem and returns its control channel,
// error channel, and storage driver
func Start(donutBox donutbox.DonutBox) (chan<- string, <-chan error, storage.Storage) {
	ctrlChannel := make(chan string)
	errorChannel := make(chan error)
	s := new(StorageDriver)
	s.donutBox = donutBox
	go start(ctrlChannel, errorChannel, s)
	return ctrlChannel, errorChannel, s
}

func start(ctrlChannel <-chan string, errorChannel chan<- error, s *StorageDriver) {
	close(errorChannel)
}
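
// A minimal usage sketch (the donutbox.DonutBox value `box` and the error
// handling are illustrative, not part of this package):
//
//	ctrl, errs, store := Start(box)
//	_ = ctrl                // control channel; no commands are consumed yet
//	for err := range errs { // start() closes the channel immediately,
//		log.Println(err) // so this loop exits right away
//	}
//	if err := store.CreateBucket("mybucket"); err != nil {
//		log.Fatalln(err)
//	}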

// ListBuckets returns a list of buckets
func (diskStorage StorageDriver) ListBuckets() ([]storage.BucketMetadata, error) {
	return nil, errors.New("Not Implemented")
}

// CreateBucket creates a new bucket
func (diskStorage StorageDriver) CreateBucket(bucket string) error {
	return diskStorage.donutBox.CreateBucket(bucket)
}

// CreateBucketPolicy sets a bucket's access policy
func (diskStorage StorageDriver) CreateBucketPolicy(bucket string, p storage.BucketPolicy) error {
	return errors.New("Not Implemented")
}

// GetBucketPolicy returns a bucket's access policy
func (diskStorage StorageDriver) GetBucketPolicy(bucket string) (storage.BucketPolicy, error) {
	return storage.BucketPolicy{}, errors.New("Not Implemented")
}

// GetObject retrieves an object and writes it to a writer
func (diskStorage StorageDriver) GetObject(target io.Writer, bucket, key string) (int64, error) {
	metadata, err := diskStorage.donutBox.GetObjectMetadata(bucket, key, 0)
	if err != nil {
		return 0, err
	}
	k, err := strconv.Atoi(metadata["erasureK"])
	if err != nil {
		return 0, errors.New("Cannot parse erasureK")
	}
	m, err := strconv.Atoi(metadata["erasureM"])
	if err != nil {
		return 0, errors.New("Cannot parse erasureM")
	}
	columnCount := k + m
	bs, err := strconv.Atoi(metadata["blockSize"])
	if err != nil {
		return 0, errors.New("Cannot parse blockSize")
	}
	size, err := strconv.Atoi(metadata["size"])
	if err != nil {
		return 0, errors.New("Cannot parse size")
	}
	chunkCount := size/bs + 1
	// open one reader per erasure column
	var readers []io.Reader
	for column := 0; column < columnCount; column++ {
		reader, err := diskStorage.donutBox.GetObjectReader(bucket, key, uint(column))
		if err != nil {
			return 0, err
		}
		readers = append(readers, reader)
	}

	totalWritten := int64(size)
	totalRemaining := int64(size)
	// decode with the parameters recorded in the object's metadata
	params, err := erasure.ParseEncoderParams(uint8(k), uint8(m), erasure.Cauchy)
	if err != nil {
		return 0, err
	}
	decoder := erasure.NewEncoder(params)
	for chunk := 0; chunk < chunkCount; chunk++ {
		// read one encoded block per column, then decode the original chunk
		blocks := make([][]byte, columnCount)
		for column := 0; column < columnCount; column++ {
			var block bytes.Buffer
			limitReader := io.LimitReader(readers[column], int64(blockSize))
			if _, err := io.Copy(&block, limitReader); err != nil {
				return totalWritten, err
			}
			blocks[column] = block.Bytes()
		}
		// the final chunk is shorter than blockSize
		curBlockSize := blockSize
		if totalRemaining < int64(blockSize) {
			curBlockSize = int(totalRemaining)
		}
		original, err := decoder.Decode(blocks, curBlockSize)
		if err != nil {
			return totalWritten, err
		}
		curWritten, err := io.Copy(target, bytes.NewBuffer(original))
		totalRemaining = totalRemaining - curWritten
		if err != nil {
			return totalWritten, err
		}
	}

	return totalWritten, nil
}
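
// Worked example of the read path above (numbers follow the constants in
// this file, not measured behavior): with blockSize = 10 MiB and the
// 8-data/8-parity Cauchy parameters recorded at write time, a 25 MiB object
// yields chunkCount = 25/10 + 1 = 3 chunks of 10, 10, and 5 MiB. Each chunk
// was encoded into 16 parts, one per column, and in a Reed-Solomon style
// code any 8 of the 16 parts suffice to decode; this routine currently
// reads all 16 columns and does not yet attempt recovery when a column is
// missing.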

// GetPartialObject retrieves part of an object and writes it to a writer
func (diskStorage StorageDriver) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) {
	return 0, errors.New("Not Implemented")
}

// GetObjectMetadata retrieves an object's metadata
func (diskStorage StorageDriver) GetObjectMetadata(bucket, key string, prefix string) (storage.ObjectMetadata, error) {
	metadata, err := diskStorage.donutBox.GetObjectMetadata(bucket, key, 0)
	if err != nil {
		return storage.ObjectMetadata{}, err
	}
	// time.Parse takes the layout first, then the value
	created, err := time.Parse(time.RFC3339Nano, metadata["created"])
	if err != nil {
		return storage.ObjectMetadata{}, err
	}
	size, err := strconv.ParseInt(metadata["size"], 10, 64)
	if err != nil {
		return storage.ObjectMetadata{}, err
	}
	objectMetadata := storage.ObjectMetadata{
		Bucket:      bucket,
		Key:         key,
		ContentType: metadata["contentType"],
		Created:     created,
		Md5:         metadata["md5"],
		Size:        size,
	}
	return objectMetadata, nil
}

// ListObjects lists objects
func (diskStorage StorageDriver) ListObjects(bucket string, resources storage.BucketResourcesMetadata) ([]storage.ObjectMetadata, storage.BucketResourcesMetadata, error) {
	objects, err := diskStorage.donutBox.ListObjectsInBucket(bucket, resources.Prefix)
	if err != nil {
		return nil, storage.BucketResourcesMetadata{}, err
	}
	var results []storage.ObjectMetadata
	sort.Strings(objects)
	for _, object := range withoutDelimiter(objects, resources.Prefix, resources.Delimiter) {
		if len(results) < resources.Maxkeys {
			objectMetadata, err := diskStorage.GetObjectMetadata(bucket, object, "")
			if err != nil {
				return nil, storage.BucketResourcesMetadata{}, err
			}
			results = append(results, objectMetadata)
		} else {
			resources.IsTruncated = true
		}
	}
	if resources.Delimiter != "" {
		objects = trimPrefixWithDelimiter(objects, resources.Prefix, resources.Delimiter)
		objects = beforeDelimiter(objects, resources.Delimiter)
		objects = removeDuplicates(objects)
		resources.CommonPrefixes = objects
	}
	return results, resources, nil
}
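
// Delimiter walkthrough for the helpers used above: given objects
// {"a/b", "a/c", "d"}, an empty prefix, and delimiter "/", withoutDelimiter
// keeps only "d", so ListObjects returns metadata for "d" alone; the
// remaining keys are trimmed, cut at the first "/", and deduplicated,
// setting CommonPrefixes to ["a/"], mirroring S3-style hierarchical listing.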

// withoutDelimiter returns the inputs that contain no delimiter after the prefix
func withoutDelimiter(inputs []string, prefix, delim string) (results []string) {
	if delim == "" {
		return inputs
	}
	for _, input := range inputs {
		input = strings.TrimPrefix(input, prefix)
		if !strings.Contains(input, delim) {
			results = append(results, prefix+input)
		}
	}
	return results
}

// trimPrefixWithDelimiter strips the prefix from inputs that still contain the delimiter
func trimPrefixWithDelimiter(inputs []string, prefix, delim string) (results []string) {
	for _, input := range inputs {
		input = strings.TrimPrefix(input, prefix)
		if strings.Contains(input, delim) {
			results = append(results, input)
		}
	}
	return results
}

// beforeDelimiter truncates each input after its first delimiter
func beforeDelimiter(inputs []string, delim string) (results []string) {
	for _, input := range inputs {
		results = append(results, strings.Split(input, delim)[0]+delim)
	}
	return results
}

// removeDuplicates deduplicates inputs; map iteration leaves the result order unspecified
func removeDuplicates(inputs []string) (results []string) {
	keys := make(map[string]string)
	for _, input := range inputs {
		keys[input] = input
	}
	for result := range keys {
		results = append(results, result)
	}
	return results
}

// CreateObject creates a new object
func (diskStorage StorageDriver) CreateObject(bucketKey string, objectKey string, contentType string, reader io.Reader) error {
	// split the stream into blockSize chunks
	splitStream := split.Stream(reader, uint64(blockSize))
	// 16 columns: 8 data (erasureK) + 8 parity (erasureM)
	writers := make([]*donutbox.NewObject, 16)
	for i := 0; i < 16; i++ {
		newWriter, err := diskStorage.donutBox.GetObjectWriter(bucketKey, objectKey, uint(i), uint(blockSize))
		if err != nil {
			closeAllWritersWithError(writers, err)
			return err
		}
		writers[i] = newWriter
	}
	// the encoder parameters are fixed, so parse them once outside the chunk loop
	params, err := erasure.ParseEncoderParams(8, 8, erasure.Cauchy)
	if err != nil {
		closeAllWritersWithError(writers, err)
		return err
	}
	encoder := erasure.NewEncoder(params)
	totalLength := uint64(0)
	chunkCount := 0
	hasher := md5.New()
	for chunk := range splitStream {
		if chunk.Err != nil {
			closeAllWritersWithError(writers, chunk.Err)
			return chunk.Err
		}
		hasher.Write(chunk.Data)
		totalLength = totalLength + uint64(len(chunk.Data))
		chunkCount = chunkCount + 1
		// encode the chunk and write one part per column
		parts, err := encoder.Encode(chunk.Data)
		if err != nil {
			closeAllWritersWithError(writers, err)
			return err
		}
		for index, part := range parts {
			if _, err := writers[index].Write(part); err != nil {
				closeAllWritersWithError(writers, err)
				return err
			}
		}
	}

	metadataObj := storage.ObjectMetadata{
		Bucket: bucketKey,
		Key:    objectKey,

		ContentType: contentType,
		Created:     time.Now(),
		Md5:         hex.EncodeToString(hasher.Sum(nil)),
		Size:        int64(totalLength),
	}

	metadata := createMetadata(metadataObj, blockSize, 8, 8, "Cauchy")

	for column := uint(0); column < 16; column++ {
		writers[column].SetMetadata(metadata)
	}

	// close connections
	// TODO capture errors in writers, enough should pass before returning
	closeAllWriters(writers)

	return nil
}
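
// Round-trip sketch for the write and read paths (assumes `store` came from
// Start; bucket, key, and reader names are illustrative):
//
//	if err := store.CreateObject("bucket", "key", "application/octet-stream", dataReader); err != nil {
//		log.Fatalln(err)
//	}
//	var buf bytes.Buffer
//	if _, err := store.GetObject(&buf, "bucket", "key"); err != nil {
//		log.Fatalln(err)
//	}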

// closeAllWriters closes every writer that was successfully opened
func closeAllWriters(writers []*donutbox.NewObject) {
	for _, writer := range writers {
		if writer != nil {
			writer.Close()
		}
	}
}

// closeAllWritersWithError aborts every open writer with the given error
func closeAllWritersWithError(writers []*donutbox.NewObject, err error) {
	for _, writer := range writers {
		if writer != nil {
			writer.CloseWithError(err)
		}
	}
}

// createMetadata flattens object metadata and erasure parameters into the
// string map stored alongside each column
func createMetadata(metadataObject storage.ObjectMetadata, blockSize int, k, m uint8, technique string) map[string]string {
	metadata := make(map[string]string)
	metadata["bucket"] = metadataObject.Bucket
	metadata["key"] = metadataObject.Key
	metadata["contentType"] = metadataObject.ContentType
	metadata["created"] = metadataObject.Created.Format(time.RFC3339Nano)
	metadata["md5"] = metadataObject.Md5
	metadata["size"] = strconv.FormatInt(metadataObject.Size, 10)

	metadata["blockSize"] = strconv.FormatUint(uint64(blockSize), 10)
	metadata["erasureK"] = strconv.FormatUint(uint64(k), 10)
	metadata["erasureM"] = strconv.FormatUint(uint64(m), 10)
	metadata["erasureTechnique"] = technique
	return metadata
}
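
// For an object written with the defaults in CreateObject, the stored map
// looks like this (values illustrative; 26214400 is a 25 MiB object and
// 10485760 is the 10 MiB blockSize):
//
//	"bucket":           "mybucket"
//	"key":              "mykey"
//	"contentType":      "application/octet-stream"
//	"created":          "2015-03-16T23:41:00.000000000-04:00"
//	"md5":              "9e107d9d372bb6826bd81d3542a419d6"
//	"size":             "26214400"
//	"blockSize":        "10485760"
//	"erasureK":         "8"
//	"erasureM":         "8"
//	"erasureTechnique": "Cauchy"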