Merge remote-tracking branch 'origin/master' into HEAD

Conflicts:
	pkg/drivers/donut/donut.go
	pkg/storage/donut/bucket.go
	pkg/storage/donut/donut.go
	pkg/storage/donut/donut_test.go
	pkg/storage/donut/donutdriver.go
This commit is contained in:
Frederick F. Kautz IV
2015-03-24 21:04:08 -07:00
59 changed files with 1681 additions and 1239 deletions

View File

@@ -6,11 +6,13 @@ import (
"strings"
)
type bucketDriver struct {
nodes []string
type donutBucket struct {
nodes []string
objects map[string][]byte
}
func (b bucketDriver) GetNodes() ([]string, error) {
// GetNodes - get list of associated nodes for a given bucket
func (b donutBucket) GetNodes() ([]string, error) {
var nodes []string
for _, node := range b.nodes {
nodes = append(nodes, node)
@@ -18,7 +20,7 @@ func (b bucketDriver) GetNodes() ([]string, error) {
return nodes, nil
}
func (b bucketDriver) AddNode(nodeID, bucketID string) error {
func (b donutBucket) AddNode(nodeID, bucketID string) error {
tokens := strings.Split(bucketID, ":")
if len(tokens) != 3 {
return errors.New("Bucket ID malformed: " + bucketID)

View File

@@ -1,51 +1,181 @@
package donut
import (
"errors"
"io"
"sort"
"strconv"
"strings"
)
// INTERFACES
// Donut interface
type Donut interface {
CreateBucket(bucket string) error
GetObject(bucket, object string) (io.ReadCloser, error)
GetObjectMetadata(bucket, object string) (map[string]string, error)
GetObjectWriter(bucket, object string) (ObjectWriter, error)
ListBuckets() ([]string, error)
ListObjects(bucket string) ([]string, error)
type donut struct {
buckets map[string]Bucket
nodes map[string]Node
}
// Bucket interface
type Bucket interface {
GetNodes() ([]string, error)
AddNode(nodeID, bucketID string) error
// NewDonut - instantiate new donut driver
func NewDonut(root string) (Donut, error) {
nodes := make(map[string]Node)
nodes["localhost"] = &localDirectoryNode{root: root}
driver := &donut{
buckets: make(map[string]Bucket),
nodes: nodes,
}
for nodeID, node := range nodes {
bucketIDs, err := node.GetBuckets()
if err != nil {
return nil, err
}
for _, bucketID := range bucketIDs {
tokens := strings.Split(bucketID, ":")
if _, ok := driver.buckets[tokens[0]]; ok {
// found bucket, skip creating
} else {
bucket := donutBucket{
nodes: make([]string, 16),
}
// TODO catch errors
driver.buckets[tokens[0]] = bucket
}
if err = driver.buckets[tokens[0]].AddNode(nodeID, bucketID); err != nil {
return nil, err
}
}
}
return driver, nil
}
// Node interface
type Node interface {
CreateBucket(bucket string) error
GetBuckets() ([]string, error)
GetDonutMetadata(bucket, object string) (map[string]string, error)
GetMetadata(bucket, object string) (map[string]string, error)
GetReader(bucket, object string) (io.ReadCloser, error)
GetWriter(bucket, object string) (Writer, error)
ListObjects(bucket string) ([]string, error)
// CreateBucket - create a new bucket
func (d donut) CreateBucket(bucketName string) error {
if _, ok := d.buckets[bucketName]; ok == false {
bucketName = strings.TrimSpace(bucketName)
if bucketName == "" {
return errors.New("Cannot create bucket with no name")
}
// assign nodes
// TODO assign other nodes
nodes := make([]string, 16)
for i := 0; i < 16; i++ {
nodes[i] = "localhost"
if node, ok := d.nodes["localhost"]; ok {
node.CreateBucket(bucketName + ":0:" + strconv.Itoa(i))
}
}
bucket := donutBucket{
nodes: nodes,
}
d.buckets[bucketName] = bucket
return nil
}
return errors.New("Bucket exists")
}
// ObjectWriter interface
type ObjectWriter interface {
Close() error
CloseWithError(error) error
GetMetadata() (map[string]string, error)
SetMetadata(map[string]string) error
Write([]byte) (int, error)
// ListBuckets - list all buckets
func (d donut) ListBuckets() ([]string, error) {
var buckets []string
for bucket := range d.buckets {
buckets = append(buckets, bucket)
}
sort.Strings(buckets)
return buckets, nil
}
// Writer interface
type Writer interface {
ObjectWriter
GetDonutMetadata() (map[string]string, error)
SetDonutMetadata(map[string]string) error
// GetObjectWriter - get a new writer interface for a new object
func (d donut) GetObjectWriter(bucketName, objectName string) (ObjectWriter, error) {
if bucket, ok := d.buckets[bucketName]; ok == true {
writers := make([]Writer, 16)
nodes, err := bucket.GetNodes()
if err != nil {
return nil, err
}
for i, nodeID := range nodes {
if node, ok := d.nodes[nodeID]; ok == true {
writer, err := node.GetWriter(bucketName+":0:"+strconv.Itoa(i), objectName)
if err != nil {
for _, writerToClose := range writers {
if writerToClose != nil {
writerToClose.CloseWithError(err)
}
}
return nil, err
}
writers[i] = writer
}
}
return newErasureWriter(writers), nil
}
return nil, errors.New("Bucket not found")
}
// GetObjectReader - get a new reader interface for a new object
func (d donut) GetObjectReader(bucketName, objectName string) (io.ReadCloser, error) {
r, w := io.Pipe()
if bucket, ok := d.buckets[bucketName]; ok == true {
readers := make([]io.ReadCloser, 16)
nodes, err := bucket.GetNodes()
if err != nil {
return nil, err
}
var metadata map[string]string
for i, nodeID := range nodes {
if node, ok := d.nodes[nodeID]; ok == true {
bucketID := bucketName + ":0:" + strconv.Itoa(i)
reader, err := node.GetReader(bucketID, objectName)
if err != nil {
return nil, err
}
readers[i] = reader
if metadata == nil {
metadata, err = node.GetDonutMetadata(bucketID, objectName)
if err != nil {
return nil, err
}
}
}
}
go erasureReader(readers, metadata, w)
return r, nil
}
return nil, errors.New("Bucket not found")
}
// GetObjectMetadata returns metadata for a given object in a bucket
func (d donut) GetObjectMetadata(bucketName, object string) (map[string]string, error) {
if bucket, ok := d.buckets[bucketName]; ok {
nodes, err := bucket.GetNodes()
if err != nil {
return nil, err
}
if node, ok := d.nodes[nodes[0]]; ok {
bucketID := bucketName + ":0:0"
metadata, err := node.GetMetadata(bucketID, object)
if err != nil {
return nil, err
}
donutMetadata, err := node.GetDonutMetadata(bucketID, object)
if err != nil {
return nil, err
}
metadata["sys.created"] = donutMetadata["created"]
metadata["sys.md5"] = donutMetadata["md5"]
metadata["sys.size"] = donutMetadata["size"]
return metadata, nil
}
return nil, errors.New("Cannot connect to node: " + nodes[0])
}
return nil, errors.New("Bucket not found")
}
// ListObjects - list all the available objects in a bucket
func (d donut) ListObjects(bucketName string) ([]string, error) {
if bucket, ok := d.buckets[bucketName]; ok {
nodes, err := bucket.GetNodes()
if err != nil {
return nil, err
}
if node, ok := d.nodes[nodes[0]]; ok {
return node.ListObjects(bucketName + ":0:0")
}
}
return nil, errors.New("Bucket not found")
}

View File

@@ -1,14 +1,14 @@
package donut
import (
"testing"
"bytes"
. "gopkg.in/check.v1"
"io"
"io/ioutil"
"os"
"testing"
"time"
. "gopkg.in/check.v1"
)
func Test(t *testing.T) { TestingT(t) }
@@ -21,7 +21,7 @@ func (s *MySuite) TestEmptyBucket(c *C) {
root, err := ioutil.TempDir(os.TempDir(), "donut-")
c.Assert(err, IsNil)
defer os.RemoveAll(root)
donut, err := NewDonutDriver(root)
donut, err := NewDonut(root)
c.Assert(err, IsNil)
// check buckets are empty
@@ -34,7 +34,7 @@ func (s *MySuite) TestBucketWithoutNameFails(c *C) {
root, err := ioutil.TempDir(os.TempDir(), "donut-")
c.Assert(err, IsNil)
defer os.RemoveAll(root)
donut, err := NewDonutDriver(root)
donut, err := NewDonut(root)
c.Assert(err, IsNil)
// fail to create new bucket without a name
err = donut.CreateBucket("")
@@ -48,7 +48,7 @@ func (s *MySuite) TestCreateBucketAndList(c *C) {
root, err := ioutil.TempDir(os.TempDir(), "donut-")
c.Assert(err, IsNil)
defer os.RemoveAll(root)
donut, err := NewDonutDriver(root)
donut, err := NewDonut(root)
c.Assert(err, IsNil)
// create bucket
err = donut.CreateBucket("foo")
@@ -64,7 +64,7 @@ func (s *MySuite) TestCreateBucketWithSameNameFails(c *C) {
root, err := ioutil.TempDir(os.TempDir(), "donut-")
c.Assert(err, IsNil)
defer os.RemoveAll(root)
donut, err := NewDonutDriver(root)
donut, err := NewDonut(root)
c.Assert(err, IsNil)
err = donut.CreateBucket("foo")
c.Assert(err, IsNil)
@@ -77,7 +77,7 @@ func (s *MySuite) TestCreateMultipleBucketsAndList(c *C) {
root, err := ioutil.TempDir(os.TempDir(), "donut-")
c.Assert(err, IsNil)
defer os.RemoveAll(root)
donut, err := NewDonutDriver(root)
donut, err := NewDonut(root)
c.Assert(err, IsNil)
// add a second bucket
err = donut.CreateBucket("foo")
@@ -102,7 +102,7 @@ func (s *MySuite) TestNewObjectFailsWithoutBucket(c *C) {
root, err := ioutil.TempDir(os.TempDir(), "donut-")
c.Assert(err, IsNil)
defer os.RemoveAll(root)
donut, err := NewDonutDriver(root)
donut, err := NewDonut(root)
c.Assert(err, IsNil)
writer, err := donut.GetObjectWriter("foo", "obj")
@@ -114,7 +114,7 @@ func (s *MySuite) TestNewObjectFailsWithEmptyName(c *C) {
root, err := ioutil.TempDir(os.TempDir(), "donut-")
c.Assert(err, IsNil)
defer os.RemoveAll(root)
donut, err := NewDonutDriver(root)
donut, err := NewDonut(root)
c.Assert(err, IsNil)
writer, err := donut.GetObjectWriter("foo", "")
@@ -130,7 +130,7 @@ func (s *MySuite) TestNewObjectCanBeWritten(c *C) {
root, err := ioutil.TempDir(os.TempDir(), "donut-")
c.Assert(err, IsNil)
defer os.RemoveAll(root)
donut, err := NewDonutDriver(root)
donut, err := NewDonut(root)
c.Assert(err, IsNil)
err = donut.CreateBucket("foo")
@@ -161,7 +161,7 @@ func (s *MySuite) TestNewObjectCanBeWritten(c *C) {
c.Assert(err, IsNil)
reader, err := donut.GetObject("foo", "obj")
reader, err := donut.GetObjectReader("foo", "obj")
c.Assert(err, IsNil)
var actualData bytes.Buffer
@@ -183,7 +183,7 @@ func (s *MySuite) TestMultipleNewObjects(c *C) {
root, err := ioutil.TempDir(os.TempDir(), "donut-")
c.Assert(err, IsNil)
defer os.RemoveAll(root)
donut, err := NewDonutDriver(root)
donut, err := NewDonut(root)
c.Assert(err, IsNil)
c.Assert(donut.CreateBucket("foo"), IsNil)
@@ -199,7 +199,7 @@ func (s *MySuite) TestMultipleNewObjects(c *C) {
// c.Skip("not complete")
reader, err := donut.GetObject("foo", "obj1")
reader, err := donut.GetObjectReader("foo", "obj1")
c.Assert(err, IsNil)
var readerBuffer1 bytes.Buffer
_, err = io.Copy(&readerBuffer1, reader)
@@ -207,7 +207,7 @@ func (s *MySuite) TestMultipleNewObjects(c *C) {
// c.Skip("Not Implemented")
c.Assert(readerBuffer1.Bytes(), DeepEquals, []byte("one"))
reader, err = donut.GetObject("foo", "obj2")
reader, err = donut.GetObjectReader("foo", "obj2")
c.Assert(err, IsNil)
var readerBuffer2 bytes.Buffer
_, err = io.Copy(&readerBuffer2, reader)
@@ -224,7 +224,7 @@ func (s *MySuite) TestSysPrefixShouldFail(c *C) {
root, err := ioutil.TempDir(os.TempDir(), "donut-")
c.Assert(err, IsNil)
defer os.RemoveAll(root)
donut, err := NewDonutDriver(root)
donut, err := NewDonut(root)
c.Assert(err, IsNil)
c.Assert(donut.CreateBucket("foo"), IsNil)

View File

@@ -1,176 +0,0 @@
package donut
import (
"errors"
"io"
"sort"
"strconv"
"strings"
)
type donutDriver struct {
buckets map[string]Bucket
nodes map[string]Node
}
// NewDonutDriver - instantiate new donut driver rooted at the given
// directory. It registers a single "localhost" node, then rebuilds the
// in-memory bucket map by scanning each node's existing bucket shards.
func NewDonutDriver(root string) (Donut, error) {
nodes := make(map[string]Node)
nodes["localhost"] = &localDirectoryNode{root: root}
driver := &donutDriver{
buckets: make(map[string]Bucket),
nodes: nodes,
}
// Rehydrate bucket state from what is already on disk.
for nodeID, node := range nodes {
bucketIDs, err := node.GetBuckets()
if err != nil {
return nil, err
}
for _, bucketID := range bucketIDs {
// Shard IDs are "<bucket>:<part>:<index>"; tokens[0] is the bucket name.
tokens := strings.Split(bucketID, ":")
if _, ok := driver.buckets[tokens[0]]; ok {
// found bucket, skip creating
} else {
// 16 slots: one node assignment per erasure shard.
bucket := bucketDriver{
nodes: make([]string, 16),
}
// TODO catch errors
driver.buckets[tokens[0]] = bucket
}
if err = driver.buckets[tokens[0]].AddNode(nodeID, bucketID); err != nil {
return nil, err
}
}
}
return driver, nil
}
// CreateBucket creates a new bucket striped across 16 local shards.
//
// The name is whitespace-trimmed *before* the existence check; the
// original trimmed only after looking up the untrimmed name, so a name
// like " foo" could silently clobber an existing "foo" entry. Shard
// creation errors from the node are now propagated instead of dropped.
func (driver donutDriver) CreateBucket(bucketName string) error {
	bucketName = strings.TrimSpace(bucketName)
	if bucketName == "" {
		return errors.New("Cannot create bucket with no name")
	}
	if _, ok := driver.buckets[bucketName]; ok {
		return errors.New("Bucket exists")
	}
	// assign nodes
	// TODO assign other nodes than localhost
	nodes := make([]string, 16)
	for i := 0; i < 16; i++ {
		nodes[i] = "localhost"
		if node, ok := driver.nodes["localhost"]; ok {
			// surface shard-creation failures instead of ignoring them
			if err := node.CreateBucket(bucketName + ":0:" + strconv.Itoa(i)); err != nil {
				return err
			}
		}
	}
	driver.buckets[bucketName] = bucketDriver{nodes: nodes}
	return nil
}
// ListBuckets returns the names of every known bucket, sorted
// lexicographically for deterministic output.
func (driver donutDriver) ListBuckets() ([]string, error) {
	var names []string
	for name := range driver.buckets {
		names = append(names, name)
	}
	sort.Strings(names)
	return names, nil
}
// GetObjectWriter opens one shard writer per bucket node (16 total)
// and wraps them in an erasure-coding writer for the new object.
func (driver donutDriver) GetObjectWriter(bucketName, objectName string) (ObjectWriter, error) {
if bucket, ok := driver.buckets[bucketName]; ok == true {
// one writer per erasure shard
writers := make([]Writer, 16)
nodes, err := bucket.GetNodes()
if err != nil {
return nil, err
}
for i, nodeID := range nodes {
if node, ok := driver.nodes[nodeID]; ok == true {
// shard bucket IDs have the form "<bucket>:0:<index>"
writer, err := node.GetWriter(bucketName+":0:"+strconv.Itoa(i), objectName)
if err != nil {
// abort: close every writer opened so far before bailing out
for _, writerToClose := range writers {
if writerToClose != nil {
writerToClose.CloseWithError(err)
}
}
return nil, err
}
writers[i] = writer
}
// NOTE(review): a nodeID missing from driver.nodes leaves a nil
// slot in writers — confirm newErasureWriter tolerates nil entries.
}
return newErasureWriter(writers), nil
}
return nil, errors.New("Bucket not found")
}
// GetObject streams an object back to the caller through an io.Pipe:
// shard readers are opened synchronously, then erasureReader decodes
// them into the pipe's write end on a background goroutine.
func (driver donutDriver) GetObject(bucketName, objectName string) (io.ReadCloser, error) {
r, w := io.Pipe()
if bucket, ok := driver.buckets[bucketName]; ok == true {
readers := make([]io.ReadCloser, 16)
nodes, err := bucket.GetNodes()
if err != nil {
return nil, err
}
var metadata map[string]string
for i, nodeID := range nodes {
if node, ok := driver.nodes[nodeID]; ok == true {
bucketID := bucketName + ":0:" + strconv.Itoa(i)
reader, err := node.GetReader(bucketID, objectName)
if err != nil {
// NOTE(review): readers opened on earlier iterations are not
// closed on this error path — possible fd leak; verify.
return nil, err
}
readers[i] = reader
// donut metadata is identical across shards; fetch it once from
// the first reachable node
if metadata == nil {
metadata, err = node.GetDonutMetadata(bucketID, objectName)
if err != nil {
return nil, err
}
}
}
}
// decoding happens asynchronously; failures surface via the pipe
go erasureReader(readers, metadata, w)
return r, nil
}
return nil, errors.New("Bucket not found")
}
// GetObjectMetadata returns metadata for a given object in a bucket.
// User metadata is merged with donut-internal metadata, the latter
// exposed under reserved "sys."-prefixed keys. Only the bucket's first
// node is consulted.
func (driver donutDriver) GetObjectMetadata(bucketName, object string) (map[string]string, error) {
if bucket, ok := driver.buckets[bucketName]; ok {
nodes, err := bucket.GetNodes()
if err != nil {
return nil, err
}
if node, ok := driver.nodes[nodes[0]]; ok {
// metadata lives on shard 0 of the first node
bucketID := bucketName + ":0:0"
metadata, err := node.GetMetadata(bucketID, object)
if err != nil {
return nil, err
}
donutMetadata, err := node.GetDonutMetadata(bucketID, object)
if err != nil {
return nil, err
}
// expose selected internal fields with a "sys." prefix
metadata["sys.created"] = donutMetadata["created"]
metadata["sys.md5"] = donutMetadata["md5"]
metadata["sys.size"] = donutMetadata["size"]
return metadata, nil
}
return nil, errors.New("Cannot connect to node: " + nodes[0])
}
return nil, errors.New("Bucket not found")
}
// ListObjects lists the objects stored in the named bucket.
//
// Listing is served from the bucket's first node only; both a missing
// bucket and an unreachable first node report "Bucket not found",
// matching the original behavior.
func (driver donutDriver) ListObjects(bucketName string) ([]string, error) {
	bucket, found := driver.buckets[bucketName]
	if !found {
		return nil, errors.New("Bucket not found")
	}
	nodes, err := bucket.GetNodes()
	if err != nil {
		return nil, err
	}
	if node, online := driver.nodes[nodes[0]]; online {
		return node.ListObjects(bucketName + ":0:0")
	}
	return nil, errors.New("Bucket not found")
}

View File

@@ -2,29 +2,70 @@ package donut
import (
"bytes"
"errors"
"io"
"strconv"
"strings"
"time"
"crypto/md5"
"encoding/hex"
"errors"
"github.com/minio-io/minio/pkg/encoding/erasure"
"github.com/minio-io/minio/pkg/utils/split"
"strings"
)
// getErasureTechnique - convert technique string into Technique type
func getErasureTechnique(technique string) (erasure.Technique, error) {
switch true {
case technique == "Cauchy":
return erasure.Cauchy, nil
case technique == "Vandermonde":
return erasure.Cauchy, nil
default:
return erasure.None, errors.New("Invalid erasure technique")
}
}
// erasureReader - returns aligned streaming reads over a PipeWriter
func erasureReader(readers []io.ReadCloser, donutMetadata map[string]string, writer *io.PipeWriter) {
// TODO handle errors
totalChunks, _ := strconv.Atoi(donutMetadata["chunkCount"])
totalLeft, _ := strconv.Atoi(donutMetadata["size"])
blockSize, _ := strconv.Atoi(donutMetadata["blockSize"])
k, _ := strconv.Atoi(donutMetadata["erasureK"])
m, _ := strconv.Atoi(donutMetadata["erasureM"])
expectedMd5sum, _ := hex.DecodeString(donutMetadata["md5"])
totalChunks, err := strconv.Atoi(donutMetadata["chunkCount"])
if err != nil {
writer.CloseWithError(err)
return
}
totalLeft, err := strconv.Atoi(donutMetadata["size"])
if err != nil {
writer.CloseWithError(err)
return
}
blockSize, err := strconv.Atoi(donutMetadata["blockSize"])
if err != nil {
writer.CloseWithError(err)
return
}
k, err := strconv.Atoi(donutMetadata["erasureK"])
if err != nil {
writer.CloseWithError(err)
return
}
m, err := strconv.Atoi(donutMetadata["erasureM"])
if err != nil {
writer.CloseWithError(err)
return
}
expectedMd5sum, err := hex.DecodeString(donutMetadata["md5"])
if err != nil {
writer.CloseWithError(err)
return
}
technique, err := getErasureTechnique(donutMetadata["erasureTechnique"])
if err != nil {
writer.CloseWithError(err)
return
}
summer := md5.New()
// TODO select technique properly
params, _ := erasure.ParseEncoderParams(uint8(k), uint8(m), erasure.Cauchy)
params, _ := erasure.ParseEncoderParams(uint8(k), uint8(m), technique)
encoder := erasure.NewEncoder(params)
for _, reader := range readers {
defer reader.Close()
@@ -34,13 +75,17 @@ func erasureReader(readers []io.ReadCloser, donutMetadata map[string]string, wri
if blockSize < totalLeft {
curBlockSize = blockSize
}
curChunkSize := erasure.GetEncodedChunkLen(curBlockSize, uint8(k))
curChunkSize := erasure.GetEncodedBlockLen(curBlockSize, uint8(k))
encodedBytes := make([][]byte, 16)
for i, reader := range readers {
var bytesBuffer bytes.Buffer
// TODO watch for errors
io.CopyN(&bytesBuffer, reader, int64(curChunkSize))
_, err := io.CopyN(&bytesBuffer, reader, int64(curChunkSize))
if err != nil {
writer.CloseWithError(err)
return
}
encodedBytes[i] = bytesBuffer.Bytes()
}
decodedData, err := encoder.Decode(encodedBytes, curBlockSize)
@@ -49,7 +94,11 @@ func erasureReader(readers []io.ReadCloser, donutMetadata map[string]string, wri
return
}
summer.Write(decodedData)
io.Copy(writer, bytes.NewBuffer(decodedData))
_, err = io.Copy(writer, bytes.NewBuffer(decodedData))
if err != nil {
writer.CloseWithError(err)
return
}
totalLeft = totalLeft - blockSize
}
actualMd5sum := summer.Sum(nil)
@@ -58,6 +107,7 @@ func erasureReader(readers []io.ReadCloser, donutMetadata map[string]string, wri
return
}
writer.Close()
return
}
// erasure writer
@@ -70,6 +120,7 @@ type erasureWriter struct {
isClosed <-chan bool
}
// newErasureWriter - get a new writer
func newErasureWriter(writers []Writer) ObjectWriter {
r, w := io.Pipe()
isClosed := make(chan bool)

View File

@@ -0,0 +1,51 @@
package donut
import (
"io"
)
// Collection of Donut specification interfaces
// Donut is the top-level object-store API: bucket lifecycle plus
// object read/write/metadata/list operations.
type Donut interface {
CreateBucket(bucket string) error
GetObjectReader(bucket, object string) (io.ReadCloser, error)
GetObjectWriter(bucket, object string) (ObjectWriter, error)
GetObjectMetadata(bucket, object string) (map[string]string, error)
ListBuckets() ([]string, error)
ListObjects(bucket string) ([]string, error)
}
// Bucket maps a logical bucket onto the nodes holding its shards.
type Bucket interface {
GetNodes() ([]string, error)
AddNode(nodeID, bucketID string) error
}
// Node is a single storage node: it manages per-node bucket shards and
// hands out readers/writers for objects stored on it.
type Node interface {
CreateBucket(bucket string) error
GetBuckets() ([]string, error)
GetDonutMetadata(bucket, object string) (map[string]string, error)
GetMetadata(bucket, object string) (map[string]string, error)
GetReader(bucket, object string) (io.ReadCloser, error)
GetWriter(bucket, object string) (Writer, error)
ListObjects(bucket string) ([]string, error)
}
// ObjectWriter is the user-facing object writer; user metadata set
// through it is persisted when the writer is closed.
type ObjectWriter interface {
Close() error
CloseWithError(error) error
GetMetadata() (map[string]string, error)
SetMetadata(map[string]string) error
Write([]byte) (int, error)
}
// Writer extends ObjectWriter with donut-internal (system) metadata
// access; it is handed out by Nodes rather than by end users.
type Writer interface {
ObjectWriter
GetDonutMetadata() (map[string]string, error)
SetDonutMetadata(map[string]string) error
}

View File

@@ -41,7 +41,7 @@ func (d localDirectoryNode) GetWriter(bucket, object string) (Writer, error) {
if err != nil {
return nil, err
}
return newDonutFileWriter(objectPath)
return newDonutObjectWriter(objectPath)
}
func (d localDirectoryNode) GetReader(bucket, object string) (io.ReadCloser, error) {

View File

@@ -7,12 +7,12 @@ import (
"path"
)
func newDonutFileWriter(objectDir string) (Writer, error) {
func newDonutObjectWriter(objectDir string) (Writer, error) {
dataFile, err := os.OpenFile(path.Join(objectDir, "data"), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
return nil, err
}
return donutFileWriter{
return donutObjectWriter{
root: objectDir,
file: dataFile,
metadata: make(map[string]string),
@@ -20,7 +20,7 @@ func newDonutFileWriter(objectDir string) (Writer, error) {
}, nil
}
type donutFileWriter struct {
type donutObjectWriter struct {
root string
file *os.File
metadata map[string]string
@@ -28,11 +28,11 @@ type donutFileWriter struct {
err error
}
func (d donutFileWriter) Write(data []byte) (int, error) {
func (d donutObjectWriter) Write(data []byte) (int, error) {
return d.file.Write(data)
}
func (d donutFileWriter) Close() error {
func (d donutObjectWriter) Close() error {
if d.err != nil {
return d.err
}
@@ -44,14 +44,14 @@ func (d donutFileWriter) Close() error {
return d.file.Close()
}
func (d donutFileWriter) CloseWithError(err error) error {
func (d donutObjectWriter) CloseWithError(err error) error {
if d.err != nil {
d.err = err
}
return d.Close()
}
func (d donutFileWriter) SetMetadata(metadata map[string]string) error {
func (d donutObjectWriter) SetMetadata(metadata map[string]string) error {
for k := range d.metadata {
delete(d.metadata, k)
}
@@ -61,7 +61,7 @@ func (d donutFileWriter) SetMetadata(metadata map[string]string) error {
return nil
}
func (d donutFileWriter) GetMetadata() (map[string]string, error) {
func (d donutObjectWriter) GetMetadata() (map[string]string, error) {
metadata := make(map[string]string)
for k, v := range d.metadata {
metadata[k] = v
@@ -69,7 +69,7 @@ func (d donutFileWriter) GetMetadata() (map[string]string, error) {
return metadata, nil
}
func (d donutFileWriter) SetDonutMetadata(metadata map[string]string) error {
func (d donutObjectWriter) SetDonutMetadata(metadata map[string]string) error {
for k := range d.donutMetadata {
delete(d.donutMetadata, k)
}
@@ -79,7 +79,7 @@ func (d donutFileWriter) SetDonutMetadata(metadata map[string]string) error {
return nil
}
func (d donutFileWriter) GetDonutMetadata() (map[string]string, error) {
func (d donutObjectWriter) GetDonutMetadata() (map[string]string, error) {
donutMetadata := make(map[string]string)
for k, v := range d.donutMetadata {
donutMetadata[k] = v

View File

@@ -1,39 +0,0 @@
package donut
import (
"errors"
)
// objectWriter is a stub ObjectWriter: it discards written data and
// keeps user metadata only in memory.
type objectWriter struct {
	metadata map[string]string
}

// Write discards data and always reports 11 bytes written (stub).
func (obj objectWriter) Write(data []byte) (length int, err error) {
	length = 11
	return length, nil
}

// Close always succeeds.
func (obj objectWriter) Close() error {
	return nil
}

// CloseWithError is not supported by this stub.
func (obj objectWriter) CloseWithError(err error) error {
	return errors.New("Not Implemented")
}

// SetMetadata replaces the stored metadata with a copy of the input.
func (obj objectWriter) SetMetadata(metadata map[string]string) error {
	for key := range obj.metadata {
		delete(obj.metadata, key)
	}
	for key, value := range metadata {
		obj.metadata[key] = value
	}
	return nil
}

// GetMetadata returns a defensive copy of the stored metadata.
func (obj objectWriter) GetMetadata() (map[string]string, error) {
	snapshot := make(map[string]string, len(obj.metadata))
	for key, value := range obj.metadata {
		snapshot[key] = value
	}
	return snapshot, nil
}

View File

@@ -1,264 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donutstorage
import (
"errors"
"github.com/minio-io/minio/pkg/storage"
"github.com/minio-io/minio/pkg/storage/donut"
"io"
"sort"
"strconv"
"strings"
"time"
)
// Storage creates a new single disk storage driver using donut without encoding.
type Storage struct {
donut donut.Donut
}
const (
// block granularity used when writing objects (10 MiB)
blockSize = 10 * 1024 * 1024
)
// Start a single disk subsystem rooted at path, returning its control
// channel, error channel and storage interface.
func Start(path string) (chan<- string, <-chan error, storage.Storage) {
	ctrlChannel := make(chan string)
	// Buffered (capacity 1) so the setup-failure send below can complete
	// before any caller reads: the original unbuffered send deadlocked
	// Start itself whenever NewDonutDriver failed, since the channel is
	// only returned to the caller afterwards.
	errorChannel := make(chan error, 1)
	s := new(Storage)
	// TODO donut driver should be passed in as Start param and driven by config
	var err error
	s.donut, err = donut.NewDonutDriver(path)
	if err != nil {
		// NOTE(review): s.donut stays nil here and the Storage is still
		// returned — callers must drain errorChannel before using it.
		errorChannel <- err
	}
	go start(ctrlChannel, errorChannel, s)
	return ctrlChannel, errorChannel, s
}
// start is the driver's (currently trivial) run loop: it does no work
// and immediately closes the error channel to signal completion.
func start(ctrlChannel <-chan string, errorChannel chan<- error, s *Storage) {
close(errorChannel)
}
// ListBuckets returns metadata for every bucket known to the donut
// driver. Creation dates are placeholders (time.Now) until the donut
// layer records real ones.
func (donutStorage Storage) ListBuckets() (results []storage.BucketMetadata, err error) {
	buckets, err := donutStorage.donut.ListBuckets()
	if err != nil {
		return nil, err
	}
	for _, bucket := range buckets {
		// (the original re-checked the stale err on every iteration;
		// that check was dead code and has been removed)
		result := storage.BucketMetadata{
			Name: bucket,
			// TODO Add real created date
			Created: time.Now(),
		}
		results = append(results, result)
	}
	return results, nil
}
// CreateBucket creates a new bucket by delegating directly to the
// underlying donut driver.
func (donutStorage Storage) CreateBucket(bucket string) error {
return donutStorage.donut.CreateBucket(bucket)
}
// GetBucketMetadata retrieves an bucket's metadata
// Not yet implemented: always returns a zero value and an error.
func (donutStorage Storage) GetBucketMetadata(bucket string) (storage.BucketMetadata, error) {
return storage.BucketMetadata{}, errors.New("Not Implemented")
}
// CreateBucketPolicy sets a bucket's access policy
// Not yet implemented: always returns an error.
func (donutStorage Storage) CreateBucketPolicy(bucket string, p storage.BucketPolicy) error {
return errors.New("Not Implemented")
}
// GetBucketPolicy returns a bucket's access policy
// Not yet implemented: always returns a zero value and an error.
func (donutStorage Storage) GetBucketPolicy(bucket string) (storage.BucketPolicy, error) {
return storage.BucketPolicy{}, errors.New("Not Implemented")
}
// GetObject retrieves an object and writes it to a writer,
// returning the number of bytes copied.
func (donutStorage Storage) GetObject(target io.Writer, bucket, key string) (int64, error) {
reader, err := donutStorage.donut.GetObject(bucket, key)
if err != nil {
// NOTE(review): every driver error is reported as ObjectNotFound,
// masking unrelated failures (I/O errors, bad bucket) — confirm
// this coarse mapping is intentional.
return 0, storage.ObjectNotFound{
Bucket: bucket,
Object: key,
}
}
return io.Copy(target, reader)
}
// GetPartialObject retrieves an object and writes it to a writer
// Not yet implemented: range reads always return an error.
func (donutStorage Storage) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) {
return 0, errors.New("Not Implemented")
}
// GetObjectMetadata retrieves an object's metadata and maps the donut
// "sys.*" fields into a storage.ObjectMetadata value.
// The prefix parameter is accepted for interface compatibility but is
// not used here.
func (donutStorage Storage) GetObjectMetadata(bucket, key string, prefix string) (storage.ObjectMetadata, error) {
	metadata, err := donutStorage.donut.GetObjectMetadata(bucket, key)
	if err != nil {
		// the original discarded this error and proceeded with a nil
		// map, which produced a confusing time.Parse failure instead
		return storage.ObjectMetadata{}, err
	}
	created, err := time.Parse(time.RFC3339Nano, metadata["sys.created"])
	if err != nil {
		return storage.ObjectMetadata{}, err
	}
	size, err := strconv.ParseInt(metadata["sys.size"], 10, 64)
	if err != nil {
		return storage.ObjectMetadata{}, err
	}
	objectMetadata := storage.ObjectMetadata{
		Bucket: bucket,
		Key:    key,

		ContentType: metadata["contentType"],
		Created:     created,
		Md5:         metadata["sys.md5"],
		Size:        size,
	}
	return objectMetadata, nil
}
// ListObjects lists objects in a bucket, applying the S3-style Prefix,
// Delimiter and Maxkeys semantics from resources and returning the
// (possibly updated) resources alongside the object metadata.
func (donutStorage Storage) ListObjects(bucket string, resources storage.BucketResourcesMetadata) ([]storage.ObjectMetadata, storage.BucketResourcesMetadata, error) {
// TODO Fix IsPrefixSet && IsDelimiterSet and use them
objects, err := donutStorage.donut.ListObjects(bucket)
if err != nil {
return nil, storage.BucketResourcesMetadata{}, err
}
sort.Strings(objects)
// narrow to the requested prefix, then strip it so delimiter logic
// operates on the remainder of each key
if resources.Prefix != "" {
objects = filterPrefix(objects, resources.Prefix)
objects = removePrefix(objects, resources.Prefix)
}
// clamp Maxkeys to the (S3-conventional) 1..1000 range
if resources.Maxkeys <= 0 || resources.Maxkeys > 1000 {
resources.Maxkeys = 1000
}
var actualObjects []string
var commonPrefixes []string
if strings.TrimSpace(resources.Delimiter) != "" {
// leaf keys are returned directly; delimited keys collapse into
// deduplicated common prefixes ("directories")
actualObjects = filterDelimited(objects, resources.Delimiter)
commonPrefixes = filterNotDelimited(objects, resources.Delimiter)
commonPrefixes = extractDir(commonPrefixes, resources.Delimiter)
commonPrefixes = uniqueObjects(commonPrefixes)
resources.CommonPrefixes = commonPrefixes
} else {
actualObjects = objects
}
var results []storage.ObjectMetadata
for _, object := range actualObjects {
if len(results) >= resources.Maxkeys {
resources.IsTruncated = true
break
}
// re-attach the stripped prefix to recover the full key
metadata, err := donutStorage.GetObjectMetadata(bucket, resources.Prefix+object, "")
if err != nil {
return nil, storage.BucketResourcesMetadata{}, err
}
results = append(results, metadata)
}
return results, resources, nil
}
// filterPrefix keeps only the object names that begin with prefix.
func filterPrefix(objects []string, prefix string) []string {
	var matched []string
	for _, name := range objects {
		if strings.HasPrefix(name, prefix) {
			matched = append(matched, name)
		}
	}
	return matched
}
// removePrefix strips prefix from the front of every object name;
// names without the prefix are passed through unchanged.
func removePrefix(objects []string, prefix string) []string {
	var trimmed []string
	for _, name := range objects {
		trimmed = append(trimmed, strings.TrimPrefix(name, prefix))
	}
	return trimmed
}
// filterDelimited keeps only the names that do NOT contain the
// delimiter — i.e. the "leaf" objects at the current listing level.
func filterDelimited(objects []string, delim string) []string {
	var leaves []string
	for _, name := range objects {
		if strings.Contains(name, delim) {
			continue
		}
		leaves = append(leaves, name)
	}
	return leaves
}
// filterNotDelimited keeps only the names that DO contain the
// delimiter — i.e. objects nested under some common prefix.
func filterNotDelimited(objects []string, delim string) []string {
	var nested []string
	for _, name := range objects {
		if !strings.Contains(name, delim) {
			continue
		}
		nested = append(nested, name)
	}
	return nested
}
// extractDir maps each delimited name to its first segment plus a
// trailing "/". Note the "/" suffix is hard-coded regardless of delim,
// matching the original behavior.
func extractDir(objects []string, delim string) []string {
	var dirs []string
	for _, name := range objects {
		head := strings.SplitN(name, delim, 2)[0]
		dirs = append(dirs, head+"/")
	}
	return dirs
}
// uniqueObjects de-duplicates the given names and returns them in
// sorted order.
func uniqueObjects(objects []string) []string {
	seen := make(map[string]string, len(objects))
	for _, name := range objects {
		seen[name] = name
	}
	var unique []string
	for name := range seen {
		unique = append(unique, name)
	}
	sort.Strings(unique)
	return unique
}
// CreateObject streams reader into a new object and records its
// user-visible metadata (bucket, object, content type).
//
// On any failure after the writer is opened, the write is aborted via
// CloseWithError so the partial object is not committed and the
// underlying shard writers are released (the original leaked them).
// NOTE(review): expectedMd5sum is accepted but never validated — the
// parameter is kept for interface compatibility; confirm whether
// verification belongs here.
func (donutStorage Storage) CreateObject(bucketKey, objectKey, contentType, expectedMd5sum string, reader io.Reader) error {
	writer, err := donutStorage.donut.GetObjectWriter(bucketKey, objectKey)
	if err != nil {
		return err
	}
	if _, err := io.Copy(writer, reader); err != nil {
		writer.CloseWithError(err)
		return err
	}
	if contentType == "" {
		contentType = "application/octet-stream"
	}
	contentType = strings.TrimSpace(contentType)
	metadata := make(map[string]string)
	metadata["bucket"] = bucketKey
	metadata["object"] = objectKey
	metadata["contentType"] = contentType
	if err = writer.SetMetadata(metadata); err != nil {
		writer.CloseWithError(err)
		return err
	}
	return writer.Close()
}

View File

@@ -1,54 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donutstorage
import (
"io/ioutil"
"os"
"testing"
mstorage "github.com/minio-io/minio/pkg/storage"
. "gopkg.in/check.v1"
)
func Test(t *testing.T) { TestingT(t) }
type MySuite struct{}
var _ = Suite(&MySuite{})
// TestAPISuite runs the shared storage API conformance suite against a
// donut-backed driver, creating a fresh temp-dir root per instance and
// removing all roots afterwards.
func (s *MySuite) TestAPISuite(c *C) {
// c.Skip("Not Implemented")
var storageList []string
create := func() mstorage.Storage {
path, err := ioutil.TempDir(os.TempDir(), "minio-fs-")
c.Check(err, IsNil)
// remember the root so it can be cleaned up after the suite
storageList = append(storageList, path)
_, _, store := Start(path) // TODO Make InMemory driver
return store
}
mstorage.APITestSuite(c, create)
removeRoots(c, storageList)
}
// removeRoots deletes every temp-dir root created by the suite,
// failing the test (without aborting) on any removal error.
func removeRoots(c *C, roots []string) {
for _, root := range roots {
err := os.RemoveAll(root)
c.Check(err, IsNil)
}
}

View File

@@ -1,39 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package file
import (
"os"
"sync"
)
// Start filesystem channel
// Builds the Storage rooted at root and launches the background setup
// goroutine, returning the control channel, the error channel, and the
// storage handle.
func Start(root string) (chan<- string, <-chan error, *Storage) {
	storage := &Storage{
		root: root,
		lock: new(sync.Mutex),
	}
	ctrl := make(chan string)
	errs := make(chan error)
	go start(ctrl, errs, storage)
	return ctrl, errs, storage
}
// start performs one-time setup: it ensures the storage root directory
// exists, reports the result on errorChannel, and closes the channel.
func start(ctrlChannel <-chan string, errorChannel chan<- error, s *Storage) {
	errorChannel <- os.MkdirAll(s.root, 0700)
	close(errorChannel)
}

View File

@@ -1,133 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package file
import (
"os"
"path"
"sort"
"strings"
"io/ioutil"
"path/filepath"
mstorage "github.com/minio-io/minio/pkg/storage"
)
/// Bucket Operations
// ListBuckets - Get service
// Each immediate child directory of the storage root is a bucket; regular
// files ending in "_policy.json" are bucket policies and are skipped. Any
// other regular file at the root means the backend layout is corrupted.
func (storage *Storage) ListBuckets() ([]mstorage.BucketMetadata, error) {
	files, err := ioutil.ReadDir(storage.root)
	if err != nil {
		return []mstorage.BucketMetadata{}, mstorage.EmbedError("bucket", "", err)
	}
	var metadataList []mstorage.BucketMetadata
	for _, file := range files {
		// Skip policy files
		if strings.HasSuffix(file.Name(), "_policy.json") {
			continue
		}
		if !file.IsDir() {
			return []mstorage.BucketMetadata{}, mstorage.BackendCorrupted{Path: storage.root}
		}
		metadata := mstorage.BucketMetadata{
			Name:    file.Name(),
			Created: file.ModTime(), // TODO - provide real created time
		}
		metadataList = append(metadataList, metadata)
	}
	return metadataList, nil
}
// CreateBucket - PUT Bucket
// Validates the name, refuses to recreate an existing bucket, and makes
// the bucket directory under the storage root.
func (storage *Storage) CreateBucket(bucket string) error {
	storage.lock.Lock()
	defer storage.lock.Unlock()
	// verify bucket path legal
	if !mstorage.IsValidBucket(bucket) {
		return mstorage.BucketNameInvalid{Bucket: bucket}
	}
	bucketDir := path.Join(storage.root, bucket)
	// a successful stat means the bucket already exists
	if _, err := os.Stat(bucketDir); err == nil {
		return mstorage.BucketExists{Bucket: bucket}
	}
	if err := os.Mkdir(bucketDir, 0700); err != nil {
		return mstorage.EmbedError(bucket, "", err)
	}
	return nil
}
// ListObjects - GET bucket (list objects)
// Walks the bucket directory collecting every regular file, then funnels
// each entry through filter() so prefix/delimiter/max-keys semantics are
// applied. Results are returned sorted by object key.
// NOTE(review): p.files is a map, so which keys survive truncation at
// Maxkeys is iteration-order dependent — confirm whether deterministic
// truncation is required.
func (storage *Storage) ListObjects(bucket string, resources mstorage.BucketResourcesMetadata) ([]mstorage.ObjectMetadata, mstorage.BucketResourcesMetadata, error) {
	p := bucketDir{}
	p.files = make(map[string]os.FileInfo)
	if mstorage.IsValidBucket(bucket) == false {
		return []mstorage.ObjectMetadata{}, resources, mstorage.BucketNameInvalid{Bucket: bucket}
	}
	if resources.Prefix != "" && mstorage.IsValidObject(resources.Prefix) == false {
		return []mstorage.ObjectMetadata{}, resources, mstorage.ObjectNameInvalid{Bucket: bucket, Object: resources.Prefix}
	}
	rootPrefix := path.Join(storage.root, bucket)
	// check bucket exists
	if _, err := os.Stat(rootPrefix); os.IsNotExist(err) {
		return []mstorage.ObjectMetadata{}, resources, mstorage.BucketNotFound{Bucket: bucket}
	}
	p.root = rootPrefix
	err := filepath.Walk(rootPrefix, p.getAllFiles)
	if err != nil {
		return []mstorage.ObjectMetadata{}, resources, mstorage.EmbedError(bucket, "", err)
	}
	var metadataList []mstorage.ObjectMetadata
	var metadata mstorage.ObjectMetadata
	// Populate filtering mode
	resources.Mode = mstorage.GetMode(resources)
	for name, file := range p.files {
		// stop collecting once Maxkeys entries have been gathered; goto
		// jumps to the shared sort/return tail below
		if len(metadataList) >= resources.Maxkeys {
			resources.IsTruncated = true
			goto ret
		}
		metadata, resources, err = storage.filter(bucket, name, file, resources)
		if err != nil {
			return []mstorage.ObjectMetadata{}, resources, mstorage.EmbedError(bucket, "", err)
		}
		// filter returns a zero-value metadata when the entry is excluded
		if metadata.Bucket != "" {
			metadataList = append(metadataList, metadata)
		}
	}
ret:
	sort.Sort(byObjectKey(metadataList))
	return metadataList, resources, nil
}

View File

@@ -1,89 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package file
import (
"bufio"
"bytes"
"os"
"strings"
"sync"
mstorage "github.com/minio-io/minio/pkg/storage"
)
// Storage - file local variables
type Storage struct {
	root string      // base directory holding all buckets
	lock *sync.Mutex // serializes mutating operations
}

// Metadata - carries metadata about object
// Persisted next to each object as a gob-encoded "$metadata" sidecar file.
type Metadata struct {
	Md5sum      []byte
	ContentType string
}
// appendUniq returns slice with i appended, unless slice already
// contains i, in which case slice is returned unchanged.
func appendUniq(slice []string, i string) []string {
	found := false
	for idx := 0; idx < len(slice) && !found; idx++ {
		found = slice[idx] == i
	}
	if found {
		return slice
	}
	return append(slice, i)
}
// bucketDir collects the regular files found under a bucket root during
// a filepath.Walk, keyed by path relative to root.
type bucketDir struct {
	files map[string]os.FileInfo
	root  string
}
// getAllFiles is a filepath.Walk callback that records every regular
// file below p.root into p.files, keyed by its path relative to p.root.
// Files ending in "$metadata" (sidecar metadata records) are skipped.
func (p *bucketDir) getAllFiles(object string, fl os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	if fl.Mode().IsRegular() {
		if strings.HasSuffix(object, "$metadata") {
			return nil
		}
		// NOTE(review): Split on root+"/" takes the segment after the first
		// occurrence; a path that repeats the root string inside itself
		// would be mis-keyed — consider strings.TrimPrefix. Confirm first.
		_p := strings.Split(object, p.root+"/")
		if len(_p) > 1 {
			p.files[_p[1]] = fl
		}
	}
	return nil
}
// delimiter returns the prefix of object up to and including the first
// occurrence of delim's first byte; when that byte never occurs (or the
// input is empty) the whole object is returned. Replaces the previous
// bufio/bytes round-trip, which allocated a buffered reader just to scan
// for one byte.
func delimiter(object, delim string) string {
	var d byte // zero byte when delim is empty, matching the old ReadByte behavior
	if len(delim) > 0 {
		d = delim[0]
	}
	if i := strings.IndexByte(object, d); i >= 0 {
		return object[:i+1]
	}
	return object
}
// byObjectKey sorts object metadata lexically by object key.
type byObjectKey []mstorage.ObjectMetadata

// Len returns the number of entries.
func (b byObjectKey) Len() int { return len(b) }

// Swap exchanges entries i and j.
func (b byObjectKey) Swap(i, j int) { b[i], b[j] = b[j], b[i] }

// Less orders entries by object key.
func (b byObjectKey) Less(i, j int) bool { return b[i].Key < b[j].Key }

View File

@@ -1,94 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package file
import (
"os"
"strings"
mstorage "github.com/minio-io/minio/pkg/storage"
)
// TODO handle resources.Marker
// filter decides whether a single walked file belongs in a ListObjects
// response, honoring the prefix/delimiter combination captured in
// resources.Mode. It returns the object's metadata when the entry should
// be listed (a zero ObjectMetadata otherwise) plus resources, whose
// CommonPrefixes may have been extended with a newly found delimited
// prefix.
func (storage *Storage) filter(bucket, name string, file os.FileInfo, resources mstorage.BucketResourcesMetadata) (mstorage.ObjectMetadata, mstorage.BucketResourcesMetadata, error) {
	var err error
	var metadata mstorage.ObjectMetadata
	switch true {
	// Both delimiter and Prefix is present
	case resources.IsDelimiterPrefixSet():
		if strings.HasPrefix(name, resources.Prefix) {
			trimmedName := strings.TrimPrefix(name, resources.Prefix)
			delimitedName := delimiter(trimmedName, resources.Delimiter)
			switch true {
			case name == resources.Prefix:
				// Use resources.Prefix to filter out delimited files
				metadata, err = storage.GetObjectMetadata(bucket, name, resources.Prefix)
				if err != nil {
					return mstorage.ObjectMetadata{}, resources, mstorage.EmbedError(bucket, "", err)
				}
			case delimitedName == file.Name():
				// Use resources.Prefix to filter out delimited files
				metadata, err = storage.GetObjectMetadata(bucket, name, resources.Prefix)
				if err != nil {
					return mstorage.ObjectMetadata{}, resources, mstorage.EmbedError(bucket, "", err)
				}
			case delimitedName != "":
				// a delimited remainder becomes a common prefix, not a key
				if delimitedName == resources.Delimiter {
					resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, resources.Prefix+delimitedName)
				} else {
					resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, delimitedName)
				}
			}
		}
	// Delimiter present and Prefix is absent
	case resources.IsDelimiterSet():
		delimitedName := delimiter(name, resources.Delimiter)
		switch true {
		case delimitedName == "":
			// Do not strip prefix object output
			metadata, err = storage.GetObjectMetadata(bucket, name, "")
			if err != nil {
				return mstorage.ObjectMetadata{}, resources, mstorage.EmbedError(bucket, "", err)
			}
		case delimitedName == file.Name():
			// Do not strip prefix object output
			metadata, err = storage.GetObjectMetadata(bucket, name, "")
			if err != nil {
				return mstorage.ObjectMetadata{}, resources, mstorage.EmbedError(bucket, "", err)
			}
		case delimitedName != "":
			resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, delimitedName)
		}
	// Delimiter is absent and only Prefix is present
	case resources.IsPrefixSet():
		if strings.HasPrefix(name, resources.Prefix) {
			// Do not strip prefix object output
			metadata, err = storage.GetObjectMetadata(bucket, name, "")
			if err != nil {
				return mstorage.ObjectMetadata{}, resources, mstorage.EmbedError(bucket, "", err)
			}
		}
	case resources.IsDefault():
		// no filtering requested: every walked entry is listed
		metadata, err = storage.GetObjectMetadata(bucket, name, "")
		if err != nil {
			return mstorage.ObjectMetadata{}, resources, mstorage.EmbedError(bucket, "", err)
		}
	}
	return metadata, resources, nil
}

View File

@@ -1,276 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package file
import (
"bytes"
"io"
"os"
"path"
"strings"
"crypto/md5"
"encoding/base64"
"encoding/gob"
"encoding/hex"
mstorage "github.com/minio-io/minio/pkg/storage"
)
/// Object Operations
// GetPartialObject - GET object from range
// Streams length bytes of the object into w starting at byte offset
// start, returning the number of bytes actually written.
func (storage *Storage) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) {
	// validate bucket
	if !mstorage.IsValidBucket(bucket) {
		return 0, mstorage.BucketNameInvalid{Bucket: bucket}
	}
	// validate object
	if !mstorage.IsValidObject(object) {
		return 0, mstorage.ObjectNameInvalid{Bucket: bucket, Object: object}
	}
	objectPath := path.Join(storage.root, bucket, object)
	filestat, err := os.Stat(objectPath)
	switch {
	case err == nil:
		// a directory at the object path is not an object
		if filestat.IsDir() {
			return 0, mstorage.ObjectNotFound{Bucket: bucket, Object: object}
		}
	case os.IsNotExist(err):
		return 0, mstorage.ObjectNotFound{Bucket: bucket, Object: object}
	default:
		return 0, mstorage.EmbedError(bucket, object, err)
	}
	file, err := os.Open(objectPath)
	if err != nil {
		return 0, mstorage.EmbedError(bucket, object, err)
	}
	// defer only after a successful open; previously Close was deferred
	// before the error check
	defer file.Close()
	if _, err := file.Seek(start, os.SEEK_SET); err != nil {
		return 0, mstorage.EmbedError(bucket, object, err)
	}
	count, err := io.CopyN(w, file, length)
	if err != nil {
		return count, mstorage.EmbedError(bucket, object, err)
	}
	return count, nil
}
// GetObject - GET object from key
// Streams the whole object into w, returning the number of bytes written.
func (storage *Storage) GetObject(w io.Writer, bucket string, object string) (int64, error) {
	// validate bucket
	if !mstorage.IsValidBucket(bucket) {
		return 0, mstorage.BucketNameInvalid{Bucket: bucket}
	}
	// validate object
	if !mstorage.IsValidObject(object) {
		return 0, mstorage.ObjectNameInvalid{Bucket: bucket, Object: object}
	}
	objectPath := path.Join(storage.root, bucket, object)
	filestat, err := os.Stat(objectPath)
	switch {
	case err == nil:
		// a directory at the object path is not an object
		if filestat.IsDir() {
			return 0, mstorage.ObjectNotFound{Bucket: bucket, Object: object}
		}
	case os.IsNotExist(err):
		return 0, mstorage.ObjectNotFound{Bucket: bucket, Object: object}
	default:
		return 0, mstorage.EmbedError(bucket, object, err)
	}
	file, err := os.Open(objectPath)
	if err != nil {
		return 0, mstorage.EmbedError(bucket, object, err)
	}
	// defer only after a successful open; previously Close was deferred
	// before the error check
	defer file.Close()
	count, err := io.Copy(w, file)
	if err != nil {
		return count, mstorage.EmbedError(bucket, object, err)
	}
	return count, nil
}
// GetObjectMetadata - HEAD object
// Reads the gob-encoded "$metadata" sidecar and combines it with the
// object's stat data. Key in the result has prefix trimmed off.
func (storage *Storage) GetObjectMetadata(bucket, object, prefix string) (mstorage.ObjectMetadata, error) {
	if !mstorage.IsValidBucket(bucket) {
		return mstorage.ObjectMetadata{}, mstorage.BucketNameInvalid{Bucket: bucket}
	}
	if !mstorage.IsValidObject(object) {
		// fix: previously reported the bucket name as the offending Object
		return mstorage.ObjectMetadata{}, mstorage.ObjectNameInvalid{Bucket: bucket, Object: object}
	}
	// Do not use path.Join() since path.Join strips off any object names with '/', use them as is
	// in a static manner so that we can send a proper 'ObjectNotFound' reply back upon os.Stat()
	objectPath := storage.root + "/" + bucket + "/" + object
	stat, err := os.Stat(objectPath)
	if os.IsNotExist(err) {
		return mstorage.ObjectMetadata{}, mstorage.ObjectNotFound{Bucket: bucket, Object: object}
	}
	if err != nil {
		// fix: other stat failures previously fell through and
		// dereferenced a nil FileInfo below
		return mstorage.ObjectMetadata{}, mstorage.EmbedError(bucket, object, err)
	}
	if _, err := os.Stat(objectPath + "$metadata"); os.IsNotExist(err) {
		return mstorage.ObjectMetadata{}, mstorage.ObjectNotFound{Bucket: bucket, Object: object}
	}
	file, err := os.Open(objectPath + "$metadata")
	if err != nil {
		return mstorage.ObjectMetadata{}, mstorage.EmbedError(bucket, object, err)
	}
	// defer only after a successful open
	defer file.Close()
	var deserializedMetadata Metadata
	if err := gob.NewDecoder(file).Decode(&deserializedMetadata); err != nil {
		return mstorage.ObjectMetadata{}, mstorage.EmbedError(bucket, object, err)
	}
	contentType := "application/octet-stream"
	if deserializedMetadata.ContentType != "" {
		contentType = deserializedMetadata.ContentType
	}
	contentType = strings.TrimSpace(contentType)
	// fall back to a synthetic etag when no md5 sum was recorded
	etag := bucket + "#" + path.Base(object)
	if len(deserializedMetadata.Md5sum) != 0 {
		etag = hex.EncodeToString(deserializedMetadata.Md5sum)
	}
	trimmedObject := strings.TrimPrefix(object, prefix)
	metadata := mstorage.ObjectMetadata{
		Bucket:      bucket,
		Key:         trimmedObject,
		Created:     stat.ModTime(),
		Size:        stat.Size(),
		Md5:         etag,
		ContentType: contentType,
	}
	return metadata, nil
}
// CreateObject - PUT object
// Writes the payload to <root>/<bucket>/<key> while computing its md5 sum,
// persists a gob-encoded Metadata sidecar at "<objectPath>$metadata", and,
// when a Content-MD5 was supplied, verifies it against the computed sum.
// TODO Commits should stage then move instead of writing directly
func (storage *Storage) CreateObject(bucket, key, contentType, md5sum string, data io.Reader) error {
	storage.lock.Lock()
	defer storage.lock.Unlock()
	// check bucket name valid
	if !mstorage.IsValidBucket(bucket) {
		return mstorage.BucketNameInvalid{Bucket: bucket}
	}
	// check bucket exists
	if _, err := os.Stat(path.Join(storage.root, bucket)); os.IsNotExist(err) {
		return mstorage.BucketNotFound{Bucket: bucket}
	}
	// verify object path legal
	if !mstorage.IsValidObject(key) {
		return mstorage.ObjectNameInvalid{Bucket: bucket, Object: key}
	}
	// verify content type, defaulting to binary
	if contentType == "" {
		contentType = "application/octet-stream"
	}
	contentType = strings.TrimSpace(contentType)
	// ensure parent directories for nested object keys exist
	objectPath := path.Join(storage.root, bucket, key)
	objectDir := path.Dir(objectPath)
	if _, err := os.Stat(objectDir); os.IsNotExist(err) {
		if err := os.MkdirAll(objectDir, 0700); err != nil {
			return mstorage.EmbedError(bucket, key, err)
		}
	}
	// refuse to overwrite an existing object
	if _, err := os.Stat(objectPath); !os.IsNotExist(err) {
		return mstorage.ObjectExists{
			Bucket: bucket,
			Object: key,
		}
	}
	// write object data, teeing through md5 so the sum comes for free
	file, err := os.OpenFile(objectPath, os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return mstorage.EmbedError(bucket, key, err)
	}
	h := md5.New()
	if _, err := io.Copy(io.MultiWriter(file, h), data); err != nil {
		// fix: Close was previously deferred before the error check and
		// the data file stayed open until function return
		file.Close()
		return mstorage.EmbedError(bucket, key, err)
	}
	if err := file.Close(); err != nil {
		return mstorage.EmbedError(bucket, key, err)
	}
	// persist metadata as a gob-encoded sidecar
	metadataFile, err := os.OpenFile(objectPath+"$metadata", os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return mstorage.EmbedError(bucket, key, err)
	}
	defer metadataFile.Close()
	metadata := &Metadata{
		ContentType: contentType,
		Md5sum:      h.Sum(nil),
	}
	if err := gob.NewEncoder(metadataFile).Encode(metadata); err != nil {
		return mstorage.EmbedError(bucket, key, err)
	}
	// Verify data received to be correct, Content-MD5 received
	if md5sum != "" {
		expected, err := base64.StdEncoding.DecodeString(md5sum)
		if err != nil {
			return mstorage.InvalidDigest{Bucket: bucket, Key: key, Md5: md5sum}
		}
		if !bytes.Equal(metadata.Md5sum, expected) {
			// NOTE(review): the object is already on disk at this point;
			// a digest mismatch leaves it behind. TODO stage-then-move.
			return mstorage.BadDigest{Bucket: bucket, Key: key, Md5: md5sum}
		}
	}
	return nil
}

View File

@@ -1,111 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package file
import (
"os"
"path"
"encoding/json"
mstorage "github.com/minio-io/minio/pkg/storage"
)
// GetBucketPolicy - GET bucket policy
// Loads the JSON policy stored beside the bucket directory at
// "<root>/<bucket>_policy.json".
func (storage *Storage) GetBucketPolicy(bucket string) (mstorage.BucketPolicy, error) {
	storage.lock.Lock()
	defer storage.lock.Unlock()
	// verify bucket path legal
	if !mstorage.IsValidBucket(bucket) {
		return mstorage.BucketPolicy{}, mstorage.BucketNameInvalid{Bucket: bucket}
	}
	// check if bucket exists
	if _, err := os.Stat(path.Join(storage.root, bucket)); err != nil {
		return mstorage.BucketPolicy{}, mstorage.BucketNotFound{Bucket: bucket}
	}
	// get policy path
	bucketPolicy := path.Join(storage.root, bucket+"_policy.json")
	filestat, err := os.Stat(bucketPolicy)
	if os.IsNotExist(err) {
		return mstorage.BucketPolicy{}, mstorage.BucketPolicyNotFound{Bucket: bucket}
	}
	if err != nil {
		// fix: a non-NotExist stat error previously dereferenced a nil FileInfo
		return mstorage.BucketPolicy{}, mstorage.EmbedError(bucket, "", err)
	}
	if filestat.IsDir() {
		return mstorage.BucketPolicy{}, mstorage.BackendCorrupted{Path: bucketPolicy}
	}
	file, err := os.OpenFile(bucketPolicy, os.O_RDONLY, 0666)
	if err != nil {
		return mstorage.BucketPolicy{}, mstorage.EmbedError(bucket, "", err)
	}
	// defer only after a successful open
	defer file.Close()
	var p mstorage.BucketPolicy
	if err := json.NewDecoder(file).Decode(&p); err != nil {
		return mstorage.BucketPolicy{}, mstorage.EmbedError(bucket, "", err)
	}
	return p, nil
}
// CreateBucketPolicy - PUT bucket policy
// Persists p as JSON at "<root>/<bucket>_policy.json", overwriting any
// existing policy file.
func (storage *Storage) CreateBucketPolicy(bucket string, p mstorage.BucketPolicy) error {
	storage.lock.Lock()
	defer storage.lock.Unlock()
	// verify bucket path legal
	if !mstorage.IsValidBucket(bucket) {
		return mstorage.BucketNameInvalid{Bucket: bucket}
	}
	// check if bucket exists
	if _, err := os.Stat(path.Join(storage.root, bucket)); err != nil {
		return mstorage.BucketNotFound{
			Bucket: bucket,
		}
	}
	// get policy path
	bucketPolicy := path.Join(storage.root, bucket+"_policy.json")
	filestat, err := os.Stat(bucketPolicy)
	switch {
	case err == nil:
		// refuse to write over a directory occupying the policy path
		if filestat.IsDir() {
			return mstorage.BackendCorrupted{Path: bucketPolicy}
		}
	case !os.IsNotExist(err):
		// fix: a non-NotExist stat error previously dereferenced a nil FileInfo
		return mstorage.EmbedError(bucket, "", err)
	}
	file, err := os.OpenFile(bucketPolicy, os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return mstorage.EmbedError(bucket, "", err)
	}
	// defer only after a successful open
	defer file.Close()
	if err := json.NewEncoder(file).Encode(p); err != nil {
		return mstorage.EmbedError(bucket, "", err)
	}
	return nil
}

View File

@@ -1,53 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package file
import (
"io/ioutil"
"os"
"testing"
mstorage "github.com/minio-io/minio/pkg/storage"
. "gopkg.in/check.v1"
)
// Test hooks gocheck into the standard "go test" runner.
func Test(t *testing.T) { TestingT(t) }

// MySuite is the gocheck suite for the file storage driver.
type MySuite struct{}

var _ = Suite(&MySuite{})

// TestAPISuite runs the shared storage API conformance suite against a
// file driver rooted in a temporary directory, then removes the roots.
func (s *MySuite) TestAPISuite(c *C) {
	var storageList []string
	create := func() mstorage.Storage {
		// every invocation gets its own scratch root under the OS temp dir
		path, err := ioutil.TempDir(os.TempDir(), "minio-file-")
		c.Check(err, IsNil)
		storageList = append(storageList, path)
		_, _, store := Start(path)
		return store
	}
	mstorage.APITestSuite(c, create)
	removeRoots(c, storageList)
}

// removeRoots deletes the temporary roots created by the suite.
func removeRoots(c *C, roots []string) {
	for _, root := range roots {
		err := os.RemoveAll(root)
		c.Check(err, IsNil)
	}
}

View File

@@ -1,260 +0,0 @@
/*
* Minio Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package memory
import (
"bufio"
"bytes"
"fmt"
"io"
"sort"
"strings"
"sync"
"time"
"crypto/md5"
"encoding/hex"
mstorage "github.com/minio-io/minio/pkg/storage"
)
// Storage - local variables
type Storage struct {
	bucketdata map[string]storedBucket // bucket name -> bucket record
	objectdata map[string]storedObject // object key -> object record
	lock       *sync.RWMutex           // guards both maps
}

// storedBucket is the in-memory record for one bucket.
type storedBucket struct {
	metadata mstorage.BucketMetadata
	// owner string // TODO
	// id string // TODO
}

// storedObject is the in-memory record for one object: its metadata plus
// the complete payload.
type storedObject struct {
	metadata mstorage.ObjectMetadata
	data     []byte
}
// Start memory object server
// Returns a control channel, an error channel (closed immediately since
// in-memory startup cannot fail), and a ready-to-use Storage.
func Start() (chan<- string, <-chan error, *Storage) {
	ctrlChannel := make(chan string)
	errorChannel := make(chan error)
	go start(ctrlChannel, errorChannel)
	return ctrlChannel, errorChannel, &Storage{
		bucketdata: make(map[string]storedBucket),
		objectdata: make(map[string]storedObject),
		lock:       new(sync.RWMutex),
	}
}

// start signals successful startup by closing the error channel.
func start(ctrlChannel <-chan string, errorChannel chan<- error) {
	close(errorChannel)
}
// GetObject - GET object from memory buffer
// Copies the stored payload for object into w, returning bytes written.
func (storage *Storage) GetObject(w io.Writer, bucket string, object string) (int64, error) {
	// take the read lock: CreateObject mutates objectdata under the write
	// lock, so an unguarded map read here was a data race
	storage.lock.RLock()
	defer storage.lock.RUnlock()
	if val, ok := storage.objectdata[object]; ok {
		objectBuffer := bytes.NewBuffer(val.data)
		return io.Copy(w, objectBuffer)
	}
	return 0, mstorage.ObjectNotFound{Bucket: bucket, Object: object}
}
// GetPartialObject - GET object from memory buffer range
// Not implemented for the memory driver.
func (storage *Storage) GetPartialObject(w io.Writer, bucket, object string, start, end int64) (int64, error) {
	return 0, mstorage.APINotImplemented{API: "GetPartialObject"}
}

// CreateBucketPolicy - Not implemented
func (storage *Storage) CreateBucketPolicy(bucket string, policy mstorage.BucketPolicy) error {
	return mstorage.APINotImplemented{API: "PutBucketPolicy"}
}

// GetBucketPolicy - Not implemented
func (storage *Storage) GetBucketPolicy(bucket string) (mstorage.BucketPolicy, error) {
	return mstorage.BucketPolicy{}, mstorage.APINotImplemented{API: "GetBucketPolicy"}
}
// CreateObject - PUT object to memory buffer
// Buffers the entire payload, computes its md5 sum and size, and records
// the object under key.
// NOTE(review): objectdata is keyed by object key alone, so the same key
// in two different buckets collides — confirm intended semantics. The
// md5sum argument is accepted but never verified here.
func (storage *Storage) CreateObject(bucket, key, contentType, md5sum string, data io.Reader) error {
	storage.lock.Lock()
	defer storage.lock.Unlock()
	if _, ok := storage.bucketdata[bucket]; !ok {
		return mstorage.BucketNotFound{Bucket: bucket}
	}
	if _, ok := storage.objectdata[key]; ok {
		return mstorage.ObjectExists{Bucket: bucket, Object: key}
	}
	if contentType == "" {
		contentType = "application/octet-stream"
	}
	contentType = strings.TrimSpace(contentType)
	var bytesBuffer bytes.Buffer
	if _, err := io.Copy(&bytesBuffer, data); err != nil {
		// fix: a failed read previously stored an empty object silently
		return mstorage.EmbedError(bucket, key, err)
	}
	md5SumBytes := md5.Sum(bytesBuffer.Bytes())
	newObject := storedObject{
		metadata: mstorage.ObjectMetadata{
			Bucket:      bucket,
			Key:         key,
			ContentType: contentType,
			Created:     time.Now(),
			Md5:         hex.EncodeToString(md5SumBytes[:]),
			Size:        int64(bytesBuffer.Len()),
		},
		data: bytesBuffer.Bytes(),
	}
	storage.objectdata[key] = newObject
	return nil
}
// CreateBucket - create bucket in memory
// Validates the name, rejects duplicates, and records a fresh bucket
// entry stamped with the current time.
func (storage *Storage) CreateBucket(bucketName string) error {
	storage.lock.Lock()
	defer storage.lock.Unlock()
	if !mstorage.IsValidBucket(bucketName) {
		return mstorage.BucketNameInvalid{Bucket: bucketName}
	}
	if _, found := storage.bucketdata[bucketName]; found {
		return mstorage.BucketExists{Bucket: bucketName}
	}
	storage.bucketdata[bucketName] = storedBucket{
		metadata: mstorage.BucketMetadata{
			Name:    bucketName,
			Created: time.Now(),
		},
	}
	return nil
}
// delimiter returns the prefix of object up to and including the first
// occurrence of delim's first byte; when that byte never occurs (or the
// input is empty) the whole object is returned. Replaces the previous
// bufio/bytes round-trip, which built a buffered reader just to scan for
// one byte.
func delimiter(object, delim string) string {
	var d byte // zero byte when delim is empty, matching the old ReadByte behavior
	if len(delim) > 0 {
		d = delim[0]
	}
	if i := strings.IndexByte(object, d); i >= 0 {
		return object[:i+1]
	}
	return object
}
// appendUniq adds i to slice only when it is not already present;
// otherwise the slice is returned untouched.
func appendUniq(slice []string, i string) []string {
	for idx := range slice {
		if slice[idx] == i {
			return slice
		}
	}
	return append(slice, i)
}
// ListObjects - list objects from memory
// Object keys are matched against the prefix/delimiter resources,
// deduplicated, sorted, and then capped at resources.Maxkeys.
// NOTE(review): reads bucketdata/objectdata without taking storage.lock,
// racing with concurrent CreateObject/CreateBucket — confirm and add an
// RLock. The fmt.Println below looks like leftover debug output; removing
// it also requires dropping the now-unused "fmt" import.
func (storage *Storage) ListObjects(bucket string, resources mstorage.BucketResourcesMetadata) ([]mstorage.ObjectMetadata, mstorage.BucketResourcesMetadata, error) {
	if _, ok := storage.bucketdata[bucket]; ok == false {
		return []mstorage.ObjectMetadata{}, mstorage.BucketResourcesMetadata{IsTruncated: false}, mstorage.BucketNotFound{Bucket: bucket}
	}
	var results []mstorage.ObjectMetadata
	var keys []string
	for key := range storage.objectdata {
		switch true {
		// Prefix absent, delimit object key based on delimiter
		case resources.Delimiter != "" && resources.Prefix == "":
			delimitedName := delimiter(key, resources.Delimiter)
			switch true {
			case delimitedName == "" || delimitedName == key:
				keys = appendUniq(keys, key)
			case delimitedName != "":
				resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, delimitedName)
			}
		// Prefix present, delimit object key with prefix key based on delimiter
		case resources.Delimiter != "" && resources.Prefix != "" && strings.HasPrefix(key, resources.Prefix):
			trimmedName := strings.TrimPrefix(key, resources.Prefix)
			delimitedName := delimiter(trimmedName, resources.Delimiter)
			// NOTE(review): leftover debug print — see header note
			fmt.Println(trimmedName, delimitedName, key, resources.Prefix)
			switch true {
			case key == resources.Prefix:
				keys = appendUniq(keys, key)
			// DelimitedName - requires resources.Prefix as it was trimmed off earlier in the flow
			case key == resources.Prefix+delimitedName:
				keys = appendUniq(keys, key)
			case delimitedName != "":
				if delimitedName == resources.Delimiter {
					resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, resources.Prefix+delimitedName)
				} else {
					resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, delimitedName)
				}
			}
		// Prefix present, nothing to delimit
		case resources.Delimiter == "" && resources.Prefix != "" && strings.HasPrefix(key, resources.Prefix):
			keys = appendUniq(keys, key)
		// Prefix and delimiter absent
		case resources.Prefix == "" && resources.Delimiter == "":
			keys = appendUniq(keys, key)
		}
	}
	sort.Strings(keys)
	for _, key := range keys {
		if len(results) == resources.Maxkeys {
			return results, mstorage.BucketResourcesMetadata{IsTruncated: true}, nil
		}
		object := storage.objectdata[key]
		// objectdata is keyed by object key alone; skip entries belonging
		// to other buckets
		if bucket == object.metadata.Bucket {
			results = append(results, object.metadata)
		}
	}
	return results, resources, nil
}
// ByBucketName is a type for sorting bucket metadata by bucket name
type ByBucketName []mstorage.BucketMetadata

// Len returns the number of entries.
func (b ByBucketName) Len() int { return len(b) }

// Swap exchanges entries i and j.
func (b ByBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }

// Less orders entries lexically by bucket name.
func (b ByBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }
// ListBuckets - List buckets from memory
// Returns bucket metadata sorted by bucket name.
func (storage *Storage) ListBuckets() ([]mstorage.BucketMetadata, error) {
	// take the read lock: CreateBucket mutates bucketdata under the write
	// lock, so iterating the map unguarded was a data race
	storage.lock.RLock()
	defer storage.lock.RUnlock()
	var results []mstorage.BucketMetadata
	for _, bucket := range storage.bucketdata {
		results = append(results, bucket.metadata)
	}
	sort.Sort(ByBucketName(results))
	return results, nil
}
// GetObjectMetadata - get object metadata from memory
// The prefix argument is accepted for interface parity but unused by this
// driver.
func (storage *Storage) GetObjectMetadata(bucket, key, prefix string) (mstorage.ObjectMetadata, error) {
	// take the read lock: CreateObject mutates objectdata under the write
	// lock, so an unguarded map read here was a data race
	storage.lock.RLock()
	defer storage.lock.RUnlock()
	if object, ok := storage.objectdata[key]; ok {
		return object.metadata, nil
	}
	return mstorage.ObjectMetadata{}, mstorage.ObjectNotFound{Bucket: bucket, Object: key}
}

View File

@@ -1,40 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package memory
import (
"testing"
mstorage "github.com/minio-io/minio/pkg/storage"
. "gopkg.in/check.v1"
)
// Test hooks gocheck into the standard "go test" runner.
func Test(t *testing.T) { TestingT(t) }

// MySuite is the gocheck suite for the in-memory storage driver.
type MySuite struct{}

var _ = Suite(&MySuite{})

// TestAPISuite runs the shared storage API conformance suite against a
// fresh in-memory driver; no on-disk cleanup is needed.
func (s *MySuite) TestAPISuite(c *C) {
	create := func() mstorage.Storage {
		_, _, store := Start()
		return store
	}
	mstorage.APITestSuite(c, create)
}

View File

@@ -1,150 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package storage
import (
"io"
"regexp"
"time"
"unicode/utf8"
)
// Storage - generic API interface
// Drivers in this tree (file, memory) satisfy it and are exercised
// against the shared APITestSuite.
type Storage interface {
	// Bucket Operations
	ListBuckets() ([]BucketMetadata, error)
	CreateBucket(bucket string) error
	CreateBucketPolicy(bucket string, p BucketPolicy) error
	GetBucketPolicy(bucket string) (BucketPolicy, error)

	// Object Operations
	GetObject(w io.Writer, bucket, object string) (int64, error)
	GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error)
	GetObjectMetadata(bucket string, object string, prefix string) (ObjectMetadata, error)
	ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error)
	CreateObject(bucket string, key string, contentType string, md5sum string, data io.Reader) error
}
// BucketMetadata - name and create date
type BucketMetadata struct {
	Name    string    // bucket name
	Created time.Time // creation time as reported by the driver
}

// ObjectMetadata - object key and its relevant metadata
type ObjectMetadata struct {
	Bucket string // owning bucket
	Key    string // object key (drivers may trim a listing prefix)

	ContentType string    // MIME type, "application/octet-stream" by default
	Created     time.Time // creation/modification time
	Md5         string    // hex md5 sum (drivers may substitute a synthetic etag)
	Size        int64     // payload size in bytes
}
// FilterMode type
type FilterMode int

// FilterMode list
const (
	// DelimiterPrefixMode - both delimiter and prefix are set
	DelimiterPrefixMode FilterMode = iota
	// DelimiterMode - only delimiter is set
	DelimiterMode
	// PrefixMode - only prefix is set
	PrefixMode
	// DefaultMode - neither delimiter nor prefix is set
	DefaultMode
)

// BucketResourcesMetadata - various types of bucket resources
type BucketResourcesMetadata struct {
	Prefix         string     // list only keys beginning with this prefix
	Marker         string     // TODO handle: resume listing after this key
	Maxkeys        int        // maximum number of keys to return
	Delimiter      string     // group keys sharing a delimited prefix
	IsTruncated    bool       // set by drivers when Maxkeys was reached
	CommonPrefixes []string   // delimited prefixes discovered while listing
	Mode           FilterMode // populated via GetMode
	Policy         bool

	// TODO
	Logging      string
	Notification string
}
// GetMode - Populate filter mode
func GetMode(resources BucketResourcesMetadata) FilterMode {
var f FilterMode
switch true {
case resources.Delimiter != "" && resources.Prefix != "":
f = DelimiterPrefixMode
case resources.Delimiter != "" && resources.Prefix == "":
f = DelimiterMode
case resources.Delimiter == "" && resources.Prefix != "":
f = PrefixMode
case resources.Delimiter == "" && resources.Prefix == "":
f = DefaultMode
}
return f
}
// IsDelimiterPrefixSet Delimiter and Prefix set
func (b BucketResourcesMetadata) IsDelimiterPrefixSet() bool {
return b.Mode == DelimiterPrefixMode
}
// IsDelimiterSet Delimiter set
func (b BucketResourcesMetadata) IsDelimiterSet() bool {
return b.Mode == DelimiterMode
}
// IsPrefixSet Prefix set
func (b BucketResourcesMetadata) IsPrefixSet() bool {
return b.Mode == PrefixMode
}
// IsDefault No query values
func (b BucketResourcesMetadata) IsDefault() bool {
return b.Mode == DefaultMode
}
// IsValidBucket - verify bucket name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func IsValidBucket(bucket string) bool {
	// Names must be 3-63 characters long.
	if len(bucket) < 3 || len(bucket) > 63 {
		return false
	}
	// Names may not begin or end with a period.
	if bucket[0] == '.' || bucket[len(bucket)-1] == '.' {
		return false
	}
	// Consecutive periods are not allowed. (`match == true` was the
	// original non-idiomatic form; the boolean is used directly now.)
	if match, _ := regexp.MatchString("\\.\\.", bucket); match {
		return false
	}
	// We don't support buckets with '.' in them
	match, _ := regexp.MatchString("^[a-zA-Z][a-zA-Z0-9\\-]+[a-zA-Z0-9]$", bucket)
	return match
}
// IsValidObject - verify object name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
// A valid key is 1-1024 bytes of well-formed UTF-8.
func IsValidObject(object string) bool {
	switch {
	case len(object) == 0, len(object) > 1024:
		return false
	case !utf8.ValidString(object):
		return false
	default:
		return true
	}
}

View File

@@ -1,391 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package storage
import (
"bytes"
"math/rand"
"strconv"
"gopkg.in/check.v1"
"time"
)
// APITestSuite - collection of API tests run against any Storage
// implementation produced by create.
func APITestSuite(c *check.C, create func() Storage) {
	testCreateBucket(c, create)
	testMultipleObjectCreation(c, create)
	testPaging(c, create)
	testObjectOverwriteFails(c, create)
	testNonExistantBucketOperations(c, create)
	testBucketRecreateFails(c, create)
	testPutObjectInSubdir(c, create)
	testListBuckets(c, create)
	testListBucketsOrder(c, create)
	testListObjectsTestsForNonExistantBucket(c, create)
	testNonExistantObjectInBucket(c, create)
	testGetDirectoryReturnsObjectNotFound(c, create)
	testDefaultContentType(c, create)
	//testContentMd5Set(c, create) TODO
}

// testCreateBucket - a fresh backend accepts a simple bucket creation.
func testCreateBucket(c *check.C, create func() Storage) {
	storage := create()
	err := storage.CreateBucket("bucket")
	c.Assert(err, check.IsNil)
}

// testMultipleObjectCreation - store ten random objects, then verify each
// reads back byte-for-byte, reports the right size, and has a unique md5.
func testMultipleObjectCreation(c *check.C, create func() Storage) {
	objects := make(map[string][]byte)
	storage := create()
	err := storage.CreateBucket("bucket")
	c.Assert(err, check.IsNil)
	for i := 0; i < 10; i++ {
		// build a random 10-digit payload per object
		randomPerm := rand.Perm(10)
		randomString := ""
		for _, num := range randomPerm {
			randomString = randomString + strconv.Itoa(num)
		}
		key := "obj" + strconv.Itoa(i)
		objects[key] = []byte(randomString)
		err := storage.CreateObject("bucket", key, "", "", bytes.NewBufferString(randomString))
		c.Assert(err, check.IsNil)
	}
	// ensure no duplicate etags
	etags := make(map[string]string)
	for key, value := range objects {
		var byteBuffer bytes.Buffer
		_, err := storage.GetObject(&byteBuffer, "bucket", key)
		c.Assert(err, check.IsNil)
		c.Assert(byteBuffer.Bytes(), check.DeepEquals, value)
		metadata, err := storage.GetObjectMetadata("bucket", key, "")
		c.Assert(err, check.IsNil)
		c.Assert(metadata.Size, check.Equals, int64(len(value)))
		_, ok := etags[metadata.Md5]
		c.Assert(ok, check.Equals, false)
		etags[metadata.Md5] = metadata.Md5
	}
}
// testPaging - exercise ListObjects paging, prefix and delimiter filtering,
// and the lexical ordering of results. Assertion order matters: each stage
// builds on the objects created by the previous one.
func testPaging(c *check.C, create func() Storage) {
	storage := create()
	storage.CreateBucket("bucket")
	resources := BucketResourcesMetadata{}
	objects, resources, err := storage.ListObjects("bucket", resources)
	c.Assert(err, check.IsNil)
	c.Assert(len(objects), check.Equals, 0)
	c.Assert(resources.IsTruncated, check.Equals, false)
	// check before paging occurs
	for i := 0; i < 5; i++ {
		key := "obj" + strconv.Itoa(i)
		storage.CreateObject("bucket", key, "", "", bytes.NewBufferString(key))
		resources.Maxkeys = 5
		objects, resources, err = storage.ListObjects("bucket", resources)
		c.Assert(len(objects), check.Equals, i+1)
		c.Assert(resources.IsTruncated, check.Equals, false)
		c.Assert(err, check.IsNil)
	}
	// check after paging occurs pages work
	for i := 6; i <= 10; i++ {
		key := "obj" + strconv.Itoa(i)
		storage.CreateObject("bucket", key, "", "", bytes.NewBufferString(key))
		resources.Maxkeys = 5
		objects, resources, err = storage.ListObjects("bucket", resources)
		c.Assert(len(objects), check.Equals, 5)
		c.Assert(resources.IsTruncated, check.Equals, true)
		c.Assert(err, check.IsNil)
	}
	// check paging with prefix at end returns less objects
	{
		storage.CreateObject("bucket", "newPrefix", "", "", bytes.NewBufferString("prefix1"))
		storage.CreateObject("bucket", "newPrefix2", "", "", bytes.NewBufferString("prefix2"))
		resources.Prefix = "new"
		resources.Maxkeys = 5
		objects, resources, err = storage.ListObjects("bucket", resources)
		c.Assert(len(objects), check.Equals, 2)
	}
	// check ordering of pages
	{
		resources.Prefix = ""
		resources.Maxkeys = 1000
		objects, resources, err = storage.ListObjects("bucket", resources)
		c.Assert(objects[0].Key, check.Equals, "newPrefix")
		c.Assert(objects[1].Key, check.Equals, "newPrefix2")
		c.Assert(objects[2].Key, check.Equals, "obj0")
		c.Assert(objects[3].Key, check.Equals, "obj1")
		c.Assert(objects[4].Key, check.Equals, "obj10")
	}
	// check delimited results with delimiter and prefix
	{
		storage.CreateObject("bucket", "this/is/delimited", "", "", bytes.NewBufferString("prefix1"))
		storage.CreateObject("bucket", "this/is/also/a/delimited/file", "", "", bytes.NewBufferString("prefix2"))
		var prefixes []string
		resources.CommonPrefixes = prefixes // allocate new every time
		resources.Delimiter = "/"
		resources.Prefix = "this/is/"
		resources.Maxkeys = 10
		objects, resources, err = storage.ListObjects("bucket", resources)
		c.Assert(err, check.IsNil)
		c.Assert(len(objects), check.Equals, 1)
		c.Assert(resources.CommonPrefixes[0], check.Equals, "also/")
	}
	time.Sleep(time.Second)
	// check delimited results with delimiter without prefix
	{
		var prefixes []string
		resources.CommonPrefixes = prefixes // allocate new every time
		resources.Delimiter = "/"
		resources.Prefix = ""
		resources.Maxkeys = 1000
		objects, resources, err = storage.ListObjects("bucket", resources)
		c.Assert(objects[0].Key, check.Equals, "newPrefix")
		c.Assert(objects[1].Key, check.Equals, "newPrefix2")
		c.Assert(objects[2].Key, check.Equals, "obj0")
		c.Assert(objects[3].Key, check.Equals, "obj1")
		c.Assert(objects[4].Key, check.Equals, "obj10")
		c.Assert(resources.CommonPrefixes[0], check.Equals, "this/")
	}
	// check ordering of results with prefix
	{
		resources.Prefix = "obj"
		resources.Delimiter = ""
		resources.Maxkeys = 1000
		objects, resources, err = storage.ListObjects("bucket", resources)
		c.Assert(objects[0].Key, check.Equals, "obj0")
		c.Assert(objects[1].Key, check.Equals, "obj1")
		c.Assert(objects[2].Key, check.Equals, "obj10")
		c.Assert(objects[3].Key, check.Equals, "obj2")
		c.Assert(objects[4].Key, check.Equals, "obj3")
	}
	// check ordering of results with prefix and no paging
	{
		resources.Prefix = "new"
		resources.Maxkeys = 5
		objects, resources, err = storage.ListObjects("bucket", resources)
		c.Assert(objects[0].Key, check.Equals, "newPrefix")
		c.Assert(objects[1].Key, check.Equals, "newPrefix2")
	}
}
// testObjectOverwriteFails - writing a second object under an existing key
// must fail and must leave the original content intact.
func testObjectOverwriteFails(c *check.C, create func() Storage) {
	storage := create()
	storage.CreateBucket("bucket")
	err := storage.CreateObject("bucket", "object", "", "", bytes.NewBufferString("one"))
	c.Assert(err, check.IsNil)
	err = storage.CreateObject("bucket", "object", "", "", bytes.NewBufferString("three"))
	c.Assert(err, check.Not(check.IsNil))
	var bytesBuffer bytes.Buffer
	length, err := storage.GetObject(&bytesBuffer, "bucket", "object")
	c.Assert(length, check.Equals, int64(len("one")))
	c.Assert(err, check.IsNil)
	c.Assert(string(bytesBuffer.Bytes()), check.Equals, "one")
}

// testNonExistantBucketOperations - writing into a bucket that was never
// created must fail.
func testNonExistantBucketOperations(c *check.C, create func() Storage) {
	storage := create()
	err := storage.CreateObject("bucket", "object", "", "", bytes.NewBufferString("one"))
	c.Assert(err, check.Not(check.IsNil))
}

// testBucketRecreateFails - creating the same bucket twice must fail.
func testBucketRecreateFails(c *check.C, create func() Storage) {
	storage := create()
	err := storage.CreateBucket("string")
	c.Assert(err, check.IsNil)
	err = storage.CreateBucket("string")
	c.Assert(err, check.Not(check.IsNil))
}

// testPutObjectInSubdir - keys containing '/' behave as plain keys and
// round-trip correctly.
func testPutObjectInSubdir(c *check.C, create func() Storage) {
	storage := create()
	err := storage.CreateBucket("bucket")
	c.Assert(err, check.IsNil)
	err = storage.CreateObject("bucket", "dir1/dir2/object", "", "", bytes.NewBufferString("hello world"))
	c.Assert(err, check.IsNil)
	var bytesBuffer bytes.Buffer
	length, err := storage.GetObject(&bytesBuffer, "bucket", "dir1/dir2/object")
	c.Assert(len(bytesBuffer.Bytes()), check.Equals, len("hello world"))
	c.Assert(int64(len(bytesBuffer.Bytes())), check.Equals, length)
	c.Assert(err, check.IsNil)
}
// testListBuckets - bucket listing grows as buckets are added.
func testListBuckets(c *check.C, create func() Storage) {
	storage := create()
	// test empty list
	buckets, err := storage.ListBuckets()
	c.Assert(err, check.IsNil)
	c.Assert(len(buckets), check.Equals, 0)
	// add one and test exists
	err = storage.CreateBucket("bucket1")
	c.Assert(err, check.IsNil)
	buckets, err = storage.ListBuckets()
	c.Assert(len(buckets), check.Equals, 1)
	c.Assert(err, check.IsNil)
	// add two and test exists
	err = storage.CreateBucket("bucket2")
	c.Assert(err, check.IsNil)
	buckets, err = storage.ListBuckets()
	c.Assert(len(buckets), check.Equals, 2)
	c.Assert(err, check.IsNil)
	// add three and test exists + prefix
	// NOTE(review): the error from this CreateBucket is overwritten
	// before being asserted — confirm that is intentional.
	err = storage.CreateBucket("bucket22")
	buckets, err = storage.ListBuckets()
	c.Assert(len(buckets), check.Equals, 3)
	c.Assert(err, check.IsNil)
}

// testListBucketsOrder - bucket listing must be deterministic and sorted.
func testListBucketsOrder(c *check.C, create func() Storage) {
	// if implementation contains a map, order of map keys will vary.
	// this ensures they return in the same order each time
	for i := 0; i < 10; i++ {
		storage := create()
		// add one and test exists
		storage.CreateBucket("bucket1")
		storage.CreateBucket("bucket2")
		buckets, err := storage.ListBuckets()
		c.Assert(len(buckets), check.Equals, 2)
		c.Assert(err, check.IsNil)
		c.Assert(buckets[0].Name, check.Equals, "bucket1")
		c.Assert(buckets[1].Name, check.Equals, "bucket2")
	}
}

// testListObjectsTestsForNonExistantBucket - listing a missing bucket
// fails and yields an empty, non-truncated result.
func testListObjectsTestsForNonExistantBucket(c *check.C, create func() Storage) {
	storage := create()
	resources := BucketResourcesMetadata{Prefix: "", Maxkeys: 1000}
	objects, resources, err := storage.ListObjects("bucket", resources)
	c.Assert(err, check.Not(check.IsNil))
	c.Assert(resources.IsTruncated, check.Equals, false)
	c.Assert(len(objects), check.Equals, 0)
}

// testNonExistantObjectInBucket - fetching a missing key must fail with
// ObjectNotFound, report zero length, and leave the buffer empty.
func testNonExistantObjectInBucket(c *check.C, create func() Storage) {
	storage := create()
	err := storage.CreateBucket("bucket")
	c.Assert(err, check.IsNil)
	var byteBuffer bytes.Buffer
	length, err := storage.GetObject(&byteBuffer, "bucket", "dir1")
	c.Assert(length, check.Equals, int64(0))
	c.Assert(err, check.Not(check.IsNil))
	c.Assert(len(byteBuffer.Bytes()), check.Equals, 0)
	switch err := err.(type) {
	case ObjectNotFound:
		{
			c.Assert(err, check.ErrorMatches, "Object not Found: bucket#dir1")
		}
	default:
		{
			// deliberately unequal values: forces a failure that carries
			// a useful line number when the error type is wrong
			c.Assert(err, check.Equals, "fails")
		}
	}
}
// testGetDirectoryReturnsObjectNotFound - fetching a directory prefix,
// with or without a trailing slash, must fail with ObjectNotFound and
// must not write anything into the destination buffer.
func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() Storage) {
	storage := create()
	err := storage.CreateBucket("bucket")
	c.Assert(err, check.IsNil)
	err = storage.CreateObject("bucket", "dir1/dir2/object", "", "", bytes.NewBufferString("hello world"))
	c.Assert(err, check.IsNil)
	var byteBuffer bytes.Buffer
	length, err := storage.GetObject(&byteBuffer, "bucket", "dir1")
	c.Assert(length, check.Equals, int64(0))
	switch err := err.(type) {
	case ObjectNotFound:
		c.Assert(err.Bucket, check.Equals, "bucket")
		c.Assert(err.Object, check.Equals, "dir1")
	default:
		// force a failure with a line number
		c.Assert(err, check.Equals, "ObjectNotFound")
	}
	c.Assert(len(byteBuffer.Bytes()), check.Equals, 0)
	var byteBuffer2 bytes.Buffer
	// BUG FIX: the original read into byteBuffer here, so the emptiness
	// assertion on byteBuffer2 below could never fail.
	length, err = storage.GetObject(&byteBuffer2, "bucket", "dir1/")
	c.Assert(length, check.Equals, int64(0))
	switch err := err.(type) {
	case ObjectNotFound:
		c.Assert(err.Bucket, check.Equals, "bucket")
		c.Assert(err.Object, check.Equals, "dir1/")
	default:
		// force a failure with a line number
		c.Assert(err, check.Equals, "ObjectNotFound")
	}
	c.Assert(len(byteBuffer2.Bytes()), check.Equals, 0)
}
// testDefaultContentType - empty content type defaults to
// "application/octet-stream"; explicit values are stored with
// surrounding whitespace trimmed.
func testDefaultContentType(c *check.C, create func() Storage) {
	storage := create()
	err := storage.CreateBucket("bucket")
	c.Assert(err, check.IsNil)
	// test empty
	// NOTE(review): this CreateObject error is overwritten before being
	// asserted — confirm that is intentional.
	err = storage.CreateObject("bucket", "one", "", "", bytes.NewBufferString("one"))
	metadata, err := storage.GetObjectMetadata("bucket", "one", "")
	c.Assert(err, check.IsNil)
	c.Assert(metadata.ContentType, check.Equals, "application/octet-stream")
	// test custom
	storage.CreateObject("bucket", "two", "application/text", "", bytes.NewBufferString("two"))
	metadata, err = storage.GetObjectMetadata("bucket", "two", "")
	c.Assert(err, check.IsNil)
	c.Assert(metadata.ContentType, check.Equals, "application/text")
	// test trim space
	storage.CreateObject("bucket", "three", "\tapplication/json ", "", bytes.NewBufferString("three"))
	metadata, err = storage.GetObjectMetadata("bucket", "three", "")
	c.Assert(err, check.IsNil)
	c.Assert(metadata.ContentType, check.Equals, "application/json")
}
/*
func testContentMd5Set(c *check.C, create func() Storage) {
storage := create()
err := storage.CreateBucket("bucket")
c.Assert(err, check.IsNil)
// test md5 invalid
err = storage.CreateObject("bucket", "one", "", "NWJiZjVhNTIzMjhlNzQzOWFlNmU3MTlkZmU3MTIyMDA", bytes.NewBufferString("one"))
c.Assert(err, check.Not(check.IsNil))
err = storage.CreateObject("bucket", "two", "", "NWJiZjVhNTIzMjhlNzQzOWFlNmU3MTlkZmU3MTIyMDA=", bytes.NewBufferString("one"))
c.Assert(err, check.IsNil)
}
*/

View File

@@ -1,194 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package storage
import (
"encoding/json"
"io"
"strings"
)
// User - canonical principal identity referenced by a policy statement.
type User struct {
	AWS string // AWS-style principal string, or "*" for any
}

// Statement - minio policy statement
type Statement struct {
	Sid       string   // statement identifier
	Effect    string   // "Allow" or "Deny"
	Principal User     // who the statement applies to
	Action    []string // operations covered by the statement
	Resource  []string // bucket resources the statement covers
	// add Condition struct/var TODO - fix it in future if necessary
}

// BucketPolicy - minio policy collection
type BucketPolicy struct {
	Version   string // date in 0000-00-00 format
	Statement []Statement
}

// Resource delimiter
const (
	MinioResource = "minio:::"
)

// TODO support canonical user
// Principal delimiter
const (
	MinioPrincipal = "minio::"
)

// SupportedActionMap - set of minio actions a policy statement may name.
var SupportedActionMap = map[string]bool{
	"*":                        true,
	"minio:GetObject":          true,
	"minio:ListBucket":         true,
	"minio:PutObject":          true,
	"minio:CreateBucket":       true,
	"minio:GetBucketPolicy":    true,
	"minio:DeleteBucketPolicy": true,
	"minio:ListAllMyBuckets":   true,
	"minio:PutBucketPolicy":    true,
}

// SupportedEffectMap - set of effects a policy statement may carry.
var SupportedEffectMap = map[string]bool{
	"Allow": true,
	"Deny":  true,
}
// isValidAction - report whether every requested action is one of the
// supported minio actions; an empty list is trivially valid.
func isValidAction(action []string) bool {
	for i := range action {
		if supported := SupportedActionMap[action[i]]; !supported {
			return false
		}
	}
	return true
}
// isValidEffect - only effects listed in SupportedEffectMap
// ("Allow", "Deny") are accepted.
func isValidEffect(effect string) bool {
	// The map lookup already yields false for unknown effects, so the
	// original if/return-true/return-false sequence was redundant.
	return SupportedEffectMap[effect]
}
// isValidResource - every resource must carry a supported prefix
// (AwsResource "arn:aws:s3:::" or MinioResource "minio:::") followed by
// a non-empty bucket name; an empty list is invalid.
//
// NOTE(review): the original overwrote its result on each iteration, so
// only the LAST resource's validity was returned; this version requires
// all entries to be valid, which the call site in Parsepolicy expects.
func isValidResource(resources []string) bool {
	if len(resources) == 0 {
		return false
	}
	for _, resource := range resources {
		switch {
		case strings.HasPrefix(resource, AwsResource):
			if len(strings.SplitAfter(resource, AwsResource)[1]) == 0 {
				return false
			}
		case strings.HasPrefix(resource, MinioResource):
			if len(strings.SplitAfter(resource, MinioResource)[1]) == 0 {
				return false
			}
		default:
			return false
		}
	}
	return true
}
// isValidPrincipal - a principal is either the wildcard "*" or a
// supported prefix (AwsPrincipal "arn:aws:iam::" / MinioPrincipal
// "minio::") followed by a non-empty username.
func isValidPrincipal(principal string) bool {
	if principal == "*" {
		return true
	}
	switch {
	case strings.HasPrefix(principal, AwsPrincipal):
		return len(strings.SplitAfter(principal, AwsPrincipal)[1]) != 0
	case strings.HasPrefix(principal, MinioPrincipal):
		return len(strings.SplitAfter(principal, MinioPrincipal)[1]) != 0
	default:
		return false
	}
}
// Parsepolicy - validate request body is proper JSON and in accordance
// with policy standards. Returns the decoded policy and true on success,
// or a zero BucketPolicy and false on any validation failure.
func Parsepolicy(data io.Reader) (BucketPolicy, bool) {
	// Early returns replace the original goto-error flow; the failure
	// value is identical in every case.
	var policy BucketPolicy
	if err := json.NewDecoder(data).Decode(&policy); err != nil {
		return BucketPolicy{}, false
	}
	// Version is mandatory and must be a parseable 0000-00-00 date.
	if len(policy.Version) == 0 {
		return BucketPolicy{}, false
	}
	if _, err := parseDate(policy.Version); err != nil {
		return BucketPolicy{}, false
	}
	// At least one statement is required, and each must be valid.
	if len(policy.Statement) == 0 {
		return BucketPolicy{}, false
	}
	for _, statement := range policy.Statement {
		if !isValidPolicyStatement(statement) {
			return BucketPolicy{}, false
		}
	}
	return policy, true
}

// isValidPolicyStatement - check one statement for mandatory fields and
// supported effect/principal/action/resource values.
func isValidPolicyStatement(statement Statement) bool {
	if len(statement.Sid) == 0 {
		return false
	}
	if len(statement.Effect) == 0 || !isValidEffect(statement.Effect) {
		return false
	}
	if len(statement.Principal.AWS) == 0 || !isValidPrincipal(statement.Principal.AWS) {
		return false
	}
	if len(statement.Action) == 0 {
		return false
	}
	// Either the minio action set or the S3-compatible set may be used.
	if !isValidAction(statement.Action) && !isValidActionS3(statement.Action) {
		return false
	}
	if len(statement.Resource) == 0 {
		return false
	}
	return isValidResource(statement.Resource)
}

View File

@@ -1,52 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package storage
// This file implements a compatibility layer for AWS clients
// Resource delimiter
const (
	AwsResource = "arn:aws:s3:::"
)

// TODO support canonical user
// Principal delimiter
const (
	AwsPrincipal = "arn:aws:iam::"
)

// SupportedActionMapCompat - set of S3-compatible actions a policy
// statement may name instead of the native minio actions.
var SupportedActionMapCompat = map[string]bool{
	"*":                     true,
	"s3:GetObject":          true,
	"s3:ListBucket":         true,
	"s3:PutObject":          true,
	"s3:CreateBucket":       true,
	"s3:GetBucketPolicy":    true,
	"s3:DeleteBucketPolicy": true,
	"s3:ListAllMyBuckets":   true,
	"s3:PutBucketPolicy":    true,
}
// isValidActionS3 - report whether every requested action belongs to the
// S3-compatible action set; an empty list is trivially valid.
func isValidActionS3(action []string) bool {
	valid := true
	for _, requested := range action {
		if !SupportedActionMapCompat[requested] {
			valid = false
			break
		}
	}
	return valid
}

View File

@@ -1,62 +0,0 @@
package storage
import (
"errors"
"fmt"
"strconv"
"strings"
)
// Date - calendar date in [0000-00-00] form.
type Date struct {
	Year  int16
	Month byte
	Day   byte
}

// String output in yyyy-mm-dd format
func (d Date) String() string {
	return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
}

// IsZero true if date is 0000-00-00
func (d Date) IsZero() bool {
	return d.Day == 0 && d.Month == 0 && d.Year == 0
}

// parseDate converts a string date in format YYYY-MM-DD to Date.
// Leading and trailing spaces are ignored. The literal "0000-00-00"
// yields the zero Date without error; any other malformed or
// out-of-range input returns a zero Date and a non-nil error.
// (FIX: the error messages previously read "0000-00-000" — a typo.)
func parseDate(str string) (d Date, err error) {
	str = strings.TrimSpace(str)
	if str == "0000-00-00" {
		return
	}
	var (
		y, m, n int
	)
	if len(str) != 10 || str[4] != '-' || str[7] != '-' {
		err = errors.New("Invalid 0000-00-00 style DATE string: " + str)
		return
	}
	if y, err = strconv.Atoi(str[0:4]); err != nil {
		return
	}
	if m, err = strconv.Atoi(str[5:7]); err != nil {
		return
	}
	if m < 1 || m > 12 {
		err = errors.New("Invalid 0000-00-00 style DATE string: " + str)
		return
	}
	if n, err = strconv.Atoi(str[8:10]); err != nil {
		return
	}
	// Day is only range-checked 1-31; per-month lengths are not enforced.
	if n < 1 || n > 31 {
		err = errors.New("Invalid 0000-00-00 style DATE string: " + str)
		return
	}
	d.Year = int16(y)
	d.Month = byte(m)
	d.Day = byte(n)
	return
}

View File

@@ -1,163 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package storage
// BackendError - generic disk backend error
type BackendError struct {
	Path string // filesystem path the backend failed on
}

// BackendCorrupted - path has corrupted data
type BackendCorrupted BackendError

// APINotImplemented - generic API not implemented error
type APINotImplemented struct {
	API string // name of the unimplemented API call
}

// GenericBucketError - generic bucket error
type GenericBucketError struct {
	Bucket string // bucket name the error refers to
}

// GenericObjectError - generic object error
type GenericObjectError struct {
	Bucket string // bucket name the error refers to
	Object string // object key the error refers to
}

// ImplementationError - generic implementation error, wrapping an
// underlying error with optional bucket/object context
type ImplementationError struct {
	Bucket string
	Object string
	Err    error // underlying cause
}

// DigestError - generic md5 error
type DigestError struct {
	Bucket string
	Key    string
	Md5    string // offending md5 value
}

/// Bucket related errors

// BucketPolicyNotFound - missing bucket policy
type BucketPolicyNotFound GenericBucketError

// BucketNameInvalid - bucketname provided is invalid
type BucketNameInvalid GenericBucketError

// BucketExists - bucket already exists
type BucketExists GenericBucketError

// BucketNotFound - requested bucket not found
type BucketNotFound GenericBucketError

/// Object related errors

// ObjectNotFound - requested object not found
type ObjectNotFound GenericObjectError

// ObjectExists - object already exists
type ObjectExists GenericObjectError

// ObjectNameInvalid - object name provided is invalid
type ObjectNameInvalid GenericObjectError

// BadDigest - md5 mismatch from data received
type BadDigest DigestError

// InvalidDigest - md5 in request header invalid
type InvalidDigest DigestError
// Error - format the implementation error with its optional bucket and
// object context followed by the underlying cause.
func (e ImplementationError) Error() string {
	// `msg` replaces the original local named `error`, which shadowed
	// the predeclared error identifier.
	msg := ""
	if e.Bucket != "" {
		msg = msg + "Bucket: " + e.Bucket + " "
	}
	if e.Object != "" {
		msg = msg + "Object: " + e.Object + " "
	}
	return msg + "Error: " + e.Err.Error()
}
// EmbedError - wrap an underlying error with bucket/object context into
// an ImplementationError.
func EmbedError(bucket, object string, err error) ImplementationError {
	return ImplementationError{
		Bucket: bucket,
		Object: object,
		Err:    err,
	}
}

// Error - format the missing-policy error for its bucket
func (e BucketPolicyNotFound) Error() string {
	return "Bucket policy not found for: " + e.Bucket
}

// Error - format the missing-object error as "bucket#object"
func (e ObjectNotFound) Error() string {
	return "Object not Found: " + e.Bucket + "#" + e.Object
}

// Error - format the unimplemented-API error with the API name
func (e APINotImplemented) Error() string {
	return "Api not implemented: " + e.API
}

// Error - format the duplicate-object error as "bucket#object"
func (e ObjectExists) Error() string {
	return "Object exists: " + e.Bucket + "#" + e.Object
}

// Error - format the invalid-bucket-name error
func (e BucketNameInvalid) Error() string {
	return "Bucket name invalid: " + e.Bucket
}

// Error - format the duplicate-bucket error
func (e BucketExists) Error() string {
	return "Bucket exists: " + e.Bucket
}

// Error - format the missing-bucket error
func (e BucketNotFound) Error() string {
	return "Bucket not Found: " + e.Bucket
}

// Error - format the invalid-object-name error as "bucket#object"
func (e ObjectNameInvalid) Error() string {
	return "Object name invalid: " + e.Bucket + "#" + e.Object
}

// Error - format the corrupted-backend error with the offending path
func (e BackendCorrupted) Error() string {
	return "Backend corrupted: " + e.Path
}

// Error - format the md5-mismatch error with the expected digest
func (e BadDigest) Error() string {
	return "Md5 provided " + e.Md5 + " mismatches for: " + e.Bucket + "#" + e.Key
}

// Error - format the malformed-md5 error
func (e InvalidDigest) Error() string {
	return "Md5 provided " + e.Md5 + " is invalid"
}