mirror of https://github.com/minio/minio.git
Merge pull request #455 from fkautz/pr_out_moving_object_storage_drivers_back_to_minio
This commit is contained in:
commit
60b30bdd0a
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"ImportPath": "github.com/minio-io/minio",
|
||||
"GoVersion": "go1.4",
|
||||
"GoVersion": "go1.4.2",
|
||||
"Packages": [
|
||||
"./..."
|
||||
],
|
||||
|
@ -24,7 +24,7 @@
|
|||
},
|
||||
{
|
||||
"ImportPath": "github.com/minio-io/donut",
|
||||
"Rev": "2b8c72e28768f47511730adae5e780550fdc0236"
|
||||
"Rev": "107c0aff49fb961c2c7bfd1e5d1c26c958accbdc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/minio-io/erasure",
|
||||
|
@ -34,10 +34,6 @@
|
|||
"ImportPath": "github.com/minio-io/iodine",
|
||||
"Rev": "55cc4d4256c68fbd6f0775f1a25e37e6a2f6457e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/minio-io/objectdriver",
|
||||
"Rev": "64e55f18297dd962102afc7e81ea63488fc4706a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/stretchr/objx",
|
||||
"Rev": "cbeaeb16a013161a98496fad62933b1d21786672"
|
||||
|
|
|
@ -22,12 +22,14 @@ import (
|
|||
"github.com/minio-io/iodine"
|
||||
)
|
||||
|
||||
// donut struct internal data
|
||||
type donut struct {
|
||||
name string
|
||||
buckets map[string]Bucket
|
||||
nodes map[string]Node
|
||||
}
|
||||
|
||||
// config files used inside Donut
|
||||
const (
|
||||
donutObjectMetadataConfig = "donutObjectMetadata.json"
|
||||
objectMetadataConfig = "objectMetadata.json"
|
||||
|
@ -35,7 +37,7 @@ const (
|
|||
)
|
||||
|
||||
// attachDonutNode - wrapper function to instantiate a new node for associated donut
|
||||
// based on the configuration
|
||||
// based on the provided configuration
|
||||
func (d donut) attachDonutNode(hostname string, disks []string) error {
|
||||
node, err := NewNode(hostname)
|
||||
if err != nil {
|
||||
|
|
|
@ -26,12 +26,13 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/minio-io/iodine"
|
||||
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
|
||||
"github.com/minio-io/iodine"
|
||||
)
|
||||
|
||||
// internal struct carrying bucket specific information
|
||||
type bucket struct {
|
||||
name string
|
||||
donutName string
|
||||
|
@ -56,6 +57,7 @@ func NewBucket(bucketName, donutName string, nodes map[string]Node) (Bucket, err
|
|||
return b, nil
|
||||
}
|
||||
|
||||
// ListObjects - list all objects
|
||||
func (b bucket) ListObjects() (map[string]Object, error) {
|
||||
nodeSlice := 0
|
||||
for _, node := range b.nodes {
|
||||
|
@ -91,6 +93,7 @@ func (b bucket) ListObjects() (map[string]Object, error) {
|
|||
return b.objects, nil
|
||||
}
|
||||
|
||||
// GetObject - get object
|
||||
func (b bucket) GetObject(objectName string) (reader io.ReadCloser, size int64, err error) {
|
||||
reader, writer := io.Pipe()
|
||||
// get list of objects
|
||||
|
@ -103,6 +106,7 @@ func (b bucket) GetObject(objectName string) (reader io.ReadCloser, size int64,
|
|||
if !ok {
|
||||
return nil, 0, iodine.New(os.ErrNotExist, nil)
|
||||
}
|
||||
// verify if objectMetadata is readable, before we serve the request
|
||||
objectMetadata, err := object.GetObjectMetadata()
|
||||
if err != nil {
|
||||
return nil, 0, iodine.New(err, nil)
|
||||
|
@ -114,14 +118,17 @@ func (b bucket) GetObject(objectName string) (reader io.ReadCloser, size int64,
|
|||
if err != nil {
|
||||
return nil, 0, iodine.New(err, nil)
|
||||
}
|
||||
// verify if donutObjectMetadata is readable, before we serve the request
|
||||
donutObjectMetadata, err := object.GetDonutObjectMetadata()
|
||||
if err != nil {
|
||||
return nil, 0, iodine.New(err, nil)
|
||||
}
|
||||
// read and reply back to GetObject() request in a go-routine
|
||||
go b.readEncodedData(b.normalizeObjectName(objectName), writer, donutObjectMetadata)
|
||||
return reader, size, nil
|
||||
}
|
||||
|
||||
// PutObject - put a new object
|
||||
func (b bucket) PutObject(objectName string, objectData io.Reader, expectedMD5Sum string, metadata map[string]string) error {
|
||||
if objectName == "" || objectData == nil {
|
||||
return iodine.New(errors.New("invalid argument"), nil)
|
||||
|
@ -135,6 +142,7 @@ func (b bucket) PutObject(objectName string, objectData io.Reader, expectedMD5Su
|
|||
donutObjectMetadata := make(map[string]string)
|
||||
objectMetadata["version"] = "1.0"
|
||||
donutObjectMetadata["version"] = "1.0"
|
||||
// if total writers are only '1' do not compute erasure
|
||||
switch len(writers) == 1 {
|
||||
case true:
|
||||
mw := io.MultiWriter(writers[0], summer)
|
||||
|
@ -145,24 +153,29 @@ func (b bucket) PutObject(objectName string, objectData io.Reader, expectedMD5Su
|
|||
donutObjectMetadata["sys.size"] = strconv.FormatInt(totalLength, 10)
|
||||
objectMetadata["size"] = strconv.FormatInt(totalLength, 10)
|
||||
case false:
|
||||
// calculate data and parity dictated by total number of writers
|
||||
k, m, err := b.getDataAndParity(len(writers))
|
||||
if err != nil {
|
||||
return iodine.New(err, nil)
|
||||
}
|
||||
// encoded data with k, m and write
|
||||
chunkCount, totalLength, err := b.writeEncodedData(k, m, writers, objectData, summer)
|
||||
if err != nil {
|
||||
return iodine.New(err, nil)
|
||||
}
|
||||
/// donutMetadata section
|
||||
donutObjectMetadata["sys.blockSize"] = strconv.Itoa(10 * 1024 * 1024)
|
||||
donutObjectMetadata["sys.chunkCount"] = strconv.Itoa(chunkCount)
|
||||
donutObjectMetadata["sys.erasureK"] = strconv.FormatUint(uint64(k), 10)
|
||||
donutObjectMetadata["sys.erasureM"] = strconv.FormatUint(uint64(m), 10)
|
||||
donutObjectMetadata["sys.erasureTechnique"] = "Cauchy"
|
||||
donutObjectMetadata["sys.size"] = strconv.Itoa(totalLength)
|
||||
// keep size inside objectMetadata as well for Object API requests
|
||||
objectMetadata["size"] = strconv.Itoa(totalLength)
|
||||
}
|
||||
objectMetadata["bucket"] = b.name
|
||||
objectMetadata["object"] = objectName
|
||||
// store all user provided metadata
|
||||
for k, v := range metadata {
|
||||
objectMetadata[k] = v
|
||||
}
|
|
@ -33,6 +33,9 @@ import (
|
|||
"github.com/minio-io/minio/pkg/utils/split"
|
||||
)
|
||||
|
||||
/// This file contains all the internal functions used by Bucket interface
|
||||
|
||||
// isMD5SumEqual - returns error if md5sum mismatches, otherwise it is `nil`
|
||||
func (b bucket) isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error {
|
||||
if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" {
|
||||
expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum)
|
||||
|
@ -51,6 +54,7 @@ func (b bucket) isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error {
|
|||
return iodine.New(errors.New("invalid argument"), nil)
|
||||
}
|
||||
|
||||
// writeObjectMetadata - write additional object metadata
|
||||
func (b bucket) writeObjectMetadata(objectName string, objectMetadata map[string]string) error {
|
||||
if len(objectMetadata) == 0 {
|
||||
return iodine.New(errors.New("invalid argument"), nil)
|
||||
|
@ -71,6 +75,7 @@ func (b bucket) writeObjectMetadata(objectName string, objectMetadata map[string
|
|||
return nil
|
||||
}
|
||||
|
||||
// writeDonutObjectMetadata - write donut related object metadata
|
||||
func (b bucket) writeDonutObjectMetadata(objectName string, objectMetadata map[string]string) error {
|
||||
if len(objectMetadata) == 0 {
|
||||
return iodine.New(errors.New("invalid argument"), nil)
|
||||
|
@ -91,12 +96,20 @@ func (b bucket) writeDonutObjectMetadata(objectName string, objectMetadata map[s
|
|||
return nil
|
||||
}
|
||||
|
||||
// This a temporary normalization of object path, need to find a better way
|
||||
// TODO - This a temporary normalization of objectNames, need to find a better way
|
||||
//
|
||||
// normalizedObjectName - all objectNames with "/" get normalized to a simple objectName
|
||||
//
|
||||
// example:
|
||||
// user provided value - "this/is/my/deep/directory/structure"
|
||||
// donut normalized value - "this-is-my-deep-directory-structure"
|
||||
//
|
||||
func (b bucket) normalizeObjectName(objectName string) string {
|
||||
// replace every '/' with '-'
|
||||
return strings.Replace(objectName, "/", "-", -1)
|
||||
}
|
||||
|
||||
// getDataAndParity - calculate k, m (data and parity) values from number of disks
|
||||
func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err error) {
|
||||
if totalWriters <= 1 {
|
||||
return 0, 0, iodine.New(errors.New("invalid argument"), nil)
|
||||
|
@ -112,6 +125,7 @@ func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err error)
|
|||
return k, m, nil
|
||||
}
|
||||
|
||||
// writeEncodedData -
|
||||
func (b bucket) writeEncodedData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, summer hash.Hash) (int, int, error) {
|
||||
chunks := split.Stream(objectData, 10*1024*1024)
|
||||
encoder, err := NewEncoder(k, m, "Cauchy")
|
||||
|
@ -137,6 +151,7 @@ func (b bucket) writeEncodedData(k, m uint8, writers []io.WriteCloser, objectDat
|
|||
return chunkCount, totalLength, nil
|
||||
}
|
||||
|
||||
// readEncodedData -
|
||||
func (b bucket) readEncodedData(objectName string, writer *io.PipeWriter, donutObjectMetadata map[string]string) {
|
||||
expectedMd5sum, err := hex.DecodeString(donutObjectMetadata["sys.md5"])
|
||||
if err != nil {
|
||||
|
@ -152,7 +167,7 @@ func (b bucket) readEncodedData(objectName string, writer *io.PipeWriter, donutO
|
|||
mwriter := io.MultiWriter(writer, hasher)
|
||||
switch len(readers) == 1 {
|
||||
case false:
|
||||
totalChunks, totalLeft, blockSize, k, m, err := b.metadata2Values(donutObjectMetadata)
|
||||
totalChunks, totalLeft, blockSize, k, m, err := b.donutMetadata2Values(donutObjectMetadata)
|
||||
if err != nil {
|
||||
writer.CloseWithError(iodine.New(err, nil))
|
||||
return
|
||||
|
@ -169,7 +184,7 @@ func (b bucket) readEncodedData(objectName string, writer *io.PipeWriter, donutO
|
|||
return
|
||||
}
|
||||
for i := 0; i < totalChunks; i++ {
|
||||
decodedData, err := b.decodeData(totalLeft, blockSize, readers, encoder, writer)
|
||||
decodedData, err := b.decodeEncodedData(totalLeft, blockSize, readers, encoder, writer)
|
||||
if err != nil {
|
||||
writer.CloseWithError(iodine.New(err, nil))
|
||||
return
|
||||
|
@ -198,7 +213,8 @@ func (b bucket) readEncodedData(objectName string, writer *io.PipeWriter, donutO
|
|||
return
|
||||
}
|
||||
|
||||
func (b bucket) decodeData(totalLeft, blockSize int64, readers []io.ReadCloser, encoder Encoder, writer *io.PipeWriter) ([]byte, error) {
|
||||
// decodeEncodedData -
|
||||
func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers []io.ReadCloser, encoder Encoder, writer *io.PipeWriter) ([]byte, error) {
|
||||
var curBlockSize int64
|
||||
if blockSize < totalLeft {
|
||||
curBlockSize = blockSize
|
||||
|
@ -225,7 +241,8 @@ func (b bucket) decodeData(totalLeft, blockSize int64, readers []io.ReadCloser,
|
|||
return decodedData, nil
|
||||
}
|
||||
|
||||
func (b bucket) metadata2Values(donutObjectMetadata map[string]string) (totalChunks int, totalLeft, blockSize int64, k, m uint64, err error) {
|
||||
// donutMetadata2Values -
|
||||
func (b bucket) donutMetadata2Values(donutObjectMetadata map[string]string) (totalChunks int, totalLeft, blockSize int64, k, m uint64, err error) {
|
||||
totalChunks, err = strconv.Atoi(donutObjectMetadata["sys.chunkCount"])
|
||||
if err != nil {
|
||||
return 0, 0, 0, 0, 0, iodine.New(err, nil)
|
||||
|
@ -249,6 +266,7 @@ func (b bucket) metadata2Values(donutObjectMetadata map[string]string) (totalChu
|
|||
return totalChunks, totalLeft, blockSize, k, m, nil
|
||||
}
|
||||
|
||||
// getDiskReaders -
|
||||
func (b bucket) getDiskReaders(objectName, objectMeta string) ([]io.ReadCloser, error) {
|
||||
var readers []io.ReadCloser
|
||||
nodeSlice := 0
|
||||
|
@ -272,6 +290,7 @@ func (b bucket) getDiskReaders(objectName, objectMeta string) ([]io.ReadCloser,
|
|||
return readers, nil
|
||||
}
|
||||
|
||||
// getDiskWriters -
|
||||
func (b bucket) getDiskWriters(objectName, objectMeta string) ([]io.WriteCloser, error) {
|
||||
var writers []io.WriteCloser
|
||||
nodeSlice := 0
|
|
@ -0,0 +1,75 @@
|
|||
package donut
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// appendUniq appends i to slice only when it is not already present;
// when i already exists the original slice is returned unchanged.
func appendUniq(slice []string, i string) []string {
	present := false
	for _, existing := range slice {
		if existing == i {
			present = true
			break
		}
	}
	if present {
		return slice
	}
	return append(slice, i)
}
|
||||
|
||||
// filterPrefix returns only those entries of objects that begin with prefix.
func filterPrefix(objects []string, prefix string) []string {
	var matched []string
	for _, name := range objects {
		if !strings.HasPrefix(name, prefix) {
			continue
		}
		matched = append(matched, name)
	}
	return matched
}
|
||||
|
||||
// removePrefix strips prefix from every entry of objects; entries that
// do not carry the prefix are passed through unchanged.
func removePrefix(objects []string, prefix string) []string {
	var trimmed []string
	for _, name := range objects {
		trimmed = append(trimmed, strings.TrimPrefix(name, prefix))
	}
	return trimmed
}
|
||||
|
||||
// filterDelimited keeps only those entries of objects that do NOT
// contain delim (i.e. the "flat" names).
func filterDelimited(objects []string, delim string) []string {
	var flat []string
	for _, name := range objects {
		if strings.Contains(name, delim) {
			continue
		}
		flat = append(flat, name)
	}
	return flat
}
|
||||
|
||||
// filterNotDelimited keeps only those entries of objects that DO
// contain delim (i.e. the "nested" names).
func filterNotDelimited(objects []string, delim string) []string {
	var nested []string
	for _, name := range objects {
		if !strings.Contains(name, delim) {
			continue
		}
		nested = append(nested, name)
	}
	return nested
}
|
||||
|
||||
// extractDir maps each entry to its leading segment followed by delim,
// e.g. "a/b/c" with delim "/" yields "a/". Entries without delim yield
// the whole name plus delim.
func extractDir(objects []string, delim string) []string {
	var dirs []string
	for _, name := range objects {
		head := strings.Split(name, delim)[0]
		dirs = append(dirs, head+delim)
	}
	return dirs
}
|
||||
|
||||
// uniqueObjects de-duplicates objects and returns the surviving names
// in sorted order.
func uniqueObjects(objects []string) []string {
	seen := make(map[string]string)
	for _, name := range objects {
		seen[name] = name
	}
	var unique []string
	for name := range seen {
		unique = append(unique, name)
	}
	sort.Strings(unique)
	return unique
}
|
|
@ -27,6 +27,7 @@ import (
|
|||
"github.com/minio-io/iodine"
|
||||
)
|
||||
|
||||
// internal disk struct
|
||||
type disk struct {
|
||||
root string
|
||||
order int
|
||||
|
@ -63,14 +64,17 @@ func NewDisk(diskPath string, diskOrder int) (Disk, error) {
|
|||
return nil, iodine.New(errors.New("unsupported filesystem"), nil)
|
||||
}
|
||||
|
||||
// GetPath - get root disk path
|
||||
func (d disk) GetPath() string {
|
||||
return d.root
|
||||
}
|
||||
|
||||
// GetOrder - get order of disk present in graph
|
||||
func (d disk) GetOrder() int {
|
||||
return d.order
|
||||
}
|
||||
|
||||
// GetFSInfo - get disk filesystem and its usage information
|
||||
func (d disk) GetFSInfo() map[string]string {
|
||||
s := syscall.Statfs_t{}
|
||||
err := syscall.Statfs(d.root, &s)
|
||||
|
@ -82,10 +86,12 @@ func (d disk) GetFSInfo() map[string]string {
|
|||
return d.filesystem
|
||||
}
|
||||
|
||||
// MakeDir - make a directory inside disk root path
|
||||
func (d disk) MakeDir(dirname string) error {
|
||||
return os.MkdirAll(path.Join(d.root, dirname), 0700)
|
||||
}
|
||||
|
||||
// ListDir - list a directory inside disk root path, get only directories
|
||||
func (d disk) ListDir(dirname string) ([]os.FileInfo, error) {
|
||||
contents, err := ioutil.ReadDir(path.Join(d.root, dirname))
|
||||
if err != nil {
|
||||
|
@ -101,6 +107,7 @@ func (d disk) ListDir(dirname string) ([]os.FileInfo, error) {
|
|||
return directories, nil
|
||||
}
|
||||
|
||||
// ListFiles - list a directory inside disk root path, get only files
|
||||
func (d disk) ListFiles(dirname string) ([]os.FileInfo, error) {
|
||||
contents, err := ioutil.ReadDir(path.Join(d.root, dirname))
|
||||
if err != nil {
|
||||
|
@ -116,6 +123,7 @@ func (d disk) ListFiles(dirname string) ([]os.FileInfo, error) {
|
|||
return files, nil
|
||||
}
|
||||
|
||||
// MakeFile - create a file inside disk root path
|
||||
func (d disk) MakeFile(filename string) (*os.File, error) {
|
||||
if filename == "" {
|
||||
return nil, iodine.New(errors.New("Invalid argument"), nil)
|
||||
|
@ -132,6 +140,7 @@ func (d disk) MakeFile(filename string) (*os.File, error) {
|
|||
return dataFile, nil
|
||||
}
|
||||
|
||||
// OpenFile - read a file inside disk root path
|
||||
func (d disk) OpenFile(filename string) (*os.File, error) {
|
||||
if filename == "" {
|
||||
return nil, iodine.New(errors.New("Invalid argument"), nil)
|
|
@ -22,7 +22,9 @@ import (
|
|||
"strings"
|
||||
)
|
||||
|
||||
// Convert bytes to human readable string. Like a 2 MB, 64.2 KB, 52 B
|
||||
/// This file contains all the internal functions used inside Disk interface
|
||||
|
||||
// formatBytes - Convert bytes to human readable string. Like a 2 MB, 64.2 KB, 52 B
|
||||
func (d disk) formatBytes(i int64) (result string) {
|
||||
switch {
|
||||
case i > (1024 * 1024 * 1024 * 1024):
|
||||
|
@ -40,6 +42,7 @@ func (d disk) formatBytes(i int64) (result string) {
|
|||
return
|
||||
}
|
||||
|
||||
// fsType2StringMap - list of filesystems supported by donut
|
||||
var fsType2StringMap = map[string]string{
|
||||
"137d": "EXT",
|
||||
"ef51": "EXT2OLD",
|
||||
|
@ -53,6 +56,7 @@ var fsType2StringMap = map[string]string{
|
|||
"58465342": "XFS",
|
||||
}
|
||||
|
||||
// getFSType - get filesystem type
|
||||
func (d disk) getFSType(fsType int64) string {
|
||||
fsTypeHex := strconv.FormatInt(fsType, 16)
|
||||
fsTypeString, ok := fsType2StringMap[fsTypeHex]
|
|
@ -24,6 +24,7 @@ import (
|
|||
"github.com/minio-io/iodine"
|
||||
)
|
||||
|
||||
// encoder internal struct
|
||||
type encoder struct {
|
||||
encoder *encoding.Erasure
|
||||
k, m uint8
|
||||
|
@ -65,6 +66,8 @@ func NewEncoder(k, m uint8, technique string) (Encoder, error) {
|
|||
return e, nil
|
||||
}
|
||||
|
||||
// TODO - think again if this is needed
|
||||
// GetEncodedBlockLen - wrapper around erasure function with the same name
|
||||
func (e encoder) GetEncodedBlockLen(dataLength int) (int, error) {
|
||||
if dataLength <= 0 {
|
||||
return 0, iodine.New(errors.New("invalid argument"), nil)
|
||||
|
@ -72,6 +75,7 @@ func (e encoder) GetEncodedBlockLen(dataLength int) (int, error) {
|
|||
return encoding.GetEncodedBlockLen(dataLength, e.k), nil
|
||||
}
|
||||
|
||||
// Encode - erasure code input bytes
|
||||
func (e encoder) Encode(data []byte) (encodedData [][]byte, err error) {
|
||||
if data == nil {
|
||||
return nil, iodine.New(errors.New("invalid argument"), nil)
|
||||
|
@ -83,6 +87,7 @@ func (e encoder) Encode(data []byte) (encodedData [][]byte, err error) {
|
|||
return encodedData, nil
|
||||
}
|
||||
|
||||
// Decode - erasure decode input encoded bytes
|
||||
func (e encoder) Decode(encodedData [][]byte, dataLength int) (data []byte, err error) {
|
||||
decodedData, err := e.encoder.Decode(encodedData, dataLength)
|
||||
if err != nil {
|
|
@ -21,44 +21,6 @@ import (
|
|||
"os"
|
||||
)
|
||||
|
||||
// Collection of Donut specification interfaces
|
||||
|
||||
// Donut interface
|
||||
type Donut interface {
|
||||
ObjectStorage
|
||||
Management
|
||||
}
|
||||
|
||||
// ObjectStorage interface
|
||||
type ObjectStorage interface {
|
||||
// Storage service Operations
|
||||
GetBucketMetadata(bucket string) (map[string]string, error)
|
||||
SetBucketMetadata(bucket string, metadata map[string]string) error
|
||||
ListBuckets() ([]string, error)
|
||||
MakeBucket(bucket string) error
|
||||
|
||||
// Bucket Operations
|
||||
ListObjects(bucket, prefix, marker, delim string, maxKeys int) (result []string, prefixes []string, isTruncated bool, err error)
|
||||
|
||||
// Object Operations
|
||||
GetObject(bucket, object string) (io.ReadCloser, int64, error)
|
||||
GetObjectMetadata(bucket, object string) (map[string]string, error)
|
||||
PutObject(bucket, object, expectedMD5Sum string, reader io.ReadCloser, metadata map[string]string) error
|
||||
}
|
||||
|
||||
// Management is a donut management system interface
|
||||
type Management interface {
|
||||
Heal() error
|
||||
Rebalance() error
|
||||
Info() (map[string][]string, error)
|
||||
|
||||
AttachNode(node Node) error
|
||||
DetachNode(node Node) error
|
||||
|
||||
SaveConfig() error
|
||||
LoadConfig() error
|
||||
}
|
||||
|
||||
// Encoder interface
|
||||
type Encoder interface {
|
||||
GetEncodedBlockLen(dataLength int) (int, error)
|
|
@ -22,6 +22,7 @@ import (
|
|||
"github.com/minio-io/iodine"
|
||||
)
|
||||
|
||||
// node struct internal
|
||||
type node struct {
|
||||
hostname string
|
||||
disks map[string]Disk
|
||||
|
@ -40,14 +41,17 @@ func NewNode(hostname string) (Node, error) {
|
|||
return n, nil
|
||||
}
|
||||
|
||||
// GetNodeName - return hostname
|
||||
func (n node) GetNodeName() string {
|
||||
return n.hostname
|
||||
}
|
||||
|
||||
// ListDisks - return number of disks
|
||||
func (n node) ListDisks() (map[string]Disk, error) {
|
||||
return n.disks, nil
|
||||
}
|
||||
|
||||
// AttachDisk - attach a disk
|
||||
func (n node) AttachDisk(disk Disk) error {
|
||||
if disk == nil {
|
||||
return iodine.New(errors.New("Invalid argument"), nil)
|
||||
|
@ -56,15 +60,18 @@ func (n node) AttachDisk(disk Disk) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// DetachDisk - detach a disk
|
||||
func (n node) DetachDisk(disk Disk) error {
|
||||
delete(n.disks, disk.GetPath())
|
||||
return nil
|
||||
}
|
||||
|
||||
// SaveConfig - save node configuration
|
||||
func (n node) SaveConfig() error {
|
||||
return errors.New("Not Implemented")
|
||||
}
|
||||
|
||||
// LoadConfig - load node configuration from saved configs
|
||||
func (n node) LoadConfig() error {
|
||||
return errors.New("Not Implemented")
|
||||
}
|
|
@ -24,6 +24,7 @@ import (
|
|||
"io/ioutil"
|
||||
)
|
||||
|
||||
// object internal struct
|
||||
type object struct {
|
||||
name string
|
||||
objectPath string
|
57
Godeps/_workspace/src/github.com/minio-io/donut/donut_public_interfaces.go
generated
vendored
Normal file
57
Godeps/_workspace/src/github.com/minio-io/donut/donut_public_interfaces.go
generated
vendored
Normal file
|
@ -0,0 +1,57 @@
|
|||
/*
 * Minimalist Object Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package donut

import "io"

// Collection of Donut specification interfaces.

// Donut is a collection of object storage and management interfaces.
type Donut interface {
	ObjectStorage
	Management
}

// ObjectStorage is a donut object storage interface covering bucket-
// and object-level operations.
type ObjectStorage interface {
	// Storage service Operations
	GetBucketMetadata(bucket string) (map[string]string, error)
	SetBucketMetadata(bucket string, metadata map[string]string) error
	ListBuckets() ([]string, error)
	MakeBucket(bucket string) error

	// Bucket Operations.
	// ListObjects supports prefix/marker/delimiter filtering and caps
	// the result at maxKeys, reporting truncation via isTruncated.
	ListObjects(bucket, prefix, marker, delim string, maxKeys int) (result []string, prefixes []string, isTruncated bool, err error)

	// Object Operations.
	// GetObject streams the object's data along with its size;
	// PutObject optionally verifies the supplied expectedMD5Sum.
	GetObject(bucket, object string) (io.ReadCloser, int64, error)
	GetObjectMetadata(bucket, object string) (map[string]string, error)
	PutObject(bucket, object, expectedMD5Sum string, reader io.ReadCloser, metadata map[string]string) error
}

// Management is a donut management system interface for maintenance,
// topology, and configuration tasks.
type Management interface {
	Heal() error
	Rebalance() error
	Info() (map[string][]string, error)

	AttachNode(node Node) error
	DetachNode(node Node) error

	SaveConfig() error
	LoadConfig() error
}
|
|
@ -1,3 +1,19 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package donut
|
||||
|
||||
import (
|
||||
|
@ -8,6 +24,7 @@ import (
|
|||
"github.com/minio-io/iodine"
|
||||
)
|
||||
|
||||
// Rebalance -
|
||||
func (d donut) Rebalance() error {
|
||||
var totalOffSetLength int
|
||||
var newDisks []Disk
|
|
@ -1,3 +1,19 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package donut
|
||||
|
||||
import (
|
||||
|
@ -37,19 +53,21 @@ func createTestNodeDiskMap(p string) map[string][]string {
|
|||
return nodes
|
||||
}
|
||||
|
||||
func (s *MySuite) TestEmptyBucket(c *C) {
|
||||
// test empty donut
|
||||
func (s *MySuite) TestEmptyDonut(c *C) {
|
||||
root, err := ioutil.TempDir(os.TempDir(), "donut-")
|
||||
c.Assert(err, IsNil)
|
||||
defer os.RemoveAll(root)
|
||||
donut, err := NewDonut("test", createTestNodeDiskMap(root))
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
// check buckets are empty
|
||||
// check donut is empty
|
||||
buckets, err := donut.ListBuckets()
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(buckets, IsNil)
|
||||
}
|
||||
|
||||
// test make bucket without name
|
||||
func (s *MySuite) TestBucketWithoutNameFails(c *C) {
|
||||
root, err := ioutil.TempDir(os.TempDir(), "donut-")
|
||||
c.Assert(err, IsNil)
|
||||
|
@ -64,6 +82,23 @@ func (s *MySuite) TestBucketWithoutNameFails(c *C) {
|
|||
c.Assert(err, Not(IsNil))
|
||||
}
|
||||
|
||||
// test empty bucket
|
||||
func (s *MySuite) TestEmptyBucket(c *C) {
|
||||
root, err := ioutil.TempDir(os.TempDir(), "donut-")
|
||||
c.Assert(err, IsNil)
|
||||
defer os.RemoveAll(root)
|
||||
donut, err := NewDonut("test", createTestNodeDiskMap(root))
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
c.Assert(donut.MakeBucket("foo"), IsNil)
|
||||
// check if bucket is empty
|
||||
objects, _, istruncated, err := donut.ListObjects("foo", "", "", "", 1)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(objects, IsNil)
|
||||
c.Assert(istruncated, Equals, false)
|
||||
}
|
||||
|
||||
// test bucket list
|
||||
func (s *MySuite) TestMakeBucketAndList(c *C) {
|
||||
root, err := ioutil.TempDir(os.TempDir(), "donut-")
|
||||
c.Assert(err, IsNil)
|
||||
|
@ -80,6 +115,7 @@ func (s *MySuite) TestMakeBucketAndList(c *C) {
|
|||
c.Assert(buckets, DeepEquals, []string{"foo"})
|
||||
}
|
||||
|
||||
// test re-create bucket
|
||||
func (s *MySuite) TestMakeBucketWithSameNameFails(c *C) {
|
||||
root, err := ioutil.TempDir(os.TempDir(), "donut-")
|
||||
c.Assert(err, IsNil)
|
||||
|
@ -93,6 +129,7 @@ func (s *MySuite) TestMakeBucketWithSameNameFails(c *C) {
|
|||
c.Assert(err, Not(IsNil))
|
||||
}
|
||||
|
||||
// test make multiple buckets
|
||||
func (s *MySuite) TestCreateMultipleBucketsAndList(c *C) {
|
||||
root, err := ioutil.TempDir(os.TempDir(), "donut-")
|
||||
c.Assert(err, IsNil)
|
||||
|
@ -118,6 +155,7 @@ func (s *MySuite) TestCreateMultipleBucketsAndList(c *C) {
|
|||
c.Assert(buckets, DeepEquals, []string{"bar", "foo", "foobar"})
|
||||
}
|
||||
|
||||
// test object create without bucket
|
||||
func (s *MySuite) TestNewObjectFailsWithoutBucket(c *C) {
|
||||
root, err := ioutil.TempDir(os.TempDir(), "donut-")
|
||||
c.Assert(err, IsNil)
|
||||
|
@ -128,6 +166,7 @@ func (s *MySuite) TestNewObjectFailsWithoutBucket(c *C) {
|
|||
c.Assert(err, Not(IsNil))
|
||||
}
|
||||
|
||||
// test create object metadata
|
||||
func (s *MySuite) TestNewObjectMetadata(c *C) {
|
||||
root, err := ioutil.TempDir(os.TempDir(), "donut-")
|
||||
c.Assert(err, IsNil)
|
||||
|
@ -160,6 +199,7 @@ func (s *MySuite) TestNewObjectMetadata(c *C) {
|
|||
c.Assert(objectMetadata["hello"], Equals, metadata["hello"])
|
||||
}
|
||||
|
||||
// test create object fails without name
|
||||
func (s *MySuite) TestNewObjectFailsWithEmptyName(c *C) {
|
||||
root, err := ioutil.TempDir(os.TempDir(), "donut-")
|
||||
c.Assert(err, IsNil)
|
||||
|
@ -174,6 +214,7 @@ func (s *MySuite) TestNewObjectFailsWithEmptyName(c *C) {
|
|||
c.Assert(err, Not(IsNil))
|
||||
}
|
||||
|
||||
// test create object
|
||||
func (s *MySuite) TestNewObjectCanBeWritten(c *C) {
|
||||
root, err := ioutil.TempDir(os.TempDir(), "donut-")
|
||||
c.Assert(err, IsNil)
|
||||
|
@ -208,12 +249,13 @@ func (s *MySuite) TestNewObjectCanBeWritten(c *C) {
|
|||
actualMetadata, err := donut.GetObjectMetadata("foo", "obj")
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(expectedMd5Sum, Equals, actualMetadata["md5"])
|
||||
c.Assert("11", Equals, actualMetadata["size"])
|
||||
c.Assert(strconv.Itoa(len(data)), Equals, actualMetadata["size"])
|
||||
c.Assert("1.0", Equals, actualMetadata["version"])
|
||||
_, err = time.Parse(time.RFC3339Nano, actualMetadata["created"])
|
||||
c.Assert(err, IsNil)
|
||||
}
|
||||
|
||||
// test list objects
|
||||
func (s *MySuite) TestMultipleNewObjects(c *C) {
|
||||
root, err := ioutil.TempDir(os.TempDir(), "donut-")
|
||||
c.Assert(err, IsNil)
|
||||
|
@ -249,12 +291,22 @@ func (s *MySuite) TestMultipleNewObjects(c *C) {
|
|||
c.Assert(err, IsNil)
|
||||
c.Assert(readerBuffer2.Bytes(), DeepEquals, []byte("two"))
|
||||
|
||||
// test list objects
|
||||
/// test list of objects
|
||||
|
||||
// test list objects with prefix and delimiter
|
||||
listObjects, prefixes, isTruncated, err := donut.ListObjects("foo", "o", "", "1", 1)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(isTruncated, Equals, false)
|
||||
c.Assert(prefixes[0], DeepEquals, "obj1")
|
||||
c.Assert(prefixes[0], Equals, "obj1")
|
||||
|
||||
// test list objects with only delimiter
|
||||
listObjects, prefixes, isTruncated, err = donut.ListObjects("foo", "", "", "1", 10)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(listObjects[0], Equals, "obj2")
|
||||
c.Assert(isTruncated, Equals, false)
|
||||
c.Assert(prefixes[0], Equals, "obj1")
|
||||
|
||||
// test list objects with only prefix
|
||||
listObjects, _, isTruncated, err = donut.ListObjects("foo", "o", "", "", 10)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(isTruncated, Equals, false)
|
||||
|
@ -273,6 +325,7 @@ func (s *MySuite) TestMultipleNewObjects(c *C) {
|
|||
c.Assert(err, IsNil)
|
||||
c.Assert(readerBuffer3.Bytes(), DeepEquals, []byte("three"))
|
||||
|
||||
// test list objects with maxkeys
|
||||
listObjects, _, isTruncated, err = donut.ListObjects("foo", "o", "", "", 2)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(isTruncated, Equals, true)
|
||||
|
|
|
@ -8,10 +8,12 @@ import (
|
|||
"github.com/minio-io/iodine"
|
||||
)
|
||||
|
||||
// Heal - heal a donut and fix bad data blocks
|
||||
func (d donut) Heal() error {
|
||||
return errors.New("Not Implemented")
|
||||
}
|
||||
|
||||
// Info - return info about donut configuration
|
||||
func (d donut) Info() (nodeDiskMap map[string][]string, err error) {
|
||||
nodeDiskMap = make(map[string][]string)
|
||||
for nodeName, node := range d.nodes {
|
||||
|
@ -28,6 +30,7 @@ func (d donut) Info() (nodeDiskMap map[string][]string, err error) {
|
|||
return nodeDiskMap, nil
|
||||
}
|
||||
|
||||
// AttachNode - attach node
|
||||
func (d donut) AttachNode(node Node) error {
|
||||
if node == nil {
|
||||
return iodine.New(errors.New("invalid argument"), nil)
|
||||
|
@ -35,11 +38,14 @@ func (d donut) AttachNode(node Node) error {
|
|||
d.nodes[node.GetNodeName()] = node
|
||||
return nil
|
||||
}
|
||||
|
||||
// DetachNode - detach node
|
||||
func (d donut) DetachNode(node Node) error {
|
||||
delete(d.nodes, node.GetNodeName())
|
||||
return nil
|
||||
}
|
||||
|
||||
// SaveConfig - save donut configuration
|
||||
func (d donut) SaveConfig() error {
|
||||
nodeDiskMap := make(map[string][]string)
|
||||
for hostname, node := range d.nodes {
|
||||
|
@ -64,6 +70,7 @@ func (d donut) SaveConfig() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// LoadConfig - load configuration
|
||||
func (d donut) LoadConfig() error {
|
||||
return errors.New("Not Implemented")
|
||||
}
|
||||
|
|
|
@ -1,3 +1,19 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package donut
|
||||
|
||||
import (
|
||||
|
@ -11,15 +27,17 @@ import (
|
|||
"github.com/minio-io/iodine"
|
||||
)
|
||||
|
||||
// MakeBucket - make a new bucket
|
||||
func (d donut) MakeBucket(bucket string) error {
|
||||
if bucket == "" || strings.TrimSpace(bucket) == "" {
|
||||
return iodine.New(errors.New("invalid argument"), nil)
|
||||
}
|
||||
return d.makeBucket(bucket)
|
||||
return d.makeDonutBucket(bucket)
|
||||
}
|
||||
|
||||
// GetBucketMetadata - get bucket metadata
|
||||
func (d donut) GetBucketMetadata(bucket string) (map[string]string, error) {
|
||||
err := d.getAllBuckets()
|
||||
err := d.getDonutBuckets()
|
||||
if err != nil {
|
||||
return nil, iodine.New(err, nil)
|
||||
}
|
||||
|
@ -32,12 +50,14 @@ func (d donut) GetBucketMetadata(bucket string) (map[string]string, error) {
|
|||
return metadata, nil
|
||||
}
|
||||
|
||||
// SetBucketMetadata - set bucket metadata
|
||||
func (d donut) SetBucketMetadata(bucket string, metadata map[string]string) error {
|
||||
return errors.New("Not implemented")
|
||||
}
|
||||
|
||||
// ListBuckets - return list of buckets
|
||||
func (d donut) ListBuckets() (results []string, err error) {
|
||||
err = d.getAllBuckets()
|
||||
err = d.getDonutBuckets()
|
||||
if err != nil {
|
||||
return nil, iodine.New(err, nil)
|
||||
}
|
||||
|
@ -48,6 +68,7 @@ func (d donut) ListBuckets() (results []string, err error) {
|
|||
return results, nil
|
||||
}
|
||||
|
||||
// ListObjects - return list of objects
|
||||
func (d donut) ListObjects(bucket, prefix, marker, delimiter string, maxkeys int) ([]string, []string, bool, error) {
|
||||
// TODO: Marker is not yet handled please handle it
|
||||
errParams := map[string]string{
|
||||
|
@ -57,7 +78,7 @@ func (d donut) ListObjects(bucket, prefix, marker, delimiter string, maxkeys int
|
|||
"delimiter": delimiter,
|
||||
"maxkeys": strconv.Itoa(maxkeys),
|
||||
}
|
||||
err := d.getAllBuckets()
|
||||
err := d.getDonutBuckets()
|
||||
if err != nil {
|
||||
return nil, nil, false, iodine.New(err, errParams)
|
||||
}
|
||||
|
@ -109,6 +130,7 @@ func (d donut) ListObjects(bucket, prefix, marker, delimiter string, maxkeys int
|
|||
return results, commonPrefixes, isTruncated, nil
|
||||
}
|
||||
|
||||
// PutObject - put object
|
||||
func (d donut) PutObject(bucket, object, expectedMD5Sum string, reader io.ReadCloser, metadata map[string]string) error {
|
||||
errParams := map[string]string{
|
||||
"bucket": bucket,
|
||||
|
@ -120,7 +142,7 @@ func (d donut) PutObject(bucket, object, expectedMD5Sum string, reader io.ReadCl
|
|||
if object == "" || strings.TrimSpace(object) == "" {
|
||||
return iodine.New(errors.New("invalid argument"), errParams)
|
||||
}
|
||||
err := d.getAllBuckets()
|
||||
err := d.getDonutBuckets()
|
||||
if err != nil {
|
||||
return iodine.New(err, errParams)
|
||||
}
|
||||
|
@ -134,6 +156,7 @@ func (d donut) PutObject(bucket, object, expectedMD5Sum string, reader io.ReadCl
|
|||
return nil
|
||||
}
|
||||
|
||||
// GetObject - get object
|
||||
func (d donut) GetObject(bucket, object string) (reader io.ReadCloser, size int64, err error) {
|
||||
errParams := map[string]string{
|
||||
"bucket": bucket,
|
||||
|
@ -145,7 +168,7 @@ func (d donut) GetObject(bucket, object string) (reader io.ReadCloser, size int6
|
|||
if object == "" || strings.TrimSpace(object) == "" {
|
||||
return nil, 0, iodine.New(errors.New("invalid argument"), errParams)
|
||||
}
|
||||
err = d.getAllBuckets()
|
||||
err = d.getDonutBuckets()
|
||||
if err != nil {
|
||||
return nil, 0, iodine.New(err, nil)
|
||||
}
|
||||
|
@ -155,12 +178,13 @@ func (d donut) GetObject(bucket, object string) (reader io.ReadCloser, size int6
|
|||
return d.buckets[bucket].GetObject(object)
|
||||
}
|
||||
|
||||
// GetObjectMetadata - get object metadata
|
||||
func (d donut) GetObjectMetadata(bucket, object string) (map[string]string, error) {
|
||||
errParams := map[string]string{
|
||||
"bucket": bucket,
|
||||
"object": object,
|
||||
}
|
||||
err := d.getAllBuckets()
|
||||
err := d.getDonutBuckets()
|
||||
if err != nil {
|
||||
return nil, iodine.New(err, errParams)
|
||||
}
|
||||
|
@ -171,9 +195,9 @@ func (d donut) GetObjectMetadata(bucket, object string) (map[string]string, erro
|
|||
if err != nil {
|
||||
return nil, iodine.New(err, errParams)
|
||||
}
|
||||
objectStruct, ok := objectList[object]
|
||||
donutObject, ok := objectList[object]
|
||||
if !ok {
|
||||
return nil, iodine.New(errors.New("object does not exist"), errParams)
|
||||
}
|
||||
return objectStruct.GetObjectMetadata()
|
||||
return donutObject.GetObjectMetadata()
|
||||
}
|
||||
|
|
|
@ -1,86 +1,32 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package donut
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/minio-io/iodine"
|
||||
)
|
||||
|
||||
func appendUniq(slice []string, i string) []string {
|
||||
for _, ele := range slice {
|
||||
if ele == i {
|
||||
return slice
|
||||
}
|
||||
}
|
||||
return append(slice, i)
|
||||
}
|
||||
|
||||
func filterPrefix(objects []string, prefix string) []string {
|
||||
var results []string
|
||||
for _, object := range objects {
|
||||
if strings.HasPrefix(object, prefix) {
|
||||
results = append(results, object)
|
||||
}
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
func removePrefix(objects []string, prefix string) []string {
|
||||
var results []string
|
||||
for _, object := range objects {
|
||||
results = append(results, strings.TrimPrefix(object, prefix))
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
func filterDelimited(objects []string, delim string) []string {
|
||||
var results []string
|
||||
for _, object := range objects {
|
||||
if !strings.Contains(object, delim) {
|
||||
results = append(results, object)
|
||||
}
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
func filterNotDelimited(objects []string, delim string) []string {
|
||||
var results []string
|
||||
for _, object := range objects {
|
||||
if strings.Contains(object, delim) {
|
||||
results = append(results, object)
|
||||
}
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
func extractDir(objects []string, delim string) []string {
|
||||
var results []string
|
||||
for _, object := range objects {
|
||||
parts := strings.Split(object, delim)
|
||||
results = append(results, parts[0]+delim)
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
func uniqueObjects(objects []string) []string {
|
||||
objectMap := make(map[string]string)
|
||||
for _, v := range objects {
|
||||
objectMap[v] = v
|
||||
}
|
||||
var results []string
|
||||
for k := range objectMap {
|
||||
results = append(results, k)
|
||||
}
|
||||
sort.Strings(results)
|
||||
return results
|
||||
}
|
||||
|
||||
func (d donut) makeBucket(bucketName string) error {
|
||||
err := d.getAllBuckets()
|
||||
func (d donut) makeDonutBucket(bucketName string) error {
|
||||
err := d.getDonutBuckets()
|
||||
if err != nil {
|
||||
return iodine.New(err, nil)
|
||||
}
|
||||
|
@ -110,7 +56,7 @@ func (d donut) makeBucket(bucketName string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (d donut) getAllBuckets() error {
|
||||
func (d donut) getDonutBuckets() error {
|
||||
for _, node := range d.nodes {
|
||||
disks, err := node.ListDisks()
|
||||
if err != nil {
|
||||
|
@ -127,7 +73,7 @@ func (d donut) getAllBuckets() error {
|
|||
return iodine.New(errors.New("corrupted backend"), nil)
|
||||
}
|
||||
bucketName := splitDir[0]
|
||||
// we dont need this NewBucket once we cache these
|
||||
// we dont need this NewBucket once we cache from makeDonutBucket()
|
||||
bucket, err := NewBucket(bucketName, d.name, d.nodes)
|
||||
if err != nil {
|
||||
return iodine.New(err, nil)
|
||||
|
|
|
@ -1,24 +0,0 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
|
@ -1,45 +0,0 @@
|
|||
{
|
||||
"ImportPath": "github.com/minio-io/objectdriver",
|
||||
"GoVersion": "go1.4",
|
||||
"Packages": [
|
||||
"./..."
|
||||
],
|
||||
"Deps": [
|
||||
{
|
||||
"ImportPath": "github.com/minio-io/check",
|
||||
"Rev": "bc4e66da8cd7ff58a4b9b84301f906352b8f2c94"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/minio-io/donut",
|
||||
"Rev": "4b20a97ea6d1fd4744ae06774e4c0092a67dafb5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/minio-io/erasure",
|
||||
"Rev": "8a72b14991a6835b4d30403e7cb201f373b7cb3a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/minio-io/iodine",
|
||||
"Rev": "55cc4d4256c68fbd6f0775f1a25e37e6a2f6457e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/minio-io/minio/pkg/utils/log",
|
||||
"Rev": "81e4a3332cd3e1b44eb21fdbb7171ba715625ab7"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/minio-io/minio/pkg/utils/split",
|
||||
"Rev": "81e4a3332cd3e1b44eb21fdbb7171ba715625ab7"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/stretchr/objx",
|
||||
"Rev": "cbeaeb16a013161a98496fad62933b1d21786672"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/stretchr/testify/assert",
|
||||
"Rev": "e4ec8152c15fc46bd5056ce65997a07c7d415325"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/stretchr/testify/mock",
|
||||
"Rev": "e4ec8152c15fc46bd5056ce65997a07c7d415325"
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,5 +0,0 @@
|
|||
This directory tree is generated automatically by godep.
|
||||
|
||||
Please do not edit.
|
||||
|
||||
See https://github.com/tools/godep for more information.
|
|
@ -1,202 +0,0 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
|
@ -1,66 +0,0 @@
|
|||
MINIOPATH=$(GOPATH)/src/github.com/minio-io/objectdriver
|
||||
|
||||
all: getdeps install
|
||||
|
||||
checkdeps:
|
||||
@echo "Checking deps:"
|
||||
@(env bash $(PWD)/buildscripts/checkdeps.sh)
|
||||
|
||||
checkgopath:
|
||||
@echo "Checking if project is at ${MINIOPATH}"
|
||||
@if [ ! -d ${MINIOPATH} ]; then echo "Project not found in $GOPATH, please follow instructions provided at https://github.com/Minio-io/minio/blob/master/CONTRIBUTING.md#setup-your-minio-github-repository" && exit 1; fi
|
||||
|
||||
getdeps: checkdeps checkgopath
|
||||
@go get github.com/minio-io/godep && echo "Installed godep:"
|
||||
@go get github.com/golang/lint/golint && echo "Installed golint:"
|
||||
@go get golang.org/x/tools/cmd/vet && echo "Installed vet:"
|
||||
@go get github.com/fzipp/gocyclo && echo "Installed gocyclo:"
|
||||
|
||||
verifiers: getdeps vet fmt lint cyclo
|
||||
|
||||
vet:
|
||||
@echo "Running $@:"
|
||||
@go vet ./...
|
||||
fmt:
|
||||
@echo "Running $@:"
|
||||
@test -z "$$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" || \
|
||||
echo "+ please format Go code with 'gofmt -s'"
|
||||
lint:
|
||||
@echo "Running $@:"
|
||||
@test -z "$$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)"
|
||||
|
||||
cyclo:
|
||||
@echo "Running $@:"
|
||||
@test -z "$$(gocyclo -over 15 . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)"
|
||||
|
||||
build-all: verifiers
|
||||
@echo "Building Libraries:"
|
||||
@godep go generate github.com/minio-io/erasure
|
||||
@godep go generate ./...
|
||||
@godep go build -a ./... # have no stale packages
|
||||
|
||||
test-all: build-all
|
||||
@echo "Running Test Suites:"
|
||||
@godep go test -race ./...
|
||||
|
||||
test: test-all
|
||||
|
||||
minio: build-all test-all
|
||||
|
||||
install: minio
|
||||
|
||||
save: restore
|
||||
@godep save ./...
|
||||
|
||||
restore:
|
||||
@godep restore
|
||||
|
||||
env:
|
||||
@godep go env
|
||||
|
||||
docs-deploy:
|
||||
@mkdocs gh-deploy --clean
|
||||
|
||||
clean:
|
||||
@echo "Cleaning up all the generated files:"
|
||||
@rm -fv cover.out
|
|
@ -1,2 +0,0 @@
|
|||
# objectdriver
|
||||
Object Storage Driver
|
|
@ -1,423 +0,0 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package drivers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
|
||||
"time"
|
||||
|
||||
"github.com/minio-io/check"
|
||||
)
|
||||
|
||||
// APITestSuite - collection of API tests
|
||||
func APITestSuite(c *check.C, create func() Driver) {
|
||||
testCreateBucket(c, create)
|
||||
testMultipleObjectCreation(c, create)
|
||||
testPaging(c, create)
|
||||
testObjectOverwriteFails(c, create)
|
||||
testNonExistantBucketOperations(c, create)
|
||||
testBucketMetadata(c, create)
|
||||
testBucketRecreateFails(c, create)
|
||||
testPutObjectInSubdir(c, create)
|
||||
testListBuckets(c, create)
|
||||
testListBucketsOrder(c, create)
|
||||
testListObjectsTestsForNonExistantBucket(c, create)
|
||||
testNonExistantObjectInBucket(c, create)
|
||||
testGetDirectoryReturnsObjectNotFound(c, create)
|
||||
testDefaultContentType(c, create)
|
||||
}
|
||||
|
||||
func testCreateBucket(c *check.C, create func() Driver) {
|
||||
drivers := create()
|
||||
err := drivers.CreateBucket("bucket")
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
func testMultipleObjectCreation(c *check.C, create func() Driver) {
|
||||
objects := make(map[string][]byte)
|
||||
drivers := create()
|
||||
err := drivers.CreateBucket("bucket")
|
||||
c.Assert(err, check.IsNil)
|
||||
for i := 0; i < 10; i++ {
|
||||
randomPerm := rand.Perm(10)
|
||||
randomString := ""
|
||||
for _, num := range randomPerm {
|
||||
randomString = randomString + strconv.Itoa(num)
|
||||
}
|
||||
|
||||
hasher := md5.New()
|
||||
hasher.Write([]byte(randomString))
|
||||
md5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
|
||||
|
||||
key := "obj" + strconv.Itoa(i)
|
||||
objects[key] = []byte(randomString)
|
||||
err := drivers.CreateObject("bucket", key, "", md5Sum, bytes.NewBufferString(randomString))
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
// ensure no duplicate etags
|
||||
etags := make(map[string]string)
|
||||
for key, value := range objects {
|
||||
var byteBuffer bytes.Buffer
|
||||
_, err := drivers.GetObject(&byteBuffer, "bucket", key)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(byteBuffer.Bytes(), check.DeepEquals, value)
|
||||
|
||||
metadata, err := drivers.GetObjectMetadata("bucket", key, "")
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(metadata.Size, check.Equals, int64(len(value)))
|
||||
|
||||
_, ok := etags[metadata.Md5]
|
||||
c.Assert(ok, check.Equals, false)
|
||||
etags[metadata.Md5] = metadata.Md5
|
||||
}
|
||||
}
|
||||
|
||||
func testPaging(c *check.C, create func() Driver) {
|
||||
drivers := create()
|
||||
drivers.CreateBucket("bucket")
|
||||
resources := BucketResourcesMetadata{}
|
||||
objects, resources, err := drivers.ListObjects("bucket", resources)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(len(objects), check.Equals, 0)
|
||||
c.Assert(resources.IsTruncated, check.Equals, false)
|
||||
// check before paging occurs
|
||||
for i := 0; i < 5; i++ {
|
||||
key := "obj" + strconv.Itoa(i)
|
||||
drivers.CreateObject("bucket", key, "", "", bytes.NewBufferString(key))
|
||||
resources.Maxkeys = 5
|
||||
objects, resources, err = drivers.ListObjects("bucket", resources)
|
||||
c.Assert(len(objects), check.Equals, i+1)
|
||||
c.Assert(resources.IsTruncated, check.Equals, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
// check after paging occurs pages work
|
||||
for i := 6; i <= 10; i++ {
|
||||
key := "obj" + strconv.Itoa(i)
|
||||
drivers.CreateObject("bucket", key, "", "", bytes.NewBufferString(key))
|
||||
resources.Maxkeys = 5
|
||||
objects, resources, err = drivers.ListObjects("bucket", resources)
|
||||
c.Assert(len(objects), check.Equals, 5)
|
||||
c.Assert(resources.IsTruncated, check.Equals, true)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
// check paging with prefix at end returns less objects
|
||||
{
|
||||
drivers.CreateObject("bucket", "newPrefix", "", "", bytes.NewBufferString("prefix1"))
|
||||
drivers.CreateObject("bucket", "newPrefix2", "", "", bytes.NewBufferString("prefix2"))
|
||||
resources.Prefix = "new"
|
||||
resources.Maxkeys = 5
|
||||
objects, resources, err = drivers.ListObjects("bucket", resources)
|
||||
c.Assert(len(objects), check.Equals, 2)
|
||||
}
|
||||
|
||||
// check ordering of pages
|
||||
{
|
||||
resources.Prefix = ""
|
||||
resources.Maxkeys = 1000
|
||||
objects, resources, err = drivers.ListObjects("bucket", resources)
|
||||
c.Assert(objects[0].Key, check.Equals, "newPrefix")
|
||||
c.Assert(objects[1].Key, check.Equals, "newPrefix2")
|
||||
c.Assert(objects[2].Key, check.Equals, "obj0")
|
||||
c.Assert(objects[3].Key, check.Equals, "obj1")
|
||||
c.Assert(objects[4].Key, check.Equals, "obj10")
|
||||
}
|
||||
|
||||
// check delimited results with delimiter and prefix
|
||||
{
|
||||
drivers.CreateObject("bucket", "this/is/delimited", "", "", bytes.NewBufferString("prefix1"))
|
||||
drivers.CreateObject("bucket", "this/is/also/a/delimited/file", "", "", bytes.NewBufferString("prefix2"))
|
||||
var prefixes []string
|
||||
resources.CommonPrefixes = prefixes // allocate new everytime
|
||||
resources.Delimiter = "/"
|
||||
resources.Prefix = "this/is/"
|
||||
resources.Maxkeys = 10
|
||||
objects, resources, err = drivers.ListObjects("bucket", resources)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(len(objects), check.Equals, 1)
|
||||
c.Assert(resources.CommonPrefixes[0], check.Equals, "this/is/also/")
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
|
||||
// check delimited results with delimiter without prefix
|
||||
{
|
||||
var prefixes []string
|
||||
resources.CommonPrefixes = prefixes // allocate new everytime
|
||||
resources.Delimiter = "/"
|
||||
resources.Prefix = ""
|
||||
resources.Maxkeys = 1000
|
||||
objects, resources, err = drivers.ListObjects("bucket", resources)
|
||||
c.Assert(objects[0].Key, check.Equals, "newPrefix")
|
||||
c.Assert(objects[1].Key, check.Equals, "newPrefix2")
|
||||
c.Assert(objects[2].Key, check.Equals, "obj0")
|
||||
c.Assert(objects[3].Key, check.Equals, "obj1")
|
||||
c.Assert(objects[4].Key, check.Equals, "obj10")
|
||||
c.Assert(resources.CommonPrefixes[0], check.Equals, "this/")
|
||||
}
|
||||
|
||||
// check ordering of results with prefix
|
||||
{
|
||||
resources.Prefix = "obj"
|
||||
resources.Delimiter = ""
|
||||
resources.Maxkeys = 1000
|
||||
objects, resources, err = drivers.ListObjects("bucket", resources)
|
||||
c.Assert(objects[0].Key, check.Equals, "obj0")
|
||||
c.Assert(objects[1].Key, check.Equals, "obj1")
|
||||
c.Assert(objects[2].Key, check.Equals, "obj10")
|
||||
c.Assert(objects[3].Key, check.Equals, "obj2")
|
||||
c.Assert(objects[4].Key, check.Equals, "obj3")
|
||||
}
|
||||
// check ordering of results with prefix and no paging
|
||||
{
|
||||
resources.Prefix = "new"
|
||||
resources.Maxkeys = 5
|
||||
objects, resources, err = drivers.ListObjects("bucket", resources)
|
||||
c.Assert(objects[0].Key, check.Equals, "newPrefix")
|
||||
c.Assert(objects[1].Key, check.Equals, "newPrefix2")
|
||||
}
|
||||
}
|
||||
|
||||
func testObjectOverwriteFails(c *check.C, create func() Driver) {
|
||||
drivers := create()
|
||||
drivers.CreateBucket("bucket")
|
||||
|
||||
hasher1 := md5.New()
|
||||
hasher1.Write([]byte("one"))
|
||||
md5Sum1 := base64.StdEncoding.EncodeToString(hasher1.Sum(nil))
|
||||
err := drivers.CreateObject("bucket", "object", "", md5Sum1, bytes.NewBufferString("one"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
hasher2 := md5.New()
|
||||
hasher2.Write([]byte("three"))
|
||||
md5Sum2 := base64.StdEncoding.EncodeToString(hasher2.Sum(nil))
|
||||
err = drivers.CreateObject("bucket", "object", "", md5Sum2, bytes.NewBufferString("three"))
|
||||
|
||||
c.Assert(err, check.Not(check.IsNil))
|
||||
var bytesBuffer bytes.Buffer
|
||||
length, err := drivers.GetObject(&bytesBuffer, "bucket", "object")
|
||||
c.Assert(length, check.Equals, int64(len("one")))
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(string(bytesBuffer.Bytes()), check.Equals, "one")
|
||||
}
|
||||
|
||||
func testNonExistantBucketOperations(c *check.C, create func() Driver) {
|
||||
drivers := create()
|
||||
err := drivers.CreateObject("bucket", "object", "", "", bytes.NewBufferString("one"))
|
||||
c.Assert(err, check.Not(check.IsNil))
|
||||
}
|
||||
|
||||
func testBucketMetadata(c *check.C, create func() Driver) {
|
||||
drivers := create()
|
||||
err := drivers.CreateBucket("string")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
metadata, err := drivers.GetBucketMetadata("string")
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(metadata.Name, check.Equals, "string")
|
||||
}
|
||||
|
||||
func testBucketRecreateFails(c *check.C, create func() Driver) {
|
||||
drivers := create()
|
||||
err := drivers.CreateBucket("string")
|
||||
c.Assert(err, check.IsNil)
|
||||
err = drivers.CreateBucket("string")
|
||||
c.Assert(err, check.Not(check.IsNil))
|
||||
}
|
||||
|
||||
func testPutObjectInSubdir(c *check.C, create func() Driver) {
|
||||
drivers := create()
|
||||
err := drivers.CreateBucket("bucket")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
hasher := md5.New()
|
||||
hasher.Write([]byte("hello world"))
|
||||
md5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
|
||||
err = drivers.CreateObject("bucket", "dir1/dir2/object", "", md5Sum, bytes.NewBufferString("hello world"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
var bytesBuffer bytes.Buffer
|
||||
length, err := drivers.GetObject(&bytesBuffer, "bucket", "dir1/dir2/object")
|
||||
c.Assert(len(bytesBuffer.Bytes()), check.Equals, len("hello world"))
|
||||
c.Assert(int64(len(bytesBuffer.Bytes())), check.Equals, length)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
func testListBuckets(c *check.C, create func() Driver) {
|
||||
drivers := create()
|
||||
|
||||
// test empty list
|
||||
buckets, err := drivers.ListBuckets()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(len(buckets), check.Equals, 0)
|
||||
|
||||
// add one and test exists
|
||||
err = drivers.CreateBucket("bucket1")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
buckets, err = drivers.ListBuckets()
|
||||
c.Assert(len(buckets), check.Equals, 1)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// add two and test exists
|
||||
err = drivers.CreateBucket("bucket2")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
buckets, err = drivers.ListBuckets()
|
||||
c.Assert(len(buckets), check.Equals, 2)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// add three and test exists + prefix
|
||||
err = drivers.CreateBucket("bucket22")
|
||||
|
||||
buckets, err = drivers.ListBuckets()
|
||||
c.Assert(len(buckets), check.Equals, 3)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
func testListBucketsOrder(c *check.C, create func() Driver) {
|
||||
// if implementation contains a map, order of map keys will vary.
|
||||
// this ensures they return in the same order each time
|
||||
for i := 0; i < 10; i++ {
|
||||
drivers := create()
|
||||
// add one and test exists
|
||||
drivers.CreateBucket("bucket1")
|
||||
drivers.CreateBucket("bucket2")
|
||||
|
||||
buckets, err := drivers.ListBuckets()
|
||||
c.Assert(len(buckets), check.Equals, 2)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(buckets[0].Name, check.Equals, "bucket1")
|
||||
c.Assert(buckets[1].Name, check.Equals, "bucket2")
|
||||
}
|
||||
}
|
||||
|
||||
func testListObjectsTestsForNonExistantBucket(c *check.C, create func() Driver) {
|
||||
drivers := create()
|
||||
resources := BucketResourcesMetadata{Prefix: "", Maxkeys: 1000}
|
||||
objects, resources, err := drivers.ListObjects("bucket", resources)
|
||||
c.Assert(err, check.Not(check.IsNil))
|
||||
c.Assert(resources.IsTruncated, check.Equals, false)
|
||||
c.Assert(len(objects), check.Equals, 0)
|
||||
}
|
||||
|
||||
// testNonExistantObjectInBucket fetches a key that was never written from an
// existing bucket and expects an ObjectNotFound error, zero length, and an
// empty output buffer.
func testNonExistantObjectInBucket(c *check.C, create func() Driver) {
	drivers := create()
	err := drivers.CreateBucket("bucket")
	c.Assert(err, check.IsNil)

	var byteBuffer bytes.Buffer
	length, err := drivers.GetObject(&byteBuffer, "bucket", "dir1")
	c.Assert(length, check.Equals, int64(0))
	c.Assert(err, check.Not(check.IsNil))
	c.Assert(len(byteBuffer.Bytes()), check.Equals, 0)
	// verify the concrete error type and its message
	switch err := err.(type) {
	case ObjectNotFound:
		{
			c.Assert(err, check.ErrorMatches, "Object not Found: bucket#dir1")
		}
	default:
		{
			// any other error type forces a failure here
			c.Assert(err, check.Equals, "fails")
		}
	}
}
|
||||
|
||||
func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() Driver) {
|
||||
drivers := create()
|
||||
err := drivers.CreateBucket("bucket")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
err = drivers.CreateObject("bucket", "dir1/dir2/object", "", "", bytes.NewBufferString("hello world"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
var byteBuffer bytes.Buffer
|
||||
length, err := drivers.GetObject(&byteBuffer, "bucket", "dir1")
|
||||
c.Assert(length, check.Equals, int64(0))
|
||||
switch err := err.(type) {
|
||||
case ObjectNotFound:
|
||||
{
|
||||
c.Assert(err.Bucket, check.Equals, "bucket")
|
||||
c.Assert(err.Object, check.Equals, "dir1")
|
||||
}
|
||||
default:
|
||||
{
|
||||
// force a failure with a line number
|
||||
c.Assert(err, check.Equals, "ObjectNotFound")
|
||||
}
|
||||
}
|
||||
c.Assert(len(byteBuffer.Bytes()), check.Equals, 0)
|
||||
|
||||
var byteBuffer2 bytes.Buffer
|
||||
length, err = drivers.GetObject(&byteBuffer, "bucket", "dir1/")
|
||||
c.Assert(length, check.Equals, int64(0))
|
||||
switch err := err.(type) {
|
||||
case ObjectNotFound:
|
||||
{
|
||||
c.Assert(err.Bucket, check.Equals, "bucket")
|
||||
c.Assert(err.Object, check.Equals, "dir1/")
|
||||
}
|
||||
default:
|
||||
{
|
||||
// force a failure with a line number
|
||||
c.Assert(err, check.Equals, "ObjectNotFound")
|
||||
}
|
||||
}
|
||||
c.Assert(len(byteBuffer2.Bytes()), check.Equals, 0)
|
||||
}
|
||||
|
||||
func testDefaultContentType(c *check.C, create func() Driver) {
|
||||
drivers := create()
|
||||
err := drivers.CreateBucket("bucket")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// test empty
|
||||
err = drivers.CreateObject("bucket", "one", "", "", bytes.NewBufferString("one"))
|
||||
metadata, err := drivers.GetObjectMetadata("bucket", "one", "")
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(metadata.ContentType, check.Equals, "application/octet-stream")
|
||||
|
||||
// test custom
|
||||
drivers.CreateObject("bucket", "two", "application/text", "", bytes.NewBufferString("two"))
|
||||
metadata, err = drivers.GetObjectMetadata("bucket", "two", "")
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(metadata.ContentType, check.Equals, "application/text")
|
||||
|
||||
// test trim space
|
||||
drivers.CreateObject("bucket", "three", "\tapplication/json ", "", bytes.NewBufferString("three"))
|
||||
metadata, err = drivers.GetObjectMetadata("bucket", "three", "")
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(metadata.ContentType, check.Equals, "application/json")
|
||||
}
|
||||
|
||||
/*
|
||||
func testContentMd5Set(c *check.C, create func() Driver) {
|
||||
drivers := create()
|
||||
err := drivers.CreateBucket("bucket")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// test md5 invalid
|
||||
err = drivers.CreateObject("bucket", "one", "", "NWJiZjVhNTIzMjhlNzQzOWFlNmU3MTlkZmU3MTIyMDA", bytes.NewBufferString("one"))
|
||||
c.Assert(err, check.Not(check.IsNil))
|
||||
err = drivers.CreateObject("bucket", "two", "", "NWJiZjVhNTIzMjhlNzQzOWFlNmU3MTlkZmU3MTIyMDA=", bytes.NewBufferString("one"))
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
*/
|
|
@ -1,199 +0,0 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package drivers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// User - canonical user identity referenced by a policy statement's
// Principal field.
type User struct {
	// AWS holds the principal string, e.g. "arn:aws:iam::user",
	// "minio::user", or the wildcard "*" (see isValidPrincipal).
	AWS string
}
|
||||
|
||||
// Statement - minio policy statement. All fields are mandatory; see
// parseStatement for the validation rules.
type Statement struct {
	Sid       string   // statement identifier; must be non-empty
	Effect    string   // "Allow" or "Deny" (SupportedEffectMap)
	Principal User     // who the statement applies to
	Action    []string // actions, e.g. "minio:GetObject" or "s3:GetObject"
	Resource  []string // resources, e.g. "minio:::bucket" or "arn:aws:s3:::bucket"
	// add Condition struct/var TODO - fix it in future if necessary
}
|
||||
|
||||
// BucketPolicy - minio policy collection: a dated version plus one or more
// statements, as decoded from a client-supplied JSON body by Parsepolicy.
type BucketPolicy struct {
	Version   string // date in 0000-00-00 format
	Statement []Statement
}
|
||||
|
||||
// Resource delimiter
const (
	// MinioResource prefixes bucket names in policy Resource entries.
	MinioResource = "minio:::"
)
|
||||
|
||||
// TODO support canonical user
// Principal delimiter
const (
	// MinioPrincipal prefixes usernames in policy Principal entries.
	MinioPrincipal = "minio::"
)
|
||||
|
||||
// Action map
// SupportedActionMap - lookup table of the minio policy actions this server
// understands.
var SupportedActionMap = map[string]bool{
	"*":                        true,
	"minio:GetObject":          true,
	"minio:ListBucket":         true,
	"minio:PutObject":          true,
	"minio:CreateBucket":       true,
	"minio:GetBucketPolicy":    true,
	"minio:DeleteBucketPolicy": true,
	"minio:ListAllMyBuckets":   true,
	"minio:PutBucketPolicy":    true,
}

// Effect map
// SupportedEffectMap - lookup table of valid policy statement effects.
var SupportedEffectMap = map[string]bool{
	"Allow": true,
	"Deny":  true,
}

// isValidAction reports whether every entry in action is a supported minio
// action. An empty list is vacuously valid; callers (parseStatement) check
// for emptiness separately.
func isValidAction(action []string) bool {
	for _, a := range action {
		if !SupportedActionMap[a] {
			return false
		}
	}
	return true
}

// isValidEffect reports whether effect is one of the supported effects;
// unknown keys simply look up as false, so the map result is the answer.
func isValidEffect(effect string) bool {
	return SupportedEffectMap[effect]
}
|
||||
|
||||
func isValidResource(resources []string) bool {
|
||||
var ok bool
|
||||
for _, resource := range resources {
|
||||
switch true {
|
||||
case strings.HasPrefix(resource, AwsResource):
|
||||
bucket := strings.SplitAfter(resource, AwsResource)[1]
|
||||
ok = true
|
||||
if len(bucket) == 0 {
|
||||
ok = false
|
||||
}
|
||||
case strings.HasPrefix(resource, MinioResource):
|
||||
bucket := strings.SplitAfter(resource, MinioResource)[1]
|
||||
ok = true
|
||||
if len(bucket) == 0 {
|
||||
ok = false
|
||||
}
|
||||
default:
|
||||
ok = false
|
||||
}
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
func isValidPrincipal(principal string) bool {
|
||||
var ok bool
|
||||
if principal == "*" {
|
||||
return true
|
||||
}
|
||||
switch true {
|
||||
case strings.HasPrefix(principal, AwsPrincipal):
|
||||
username := strings.SplitAfter(principal, AwsPrincipal)[1]
|
||||
ok = true
|
||||
if len(username) == 0 {
|
||||
ok = false
|
||||
}
|
||||
|
||||
case strings.HasPrefix(principal, MinioPrincipal):
|
||||
username := strings.SplitAfter(principal, MinioPrincipal)[1]
|
||||
ok = true
|
||||
if len(username) == 0 {
|
||||
ok = false
|
||||
}
|
||||
default:
|
||||
ok = false
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
func parseStatement(statement Statement) bool {
|
||||
if len(statement.Sid) == 0 {
|
||||
return false
|
||||
}
|
||||
if len(statement.Effect) == 0 {
|
||||
return false
|
||||
}
|
||||
if !isValidEffect(statement.Effect) {
|
||||
return false
|
||||
}
|
||||
if len(statement.Principal.AWS) == 0 {
|
||||
return false
|
||||
}
|
||||
if !isValidPrincipal(statement.Principal.AWS) {
|
||||
return false
|
||||
}
|
||||
if len(statement.Action) == 0 {
|
||||
return false
|
||||
}
|
||||
if !isValidAction(statement.Action) && !isValidActionS3(statement.Action) {
|
||||
return false
|
||||
}
|
||||
if len(statement.Resource) == 0 {
|
||||
return false
|
||||
}
|
||||
if !isValidResource(statement.Resource) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Parsepolicy - validate request body is proper JSON and in accordance with policy standards
|
||||
func Parsepolicy(data io.Reader) (BucketPolicy, bool) {
|
||||
var policy BucketPolicy
|
||||
decoder := json.NewDecoder(data)
|
||||
err := decoder.Decode(&policy)
|
||||
if err != nil {
|
||||
goto error
|
||||
}
|
||||
if len(policy.Version) == 0 {
|
||||
goto error
|
||||
}
|
||||
_, err = parseDate(policy.Version)
|
||||
if err != nil {
|
||||
goto error
|
||||
}
|
||||
if len(policy.Statement) == 0 {
|
||||
goto error
|
||||
}
|
||||
|
||||
for _, statement := range policy.Statement {
|
||||
if !parseStatement(statement) {
|
||||
goto error
|
||||
}
|
||||
}
|
||||
return policy, true
|
||||
|
||||
error:
|
||||
return BucketPolicy{}, false
|
||||
}
|
52
Godeps/_workspace/src/github.com/minio-io/objectdriver/bucket_policy_compat.go
generated
vendored
52
Godeps/_workspace/src/github.com/minio-io/objectdriver/bucket_policy_compat.go
generated
vendored
|
@ -1,52 +0,0 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package drivers
|
||||
|
||||
// This file implements compatability layer for AWS clients
|
||||
|
||||
// Resource delimiter
const (
	// AwsResource is the ARN prefix AWS S3 clients use in Resource entries.
	AwsResource = "arn:aws:s3:::"
)

// TODO support canonical user
// Principal delimiter
const (
	// AwsPrincipal is the ARN prefix AWS IAM clients use in Principal entries.
	AwsPrincipal = "arn:aws:iam::"
)

// Action map
// SupportedActionMapCompat - s3-flavored action names accepted for AWS
// client compatibility.
var SupportedActionMapCompat = map[string]bool{
	"*":                     true,
	"s3:GetObject":          true,
	"s3:ListBucket":         true,
	"s3:PutObject":          true,
	"s3:CreateBucket":       true,
	"s3:GetBucketPolicy":    true,
	"s3:DeleteBucketPolicy": true,
	"s3:ListAllMyBuckets":   true,
	"s3:PutBucketPolicy":    true,
}

// isValidActionS3 reports whether every entry in action is a supported
// s3-compatible action name; an empty list is vacuously valid.
func isValidActionS3(action []string) bool {
	for _, entry := range action {
		if !SupportedActionMapCompat[entry] {
			return false
		}
	}
	return true
}
|
201
Godeps/_workspace/src/github.com/minio-io/objectdriver/buildscripts/checkdeps.sh
generated
vendored
201
Godeps/_workspace/src/github.com/minio-io/objectdriver/buildscripts/checkdeps.sh
generated
vendored
|
@ -1,201 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
#
|
||||
# Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# _init - set the minimum required tool versions and capture host info used
# by the dependency checks below. MISSING accumulates the names of tools
# that fail their checks.
_init() {
    ## Minimum required versions for build dependencies
    GCC_VERSION="4.0"
    CLANG_VERSION="3.5"
    YASM_VERSION="1.2.0"
    GIT_VERSION="1.0"
    GO_VERSION="1.4"
    OSX_VERSION="10.8"
    UNAME=$(uname -sm)

    ## Check all dependencies are present
    MISSING=""
}
|
||||
|
||||
###
|
||||
#
|
||||
# Takes two arguments
|
||||
# arg1: version number in `x.x.x` format
|
||||
# arg2: version number in `x.x.x` format
|
||||
#
|
||||
# example: check_version "$version1" "$version2"
|
||||
#
|
||||
# returns:
|
||||
# 0 - Installed version is equal to required
|
||||
# 1 - Installed version is greater than required
|
||||
# 2 - Installed version is lesser than required
|
||||
# 3 - If args have length zero
|
||||
#
|
||||
####
|
||||
# check_version - dotted-decimal version compare (see the contract comment
# above): returns 0 equal, 1 installed > required, 2 installed < required,
# 3 on empty args. Splits both versions on "." and compares field by field.
check_version () {
    ## validate args
    [[ -z "$1" ]] && return 3
    [[ -z "$2" ]] && return 3

    if [[ $1 == $2 ]]; then
        return 0
    fi

    local IFS=.
    local i ver1=($1) ver2=($2)
    # fill empty fields in ver1 with zeros
    for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
        ver1[i]=0
    done
    for ((i=0; i<${#ver1[@]}; i++)); do
        if [[ -z ${ver2[i]} ]]; then
            # fill empty fields in ver2 with zeros
            ver2[i]=0
        fi
        # 10# forces base-10 so fields like "08" are not read as octal
        if ((10#${ver1[i]} > 10#${ver2[i]})); then
            # Installed version is greater than required
            return 1
        fi
        if ((10#${ver1[i]} < 10#${ver2[i]})); then
            ## Installed version is lesser than required - Bad condition
            return 2
        fi
    done
    return 0
}
|
||||
|
||||
# check_golang_env - ensure GOROOT and GOPATH are set, printing installation
# guidance and exiting otherwise.
#
# Bug fix: the previous implementation used `echo ${GOROOT:?}` — a failed
# `:?` expansion aborts a non-interactive shell immediately, so the guidance
# below was never printed; it also redirected as `2>&1 >/dev/null`, which
# sends stderr to the terminal rather than /dev/null. Plain -z tests keep
# control in this function.
check_golang_env() {
    if [ -z "$GOROOT" ]; then
        echo "ERROR"
        echo "GOROOT environment variable missing, please refer to Go installation document"
        echo "https://github.com/Minio-io/minio/blob/master/BUILDDEPS.md#install-go-13"
        exit 1
    fi

    if [ -z "$GOPATH" ]; then
        echo "ERROR"
        echo "GOPATH environment variable missing, please refer to Go installation document"
        echo "https://github.com/Minio-io/minio/blob/master/BUILDDEPS.md#install-go-13"
        exit 1
    fi
}
|
||||
|
||||
# is_supported_os - allow Linux unconditionally; on Darwin require at least
# OSX_VERSION; reject anything else.
is_supported_os() {
    case ${UNAME%% *} in
        "Linux")
            os="linux"
            ;;
        "Darwin")
            osx_host_version=$(env sw_vers -productVersion)
            check_version "${osx_host_version}" "${OSX_VERSION}"
            [[ $? -ge 2 ]] && die "Minimum OSX version supported is ${OSX_VERSION}"
            ;;
        *)
            # bug fix: the quoted "*" pattern matched only a literal
            # asterisk, so unsupported systems silently fell through; an
            # unquoted wildcard makes this a true default branch.
            echo "Exiting.. unsupported operating system found"
            exit 1;;
    esac
}
|
||||
|
||||
# is_supported_arch - abort unless the host CPU architecture is x86_64.
is_supported_arch() {
    case ${UNAME##* } in
        "x86_64")
            # supported; fall through
            ;;
        *)
            echo "Invalid arch: ${UNAME} not supported, please use x86_64/amd64"
            exit 1;
            ;;
    esac
}
|
||||
|
||||
# check_deps - compare installed tool versions (go, git, gcc/clang, yasm,
# mkdocs) against the minimums set in _init, appending a marker for each
# shortfall to MISSING. A check_version status >= 2 means "too old or absent".
check_deps() {
    check_version "$(env go version 2>/dev/null | sed 's/^.* go\([0-9.]*\).*$/\1/')" "${GO_VERSION}"
    if [ $? -ge 2 ]; then
        MISSING="${MISSING} golang(1.4)"
    fi

    check_version "$(env git --version 2>/dev/null | sed -e 's/^.* \([0-9.\].*\).*$/\1/' -e 's/^\([0-9.\]*\).*/\1/g')" "${GIT_VERSION}"
    if [ $? -ge 2 ]; then
        MISSING="${MISSING} git"
    fi

    # compiler minimum depends on host OS: gcc on Linux, clang on Darwin
    case ${UNAME%% *} in
        "Linux")
            check_version "$(env gcc --version 2>/dev/null | sed 's/^.* \([0-9.]*\).*$/\1/' | head -1)" "${GCC_VERSION}"
            if [ $? -ge 2 ]; then
                MISSING="${MISSING} build-essential"
            fi
            ;;
        "Darwin")
            check_version "$(env gcc --version 2>/dev/null | sed 's/^.* \([0-9.]*\).*$/\1/' | head -1)" "${CLANG_VERSION}"
            if [ $? -ge 2 ]; then
                MISSING="${MISSING} xcode-cli"
            fi
            ;;
        # NOTE(review): the quoted "*" below matches only a literal
        # asterisk, never "any other OS"; the branch is empty, so this is
        # harmless here, but an unquoted * would be the correct default.
        "*")
            ;;
    esac

    check_version "$(env yasm --version 2>/dev/null | sed 's/^.* \([0-9.]*\).*$/\1/' | head -1)" "${YASM_VERSION}"
    if [ $? -ge 2 ]; then
        MISSING="${MISSING} yasm(1.2.0)"
    fi

    env mkdocs help >/dev/null 2>&1
    if [ $? -ne 0 ]; then
        MISSING="${MISSING} mkdocs"
    fi
}
|
||||
|
||||
# main - run arch/os/env sanity checks, then the dependency checks; print
# every missing tool collected in MISSING and abort if any were found.
main() {
    echo -n "Check for supported arch.. "
    is_supported_arch

    echo -n "Check for supported os.. "
    is_supported_os

    echo -n "Checking if proper environment variables are set.. "
    check_golang_env

    echo "Done"
    echo "Using GOPATH=${GOPATH} and GOROOT=${GOROOT}"

    echo -n "Checking dependencies for Minio.. "
    check_deps

    ## If dependencies are missing, warn the user and abort
    if [ "x${MISSING}" != "x" ]; then
        echo "ERROR"
        echo
        echo "The following build tools are missing:"
        echo
        echo "** ${MISSING} **"
        echo
        echo "Please install them "
        echo "${MISSING}"
        echo
        echo "Follow https://github.com/Minio-io/minio/blob/master/BUILDDEPS.md for further instructions"
        exit 1
    fi
    echo "Done"
}
|
||||
|
||||
# Entry point: initialize required-version globals, then run all checks.
_init && main "$@"
|
247
Godeps/_workspace/src/github.com/minio-io/objectdriver/buildscripts/verifier.go
generated
vendored
247
Godeps/_workspace/src/github.com/minio-io/objectdriver/buildscripts/verifier.go
generated
vendored
|
@ -1,247 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// exitCode is the process exit status; errorf bumps it to 2 on any finding.
var exitCode int

// dirs collects the package directories discovered by getAllMinioPkgs.
var dirs []string
|
||||
|
||||
// appendUniq returns slice with i appended, unless i is already present, in
// which case slice is returned unchanged.
func appendUniq(slice []string, i string) []string {
	for _, existing := range slice {
		if existing == i {
			return slice
		}
	}
	return append(slice, i)
}
|
||||
|
||||
// errorf reports a finding to standard error with program identification
// and a newline, and marks the run as failed by setting exitCode to 2.
func errorf(format string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, "verifier: "+format+"\n", args...)
	exitCode = 2
}
|
||||
|
||||
// goPackage carries the per-package state for one verification pass.
type goPackage struct {
	p  *ast.Package   // package under inspection
	fs *token.FileSet // file set resolving node positions
	// decl maps each declared name to its declaration node
	decl map[string]ast.Node
	// missingcomments maps exported functions lacking a doc comment to
	// their declaration nodes
	missingcomments map[string]ast.Node
	// used marks identifier names observed anywhere in the package
	used map[string]bool
}
|
||||
|
||||
type usedWalker goPackage
|
||||
|
||||
// Walks through the AST marking used identifiers.
|
||||
func (p *usedWalker) Visit(node ast.Node) ast.Visitor {
|
||||
// just be stupid and mark all *ast.Ident
|
||||
switch n := node.(type) {
|
||||
case *ast.Ident:
|
||||
p.used[n.Name] = true
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// report records one finding: a declaration's name and source position.
type report struct {
	pos  token.Pos
	name string
}

// reports is a position-sortable list of findings.
type reports []report

// Len implements sort.Interface.
func (l reports) Len() int { return len(l) }

// Less implements sort.Interface; findings are ordered by source position.
func (l reports) Less(i, j int) bool { return l[i].pos < l[j].pos }

// Swap implements sort.Interface.
func (l reports) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
||||
|
||||
// Visits files for used nodes. Only the interesting parts of each
// declaration are walked — initializers, types, bodies, and signatures — so
// the declared names themselves are not marked as their own uses.
func (p *goPackage) Visit(node ast.Node) ast.Visitor {
	u := usedWalker(*p) // hopefully p fields are references.
	switch n := node.(type) {
	// don't walk whole file, but only:
	case *ast.ValueSpec:
		// - variable initializers
		for _, value := range n.Values {
			ast.Walk(&u, value)
		}
		// variable types.
		if n.Type != nil {
			ast.Walk(&u, n.Type)
		}
	case *ast.BlockStmt:
		// - function bodies
		for _, stmt := range n.List {
			ast.Walk(&u, stmt)
		}
	case *ast.FuncDecl:
		// - function signatures
		ast.Walk(&u, n.Type)
	case *ast.TypeSpec:
		// - type declarations
		ast.Walk(&u, n.Type)
	}
	return p
}
|
||||
|
||||
func getAllMinioPkgs(path string, fl os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if fl.IsDir() {
|
||||
// Skip godeps
|
||||
if strings.Contains(path, "Godeps") {
|
||||
return nil
|
||||
}
|
||||
dirs = appendUniq(dirs, path)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func doDir(name string) {
|
||||
notests := func(info os.FileInfo) bool {
|
||||
if !info.IsDir() && strings.HasSuffix(info.Name(), ".go") &&
|
||||
!strings.HasSuffix(info.Name(), "_test.go") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
fs := token.NewFileSet()
|
||||
pkgs, err := parser.ParseDir(fs, name, notests, parser.ParseComments|parser.Mode(0))
|
||||
if err != nil {
|
||||
errorf("%s", err)
|
||||
return
|
||||
}
|
||||
for _, pkg := range pkgs {
|
||||
doPackage(fs, pkg)
|
||||
}
|
||||
}
|
||||
|
||||
// doDecl records the names introduced by one top-level declaration into
// p.decl and notes exported functions that lack a doc comment in
// p.missingcomments. It returns true when the declaration is exempt from
// checking entirely (main, or unexported functions).
func doDecl(p *goPackage, decl interface{}, cmap ast.CommentMap) bool {
	switch n := decl.(type) {
	case *ast.GenDecl:
		// var, const, types
		for _, spec := range n.Specs {
			switch s := spec.(type) {
			case *ast.ValueSpec:
				// constants and variables.
				for _, name := range s.Names {
					p.decl[name.Name] = n
				}
			case *ast.TypeSpec:
				// type definitions.
				p.decl[s.Name.Name] = n
			}
		}
	case *ast.FuncDecl:
		// if function is 'main', never check
		if n.Name.Name == "main" {
			return true
		}
		// Do not be strict on non-exported functions
		if !ast.IsExported(n.Name.Name) {
			return true
		}
		// Do not be strict for field list functions
		// if n.Recv != nil {
		//	continue
		//}
		// Be strict for global functions: an exported function with no
		// entry in the comment map has no doc comment.
		_, ok := cmap[n]
		if ok == false {
			p.missingcomments[n.Name.Name] = n
		}

		// function declarations
		// TODO(remy): do methods
		if n.Recv == nil {
			p.decl[n.Name.Name] = n
		}
	}
	return false
}
|
||||
|
||||
// doPackage runs the full verification pass over one parsed package:
// collect declarations, mark used identifiers, then report any unused
// declarations (sorted by position) and any exported functions missing a
// doc comment. Findings go through errorf, which also sets the exit code.
func doPackage(fs *token.FileSet, pkg *ast.Package) {
	p := &goPackage{
		p:               pkg,
		fs:              fs,
		decl:            make(map[string]ast.Node),
		missingcomments: make(map[string]ast.Node),
		used:            make(map[string]bool),
	}
	for _, file := range pkg.Files {
		cmap := ast.NewCommentMap(fs, file, file.Comments)
		for _, decl := range file.Decls {
			if doDecl(p, decl, cmap) {
				continue
			}
		}
	}
	// init() is always used
	p.used["init"] = true
	if pkg.Name != "main" {
		// exported names are marked used for non-main packages.
		for name := range p.decl {
			if ast.IsExported(name) {
				p.used[name] = true
			}
		}
	} else {
		// in main programs, main() is called.
		p.used["main"] = true
	}
	for _, file := range pkg.Files {
		// walk file looking for used nodes.
		ast.Walk(p, file)
	}

	// reports.
	reports := reports(nil)
	for name, node := range p.decl {
		if !p.used[name] {
			reports = append(reports, report{node.Pos(), name})
		}
	}
	// sort so output is deterministic despite map iteration order
	sort.Sort(reports)
	for _, report := range reports {
		errorf("%s: %s is unused", fs.Position(report.pos), report.name)
	}

	for name, node := range p.missingcomments {
		errorf("%s: comment is missing for 'func %s'", fs.Position(node.Pos()), name)
	}
}
|
||||
|
||||
// main verifies each directory named on the command line (default ".",
// recursing via filepath.Walk for explicit arguments) and exits with the
// status accumulated in exitCode (2 if anything was reported).
func main() {
	flag.Parse()
	if flag.NArg() == 0 {
		doDir(".")
	} else {
		for _, name := range flag.Args() {
			// Is it a directory?
			if fi, err := os.Stat(name); err == nil && fi.IsDir() {
				err := filepath.Walk(name, getAllMinioPkgs)
				if err != nil {
					errorf(err.Error())
				}
				for _, dir := range dirs {
					doDir(dir)
				}
			} else {
				errorf("not a directory: %s", name)
			}
		}
	}
	os.Exit(exitCode)
}
|
|
@ -1,78 +0,0 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package drivers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Date - a calendar date in yyyy-mm-dd form; the zero value prints as
// "0000-00-00".
type Date struct {
	Year  int16
	Month byte
	Day   byte
}

// String output in yyyy-mm-dd format
func (d Date) String() string {
	return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
}

// IsZero true if date is 0000-00-00
func (d Date) IsZero() bool {
	return d.Day == 0 && d.Month == 0 && d.Year == 0
}

// errInvalidDate builds the error for a malformed date string. The message
// previously read "0000-00-000" (an extra digit); the format is 0000-00-00.
func errInvalidDate(str string) error {
	return errors.New("Invalid 0000-00-00 style DATE string: " + str)
}

// Convert string date in format YYYY-MM-DD to Date.
// Leading and trailing spaces are ignored. If format is invalid returns zero.
func parseDate(str string) (d Date, err error) {
	str = strings.TrimSpace(str)
	if str == "0000-00-00" {
		return
	}
	if len(str) != 10 || str[4] != '-' || str[7] != '-' {
		err = errInvalidDate(str)
		return
	}
	var y, m, n int
	if y, err = strconv.Atoi(str[0:4]); err != nil {
		return
	}
	if m, err = strconv.Atoi(str[5:7]); err != nil {
		return
	}
	if m < 1 || m > 12 {
		err = errInvalidDate(str)
		return
	}
	if n, err = strconv.Atoi(str[8:10]); err != nil {
		return
	}
	if n < 1 || n > 31 {
		err = errInvalidDate(str)
		return
	}
	d.Year = int16(y)
	d.Month = byte(m)
	d.Day = byte(n)
	return
}
|
|
@ -1,333 +0,0 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package donut
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/minio-io/donut"
|
||||
"github.com/minio-io/iodine"
|
||||
"github.com/minio-io/minio/pkg/utils/log"
|
||||
"github.com/minio-io/objectdriver"
|
||||
)
|
||||
|
||||
// donutDriver - creates a new single disk drivers driver using donut
type donutDriver struct {
	donut donut.Donut // underlying donut abstraction all operations delegate to
}

const (
	// 10 MiB. Not referenced in this file; presumably the object write
	// block size — TODO confirm against the donut package.
	blockSize = 10 * 1024 * 1024
)
|
||||
|
||||
// createNodeDiskMap builds a dummy single-node ("localhost") disk map of
// 16 disk directories under p, creating each directory if missing.
//
// This is a dummy nodeDiskMap which is going to be deprecated soon
// once the Management API is standardized; it should eventually be
// obtained from the donut configuration file.
func createNodeDiskMap(p string) map[string][]string {
	nodes := make(map[string][]string)
	nodes["localhost"] = make([]string, 16)
	for i := 0; i < len(nodes["localhost"]); i++ {
		diskPath := path.Join(p, strconv.Itoa(i))
		// MkdirAll is a no-op for an existing directory, so the previous
		// Stat+IsNotExist check (a TOCTOU race) is unnecessary. Its error
		// is deliberately ignored, matching prior behavior: a bad disk
		// path surfaces later when donut tries to use it.
		os.MkdirAll(diskPath, 0700)
		nodes["localhost"][i] = diskPath
	}
	return nodes
}
|
||||
|
||||
// Start a single disk subsystem
//
// Returns a control channel (currently unused by this driver), an error
// channel (closed immediately by start), and the donut-backed Driver.
func Start(path string) (chan<- string, <-chan error, drivers.Driver) {
	ctrlChannel := make(chan string)
	errorChannel := make(chan error)
	errParams := map[string]string{"path": path}

	// Soon to be user configurable, when Management API
	// is finished we remove "default" to something
	// which is passed down from configuration
	donut, err := donut.NewDonut("default", createNodeDiskMap(path))
	if err != nil {
		// NOTE(review): the error is only logged; execution continues and
		// the returned driver wraps a possibly-unusable donut — confirm
		// callers tolerate this.
		err = iodine.New(err, errParams)
		log.Error.Println(err)
	}

	s := new(donutDriver)
	s.donut = donut

	go start(ctrlChannel, errorChannel, s)
	return ctrlChannel, errorChannel, s
}

// start signals startup completion by closing the error channel; the
// donut driver produces no startup errors.
func start(ctrlChannel <-chan string, errorChannel chan<- error, s *donutDriver) {
	close(errorChannel)
}
|
||||
|
||||
// byBucketName is a type for sorting bucket metadata by bucket name
|
||||
type byBucketName []drivers.BucketMetadata
|
||||
|
||||
func (b byBucketName) Len() int { return len(b) }
|
||||
func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||
func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }
|
||||
|
||||
// ListBuckets returns a list of buckets
|
||||
func (d donutDriver) ListBuckets() (results []drivers.BucketMetadata, err error) {
|
||||
buckets, err := d.donut.ListBuckets()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, name := range buckets {
|
||||
result := drivers.BucketMetadata{
|
||||
Name: name,
|
||||
// TODO Add real created date
|
||||
Created: time.Now(),
|
||||
}
|
||||
results = append(results, result)
|
||||
}
|
||||
sort.Sort(byBucketName(results))
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// CreateBucket creates a new bucket
|
||||
func (d donutDriver) CreateBucket(bucketName string) error {
|
||||
if drivers.IsValidBucket(bucketName) && !strings.Contains(bucketName, ".") {
|
||||
return d.donut.MakeBucket(bucketName)
|
||||
}
|
||||
return iodine.New(errors.New("Invalid bucket"), map[string]string{"bucket": bucketName})
|
||||
}
|
||||
|
||||
// GetBucketMetadata retrieves an bucket's metadata
|
||||
func (d donutDriver) GetBucketMetadata(bucketName string) (drivers.BucketMetadata, error) {
|
||||
if !drivers.IsValidBucket(bucketName) || strings.Contains(bucketName, ".") {
|
||||
return drivers.BucketMetadata{}, drivers.BucketNameInvalid{Bucket: bucketName}
|
||||
}
|
||||
metadata, err := d.donut.GetBucketMetadata(bucketName)
|
||||
if err != nil {
|
||||
return drivers.BucketMetadata{}, drivers.BucketNotFound{Bucket: bucketName}
|
||||
}
|
||||
created, err := time.Parse(time.RFC3339Nano, metadata["created"])
|
||||
if err != nil {
|
||||
return drivers.BucketMetadata{}, iodine.New(err, nil)
|
||||
}
|
||||
bucketMetadata := drivers.BucketMetadata{
|
||||
Name: metadata["name"],
|
||||
Created: created,
|
||||
}
|
||||
return bucketMetadata, nil
|
||||
}
|
||||
|
||||
// CreateBucketPolicy sets a bucket's access policy.
// Not yet supported by the donut driver; always returns an error.
func (d donutDriver) CreateBucketPolicy(bucket string, p drivers.BucketPolicy) error {
	return iodine.New(errors.New("Not Implemented"), nil)
}

// GetBucketPolicy returns a bucket's access policy.
// Not yet supported by the donut driver; always returns an error.
func (d donutDriver) GetBucketPolicy(bucket string) (drivers.BucketPolicy, error) {
	return drivers.BucketPolicy{}, iodine.New(errors.New("Not Implemented"), nil)
}
|
||||
|
||||
// GetObject retrieves an object and writes it to a writer
|
||||
func (d donutDriver) GetObject(target io.Writer, bucketName, objectName string) (int64, error) {
|
||||
errParams := map[string]string{
|
||||
"bucketName": bucketName,
|
||||
"objectName": objectName,
|
||||
}
|
||||
if bucketName == "" || strings.TrimSpace(bucketName) == "" {
|
||||
return 0, iodine.New(errors.New("invalid argument"), errParams)
|
||||
}
|
||||
if objectName == "" || strings.TrimSpace(objectName) == "" {
|
||||
return 0, iodine.New(errors.New("invalid argument"), errParams)
|
||||
}
|
||||
reader, size, err := d.donut.GetObject(bucketName, objectName)
|
||||
if err != nil {
|
||||
return 0, drivers.ObjectNotFound{
|
||||
Bucket: bucketName,
|
||||
Object: objectName,
|
||||
}
|
||||
}
|
||||
n, err := io.CopyN(target, reader, size)
|
||||
return n, iodine.New(err, errParams)
|
||||
}
|
||||
|
||||
// GetPartialObject retrieves an object range and writes it to a writer
|
||||
func (d donutDriver) GetPartialObject(w io.Writer, bucketName, objectName string, start, length int64) (int64, error) {
|
||||
// TODO more efficient get partial object with proper donut support
|
||||
errParams := map[string]string{
|
||||
"bucketName": bucketName,
|
||||
"objectName": objectName,
|
||||
"start": strconv.FormatInt(start, 10),
|
||||
"length": strconv.FormatInt(length, 10),
|
||||
}
|
||||
if bucketName == "" || strings.TrimSpace(bucketName) == "" {
|
||||
return 0, iodine.New(errors.New("invalid argument"), errParams)
|
||||
}
|
||||
if objectName == "" || strings.TrimSpace(objectName) == "" {
|
||||
return 0, iodine.New(errors.New("invalid argument"), errParams)
|
||||
}
|
||||
if start < 0 {
|
||||
return 0, iodine.New(errors.New("invalid argument"), errParams)
|
||||
}
|
||||
reader, size, err := d.donut.GetObject(bucketName, objectName)
|
||||
defer reader.Close()
|
||||
if err != nil {
|
||||
return 0, drivers.ObjectNotFound{
|
||||
Bucket: bucketName,
|
||||
Object: objectName,
|
||||
}
|
||||
}
|
||||
if start > size || (start+length-1) > size {
|
||||
return 0, iodine.New(errors.New("invalid range"), errParams)
|
||||
}
|
||||
_, err = io.CopyN(ioutil.Discard, reader, start)
|
||||
if err != nil {
|
||||
return 0, iodine.New(err, errParams)
|
||||
}
|
||||
n, err := io.CopyN(w, reader, length)
|
||||
if err != nil {
|
||||
return 0, iodine.New(err, errParams)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// GetObjectMetadata retrieves an object's metadata
|
||||
func (d donutDriver) GetObjectMetadata(bucketName, objectName, prefixName string) (drivers.ObjectMetadata, error) {
|
||||
errParams := map[string]string{
|
||||
"bucketName": bucketName,
|
||||
"objectName": objectName,
|
||||
"prefixName": prefixName,
|
||||
}
|
||||
metadata, err := d.donut.GetObjectMetadata(bucketName, objectName)
|
||||
if err != nil {
|
||||
return drivers.ObjectMetadata{}, drivers.ObjectNotFound{
|
||||
Bucket: bucketName,
|
||||
Object: objectName,
|
||||
}
|
||||
}
|
||||
created, err := time.Parse(time.RFC3339Nano, metadata["created"])
|
||||
if err != nil {
|
||||
return drivers.ObjectMetadata{}, iodine.New(err, errParams)
|
||||
}
|
||||
size, err := strconv.ParseInt(metadata["size"], 10, 64)
|
||||
if err != nil {
|
||||
return drivers.ObjectMetadata{}, iodine.New(err, errParams)
|
||||
}
|
||||
objectMetadata := drivers.ObjectMetadata{
|
||||
Bucket: bucketName,
|
||||
Key: objectName,
|
||||
|
||||
ContentType: metadata["contentType"],
|
||||
Created: created,
|
||||
Md5: metadata["md5"],
|
||||
Size: size,
|
||||
}
|
||||
return objectMetadata, nil
|
||||
}
|
||||
|
||||
// byObjectKey sorts object metadata lexically by object key.
type byObjectKey []drivers.ObjectMetadata

func (b byObjectKey) Len() int           { return len(b) }
func (b byObjectKey) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byObjectKey) Less(i, j int) bool { return b[i].Key < b[j].Key }
|
||||
|
||||
// ListObjects - returns list of objects
|
||||
func (d donutDriver) ListObjects(bucketName string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
|
||||
errParams := map[string]string{
|
||||
"bucketName": bucketName,
|
||||
}
|
||||
actualObjects, commonPrefixes, isTruncated, err := d.donut.ListObjects(bucketName,
|
||||
resources.Prefix,
|
||||
resources.Marker,
|
||||
resources.Delimiter,
|
||||
resources.Maxkeys)
|
||||
if err != nil {
|
||||
return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, errParams)
|
||||
}
|
||||
resources.CommonPrefixes = commonPrefixes
|
||||
resources.IsTruncated = isTruncated
|
||||
|
||||
var results []drivers.ObjectMetadata
|
||||
for _, objectName := range actualObjects {
|
||||
objectMetadata, err := d.donut.GetObjectMetadata(bucketName, objectName)
|
||||
if err != nil {
|
||||
return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, errParams)
|
||||
}
|
||||
t, err := time.Parse(time.RFC3339Nano, objectMetadata["created"])
|
||||
if err != nil {
|
||||
return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, nil)
|
||||
}
|
||||
size, err := strconv.ParseInt(objectMetadata["size"], 10, 64)
|
||||
if err != nil {
|
||||
return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, nil)
|
||||
}
|
||||
metadata := drivers.ObjectMetadata{
|
||||
Key: objectName,
|
||||
Created: t,
|
||||
Size: size,
|
||||
}
|
||||
results = append(results, metadata)
|
||||
}
|
||||
sort.Sort(byObjectKey(results))
|
||||
return results, resources, nil
|
||||
}
|
||||
|
||||
// CreateObject creates a new object
|
||||
func (d donutDriver) CreateObject(bucketName, objectName, contentType, expectedMD5Sum string, reader io.Reader) error {
|
||||
errParams := map[string]string{
|
||||
"bucketName": bucketName,
|
||||
"objectName": objectName,
|
||||
"contentType": contentType,
|
||||
}
|
||||
if bucketName == "" || strings.TrimSpace(bucketName) == "" {
|
||||
return iodine.New(errors.New("invalid argument"), errParams)
|
||||
}
|
||||
if objectName == "" || strings.TrimSpace(objectName) == "" {
|
||||
return iodine.New(errors.New("invalid argument"), errParams)
|
||||
}
|
||||
if strings.TrimSpace(contentType) == "" {
|
||||
contentType = "application/octet-stream"
|
||||
}
|
||||
metadata := make(map[string]string)
|
||||
metadata["contentType"] = strings.TrimSpace(contentType)
|
||||
|
||||
if strings.TrimSpace(expectedMD5Sum) != "" {
|
||||
expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
|
||||
if err != nil {
|
||||
return iodine.New(err, nil)
|
||||
}
|
||||
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
|
||||
}
|
||||
|
||||
err := d.donut.PutObject(bucketName, objectName, expectedMD5Sum, ioutil.NopCloser(reader), metadata)
|
||||
if err != nil {
|
||||
return iodine.New(err, errParams)
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,53 +0,0 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package donut
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
. "github.com/minio-io/check"
|
||||
"github.com/minio-io/objectdriver"
|
||||
)
|
||||
|
||||
// Test hooks the gocheck suites into the standard "go test" runner.
func Test(t *testing.T) { TestingT(t) }

// MySuite runs the shared driver API conformance suite against the donut driver.
type MySuite struct{}

var _ = Suite(&MySuite{})

// TestAPISuite exercises the generic driver test suite; each driver
// instance is backed by a fresh donut rooted in a temp directory,
// which is removed afterwards.
func (s *MySuite) TestAPISuite(c *C) {
	// c.Skip("Not Implemented")
	var storageList []string
	create := func() drivers.Driver {
		path, err := ioutil.TempDir(os.TempDir(), "minio-fs-")
		c.Check(err, IsNil)
		storageList = append(storageList, path)
		_, _, store := Start(path) // TODO Make InMemory driver
		return store
	}
	drivers.APITestSuite(c, create)
	removeRoots(c, storageList)
}

// removeRoots deletes every temp root created during the suite run.
func removeRoots(c *C, roots []string) {
	for _, root := range roots {
		err := os.RemoveAll(root)
		c.Check(err, IsNil)
	}
}
|
|
@ -1,151 +0,0 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package drivers
|
||||
|
||||
import (
|
||||
"io"
|
||||
"regexp"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Driver - generic API interface for various drivers - donut, file, memory
type Driver interface {
	// Bucket Operations
	ListBuckets() ([]BucketMetadata, error)
	CreateBucket(bucket string) error
	GetBucketMetadata(bucket string) (BucketMetadata, error)
	CreateBucketPolicy(bucket string, p BucketPolicy) error
	GetBucketPolicy(bucket string) (BucketPolicy, error)

	// Object Operations
	//
	// GetObject writes the whole object to w; GetPartialObject writes the
	// range [start, start+length). Both return the byte count written.
	GetObject(w io.Writer, bucket, object string) (int64, error)
	GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error)
	GetObjectMetadata(bucket string, object string, prefix string) (ObjectMetadata, error)
	ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error)
	CreateObject(bucket string, key string, contentType string, md5sum string, data io.Reader) error
}
|
||||
|
||||
// BucketMetadata - name and create date
type BucketMetadata struct {
	Name    string
	Created time.Time
}

// ObjectMetadata - object key and its relevant metadata
type ObjectMetadata struct {
	Bucket string // bucket the object lives in
	Key    string // object name within the bucket

	ContentType string // MIME type recorded at upload time
	Created     time.Time
	Md5         string
	Size        int64
}
|
||||
|
||||
// FilterMode selects which listing filter applies, derived from which of
// Delimiter and Prefix were supplied in the request.
type FilterMode int

// Supported filter modes.
const (
	// DelimiterPrefixMode - both delimiter and prefix are set.
	DelimiterPrefixMode FilterMode = iota
	// DelimiterMode - only delimiter is set.
	DelimiterMode
	// PrefixMode - only prefix is set.
	PrefixMode
	// DefaultMode - neither delimiter nor prefix is set.
	DefaultMode
)

// BucketResourcesMetadata - various types of bucket resources
type BucketResourcesMetadata struct {
	Prefix         string
	Marker         string
	Maxkeys        int
	Delimiter      string
	IsTruncated    bool
	CommonPrefixes []string
	Mode           FilterMode

	Policy bool
	// TODO
	Logging      string
	Notification string
}

// GetMode - populate the filter mode from the delimiter/prefix pair.
func GetMode(resources BucketResourcesMetadata) FilterMode {
	hasDelimiter := resources.Delimiter != ""
	hasPrefix := resources.Prefix != ""
	switch {
	case hasDelimiter && hasPrefix:
		return DelimiterPrefixMode
	case hasDelimiter:
		return DelimiterMode
	case hasPrefix:
		return PrefixMode
	default:
		return DefaultMode
	}
}

// IsDelimiterPrefixSet - both delimiter and prefix were supplied.
func (b BucketResourcesMetadata) IsDelimiterPrefixSet() bool {
	return b.Mode == DelimiterPrefixMode
}

// IsDelimiterSet - only delimiter was supplied.
func (b BucketResourcesMetadata) IsDelimiterSet() bool {
	return b.Mode == DelimiterMode
}

// IsPrefixSet - only prefix was supplied.
func (b BucketResourcesMetadata) IsPrefixSet() bool {
	return b.Mode == PrefixMode
}

// IsDefault - neither delimiter nor prefix was supplied.
func (b BucketResourcesMetadata) IsDefault() bool {
	return b.Mode == DefaultMode
}
|
||||
|
||||
// Precompiled patterns for IsValidBucket. PERF FIX: the original compiled
// both regexes on every call via regexp.MatchString.
var (
	bucketNameDoubleDot = regexp.MustCompile(`\.\.`)
	bucketNameValid     = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9\-]+[a-zA-Z0-9]$`)
)

// IsValidBucket - verify bucket name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func IsValidBucket(bucket string) bool {
	if len(bucket) < 3 || len(bucket) > 63 {
		return false
	}
	if bucket[0] == '.' || bucket[len(bucket)-1] == '.' {
		return false
	}
	if bucketNameDoubleDot.MatchString(bucket) {
		return false
	}
	// We don't support buckets with '.' in them
	return bucketNameValid.MatchString(bucket)
}
|
||||
|
||||
// IsValidObject - verify object name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
func IsValidObject(object string) bool {
	switch {
	case len(object) == 0 || len(object) > 1024:
		return false
	case !utf8.ValidString(object):
		return false
	default:
		return true
	}
}
|
|
@ -1,163 +0,0 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package drivers
|
||||
|
||||
// BackendError - generic disk backend error
type BackendError struct {
	Path string
}

// BackendCorrupted - path has corrupted data
type BackendCorrupted BackendError

// APINotImplemented - generic API not implemented error
type APINotImplemented struct {
	API string
}

// GenericBucketError - generic bucket error
type GenericBucketError struct {
	Bucket string
}

// GenericObjectError - generic object error
type GenericObjectError struct {
	Bucket string
	Object string
}

// ImplementationError - generic implementation error
type ImplementationError struct {
	Bucket string
	Object string
	Err    error
}

// DigestError - generic Md5 error
type DigestError struct {
	Bucket string
	Key    string
	Md5    string
}

/// Bucket related errors

// BucketPolicyNotFound - missing bucket policy
type BucketPolicyNotFound GenericBucketError

// BucketNameInvalid - bucketname provided is invalid
type BucketNameInvalid GenericBucketError

// BucketExists - bucket already exists
type BucketExists GenericBucketError

// BucketNotFound - requested bucket not found
type BucketNotFound GenericBucketError

/// Object related errors

// ObjectNotFound - requested object not found
type ObjectNotFound GenericObjectError

// ObjectExists - object already exists
type ObjectExists GenericObjectError

// ObjectNameInvalid - object name provided is invalid
type ObjectNameInvalid GenericObjectError

// BadDigest - md5 mismatch from data received
type BadDigest DigestError

// InvalidDigest - md5 in request header invalid
type InvalidDigest DigestError

// Error formats the error as "Bucket: b Object: o Error: e", omitting
// empty Bucket/Object fields.
func (e ImplementationError) Error() string {
	// IDIOM FIX: the original named this variable `error`, shadowing the
	// predeclared error type; renamed to msg.
	msg := ""
	if e.Bucket != "" {
		msg = msg + "Bucket: " + e.Bucket + " "
	}
	if e.Object != "" {
		msg = msg + "Object: " + e.Object + " "
	}
	msg = msg + "Error: " + e.Err.Error()
	return msg
}

// EmbedError - wrapper function for error object
func EmbedError(bucket, object string, err error) ImplementationError {
	return ImplementationError{
		Bucket: bucket,
		Object: object,
		Err:    err,
	}
}

// Error returns the message for a missing bucket policy.
func (e BucketPolicyNotFound) Error() string {
	return "Bucket policy not found for: " + e.Bucket
}

// Error returns the message for a missing object.
func (e ObjectNotFound) Error() string {
	return "Object not Found: " + e.Bucket + "#" + e.Object
}

// Error returns the message for an unimplemented API.
func (e APINotImplemented) Error() string {
	return "Api not implemented: " + e.API
}

// Error returns the message for an already-existing object.
func (e ObjectExists) Error() string {
	return "Object exists: " + e.Bucket + "#" + e.Object
}

// Error returns the message for an invalid bucket name.
func (e BucketNameInvalid) Error() string {
	return "Bucket name invalid: " + e.Bucket
}

// Error returns the message for an already-existing bucket.
func (e BucketExists) Error() string {
	return "Bucket exists: " + e.Bucket
}

// Error returns the message for a missing bucket.
func (e BucketNotFound) Error() string {
	return "Bucket not Found: " + e.Bucket
}

// Error returns the message for an invalid object name.
func (e ObjectNameInvalid) Error() string {
	return "Object name invalid: " + e.Bucket + "#" + e.Object
}

// Error returns the message for corrupted backend state.
func (e BackendCorrupted) Error() string {
	return "Backend corrupted: " + e.Path
}

// Error returns the message for an md5 mismatch.
func (e BadDigest) Error() string {
	return "Md5 provided " + e.Md5 + " mismatches for: " + e.Bucket + "#" + e.Key
}

// Error returns the message for an invalid request md5.
func (e InvalidDigest) Error() string {
	return "Md5 provided " + e.Md5 + " is invalid"
}
|
|
@ -1,41 +0,0 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package file
|
||||
|
||||
import (
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/minio-io/objectdriver"
|
||||
)
|
||||
|
||||
// Start filesystem channel
//
// Returns a control channel (unused by this driver), an error channel
// carrying the result of creating the root directory, and the
// file-backed Driver rooted at root.
func Start(root string) (chan<- string, <-chan error, drivers.Driver) {
	ctrlChannel := make(chan string)
	errorChannel := make(chan error)
	s := new(fileDriver)
	s.root = root
	s.lock = new(sync.Mutex)
	go start(ctrlChannel, errorChannel, s)
	return ctrlChannel, errorChannel, s
}

// start creates the driver's root directory, reports the result (nil on
// success) on errorChannel, then closes the channel.
func start(ctrlChannel <-chan string, errorChannel chan<- error, s *fileDriver) {
	err := os.MkdirAll(s.root, 0700)
	errorChannel <- err
	close(errorChannel)
}
|
|
@ -1,146 +0,0 @@
|
|||
/*
|
||||
* Minimalist Object File, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package file
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/minio-io/objectdriver"
|
||||
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
/// Bucket Operations
|
||||
|
||||
// GetBucketMetadata - head
|
||||
func (file *fileDriver) GetBucketMetadata(bucket string) (drivers.BucketMetadata, error) {
|
||||
st, err := os.Stat(path.Join(file.root, bucket))
|
||||
if err != nil {
|
||||
return drivers.BucketMetadata{}, drivers.BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
bucketMetadata := drivers.BucketMetadata{
|
||||
Name: st.Name(),
|
||||
Created: st.ModTime(),
|
||||
}
|
||||
return bucketMetadata, nil
|
||||
}
|
||||
|
||||
// ListBuckets - Get service
|
||||
func (file *fileDriver) ListBuckets() ([]drivers.BucketMetadata, error) {
|
||||
files, err := ioutil.ReadDir(file.root)
|
||||
if err != nil {
|
||||
return []drivers.BucketMetadata{}, drivers.EmbedError("bucket", "", err)
|
||||
}
|
||||
|
||||
var metadataList []drivers.BucketMetadata
|
||||
for _, fileName := range files {
|
||||
// Skip policy files
|
||||
if strings.HasSuffix(fileName.Name(), "_policy.json") {
|
||||
continue
|
||||
}
|
||||
if !fileName.IsDir() {
|
||||
return []drivers.BucketMetadata{}, drivers.BackendCorrupted{Path: file.root}
|
||||
}
|
||||
metadata := drivers.BucketMetadata{
|
||||
Name: fileName.Name(),
|
||||
Created: fileName.ModTime(), // TODO - provide real created time
|
||||
}
|
||||
metadataList = append(metadataList, metadata)
|
||||
}
|
||||
return metadataList, nil
|
||||
}
|
||||
|
||||
// CreateBucket - PUT Bucket
|
||||
func (file *fileDriver) CreateBucket(bucket string) error {
|
||||
file.lock.Lock()
|
||||
defer file.lock.Unlock()
|
||||
|
||||
// verify bucket path legal
|
||||
if drivers.IsValidBucket(bucket) == false {
|
||||
return drivers.BucketNameInvalid{Bucket: bucket}
|
||||
}
|
||||
|
||||
// get bucket path
|
||||
bucketDir := path.Join(file.root, bucket)
|
||||
|
||||
// check if bucket exists
|
||||
if _, err := os.Stat(bucketDir); err == nil {
|
||||
return drivers.BucketExists{
|
||||
Bucket: bucket,
|
||||
}
|
||||
}
|
||||
|
||||
// make bucket
|
||||
err := os.Mkdir(bucketDir, 0700)
|
||||
if err != nil {
|
||||
return drivers.EmbedError(bucket, "", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListObjects - GET bucket (list objects)
|
||||
func (file *fileDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
|
||||
p := bucketDir{}
|
||||
p.files = make(map[string]os.FileInfo)
|
||||
|
||||
if drivers.IsValidBucket(bucket) == false {
|
||||
return []drivers.ObjectMetadata{}, resources, drivers.BucketNameInvalid{Bucket: bucket}
|
||||
}
|
||||
if resources.Prefix != "" && drivers.IsValidObject(resources.Prefix) == false {
|
||||
return []drivers.ObjectMetadata{}, resources, drivers.ObjectNameInvalid{Bucket: bucket, Object: resources.Prefix}
|
||||
}
|
||||
|
||||
rootPrefix := path.Join(file.root, bucket)
|
||||
// check bucket exists
|
||||
if _, err := os.Stat(rootPrefix); os.IsNotExist(err) {
|
||||
return []drivers.ObjectMetadata{}, resources, drivers.BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
|
||||
p.root = rootPrefix
|
||||
err := filepath.Walk(rootPrefix, p.getAllFiles)
|
||||
if err != nil {
|
||||
return []drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err)
|
||||
}
|
||||
|
||||
var metadataList []drivers.ObjectMetadata
|
||||
var metadata drivers.ObjectMetadata
|
||||
|
||||
// Populate filtering mode
|
||||
resources.Mode = drivers.GetMode(resources)
|
||||
|
||||
for name, f := range p.files {
|
||||
if len(metadataList) >= resources.Maxkeys {
|
||||
resources.IsTruncated = true
|
||||
goto ret
|
||||
}
|
||||
metadata, resources, err = file.filter(bucket, name, f, resources)
|
||||
if err != nil {
|
||||
return []drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err)
|
||||
}
|
||||
if metadata.Bucket != "" {
|
||||
metadataList = append(metadataList, metadata)
|
||||
}
|
||||
}
|
||||
|
||||
ret:
|
||||
sort.Sort(byObjectKey(metadataList))
|
||||
return metadataList, resources, nil
|
||||
}
|
|
@ -1,89 +0,0 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package file
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/minio-io/objectdriver"
|
||||
)
|
||||
|
||||
// fileDriver - state shared by all file-backed driver operations: the
// storage root directory and a mutex serializing mutating calls.
type fileDriver struct {
	root string
	lock *sync.Mutex
}

// fileMetadata - sidecar record persisted next to each object, carrying
// its MD5 checksum and content type.
type fileMetadata struct {
	Md5sum      []byte
	ContentType string
}

// appendUniq returns slice with i appended, unless i is already present,
// in which case slice is returned unchanged.
func appendUniq(slice []string, i string) []string {
	for idx := range slice {
		if slice[idx] == i {
			return slice
		}
	}
	return append(slice, i)
}
|
||||
|
||||
type bucketDir struct {
|
||||
files map[string]os.FileInfo
|
||||
root string
|
||||
}
|
||||
|
||||
func (p *bucketDir) getAllFiles(object string, fl os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if fl.Mode().IsRegular() {
|
||||
if strings.HasSuffix(object, "$metadata") {
|
||||
return nil
|
||||
}
|
||||
_p := strings.Split(object, p.root+"/")
|
||||
if len(_p) > 1 {
|
||||
p.files[_p[1]] = fl
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// delimiter returns the leading portion of object up to and including the
// first occurrence of delim's first byte; when that byte never appears,
// the whole object string is returned.
func delimiter(object, delim string) string {
	sep, _ := strings.NewReader(delim).ReadByte()
	prefix, _ := bufio.NewReader(bytes.NewBufferString(object)).ReadString(sep)
	return prefix
}
|
||||
|
||||
type byObjectKey []drivers.ObjectMetadata
|
||||
|
||||
// Len
|
||||
func (b byObjectKey) Len() int { return len(b) }
|
||||
|
||||
// Swap
|
||||
func (b byObjectKey) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||
|
||||
// Less
|
||||
func (b byObjectKey) Less(i, j int) bool { return b[i].Key < b[j].Key }
|
|
@ -1,100 +0,0 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package file
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/minio-io/objectdriver"
|
||||
)
|
||||
|
||||
func (file *fileDriver) filterDelimiterPrefix(bucket, name, fname, delimitedName string, resources drivers.BucketResourcesMetadata) (drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
|
||||
var err error
|
||||
var metadata drivers.ObjectMetadata
|
||||
switch true {
|
||||
case name == resources.Prefix:
|
||||
// Use resources.Prefix to filter out delimited files
|
||||
metadata, err = file.GetObjectMetadata(bucket, name, resources.Prefix)
|
||||
if err != nil {
|
||||
return drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err)
|
||||
}
|
||||
case delimitedName == fname:
|
||||
// Use resources.Prefix to filter out delimited files
|
||||
metadata, err = file.GetObjectMetadata(bucket, name, resources.Prefix)
|
||||
if err != nil {
|
||||
return drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err)
|
||||
}
|
||||
case delimitedName != "":
|
||||
resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, resources.Prefix+delimitedName)
|
||||
}
|
||||
return metadata, resources, nil
|
||||
}
|
||||
|
||||
// TODO handle resources.Marker
|
||||
func (file *fileDriver) filter(bucket, name string, f os.FileInfo, resources drivers.BucketResourcesMetadata) (drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
|
||||
var err error
|
||||
var metadata drivers.ObjectMetadata
|
||||
|
||||
switch true {
|
||||
// Both delimiter and Prefix is present
|
||||
case resources.IsDelimiterPrefixSet():
|
||||
if strings.HasPrefix(name, resources.Prefix) {
|
||||
trimmedName := strings.TrimPrefix(name, resources.Prefix)
|
||||
delimitedName := delimiter(trimmedName, resources.Delimiter)
|
||||
metadata, resources, err = file.filterDelimiterPrefix(bucket, name, f.Name(), delimitedName, resources)
|
||||
if err != nil {
|
||||
return drivers.ObjectMetadata{}, resources, err
|
||||
}
|
||||
}
|
||||
// Delimiter present and Prefix is absent
|
||||
case resources.IsDelimiterSet():
|
||||
delimitedName := delimiter(name, resources.Delimiter)
|
||||
switch true {
|
||||
case delimitedName == "":
|
||||
// Do not strip prefix object output
|
||||
metadata, err = file.GetObjectMetadata(bucket, name, "")
|
||||
if err != nil {
|
||||
return drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err)
|
||||
}
|
||||
case delimitedName == f.Name():
|
||||
// Do not strip prefix object output
|
||||
metadata, err = file.GetObjectMetadata(bucket, name, "")
|
||||
if err != nil {
|
||||
return drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err)
|
||||
}
|
||||
case delimitedName != "":
|
||||
resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, delimitedName)
|
||||
}
|
||||
// Delimiter is absent and only Prefix is present
|
||||
case resources.IsPrefixSet():
|
||||
if strings.HasPrefix(name, resources.Prefix) {
|
||||
// Do not strip prefix object output
|
||||
metadata, err = file.GetObjectMetadata(bucket, name, "")
|
||||
if err != nil {
|
||||
return drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err)
|
||||
}
|
||||
}
|
||||
case resources.IsDefault():
|
||||
metadata, err = file.GetObjectMetadata(bucket, name, "")
|
||||
if err != nil {
|
||||
return drivers.ObjectMetadata{}, resources, drivers.EmbedError(bucket, "", err)
|
||||
}
|
||||
}
|
||||
|
||||
return metadata, resources, nil
|
||||
}
|
|
@ -1,283 +0,0 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package file
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/minio-io/objectdriver"
|
||||
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/gob"
|
||||
"encoding/hex"
|
||||
)
|
||||
|
||||
/// Object Operations
|
||||
|
||||
// GetPartialObject - GET object from range
|
||||
func (file *fileDriver) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) {
|
||||
// validate bucket
|
||||
if drivers.IsValidBucket(bucket) == false {
|
||||
return 0, drivers.BucketNameInvalid{Bucket: bucket}
|
||||
}
|
||||
|
||||
// validate object
|
||||
if drivers.IsValidObject(object) == false {
|
||||
return 0, drivers.ObjectNameInvalid{Bucket: bucket, Object: object}
|
||||
}
|
||||
|
||||
objectPath := path.Join(file.root, bucket, object)
|
||||
filestat, err := os.Stat(objectPath)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
{
|
||||
if filestat.IsDir() {
|
||||
return 0, drivers.ObjectNotFound{Bucket: bucket, Object: object}
|
||||
}
|
||||
}
|
||||
default:
|
||||
{
|
||||
if os.IsNotExist(err) {
|
||||
return 0, drivers.ObjectNotFound{Bucket: bucket, Object: object}
|
||||
}
|
||||
return 0, drivers.EmbedError(bucket, object, err)
|
||||
}
|
||||
}
|
||||
f, err := os.Open(objectPath)
|
||||
defer f.Close()
|
||||
if err != nil {
|
||||
return 0, drivers.EmbedError(bucket, object, err)
|
||||
}
|
||||
|
||||
_, err = f.Seek(start, os.SEEK_SET)
|
||||
if err != nil {
|
||||
return 0, drivers.EmbedError(bucket, object, err)
|
||||
}
|
||||
|
||||
count, err := io.CopyN(w, f, length)
|
||||
if err != nil {
|
||||
return count, drivers.EmbedError(bucket, object, err)
|
||||
}
|
||||
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// GetObject - GET object from key
|
||||
func (file *fileDriver) GetObject(w io.Writer, bucket string, object string) (int64, error) {
|
||||
// validate bucket
|
||||
if drivers.IsValidBucket(bucket) == false {
|
||||
return 0, drivers.BucketNameInvalid{Bucket: bucket}
|
||||
}
|
||||
|
||||
// check bucket exists
|
||||
if _, err := os.Stat(path.Join(file.root, bucket)); os.IsNotExist(err) {
|
||||
return 0, drivers.BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
|
||||
// validate object
|
||||
if drivers.IsValidObject(object) == false {
|
||||
return 0, drivers.ObjectNameInvalid{Bucket: bucket, Object: object}
|
||||
}
|
||||
|
||||
objectPath := path.Join(file.root, bucket, object)
|
||||
|
||||
filestat, err := os.Stat(objectPath)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
{
|
||||
if filestat.IsDir() {
|
||||
return 0, drivers.ObjectNotFound{Bucket: bucket, Object: object}
|
||||
}
|
||||
}
|
||||
default:
|
||||
{
|
||||
if os.IsNotExist(err) {
|
||||
return 0, drivers.ObjectNotFound{Bucket: bucket, Object: object}
|
||||
}
|
||||
return 0, drivers.EmbedError(bucket, object, err)
|
||||
}
|
||||
}
|
||||
f, err := os.Open(objectPath)
|
||||
defer f.Close()
|
||||
if err != nil {
|
||||
return 0, drivers.EmbedError(bucket, object, err)
|
||||
}
|
||||
|
||||
count, err := io.Copy(w, f)
|
||||
if err != nil {
|
||||
return count, drivers.EmbedError(bucket, object, err)
|
||||
}
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// GetObjectMetadata - HEAD object
|
||||
func (file *fileDriver) GetObjectMetadata(bucket, object, prefix string) (drivers.ObjectMetadata, error) {
|
||||
if drivers.IsValidBucket(bucket) == false {
|
||||
return drivers.ObjectMetadata{}, drivers.BucketNameInvalid{Bucket: bucket}
|
||||
}
|
||||
if drivers.IsValidObject(object) == false {
|
||||
return drivers.ObjectMetadata{}, drivers.ObjectNameInvalid{Bucket: bucket, Object: bucket}
|
||||
}
|
||||
// check bucket exists
|
||||
if _, err := os.Stat(path.Join(file.root, bucket)); os.IsNotExist(err) {
|
||||
return drivers.ObjectMetadata{}, drivers.BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
// Do not use path.Join() since path.Join strips off any object names with '/', use them as is
|
||||
// in a static manner so that we can send a proper 'ObjectNotFound' reply back upon os.Stat()
|
||||
objectPath := file.root + "/" + bucket + "/" + object
|
||||
stat, err := os.Stat(objectPath)
|
||||
if os.IsNotExist(err) {
|
||||
return drivers.ObjectMetadata{}, drivers.ObjectNotFound{Bucket: bucket, Object: object}
|
||||
}
|
||||
|
||||
_, err = os.Stat(objectPath + "$metadata")
|
||||
if os.IsNotExist(err) {
|
||||
return drivers.ObjectMetadata{}, drivers.ObjectNotFound{Bucket: bucket, Object: object}
|
||||
}
|
||||
|
||||
f, err := os.Open(objectPath + "$metadata")
|
||||
defer f.Close()
|
||||
if err != nil {
|
||||
return drivers.ObjectMetadata{}, drivers.EmbedError(bucket, object, err)
|
||||
}
|
||||
|
||||
var deserializedMetadata fileMetadata
|
||||
decoder := gob.NewDecoder(f)
|
||||
err = decoder.Decode(&deserializedMetadata)
|
||||
if err != nil {
|
||||
return drivers.ObjectMetadata{}, drivers.EmbedError(bucket, object, err)
|
||||
}
|
||||
|
||||
contentType := "application/octet-stream"
|
||||
if deserializedMetadata.ContentType != "" {
|
||||
contentType = deserializedMetadata.ContentType
|
||||
}
|
||||
contentType = strings.TrimSpace(contentType)
|
||||
|
||||
etag := bucket + "#" + path.Base(object)
|
||||
if len(deserializedMetadata.Md5sum) != 0 {
|
||||
etag = hex.EncodeToString(deserializedMetadata.Md5sum)
|
||||
}
|
||||
trimmedObject := strings.TrimPrefix(object, prefix)
|
||||
metadata := drivers.ObjectMetadata{
|
||||
Bucket: bucket,
|
||||
Key: trimmedObject,
|
||||
Created: stat.ModTime(),
|
||||
Size: stat.Size(),
|
||||
Md5: etag,
|
||||
ContentType: contentType,
|
||||
}
|
||||
|
||||
return metadata, nil
|
||||
}
|
||||
|
||||
// CreateObject - PUT object
|
||||
func (file *fileDriver) CreateObject(bucket, key, contentType, md5sum string, data io.Reader) error {
|
||||
// TODO Commits should stage then move instead of writing directly
|
||||
file.lock.Lock()
|
||||
defer file.lock.Unlock()
|
||||
|
||||
// check bucket name valid
|
||||
if drivers.IsValidBucket(bucket) == false {
|
||||
return drivers.BucketNameInvalid{Bucket: bucket}
|
||||
}
|
||||
|
||||
// check bucket exists
|
||||
if _, err := os.Stat(path.Join(file.root, bucket)); os.IsNotExist(err) {
|
||||
return drivers.BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
|
||||
// verify object path legal
|
||||
if drivers.IsValidObject(key) == false {
|
||||
return drivers.ObjectNameInvalid{Bucket: bucket, Object: key}
|
||||
}
|
||||
|
||||
// verify content type
|
||||
if contentType == "" {
|
||||
contentType = "application/octet-stream"
|
||||
}
|
||||
contentType = strings.TrimSpace(contentType)
|
||||
|
||||
// get object path
|
||||
objectPath := path.Join(file.root, bucket, key)
|
||||
objectDir := path.Dir(objectPath)
|
||||
if _, err := os.Stat(objectDir); os.IsNotExist(err) {
|
||||
err = os.MkdirAll(objectDir, 0700)
|
||||
if err != nil {
|
||||
return drivers.EmbedError(bucket, key, err)
|
||||
}
|
||||
}
|
||||
|
||||
// check if object exists
|
||||
if _, err := os.Stat(objectPath); !os.IsNotExist(err) {
|
||||
return drivers.ObjectExists{
|
||||
Bucket: bucket,
|
||||
Object: key,
|
||||
}
|
||||
}
|
||||
|
||||
// write object
|
||||
f, err := os.OpenFile(objectPath, os.O_WRONLY|os.O_CREATE, 0600)
|
||||
defer f.Close()
|
||||
if err != nil {
|
||||
return drivers.EmbedError(bucket, key, err)
|
||||
}
|
||||
|
||||
h := md5.New()
|
||||
mw := io.MultiWriter(f, h)
|
||||
|
||||
_, err = io.Copy(mw, data)
|
||||
if err != nil {
|
||||
return drivers.EmbedError(bucket, key, err)
|
||||
}
|
||||
|
||||
//
|
||||
f, err = os.OpenFile(objectPath+"$metadata", os.O_WRONLY|os.O_CREATE, 0600)
|
||||
defer f.Close()
|
||||
if err != nil {
|
||||
return drivers.EmbedError(bucket, key, err)
|
||||
}
|
||||
|
||||
metadata := &fileMetadata{
|
||||
ContentType: contentType,
|
||||
Md5sum: h.Sum(nil),
|
||||
}
|
||||
// serialize metadata to gob
|
||||
encoder := gob.NewEncoder(f)
|
||||
err = encoder.Encode(metadata)
|
||||
if err != nil {
|
||||
return drivers.EmbedError(bucket, key, err)
|
||||
}
|
||||
|
||||
// Verify data received to be correct, Content-MD5 received
|
||||
if md5sum != "" {
|
||||
var data []byte
|
||||
data, err = base64.StdEncoding.DecodeString(md5sum)
|
||||
if err != nil {
|
||||
return drivers.InvalidDigest{Bucket: bucket, Key: key, Md5: md5sum}
|
||||
}
|
||||
if !bytes.Equal(metadata.Md5sum, data) {
|
||||
return drivers.BadDigest{Bucket: bucket, Key: key, Md5: md5sum}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,112 +0,0 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package file
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/minio-io/objectdriver"
|
||||
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
// GetBucketPolicy - GET bucket policy
|
||||
func (file *fileDriver) GetBucketPolicy(bucket string) (drivers.BucketPolicy, error) {
|
||||
file.lock.Lock()
|
||||
defer file.lock.Unlock()
|
||||
|
||||
var p drivers.BucketPolicy
|
||||
// verify bucket path legal
|
||||
if drivers.IsValidBucket(bucket) == false {
|
||||
return drivers.BucketPolicy{}, drivers.BucketNameInvalid{Bucket: bucket}
|
||||
}
|
||||
|
||||
// get bucket path
|
||||
bucketDir := path.Join(file.root, bucket)
|
||||
// check if bucket exists
|
||||
if _, err := os.Stat(bucketDir); err != nil {
|
||||
return drivers.BucketPolicy{}, drivers.BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
|
||||
// get policy path
|
||||
bucketPolicy := path.Join(file.root, bucket+"_policy.json")
|
||||
filestat, err := os.Stat(bucketPolicy)
|
||||
|
||||
if os.IsNotExist(err) {
|
||||
return drivers.BucketPolicy{}, drivers.BucketPolicyNotFound{Bucket: bucket}
|
||||
}
|
||||
|
||||
if filestat.IsDir() {
|
||||
return drivers.BucketPolicy{}, drivers.BackendCorrupted{Path: bucketPolicy}
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(bucketPolicy, os.O_RDONLY, 0666)
|
||||
defer f.Close()
|
||||
if err != nil {
|
||||
return drivers.BucketPolicy{}, drivers.EmbedError(bucket, "", err)
|
||||
}
|
||||
encoder := json.NewDecoder(f)
|
||||
err = encoder.Decode(&p)
|
||||
if err != nil {
|
||||
return drivers.BucketPolicy{}, drivers.EmbedError(bucket, "", err)
|
||||
}
|
||||
|
||||
return p, nil
|
||||
|
||||
}
|
||||
|
||||
// CreateBucketPolicy - PUT bucket policy
|
||||
func (file *fileDriver) CreateBucketPolicy(bucket string, p drivers.BucketPolicy) error {
|
||||
file.lock.Lock()
|
||||
defer file.lock.Unlock()
|
||||
|
||||
// verify bucket path legal
|
||||
if drivers.IsValidBucket(bucket) == false {
|
||||
return drivers.BucketNameInvalid{Bucket: bucket}
|
||||
}
|
||||
|
||||
// get bucket path
|
||||
bucketDir := path.Join(file.root, bucket)
|
||||
// check if bucket exists
|
||||
if _, err := os.Stat(bucketDir); err != nil {
|
||||
return drivers.BucketNotFound{
|
||||
Bucket: bucket,
|
||||
}
|
||||
}
|
||||
|
||||
// get policy path
|
||||
bucketPolicy := path.Join(file.root, bucket+"_policy.json")
|
||||
filestat, ret := os.Stat(bucketPolicy)
|
||||
if !os.IsNotExist(ret) {
|
||||
if filestat.IsDir() {
|
||||
return drivers.BackendCorrupted{Path: bucketPolicy}
|
||||
}
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(bucketPolicy, os.O_WRONLY|os.O_CREATE, 0600)
|
||||
defer f.Close()
|
||||
if err != nil {
|
||||
return drivers.EmbedError(bucket, "", err)
|
||||
}
|
||||
encoder := json.NewEncoder(f)
|
||||
err = encoder.Encode(p)
|
||||
if err != nil {
|
||||
return drivers.EmbedError(bucket, "", err)
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,52 +0,0 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package file
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
. "github.com/minio-io/check"
|
||||
"github.com/minio-io/objectdriver"
|
||||
)
|
||||
|
||||
func Test(t *testing.T) { TestingT(t) }
|
||||
|
||||
type MySuite struct{}
|
||||
|
||||
var _ = Suite(&MySuite{})
|
||||
|
||||
func (s *MySuite) TestAPISuite(c *C) {
|
||||
var storageList []string
|
||||
create := func() drivers.Driver {
|
||||
path, err := ioutil.TempDir(os.TempDir(), "minio-file-")
|
||||
c.Check(err, IsNil)
|
||||
storageList = append(storageList, path)
|
||||
_, _, store := Start(path)
|
||||
return store
|
||||
}
|
||||
drivers.APITestSuite(c, create)
|
||||
removeRoots(c, storageList)
|
||||
}
|
||||
|
||||
func removeRoots(c *C, roots []string) {
|
||||
for _, root := range roots {
|
||||
err := os.RemoveAll(root)
|
||||
c.Check(err, IsNil)
|
||||
}
|
||||
}
|
|
@ -1,287 +0,0 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package memory
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/minio-io/objectdriver"
|
||||
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
|
||||
"io/ioutil"
|
||||
)
|
||||
|
||||
// memoryDriver - local variables
|
||||
type memoryDriver struct {
|
||||
bucketdata map[string]storedBucket
|
||||
objectdata map[string]storedObject
|
||||
lock *sync.RWMutex
|
||||
}
|
||||
|
||||
type storedBucket struct {
|
||||
metadata drivers.BucketMetadata
|
||||
// owner string // TODO
|
||||
// id string // TODO
|
||||
}
|
||||
|
||||
type storedObject struct {
|
||||
metadata drivers.ObjectMetadata
|
||||
data []byte
|
||||
}
|
||||
|
||||
// Start memory object server
|
||||
func Start() (chan<- string, <-chan error, drivers.Driver) {
|
||||
ctrlChannel := make(chan string)
|
||||
errorChannel := make(chan error)
|
||||
|
||||
memory := new(memoryDriver)
|
||||
memory.bucketdata = make(map[string]storedBucket)
|
||||
memory.objectdata = make(map[string]storedObject)
|
||||
memory.lock = new(sync.RWMutex)
|
||||
|
||||
go start(ctrlChannel, errorChannel)
|
||||
return ctrlChannel, errorChannel, memory
|
||||
}
|
||||
|
||||
func start(ctrlChannel <-chan string, errorChannel chan<- error) {
|
||||
close(errorChannel)
|
||||
}
|
||||
|
||||
// GetObject - GET object from memory buffer
|
||||
func (memory memoryDriver) GetObject(w io.Writer, bucket string, object string) (int64, error) {
|
||||
if _, ok := memory.bucketdata[bucket]; ok == false {
|
||||
return 0, drivers.BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
// get object
|
||||
key := object
|
||||
if val, ok := memory.objectdata[key]; ok {
|
||||
objectBuffer := bytes.NewBuffer(val.data)
|
||||
written, err := io.Copy(w, objectBuffer)
|
||||
return written, err
|
||||
}
|
||||
return 0, drivers.ObjectNotFound{Bucket: bucket, Object: object}
|
||||
}
|
||||
|
||||
// GetPartialObject - GET object from memory buffer range
|
||||
func (memory memoryDriver) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) {
|
||||
var sourceBuffer bytes.Buffer
|
||||
if _, err := memory.GetObject(&sourceBuffer, bucket, object); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if _, err := io.CopyN(ioutil.Discard, &sourceBuffer, start); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return io.CopyN(w, &sourceBuffer, length)
|
||||
}
|
||||
|
||||
// GetBucketMetadata -
|
||||
func (memory memoryDriver) GetBucketMetadata(bucket string) (drivers.BucketMetadata, error) {
|
||||
if _, ok := memory.bucketdata[bucket]; ok == false {
|
||||
return drivers.BucketMetadata{}, drivers.BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
return memory.bucketdata[bucket].metadata, nil
|
||||
}
|
||||
|
||||
// CreateBucketPolicy - Not implemented
|
||||
func (memory memoryDriver) CreateBucketPolicy(bucket string, policy drivers.BucketPolicy) error {
|
||||
return drivers.APINotImplemented{API: "PutBucketPolicy"}
|
||||
}
|
||||
|
||||
// GetBucketPolicy - Not implemented
|
||||
func (memory memoryDriver) GetBucketPolicy(bucket string) (drivers.BucketPolicy, error) {
|
||||
return drivers.BucketPolicy{}, drivers.APINotImplemented{API: "GetBucketPolicy"}
|
||||
}
|
||||
|
||||
// CreateObject - PUT object to memory buffer
|
||||
func (memory memoryDriver) CreateObject(bucket, key, contentType, md5sum string, data io.Reader) error {
|
||||
memory.lock.Lock()
|
||||
defer memory.lock.Unlock()
|
||||
|
||||
if _, ok := memory.bucketdata[bucket]; ok == false {
|
||||
return drivers.BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
|
||||
if _, ok := memory.objectdata[key]; ok == true {
|
||||
return drivers.ObjectExists{Bucket: bucket, Object: key}
|
||||
}
|
||||
|
||||
if contentType == "" {
|
||||
contentType = "application/octet-stream"
|
||||
}
|
||||
|
||||
contentType = strings.TrimSpace(contentType)
|
||||
|
||||
var bytesBuffer bytes.Buffer
|
||||
var newObject = storedObject{}
|
||||
if _, ok := io.Copy(&bytesBuffer, data); ok == nil {
|
||||
size := bytesBuffer.Len()
|
||||
md5SumBytes := md5.Sum(bytesBuffer.Bytes())
|
||||
md5Sum := hex.EncodeToString(md5SumBytes[:])
|
||||
newObject.metadata = drivers.ObjectMetadata{
|
||||
Bucket: bucket,
|
||||
Key: key,
|
||||
|
||||
ContentType: contentType,
|
||||
Created: time.Now(),
|
||||
Md5: md5Sum,
|
||||
Size: int64(size),
|
||||
}
|
||||
newObject.data = bytesBuffer.Bytes()
|
||||
}
|
||||
memory.objectdata[key] = newObject
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateBucket - create bucket in memory
|
||||
func (memory memoryDriver) CreateBucket(bucketName string) error {
|
||||
memory.lock.Lock()
|
||||
defer memory.lock.Unlock()
|
||||
if !drivers.IsValidBucket(bucketName) {
|
||||
return drivers.BucketNameInvalid{Bucket: bucketName}
|
||||
}
|
||||
|
||||
if _, ok := memory.bucketdata[bucketName]; ok == true {
|
||||
return drivers.BucketExists{Bucket: bucketName}
|
||||
}
|
||||
|
||||
var newBucket = storedBucket{}
|
||||
newBucket.metadata = drivers.BucketMetadata{}
|
||||
newBucket.metadata.Name = bucketName
|
||||
newBucket.metadata.Created = time.Now()
|
||||
memory.bucketdata[bucketName] = newBucket
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// delimiter returns the prefix of object up to and including the first
// occurrence of delim's first byte; when that byte never appears, object
// is returned whole.
func delimiter(object, delim string) string {
	sep, _ := strings.NewReader(delim).ReadByte()
	prefix, _ := bufio.NewReader(bytes.NewBufferString(object)).ReadString(sep)
	return prefix
}

// appendUniq appends i to slice only when it is not already present.
func appendUniq(slice []string, i string) []string {
	for idx := range slice {
		if slice[idx] == i {
			return slice
		}
	}
	return append(slice, i)
}
|
||||
|
||||
func (memory memoryDriver) filterDelimiterPrefix(keys []string, key, delimitedName string, resources drivers.BucketResourcesMetadata) (drivers.BucketResourcesMetadata, []string) {
|
||||
switch true {
|
||||
case key == resources.Prefix:
|
||||
keys = appendUniq(keys, key)
|
||||
// DelimitedName - requires resources.Prefix as it was trimmed off earlier in the flow
|
||||
case key == resources.Prefix+delimitedName:
|
||||
keys = appendUniq(keys, key)
|
||||
case delimitedName != "":
|
||||
resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, resources.Prefix+delimitedName)
|
||||
}
|
||||
return resources, keys
|
||||
}
|
||||
|
||||
// ListObjects - list objects from memory
|
||||
func (memory memoryDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
|
||||
if _, ok := memory.bucketdata[bucket]; ok == false {
|
||||
return []drivers.ObjectMetadata{}, drivers.BucketResourcesMetadata{IsTruncated: false}, drivers.BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
var results []drivers.ObjectMetadata
|
||||
var keys []string
|
||||
for key := range memory.objectdata {
|
||||
switch true {
|
||||
// Prefix absent, delimit object key based on delimiter
|
||||
case resources.IsDelimiterSet():
|
||||
delimitedName := delimiter(key, resources.Delimiter)
|
||||
switch true {
|
||||
case delimitedName == "" || delimitedName == key:
|
||||
keys = appendUniq(keys, key)
|
||||
case delimitedName != "":
|
||||
resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, delimitedName)
|
||||
}
|
||||
// Prefix present, delimit object key with prefix key based on delimiter
|
||||
case resources.IsDelimiterPrefixSet():
|
||||
if strings.HasPrefix(key, resources.Prefix) {
|
||||
trimmedName := strings.TrimPrefix(key, resources.Prefix)
|
||||
delimitedName := delimiter(trimmedName, resources.Delimiter)
|
||||
resources, keys = memory.filterDelimiterPrefix(keys, key, delimitedName, resources)
|
||||
}
|
||||
// Prefix present, nothing to delimit
|
||||
case resources.IsPrefixSet():
|
||||
keys = appendUniq(keys, key)
|
||||
// Prefix and delimiter absent
|
||||
case resources.IsDefault():
|
||||
keys = appendUniq(keys, key)
|
||||
}
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for _, key := range keys {
|
||||
if len(results) == resources.Maxkeys {
|
||||
return results, drivers.BucketResourcesMetadata{IsTruncated: true}, nil
|
||||
}
|
||||
object := memory.objectdata[key]
|
||||
if bucket == object.metadata.Bucket {
|
||||
results = append(results, object.metadata)
|
||||
}
|
||||
}
|
||||
return results, resources, nil
|
||||
}
|
||||
|
||||
// ByBucketName is a type for sorting bucket metadata by bucket name
|
||||
type ByBucketName []drivers.BucketMetadata
|
||||
|
||||
// Len of bucket name
|
||||
func (b ByBucketName) Len() int { return len(b) }
|
||||
|
||||
// Swap bucket i, j
|
||||
func (b ByBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||
|
||||
// Less
|
||||
func (b ByBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }
|
||||
|
||||
// ListBuckets - List buckets from memory
|
||||
func (memory memoryDriver) ListBuckets() ([]drivers.BucketMetadata, error) {
|
||||
var results []drivers.BucketMetadata
|
||||
for _, bucket := range memory.bucketdata {
|
||||
results = append(results, bucket.metadata)
|
||||
}
|
||||
sort.Sort(ByBucketName(results))
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// GetObjectMetadata - get object metadata from memory
|
||||
func (memory memoryDriver) GetObjectMetadata(bucket, key, prefix string) (drivers.ObjectMetadata, error) {
|
||||
// check if bucket exists
|
||||
if _, ok := memory.bucketdata[bucket]; ok == false {
|
||||
return drivers.ObjectMetadata{}, drivers.BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
if object, ok := memory.objectdata[key]; ok == true {
|
||||
return object.metadata, nil
|
||||
}
|
||||
return drivers.ObjectMetadata{}, drivers.ObjectNotFound{Bucket: bucket, Object: key}
|
||||
}
|
|
@ -1,38 +0,0 @@
|
|||
/*
|
||||
* Minimalist Object Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package memory
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/minio-io/check"
|
||||
"github.com/minio-io/objectdriver"
|
||||
)
|
||||
|
||||
func Test(t *testing.T) { TestingT(t) }
|
||||
|
||||
type MySuite struct{}
|
||||
|
||||
var _ = Suite(&MySuite{})
|
||||
|
||||
func (s *MySuite) TestAPISuite(c *C) {
|
||||
create := func() drivers.Driver {
|
||||
_, _, store := Start()
|
||||
return store
|
||||
}
|
||||
drivers.APITestSuite(c, create)
|
||||
}
|
|
@ -1,135 +0,0 @@
|
|||
package mocks
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/minio-io/iodine"
|
||||
"github.com/minio-io/objectdriver"
|
||||
"github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// Driver is a mock
|
||||
type Driver struct {
|
||||
mock.Mock
|
||||
|
||||
ObjectWriterData map[string][]byte
|
||||
}
|
||||
|
||||
// ListBuckets is a mock
|
||||
func (m *Driver) ListBuckets() ([]drivers.BucketMetadata, error) {
|
||||
ret := m.Called()
|
||||
|
||||
r0 := ret.Get(0).([]drivers.BucketMetadata)
|
||||
r1 := ret.Error(1)
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// CreateBucket is a mock
|
||||
func (m *Driver) CreateBucket(bucket string) error {
|
||||
ret := m.Called(bucket)
|
||||
|
||||
r0 := ret.Error(0)
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// GetBucketMetadata is a mock
|
||||
func (m *Driver) GetBucketMetadata(bucket string) (drivers.BucketMetadata, error) {
|
||||
ret := m.Called(bucket)
|
||||
r0 := ret.Get(0).(drivers.BucketMetadata)
|
||||
r1 := ret.Error(1)
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// CreateBucketPolicy is a mock
|
||||
func (m *Driver) CreateBucketPolicy(bucket string, p drivers.BucketPolicy) error {
|
||||
ret := m.Called(bucket, p)
|
||||
|
||||
r0 := ret.Error(0)
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// GetBucketPolicy is a mock
|
||||
func (m *Driver) GetBucketPolicy(bucket string) (drivers.BucketPolicy, error) {
|
||||
ret := m.Called(bucket)
|
||||
|
||||
r0 := ret.Get(0).(drivers.BucketPolicy)
|
||||
r1 := ret.Error(1)
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// SetGetObjectWriter is a mock
|
||||
func (m *Driver) SetGetObjectWriter(bucket, object string, data []byte) {
|
||||
m.ObjectWriterData[bucket+":"+object] = data
|
||||
// println(string(m.ObjectWriterData["bucket:object"]))
|
||||
}
|
||||
|
||||
// GetObject is a mock
|
||||
func (m *Driver) GetObject(w io.Writer, bucket string, object string) (int64, error) {
|
||||
ret := m.Called(w, bucket, object)
|
||||
r0 := ret.Get(0).(int64)
|
||||
r1 := ret.Error(1)
|
||||
if r1 == nil {
|
||||
if obj, ok := m.ObjectWriterData[bucket+":"+object]; ok {
|
||||
n, _ := io.Copy(w, bytes.NewBuffer(obj))
|
||||
r0 = n
|
||||
}
|
||||
}
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// GetPartialObject is a mock
|
||||
func (m *Driver) GetPartialObject(w io.Writer, bucket string, object string, start int64, length int64) (int64, error) {
|
||||
ret := m.Called(w, bucket, object, start, length)
|
||||
|
||||
r0 := ret.Get(0).(int64)
|
||||
r1 := ret.Error(1)
|
||||
|
||||
if r1 == nil {
|
||||
if obj, ok := m.ObjectWriterData[bucket+":"+object]; ok {
|
||||
source := bytes.NewBuffer(obj)
|
||||
var nilSink bytes.Buffer
|
||||
io.CopyN(&nilSink, source, start)
|
||||
n, _ := io.CopyN(w, source, length)
|
||||
r0 = n
|
||||
}
|
||||
}
|
||||
r1 = iodine.New(r1, nil)
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// GetObjectMetadata is a mock
|
||||
func (m *Driver) GetObjectMetadata(bucket string, object string, prefix string) (drivers.ObjectMetadata, error) {
|
||||
ret := m.Called(bucket, object, prefix)
|
||||
|
||||
r0 := ret.Get(0).(drivers.ObjectMetadata)
|
||||
r1 := ret.Error(1)
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// ListObjects is a mock
|
||||
func (m *Driver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
|
||||
ret := m.Called(bucket, resources)
|
||||
|
||||
r0 := ret.Get(0).([]drivers.ObjectMetadata)
|
||||
r1 := ret.Get(1).(drivers.BucketResourcesMetadata)
|
||||
r2 := ret.Error(2)
|
||||
|
||||
return r0, r1, r2
|
||||
}
|
||||
|
||||
// CreateObject is a mock
|
||||
func (m *Driver) CreateObject(bucket string, key string, contentType string, md5sum string, data io.Reader) error {
|
||||
ret := m.Called(bucket, key, contentType, md5sum, data)
|
||||
|
||||
r0 := ret.Error(0)
|
||||
|
||||
return r0
|
||||
}
|
|
@ -21,8 +21,8 @@ import (
|
|||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio-io/iodine"
|
||||
"github.com/minio-io/minio/pkg/storage/drivers"
|
||||
"github.com/minio-io/minio/pkg/utils/log"
|
||||
"github.com/minio-io/objectdriver"
|
||||
)
|
||||
|
||||
// GET Bucket (List Objects)
|
||||
|
|
|
@ -21,8 +21,8 @@ import (
|
|||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio-io/iodine"
|
||||
"github.com/minio-io/minio/pkg/storage/drivers"
|
||||
"github.com/minio-io/minio/pkg/utils/log"
|
||||
"github.com/minio-io/objectdriver"
|
||||
)
|
||||
|
||||
// GET Object
|
||||
|
|
|
@ -22,8 +22,8 @@ import (
|
|||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio-io/iodine"
|
||||
"github.com/minio-io/minio/pkg/storage/drivers"
|
||||
"github.com/minio-io/minio/pkg/utils/log"
|
||||
"github.com/minio-io/objectdriver"
|
||||
)
|
||||
|
||||
// PUT Bucket policy
|
||||
|
|
|
@ -19,7 +19,7 @@ package api
|
|||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/minio-io/objectdriver"
|
||||
"github.com/minio-io/minio/pkg/storage/drivers"
|
||||
)
|
||||
|
||||
// Reply date format
|
||||
|
|
|
@ -23,7 +23,7 @@ import (
|
|||
router "github.com/gorilla/mux"
|
||||
"github.com/minio-io/iodine"
|
||||
"github.com/minio-io/minio/pkg/api/config"
|
||||
"github.com/minio-io/objectdriver"
|
||||
"github.com/minio-io/minio/pkg/storage/drivers"
|
||||
)
|
||||
|
||||
// private use
|
||||
|
|
|
@ -33,11 +33,11 @@ import (
|
|||
"net/http/httptest"
|
||||
|
||||
"github.com/minio-io/minio/pkg/api"
|
||||
"github.com/minio-io/objectdriver"
|
||||
"github.com/minio-io/objectdriver/donut"
|
||||
"github.com/minio-io/objectdriver/file"
|
||||
"github.com/minio-io/objectdriver/memory"
|
||||
"github.com/minio-io/objectdriver/mocks"
|
||||
"github.com/minio-io/minio/pkg/storage/drivers"
|
||||
"github.com/minio-io/minio/pkg/storage/drivers/donut"
|
||||
"github.com/minio-io/minio/pkg/storage/drivers/file"
|
||||
"github.com/minio-io/minio/pkg/storage/drivers/memory"
|
||||
"github.com/minio-io/minio/pkg/storage/drivers/mocks"
|
||||
"github.com/stretchr/testify/mock"
|
||||
|
||||
. "github.com/minio-io/check"
|
||||
|
|
|
@ -24,7 +24,7 @@ import (
|
|||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/minio-io/objectdriver"
|
||||
"github.com/minio-io/minio/pkg/storage/drivers"
|
||||
)
|
||||
|
||||
// No encoder interface exists, so we create one.
|
||||
|
|
|
@ -20,7 +20,7 @@ import (
|
|||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/minio-io/objectdriver"
|
||||
"github.com/minio-io/minio/pkg/storage/drivers"
|
||||
)
|
||||
|
||||
// parse bucket url queries
|
||||
|
|
|
@ -28,11 +28,11 @@ import (
|
|||
"github.com/minio-io/minio/pkg/api"
|
||||
"github.com/minio-io/minio/pkg/api/web"
|
||||
"github.com/minio-io/minio/pkg/server/httpserver"
|
||||
"github.com/minio-io/minio/pkg/storage/drivers"
|
||||
"github.com/minio-io/minio/pkg/storage/drivers/donut"
|
||||
"github.com/minio-io/minio/pkg/storage/drivers/file"
|
||||
"github.com/minio-io/minio/pkg/storage/drivers/memory"
|
||||
"github.com/minio-io/minio/pkg/utils/log"
|
||||
"github.com/minio-io/objectdriver"
|
||||
"github.com/minio-io/objectdriver/donut"
|
||||
"github.com/minio-io/objectdriver/file"
|
||||
"github.com/minio-io/objectdriver/memory"
|
||||
)
|
||||
|
||||
// Config - http server parameters
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
Subproject commit 64e55f18297dd962102afc7e81ea63488fc4706a
|
Loading…
Reference in New Issue