Migrate to external Donut package

This commit is contained in:
Harshavardhana 2015-04-05 01:53:41 -07:00
parent 183df976f3
commit 0475d7d056
40 changed files with 2719 additions and 1100 deletions

6
Godeps/Godeps.json generated
View File

@ -1,6 +1,6 @@
{
"ImportPath": "github.com/minio-io/minio",
"GoVersion": "go1.4.2",
"GoVersion": "go1.4",
"Packages": [
"./..."
],
@ -22,6 +22,10 @@
"Comment": "1.2.0-100-g6d6f8d3",
"Rev": "6d6f8d3cc162bfcb60379888e2f37d73ff6a6253"
},
{
"ImportPath": "github.com/minio-io/donut",
"Rev": "5647e1e6c6a95caec431610a497b15f8298d56cf"
},
{
"ImportPath": "github.com/minio-io/erasure",
"Rev": "2a52bdad9b271ef680374a22f0cb68513a79ebf5"

View File

@ -0,0 +1,2 @@
donut
build-constants.go

View File

@ -0,0 +1,22 @@
{
"ImportPath": "github.com/minio-io/donut",
"GoVersion": "go1.4",
"Packages": [
"./..."
],
"Deps": [
{
"ImportPath": "github.com/minio-io/cli",
"Comment": "1.2.0-101-g1a25bbd",
"Rev": "1a25bbdce2344b0063ea0476bceb4a4adbe4492a"
},
{
"ImportPath": "github.com/minio-io/erasure",
"Rev": "3cece1a107115563682604b1430418e28f65dd80"
},
{
"ImportPath": "github.com/minio-io/minio/pkg/utils/split",
"Rev": "936520e6e0fc5dd4ce8d04504ee991084555e57a"
}
]
}

View File

@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

202
Godeps/_workspace/src/github.com/minio-io/donut/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,69 @@
# Build entry points for donut. Default target installs after running the
# full dependency/verifier/test pipeline.
MINIOPATH=$(GOPATH)/src/github.com/minio-io/donut

all: getdeps install

checkdeps:
	@echo "Checking deps:"
	@(env bash $(PWD)/buildscripts/checkdeps.sh)

checkgopath:
	@echo "Checking if project is at ${MINIOPATH}"
	@if [ ! -d ${MINIOPATH} ]; then echo "Project not found in $(GOPATH), please follow instructions provided at https://github.com/Minio-io/minio/blob/master/CONTRIBUTING.md#setup-your-minio-github-repository" && exit 1; fi
# BUGFIX: the message above used $GOPATH, which make expands as the
# (undefined) variable $G followed by literal "OPATH", so the error
# printed "Project not found in OPATH". $(GOPATH) expands correctly.

getdeps: checkdeps checkgopath
	@go get github.com/minio-io/godep && echo "Installed godep:"
	@go get github.com/golang/lint/golint && echo "Installed golint:"
	@go get golang.org/x/tools/cmd/vet && echo "Installed vet:"
	@go get github.com/fzipp/gocyclo && echo "Installed gocyclo:"

verifiers: getdeps vet fmt lint cyclo

vet:
	@echo "Running $@:"
	@go vet ./...

fmt:
	@echo "Running $@:"
	@test -z "$$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" || \
		echo "+ please format Go code with 'gofmt -s'"

lint:
	@echo "Running $@:"
	@test -z "$$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)"

cyclo:
	@echo "Running $@:"
	@test -z "$$(gocyclo -over 15 . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)"

pre-build:
	@echo "Running pre-build:"
	@(env bash $(PWD)/buildscripts/git-commit-id.sh)

build-all: getdeps verifiers
	@echo "Building Libraries:"
	@godep go generate github.com/minio-io/erasure
	@godep go generate ./...
	@godep go build -a ./... # have no stale packages

test-all: pre-build build-all
	@echo "Running Test Suites:"
	@godep go test -race ./...

save: restore
	@godep save ./...

restore:
	@godep restore

env:
	@godep go env

docs-deploy:
	@mkdocs gh-deploy --clean

install: test-all
	@echo "Installing donut-cli:"
	@godep go install -a github.com/minio-io/donut/cmd/donut-cli
	@mkdir -p $(HOME)/.minio/donut

clean:
	@rm -fv cover.out
	@rm -fv build-constants.go

View File

@ -0,0 +1,3 @@
# Donut
donut - Donut (do not delete) on-disk format implementation, released under [Apache license v2](./LICENSE).

View File

@ -0,0 +1,240 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"path"
"strconv"
"strings"
"time"
"crypto/md5"
"encoding/hex"
"encoding/json"
"github.com/minio-io/minio/pkg/utils/split"
)
// bucket - internal representation of a donut bucket: its name, the donut
// it belongs to, the nodes it is spread across, and a cache of objects
// discovered by ListObjects.
type bucket struct {
name string // bucket name supplied to NewBucket
donutName string // name of the parent donut
nodes map[string]Node // node name -> Node the bucket spans
objects map[string]Object // object name -> Object, populated lazily by ListObjects
}
// NewBucket - instantiate a new bucket spanning the given nodes.
// Returns an error when bucketName is empty.
func NewBucket(bucketName, donutName string, nodes map[string]Node) (Bucket, error) {
	if bucketName == "" {
		return nil, errors.New("invalid argument")
	}
	b := bucket{
		name:      bucketName,
		donutName: donutName,
		nodes:     nodes,
		objects:   make(map[string]Object),
	}
	return b, nil
}
// ListNodes - return the nodes this bucket is spread across; never fails.
func (b bucket) ListNodes() (map[string]Node, error) {
return b.nodes, nil
}
// ListObjects - walk every disk slice of this bucket on every node, read
// each stored object's metadata, and return the accumulated
// object-name -> Object map (also cached on b.objects).
func (b bucket) ListObjects() (map[string]Object, error) {
nodeSlice := 0
for _, node := range b.nodes {
disks, err := node.ListDisks()
if err != nil {
return nil, err
}
for _, disk := range disks {
// A bucket slice lives on disk as "<bucket>$<node#>$<disk order>".
bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, disk.GetOrder())
bucketPath := path.Join(b.donutName, bucketSlice)
objects, err := disk.ListDir(bucketPath)
if err != nil {
return nil, err
}
for _, object := range objects {
newObject, err := NewObject(object.Name(), path.Join(disk.GetPath(), bucketPath))
if err != nil {
return nil, err
}
newObjectMetadata, err := newObject.GetObjectMetadata()
if err != nil {
return nil, err
}
// The "object" metadata key holds the original (un-normalized)
// object name, as written by PutObject.
objectName, ok := newObjectMetadata["object"]
if !ok {
return nil, errors.New("object corrupted")
}
b.objects[objectName] = newObject
}
}
nodeSlice = nodeSlice + 1
}
return b.objects, nil
}
// GetObject - stream the named object back through a pipe. Returns the
// reader end and the size recorded in the donut metadata; the actual
// reads are served by a goroutine running b.getObject on the writer end,
// which reports failures via CloseWithError.
func (b bucket) GetObject(objectName string) (reader io.ReadCloser, size int64, err error) {
reader, writer := io.Pipe()
// get list of objects
objects, err := b.ListObjects()
if err != nil {
return nil, 0, err
}
// check if object exists
object, ok := objects[objectName]
if !ok {
return nil, 0, os.ErrNotExist
}
donutObjectMetadata, err := object.GetDonutObjectMetadata()
if err != nil {
return nil, 0, err
}
// NOTE(review): this validation runs after objectName was already used
// for the map lookup above; an empty name would hit os.ErrNotExist
// first, so the objectName check here is effectively dead code.
if objectName == "" || writer == nil || len(donutObjectMetadata) == 0 {
return nil, 0, errors.New("invalid argument")
}
size, err = strconv.ParseInt(donutObjectMetadata["size"], 10, 64)
if err != nil {
return nil, 0, err
}
// Decode asynchronously; getObject closes the writer, which terminates
// the returned reader.
go b.getObject(b.normalizeObjectName(objectName), writer, donutObjectMetadata)
return reader, size, nil
}
// WriteObjectMetadata - JSON-encode objectMetadata onto every disk slice
// backing objectName.
func (b bucket) WriteObjectMetadata(objectName string, objectMetadata map[string]string) error {
	if len(objectMetadata) == 0 {
		return errors.New("invalid argument")
	}
	writers, err := b.getDiskWriters(objectName, objectMetadataConfig)
	if err != nil {
		return err
	}
	// Register every close up front so all writers are released even when
	// an encode below fails part way through.
	for _, w := range writers {
		defer w.Close()
	}
	for _, w := range writers {
		if err := json.NewEncoder(w).Encode(objectMetadata); err != nil {
			return err
		}
	}
	return nil
}
// WriteDonutObjectMetadata - JSON-encode the donut-level (system) metadata
// onto every disk slice backing objectName.
func (b bucket) WriteDonutObjectMetadata(objectName string, donutObjectMetadata map[string]string) error {
	if len(donutObjectMetadata) == 0 {
		return errors.New("invalid argument")
	}
	writers, err := b.getDiskWriters(objectName, donutObjectMetadataConfig)
	if err != nil {
		return err
	}
	// Register every close up front so all writers are released even when
	// an encode below fails part way through.
	for _, w := range writers {
		defer w.Close()
	}
	for _, w := range writers {
		if err := json.NewEncoder(w).Encode(donutObjectMetadata); err != nil {
			return err
		}
	}
	return nil
}
// normalizeObjectName - flatten an object path into a single path
// component by mapping every '/' to '-'.
// This is a temporary normalization of the object path; a better scheme
// is still needed.
func (b bucket) normalizeObjectName(objectName string) string {
	return strings.Map(func(r rune) rune {
		if r == '/' {
			return '-'
		}
		return r
	}, objectName)
}
// PutObject - store objectData in the bucket under objectName.
// With a single disk writer the data is streamed straight through; with
// multiple writers it is split into 10MB chunks and erasure coded
// ("Cauchy") across them. Donut metadata (size, md5, erasure parameters)
// and object metadata (bucket, object, contentType) are written alongside.
func (b bucket) PutObject(objectName, contentType string, objectData io.Reader) error {
	if objectName == "" {
		return errors.New("invalid argument")
	}
	if objectData == nil {
		return errors.New("invalid argument")
	}
	if strings.TrimSpace(contentType) == "" {
		contentType = "application/octet-stream"
	}
	writers, err := b.getDiskWriters(b.normalizeObjectName(objectName), "data")
	if err != nil {
		return err
	}
	for _, writer := range writers {
		defer writer.Close()
	}
	summer := md5.New()
	donutObjectMetadata := make(map[string]string)
	if len(writers) == 1 {
		// Single disk: no erasure coding, copy through while md5 summing.
		mw := io.MultiWriter(writers[0], summer)
		totalLength, err := io.Copy(mw, objectData)
		if err != nil {
			return err
		}
		donutObjectMetadata["size"] = strconv.FormatInt(totalLength, 10)
	} else {
		k, m, err := b.getDataAndParity(len(writers))
		if err != nil {
			return err
		}
		chunks := split.Stream(objectData, 10*1024*1024)
		encoder, err := NewEncoder(k, m, "Cauchy")
		if err != nil {
			return err
		}
		chunkCount := 0
		totalLength := 0
		for chunk := range chunks {
			// BUGFIX: chunks carrying a read error used to be skipped
			// silently, recording a truncated object as a success.
			if chunk.Err != nil {
				return chunk.Err
			}
			totalLength = totalLength + len(chunk.Data)
			// BUGFIX: encode and per-block write errors were ignored.
			encodedBlocks, err := encoder.Encode(chunk.Data)
			if err != nil {
				return err
			}
			summer.Write(chunk.Data)
			for blockIndex, block := range encodedBlocks {
				if _, err := io.Copy(writers[blockIndex], bytes.NewBuffer(block)); err != nil {
					return err
				}
			}
			chunkCount = chunkCount + 1
		}
		donutObjectMetadata["blockSize"] = strconv.Itoa(10 * 1024 * 1024)
		donutObjectMetadata["chunkCount"] = strconv.Itoa(chunkCount)
		donutObjectMetadata["erasureK"] = strconv.FormatUint(uint64(k), 10)
		donutObjectMetadata["erasureM"] = strconv.FormatUint(uint64(m), 10)
		donutObjectMetadata["erasureTechnique"] = "Cauchy"
		donutObjectMetadata["size"] = strconv.Itoa(totalLength)
	}
	dataMd5sum := summer.Sum(nil)
	donutObjectMetadata["created"] = time.Now().Format(time.RFC3339Nano)
	donutObjectMetadata["md5"] = hex.EncodeToString(dataMd5sum)
	if err := b.WriteDonutObjectMetadata(b.normalizeObjectName(objectName), donutObjectMetadata); err != nil {
		return err
	}
	objectMetadata := make(map[string]string)
	objectMetadata["bucket"] = b.name
	objectMetadata["object"] = objectName
	objectMetadata["contentType"] = strings.TrimSpace(contentType)
	if err := b.WriteObjectMetadata(b.normalizeObjectName(objectName), objectMetadata); err != nil {
		return err
	}
	return nil
}

View File

@ -0,0 +1,184 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"bytes"
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"io"
"path"
"strconv"
)
// getDataAndParity - split totalWriters into k data and m parity slice
// counts. k takes the extra slice when totalWriters is odd; both counts
// must fit in a uint8, so the half-count may not exceed 127.
func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err error) {
	if totalWriters <= 1 {
		return 0, 0, errors.New("invalid argument")
	}
	// Integer division deliberately rounds down; no float/abs needed.
	half := totalWriters / 2
	if half > 127 { // (255 / 2) = 127 is the uint8 ceiling
		return 0, 0, errors.New("parity over flow")
	}
	k = uint8(half + totalWriters%2) // data side absorbs the odd remainder
	m = uint8(half)
	return k, m, nil
}
// getObject - pump the stored object into writer, verifying its md5 sum.
// Runs as a goroutine started by GetObject; all failures are delivered to
// the reader via writer.CloseWithError.
func (b bucket) getObject(objectName string, writer *io.PipeWriter, donutObjectMetadata map[string]string) {
	expectedMd5sum, err := hex.DecodeString(donutObjectMetadata["md5"])
	if err != nil {
		writer.CloseWithError(err)
		return
	}
	readers, err := b.getDiskReaders(objectName, "data")
	if err != nil {
		writer.CloseWithError(err)
		return
	}
	hasher := md5.New()
	mwriter := io.MultiWriter(writer, hasher)
	switch len(readers) == 1 {
	case false:
		// Multiple disk slices: the object was erasure coded on write;
		// decode it chunk by chunk with the recorded parameters.
		totalChunks, totalLeft, blockSize, k, m, err := b.metadata2Values(donutObjectMetadata)
		if err != nil {
			writer.CloseWithError(err)
			return
		}
		technique, ok := donutObjectMetadata["erasureTechnique"]
		if !ok {
			writer.CloseWithError(errors.New("missing erasure Technique"))
			return
		}
		encoder, err := NewEncoder(uint8(k), uint8(m), technique)
		if err != nil {
			writer.CloseWithError(err)
			return
		}
		for i := 0; i < totalChunks; i++ {
			decodedData, err := b.decodeData(totalLeft, blockSize, readers, encoder, writer)
			if err != nil {
				writer.CloseWithError(err)
				return
			}
			_, err = io.Copy(mwriter, bytes.NewBuffer(decodedData))
			if err != nil {
				writer.CloseWithError(err)
				return
			}
			totalLeft = totalLeft - int64(blockSize)
		}
	case true:
		// Single slice: stored verbatim, stream it back.
		// BUGFIX: this copied to writer instead of mwriter, so the hasher
		// never saw the data and the md5 comparison below always failed
		// with "checksum mismatch" for single-disk objects.
		_, err := io.Copy(mwriter, readers[0])
		if err != nil {
			writer.CloseWithError(err)
			return
		}
	}
	// check if decodedData md5sum matches
	if !bytes.Equal(expectedMd5sum, hasher.Sum(nil)) {
		writer.CloseWithError(errors.New("checksum mismatch"))
		return
	}
	writer.Close()
	return
}
// decodeData - read one erasure-coded chunk from every reader and decode
// it back into at most blockSize bytes of original data. The final chunk
// of an object may be shorter than blockSize, hence the totalLeft clamp.
func (b bucket) decodeData(totalLeft, blockSize int64, readers []io.ReadCloser, encoder Encoder, writer *io.PipeWriter) ([]byte, error) {
	curBlockSize := totalLeft
	if blockSize < totalLeft {
		curBlockSize = blockSize
	}
	curChunkSize, err := encoder.GetEncodedBlockLen(int(curBlockSize))
	if err != nil {
		return nil, err
	}
	encodedBytes := make([][]byte, len(readers))
	for i, reader := range readers {
		var buf bytes.Buffer
		if _, err := io.CopyN(&buf, reader, int64(curChunkSize)); err != nil {
			return nil, err
		}
		encodedBytes[i] = buf.Bytes()
	}
	return encoder.Decode(encodedBytes, int(curBlockSize))
}
// metadata2Values - pull the numeric fields recorded by PutObject out of
// the donut object metadata map.
// BUGFIX: previously every strconv result was assigned to the same err,
// so each call overwrote the last and only the final parse ("erasureM")
// was ever checked by callers; now each failure is returned immediately.
func (b bucket) metadata2Values(donutObjectMetadata map[string]string) (totalChunks int, totalLeft, blockSize int64, k, m uint64, err error) {
	if totalChunks, err = strconv.Atoi(donutObjectMetadata["chunkCount"]); err != nil {
		return
	}
	if totalLeft, err = strconv.ParseInt(donutObjectMetadata["size"], 10, 64); err != nil {
		return
	}
	if blockSize, err = strconv.ParseInt(donutObjectMetadata["blockSize"], 10, 64); err != nil {
		return
	}
	if k, err = strconv.ParseUint(donutObjectMetadata["erasureK"], 10, 8); err != nil {
		return
	}
	m, err = strconv.ParseUint(donutObjectMetadata["erasureM"], 10, 8)
	return
}
// getDiskReaders - open objectMeta (e.g. "data") for objectName on every
// disk slice of this bucket and return the readers indexed by disk order.
// NOTE(review): readers is re-allocated with make() on every node
// iteration, discarding (and leaking) the readers opened for earlier
// nodes — with more than one node only the last node's readers survive.
// Also assumes disk.GetOrder() < len(disks); confirm both before
// multi-node use.
func (b bucket) getDiskReaders(objectName, objectMeta string) ([]io.ReadCloser, error) {
var readers []io.ReadCloser
nodeSlice := 0
for _, node := range b.nodes {
disks, err := node.ListDisks()
if err != nil {
return nil, err
}
readers = make([]io.ReadCloser, len(disks))
for _, disk := range disks {
bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, disk.GetOrder())
objectPath := path.Join(b.donutName, bucketSlice, objectName, objectMeta)
objectSlice, err := disk.OpenFile(objectPath)
if err != nil {
return nil, err
}
readers[disk.GetOrder()] = objectSlice
}
nodeSlice = nodeSlice + 1
}
return readers, nil
}
// getDiskWriters - create objectMeta (e.g. "data") for objectName on every
// disk slice of this bucket and return the writers indexed by disk order.
// NOTE(review): mirrors getDiskReaders — writers is re-allocated with
// make() on every node iteration, so with more than one node only the
// last node's writers are returned. Confirm intent before multi-node use.
func (b bucket) getDiskWriters(objectName, objectMeta string) ([]io.WriteCloser, error) {
var writers []io.WriteCloser
nodeSlice := 0
for _, node := range b.nodes {
disks, err := node.ListDisks()
if err != nil {
return nil, err
}
writers = make([]io.WriteCloser, len(disks))
for _, disk := range disks {
bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, disk.GetOrder())
objectPath := path.Join(b.donutName, bucketSlice, objectName, objectMeta)
objectSlice, err := disk.MakeFile(objectPath)
if err != nil {
return nil, err
}
writers[disk.GetOrder()] = objectSlice
}
nodeSlice = nodeSlice + 1
}
return writers, nil
}

View File

@ -0,0 +1,201 @@
#!/usr/bin/env bash
#
# Minio Commander, (C) 2015 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# _init - record the minimum tool versions this build needs and probe the
# host platform; MISSING accumulates the names of absent dependencies.
_init() {
## Minimum required versions for build dependencies
GCC_VERSION="4.0"
CLANG_VERSION="3.5"
YASM_VERSION="1.2.0"
GIT_VERSION="1.0"
GO_VERSION="1.4"
OSX_VERSION="10.8"
UNAME=$(uname -sm)
## Check all dependencies are present
MISSING=""
}
###
#
# Takes two arguments
# arg1: version number in `x.x.x` format
# arg2: version number in `x.x.x` format
#
# example: check_version "$version1" "$version2"
#
# returns:
# 0 - Installed version is equal to required
# 1 - Installed version is greater than required
# 2 - Installed version is lesser than required
# 3 - If args have length zero
#
####
check_version () {
## validate args
[[ -z "$1" ]] && return 3
[[ -z "$2" ]] && return 3
if [[ $1 == $2 ]]; then
return 0
fi
# Split both versions on '.' into arrays for component-wise compare.
local IFS=.
local i ver1=($1) ver2=($2)
# fill empty fields in ver1 with zeros
for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
ver1[i]=0
done
for ((i=0; i<${#ver1[@]}; i++)); do
if [[ -z ${ver2[i]} ]]; then
# fill empty fields in ver2 with zeros
ver2[i]=0
fi
# 10# forces base-10 so components like "08" are not read as octal.
if ((10#${ver1[i]} > 10#${ver2[i]})); then
return 1
fi
if ((10#${ver1[i]} < 10#${ver2[i]})); then
## Installed version is lesser than required - Bad condition
return 2
fi
done
return 0
}
# check_golang_env - verify GOROOT and GOPATH are set, printing a pointer
# to the install docs and exiting otherwise.
check_golang_env() {
    # BUGFIX: the old `echo ${GOROOT:?} 2>&1 >/dev/null` aborted the whole
    # script during expansion when the variable was unset (so the friendly
    # message below never printed), and the redirection order leaked the
    # expansion error to the terminal. Test the variables directly.
    if [ -z "${GOROOT}" ]; then
        echo "ERROR"
        echo "GOROOT environment variable missing, please refer to Go installation document"
        echo "https://github.com/Minio-io/minio/blob/master/BUILDDEPS.md#install-go-13"
        exit 1
    fi
    if [ -z "${GOPATH}" ]; then
        echo "ERROR"
        echo "GOPATH environment variable missing, please refer to Go installation document"
        echo "https://github.com/Minio-io/minio/blob/master/BUILDDEPS.md#install-go-13"
        exit 1
    fi
}
# is_supported_os - accept Linux unconditionally; accept Darwin at or
# above OSX_VERSION; reject everything else.
is_supported_os() {
    case ${UNAME%% *} in
        "Linux")
            os="linux"
            ;;
        "Darwin")
            osx_host_version=$(env sw_vers -productVersion)
            check_version "${osx_host_version}" "${OSX_VERSION}"
            # NOTE(review): `die` is not defined in this script — confirm
            # it is provided elsewhere or replace with echo + exit.
            [[ $? -ge 2 ]] && die "Minimum OSX version supported is ${OSX_VERSION}"
            ;;
        *)
            # BUGFIX: the default pattern was quoted as "*", which matches
            # only the literal string '*'; unsupported platforms previously
            # fell through silently instead of reaching this branch.
            echo "Exiting.. unsupported operating system found"
            exit 1;
    esac
}
# is_supported_arch - exit unless the machine hardware (second word of
# `uname -sm`) is x86_64.
is_supported_arch() {
local supported
case ${UNAME##* } in
"x86_64")
supported=1
;;
*)
supported=0
;;
esac
if [ $supported -eq 0 ]; then
echo "Invalid arch: ${UNAME} not supported, please use x86_64/amd64"
exit 1;
fi
}
# check_deps - probe each build tool's version with check_version and
# append any that are absent or too old to MISSING.
check_deps() {
check_version "$(env go version 2>/dev/null | sed 's/^.* go\([0-9.]*\).*$/\1/')" "${GO_VERSION}"
if [ $? -ge 2 ]; then
MISSING="${MISSING} golang(1.4)"
fi
check_version "$(env git --version 2>/dev/null | sed -e 's/^.* \([0-9.\].*\).*$/\1/' -e 's/^\([0-9.\]*\).*/\1/g')" "${GIT_VERSION}"
if [ $? -ge 2 ]; then
MISSING="${MISSING} git"
fi
# Compiler requirement differs by platform: gcc on Linux, the clang
# shipped as gcc by Xcode CLI tools on Darwin.
case ${UNAME%% *} in
"Linux")
check_version "$(env gcc --version 2>/dev/null | sed 's/^.* \([0-9.]*\).*$/\1/' | head -1)" "${GCC_VERSION}"
if [ $? -ge 2 ]; then
MISSING="${MISSING} build-essential"
fi
;;
"Darwin")
check_version "$(env gcc --version 2>/dev/null | sed 's/^.* \([0-9.]*\).*$/\1/' | head -1)" "${CLANG_VERSION}"
if [ $? -ge 2 ]; then
MISSING="${MISSING} xcode-cli"
fi
;;
# NOTE(review): the quoted "*" matches only a literal '*', so this is
# never taken as a default branch; harmless here since it does nothing.
"*")
;;
esac
check_version "$(env yasm --version 2>/dev/null | sed 's/^.* \([0-9.]*\).*$/\1/' | head -1)" "${YASM_VERSION}"
if [ $? -ge 2 ]; then
MISSING="${MISSING} yasm(1.2.0)"
fi
env mkdocs help >/dev/null 2>&1
if [ $? -ne 0 ]; then
MISSING="${MISSING} mkdocs"
fi
}
# main - run all platform and dependency checks in order; print the list
# of missing build tools and abort if anything is absent.
main() {
echo -n "Check for supported arch.. "
is_supported_arch
echo -n "Check for supported os.. "
is_supported_os
echo -n "Checking if proper environment variables are set.. "
check_golang_env
echo "Done"
echo "Using GOPATH=${GOPATH} and GOROOT=${GOROOT}"
echo -n "Checking dependencies for Minio.. "
check_deps
## If dependencies are missing, warn the user and abort
if [ "x${MISSING}" != "x" ]; then
echo "ERROR"
echo
echo "The following build tools are missing:"
echo
echo "** ${MISSING} **"
echo
echo "Please install them "
echo "${MISSING}"
echo
echo "Follow https://github.com/Minio-io/minio/blob/master/BUILDDEPS.md for further instructions"
exit 1
fi
echo "Done"
}
# Script entry point: initialize globals, then run the checks.
_init && main "$@"

View File

@ -0,0 +1,18 @@
#!/usr/bin/env bash
# Generate cmd/donut-cli/build-constants.go with the current git commit
# hash baked in. Regenerated on every build by the Makefile pre-build step.
CONST_FILE="${PWD}/cmd/donut-cli/build-constants.go"
cat > "$CONST_FILE" <<EOF
/*
 * ** DO NOT EDIT THIS FILE. THIS FILE IS AUTO GENERATED BY RUNNING MAKE **
 */
package main
const (
	gitCommitHash = "__GIT_COMMIT_HASH__"
)
EOF
commit_id=$(git log --format="%H" -n 1)
# BUGFIX: plain `sed -i` without a suffix argument fails on BSD/OSX sed
# (this project explicitly supports Darwin); use an explicit backup suffix
# and remove it. Paths are now quoted so directories with spaces work.
sed -i.bak "s/__GIT_COMMIT_HASH__/$commit_id/" "$CONST_FILE" && rm -f "${CONST_FILE}.bak"

View File

@ -0,0 +1 @@
donut-cli

View File

@ -0,0 +1,58 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio-io/cli"
)
// doAttachDiskCmd - cli handler for "donut attach": move each disk named
// on the command line into the active set of the donut given via --name,
// removing it from the inactive set, then persist the updated config.
func doAttachDiskCmd(c *cli.Context) {
if !c.Args().Present() {
log.Fatalln("no args?")
}
disks := c.Args()
mcDonutConfigData, err := loadDonutConfig()
if err != nil {
log.Fatalln(err)
}
donutName := c.String("name")
if donutName == "" {
log.Fatalln("Invalid --donut <name> is needed for attach")
}
if _, ok := mcDonutConfigData.Donuts[donutName]; !ok {
log.Fatalf("Requested donut name %s does not exist, please use ``mc donut make`` first\n", donutName)
}
// Only the "localhost" node is supported by this config layout.
if _, ok := mcDonutConfigData.Donuts[donutName].Node["localhost"]; !ok {
log.Fatalln("Corrupted donut config, please consult donut experts")
}
activeDisks := mcDonutConfigData.Donuts[donutName].Node["localhost"].ActiveDisks
inactiveDisks := mcDonutConfigData.Donuts[donutName].Node["localhost"].InactiveDisks
for _, disk := range disks {
// Attach is idempotent: appendUniq skips disks already active.
activeDisks = appendUniq(activeDisks, disk)
inactiveDisks = deleteFromSlice(inactiveDisks, disk)
}
mcDonutConfigData.Donuts[donutName].Node["localhost"] = nodeConfig{
ActiveDisks: activeDisks,
InactiveDisks: inactiveDisks,
}
if err := saveDonutConfig(mcDonutConfigData); err != nil {
log.Fatalln(err)
}
}

View File

@ -0,0 +1,110 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"strings"
"net/url"
)
// url2Object converts a URL into its bucket and object name components.
// A URL with an empty path is a valid case and yields empty names.
// Returns an error when urlStr cannot be parsed.
func url2Object(urlStr string) (bucketName, objectName string, err error) {
	u, err := url.Parse(urlStr)
	if err != nil {
		// The original ignored this error and dereferenced a nil *url.URL.
		return "", "", err
	}
	if u.Path == "" {
		// No bucket name passed. It is a valid case
		return "", "", nil
	}
	// Path is "/bucket/object..."; index 0 is the empty segment before "/".
	splits := strings.SplitN(u.Path, "/", 3)
	switch len(splits) {
	case 2:
		bucketName = splits[1]
	case 3:
		bucketName = splits[1]
		objectName = splits[2]
	}
	return bucketName, objectName, nil
}
// isStringInSlice - report whether item occurs anywhere in items.
func isStringInSlice(items []string, item string) bool {
	for i := range items {
		if items[i] == item {
			return true
		}
	}
	return false
}
// deleteFromSlice - return a new slice with every occurrence of item removed.
// The input slice is left untouched.
func deleteFromSlice(items []string, item string) []string {
	var kept []string
	for _, candidate := range items {
		if candidate != item {
			kept = append(kept, candidate)
		}
	}
	return kept
}
// appendUniq - append i to slice only when it is not already present.
func appendUniq(slice []string, i string) []string {
	for _, existing := range slice {
		if existing == i {
			return slice
		}
	}
	return append(slice, i)
}
// isalnum - report whether c is an ASCII digit or letter.
func isalnum(c rune) bool {
	switch {
	case c >= '0' && c <= '9':
		return true
	case c >= 'A' && c <= 'Z':
		return true
	case c >= 'a' && c <= 'z':
		return true
	default:
		return false
	}
}
// isValidDonutName - verify donutName to be valid.
// A name is valid when it is 1-1024 characters long and contains only
// ASCII alphanumerics or one of '-', '.', '_', '~'.
func isValidDonutName(donutName string) bool {
	if len(donutName) == 0 || len(donutName) > 1024 {
		return false
	}
	for _, char := range donutName {
		// The original used a C-looking `case '-': case '.': ...` ladder of
		// empty cases; in Go each empty case silently accepts, which reads
		// like a fallthrough bug. Combined conditions make the intent clear.
		switch {
		case '0' <= char && char <= '9':
		case 'A' <= char && char <= 'Z':
		case 'a' <= char && char <= 'z':
		case char == '-' || char == '.' || char == '_' || char == '~':
		default:
			return false
		}
	}
	return true
}
// getNodeMap - flatten a nodeConfig map into hostname -> active disk list,
// the shape expected by donut.NewDonut.
func getNodeMap(node map[string]nodeConfig) map[string][]string {
	nodes := make(map[string][]string, len(node))
	for hostname, cfg := range node {
		nodes[hostname] = cfg.ActiveDisks
	}
	return nodes
}

View File

@ -0,0 +1,105 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"log"
"os"
"os/user"
"path"
"encoding/json"
"io/ioutil"
)
// Location of the donut configuration, relative to the user's home directory.
const (
	donutConfigDir      = ".minio/donut"
	donutConfigFilename = "donuts.json"
)

// nodeConfig describes the disks attached to a single node.
type nodeConfig struct {
	ActiveDisks   []string // disks currently serving data
	InactiveDisks []string // disks detached from the donut but remembered
}

// donutConfig maps a hostname (e.g. "localhost") to its node configuration.
type donutConfig struct {
	Node map[string]nodeConfig
}

// mcDonutConfig is the top-level on-disk configuration: donut name -> config.
type mcDonutConfig struct {
	Donuts map[string]donutConfig
}
// getDonutConfigDir - absolute path of the donut configuration directory,
// derived from the current user's home directory. Exits the process when
// the current user cannot be determined.
func getDonutConfigDir() string {
	currentUser, err := user.Current()
	if err != nil {
		log.Fatalln(fmt.Sprintf("Unable to obtain user's home directory. \nError: %s", err))
	}
	return path.Join(currentUser.HomeDir, donutConfigDir)
}
// getDonutConfigFilename - full path of the donut configuration file.
func getDonutConfigFilename() string {
	// Use the donutConfigFilename constant rather than repeating the
	// "donuts.json" literal, so the constant stays authoritative.
	return path.Join(getDonutConfigDir(), donutConfigFilename)
}
// saveDonutConfig writes configuration data in json format to the donut
// config file, creating the configuration directory when needed. The file
// is truncated and written with mode 0600.
func saveDonutConfig(donutConfigData *mcDonutConfig) error {
	jsonConfig, err := json.MarshalIndent(donutConfigData, "", "\t")
	if err != nil {
		return err
	}
	// MkdirAll returns nil when the directory already exists, so the
	// original's os.IsExist() special-case was unnecessary.
	if err := os.MkdirAll(getDonutConfigDir(), 0755); err != nil {
		return err
	}
	// WriteFile opens with O_WRONLY|O_CREATE|O_TRUNC — the same flags the
	// original used — and closes the file for us.
	return ioutil.WriteFile(getDonutConfigFilename(), jsonConfig, 0600)
}
// loadDonutConfig reads and unmarshals the donut configuration file.
// A missing file surfaces as an error satisfying os.IsNotExist, which
// callers (e.g. doMakeDonutCmd) rely on.
func loadDonutConfig() (donutConfigData *mcDonutConfig, err error) {
	// ReadFile already reports a *PathError for a missing file, so the
	// original's separate os.Stat was a redundant TOCTOU-prone check.
	configBytes, err := ioutil.ReadFile(getDonutConfigFilename())
	if err != nil {
		return nil, err
	}
	if err := json.Unmarshal(configBytes, &donutConfigData); err != nil {
		return nil, err
	}
	return donutConfigData, nil
}

View File

@ -0,0 +1,66 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"log"
"github.com/minio-io/cli"
)
// doDetachDiskCmd - detach one or more disks from the localhost node of an
// existing donut and persist the updated configuration. Disks that are not
// currently active are reported and skipped.
func doDetachDiskCmd(c *cli.Context) {
	if !c.Args().Present() {
		log.Fatalln("no args?")
	}
	disks := c.Args()
	mcDonutConfigData, err := loadDonutConfig()
	if err != nil {
		log.Fatalln(err.Error())
	}
	donutName := c.String("name")
	if donutName == "" {
		log.Fatalln("Invalid --donut <name> is needed for attach")
	}
	if _, ok := mcDonutConfigData.Donuts[donutName]; !ok {
		msg := fmt.Sprintf("Requested donut name <%s> does not exist, please use ``mc donut make`` first", donutName)
		log.Fatalln(msg)
	}
	if _, ok := mcDonutConfigData.Donuts[donutName].Node["localhost"]; !ok {
		// The original wrapped this constant string in a no-verb
		// fmt.Sprintf (flagged by go vet / staticcheck S1039).
		log.Fatalln("Corrupted donut config, please consult donut experts")
	}
	inactiveDisks := mcDonutConfigData.Donuts[donutName].Node["localhost"].InactiveDisks
	activeDisks := mcDonutConfigData.Donuts[donutName].Node["localhost"].ActiveDisks
	for _, disk := range disks {
		if isStringInSlice(activeDisks, disk) {
			activeDisks = deleteFromSlice(activeDisks, disk)
			inactiveDisks = appendUniq(inactiveDisks, disk)
		} else {
			msg := fmt.Sprintf("Cannot detach disk: <%s>, not part of donut <%s>", disk, donutName)
			log.Println(msg)
		}
	}
	mcDonutConfigData.Donuts[donutName].Node["localhost"] = nodeConfig{
		ActiveDisks:   activeDisks,
		InactiveDisks: inactiveDisks,
	}
	if err := saveDonutConfig(mcDonutConfigData); err != nil {
		log.Fatalln(err.Error())
	}
}

View File

@ -0,0 +1,84 @@
/*
* Minimalist Object Storage, (C) 2014,2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"os"
"strings"
"text/tabwriter"
"text/template"
"github.com/minio-io/cli"
"github.com/minio-io/donut"
)
// infoTemplate - text/template used by doInfoDonutCmd to render a
// map of donut name -> node name -> disk list.
var infoTemplate = `
{{range $donutName, $nodes := .}}
DONUTNAME: {{$donutName}}
{{range $nodeName, $disks := $nodes}}
NODE: {{$nodeName}}
DISKS: {{$disks}}
{{end}}
{{end}}
`
// infoPrinter - execute templ against data onto a tab-aligned stdout
// writer; panics when the template fails to execute.
var infoPrinter = func(templ string, data interface{}) {
	funcMap := template.FuncMap{
		"join": strings.Join,
	}
	tw := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
	tmpl := template.Must(template.New("help").Funcs(funcMap).Parse(templ))
	if err := tmpl.Execute(tw, data); err != nil {
		panic(err)
	}
	tw.Flush()
}
// doInfoDonutCmd - pretty print the node and disk layout of an existing
// donut, as reported by donut.Info().
func doInfoDonutCmd(c *cli.Context) {
	if !c.Args().Present() {
		log.Fatalln("no args?")
	}
	if len(c.Args()) != 1 {
		log.Fatalln("invalid number of args")
	}
	donutName := c.Args().First()
	if !isValidDonutName(donutName) {
		log.Fatalln("Invalid donutName")
	}
	mcDonutConfigData, err := loadDonutConfig()
	if err != nil {
		log.Fatalln(err)
	}
	donutCfg, ok := mcDonutConfigData.Donuts[donutName]
	if !ok {
		log.Fatalln("donut does not exist")
	}
	d, err := donut.NewDonut(donutName, getNodeMap(donutCfg.Node))
	if err != nil {
		log.Fatalln(err)
	}
	info, err := d.Info()
	if err != nil {
		log.Fatalln(err)
	}
	donutNodes := map[string]map[string][]string{donutName: info}
	infoPrinter(infoTemplate, donutNodes)
}

View File

@ -0,0 +1,36 @@
/*
* Minimalist Object Storage, (C) 2014,2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"log"
"github.com/minio-io/cli"
)
// doListDonutCmd - print the name of every configured donut, one per line.
// (The original comment claimed this creates a bucket; it only lists.)
func doListDonutCmd(c *cli.Context) {
	mcDonutConfigData, err := loadDonutConfig()
	if err != nil {
		log.Fatalln(err)
	}
	for donutName := range mcDonutConfigData.Donuts {
		fmt.Println(donutName)
	}
}

View File

@ -0,0 +1,80 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"log"
"os"
"github.com/minio-io/cli"
)
// newDonutConfig - build a fresh configuration containing a single donut
// with an empty localhost node (no active or inactive disks yet).
func newDonutConfig(donutName string) (*mcDonutConfig, error) {
	cfg := &mcDonutConfig{Donuts: make(map[string]donutConfig)}
	cfg.Donuts[donutName] = donutConfig{
		Node: map[string]nodeConfig{
			"localhost": {
				ActiveDisks:   make([]string, 0),
				InactiveDisks: make([]string, 0),
			},
		},
	}
	return cfg, nil
}
// doMakeDonutCmd - create a new donut entry in the configuration, creating
// the configuration file itself on first use. An existing donut of the same
// name is reported, not overwritten.
func doMakeDonutCmd(c *cli.Context) {
	if !c.Args().Present() {
		log.Fatalln("no args?")
	}
	if len(c.Args()) != 1 {
		log.Fatalln("invalid number of args")
	}
	donutName := c.Args().First()
	if !isValidDonutName(donutName) {
		log.Fatalln("Invalid donutName")
	}
	mcDonutConfigData, err := loadDonutConfig()
	switch {
	case os.IsNotExist(err):
		// First run: write a brand new configuration for this donut.
		mcDonutConfigData, err = newDonutConfig(donutName)
		if err != nil {
			log.Fatalln(err)
		}
		if err := saveDonutConfig(mcDonutConfigData); err != nil {
			log.Fatalln(err)
		}
		return
	case err != nil:
		log.Fatalln(err)
	}
	if _, ok := mcDonutConfigData.Donuts[donutName]; ok {
		log.Println(fmt.Sprintf("donut: %s already exists", donutName))
		return
	}
	mcDonutConfigData.Donuts[donutName] = donutConfig{
		Node: make(map[string]nodeConfig),
	}
	mcDonutConfigData.Donuts[donutName].Node["localhost"] = nodeConfig{
		ActiveDisks:   make([]string, 0),
		InactiveDisks: make([]string, 0),
	}
	if err := saveDonutConfig(mcDonutConfigData); err != nil {
		log.Fatalln(err)
	}
}

View File

@ -0,0 +1,95 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"github.com/minio-io/cli"
)
// makeDonutCmd - "donut make <name>": create a new donut.
var makeDonutCmd = cli.Command{
	Name:        "make",
	Usage:       "make donut",
	Description: "Make a new donut",
	Action:      doMakeDonutCmd,
}

// listDonutCmd - "donut list": print all configured donuts.
var listDonutCmd = cli.Command{
	Name:        "list",
	Usage:       "list donuts",
	Description: "list all donuts locally or remote",
	Action:      doListDonutCmd,
}

// attachDiskCmd - "donut attach --name <donut> <disk>...": add disks.
var attachDiskCmd = cli.Command{
	Name:        "attach",
	Usage:       "attach disk",
	Description: "Attach disk to an existing donut",
	Action:      doAttachDiskCmd,
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "name",
			Usage: "Donut name",
		},
	},
}

// detachDiskCmd - "donut detach --name <donut> <disk>...": remove disks.
var detachDiskCmd = cli.Command{
	Name:        "detach",
	Usage:       "detach disk",
	Description: "Detach disk from an existing donut",
	Action:      doDetachDiskCmd,
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "name",
			Usage: "Donut name",
		},
	},
}

// healDonutCmd - "donut heal": repair a donut (currently a stub).
var healDonutCmd = cli.Command{
	Name:        "heal",
	Usage:       "heal donut",
	Description: "Heal donut with any errors",
	Action:      doHealDonutCmd,
}

// rebalanceDonutCmd - "donut rebalance": redistribute data after adding disks.
var rebalanceDonutCmd = cli.Command{
	Name:        "rebalance",
	Usage:       "rebalance donut",
	Description: "Rebalance data on donut after adding disks",
	Action:      doRebalanceDonutCmd,
}

// infoDonutCmd - "donut info <name>": pretty print donut layout.
var infoDonutCmd = cli.Command{
	Name:        "info",
	Usage:       "information about donut",
	Description: "Pretty print donut information",
	Action:      doInfoDonutCmd,
}

// donutOptions - every subcommand registered with the donut CLI app.
var donutOptions = []cli.Command{
	makeDonutCmd,
	listDonutCmd,
	attachDiskCmd,
	detachDiskCmd,
	healDonutCmd,
	rebalanceDonutCmd,
	infoDonutCmd,
}
// doHealDonutCmd - placeholder for "donut heal"; healing is not implemented yet.
func doHealDonutCmd(c *cli.Context) {
}

View File

@ -0,0 +1,32 @@
package main
import (
"log"
"github.com/minio-io/cli"
"github.com/minio-io/donut"
)
// doRebalanceDonutCmd - rebalance data across the disks of an existing
// donut, delegating to donut.Rebalance().
func doRebalanceDonutCmd(c *cli.Context) {
	if !c.Args().Present() {
		log.Fatalln("no args?")
	}
	donutName := c.Args().First()
	if !isValidDonutName(donutName) {
		log.Fatalln("Invalid donutName")
	}
	mcDonutConfigData, err := loadDonutConfig()
	if err != nil {
		log.Fatalln(err)
	}
	donutCfg, ok := mcDonutConfigData.Donuts[donutName]
	if !ok {
		log.Fatalln("donut does not exist")
	}
	d, err := donut.NewDonut(donutName, getNodeMap(donutCfg.Node))
	if err != nil {
		log.Fatalln(err)
	}
	if err := d.Rebalance(); err != nil {
		log.Fatalln(err)
	}
}

View File

@ -0,0 +1,33 @@
/*
* Minimalist Object Storage, (C) 2014,2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"os"
"github.com/minio-io/cli"
)
// main - entry point of the donut command line tool; wires the donut
// subcommands into a cli application and dispatches on os.Args.
func main() {
	app := cli.NewApp()
	app.Usage = ""
	// gitCommitHash is generated at build time by the build script.
	app.Version = gitCommitHash
	app.Commands = donutOptions
	app.Author = "Minio.io"
	app.EnableBashCompletion = true
	app.Run(os.Args)
}

150
Godeps/_workspace/src/github.com/minio-io/donut/disk.go generated vendored Normal file
View File

@ -0,0 +1,150 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"errors"
"os"
"path"
"syscall"
"io/ioutil"
)
// disk - a single local filesystem directory participating in a donut.
type disk struct {
	root       string            // directory this disk serves from
	order      int               // stable position of the disk within its node
	filesystem map[string]string // cached filesystem details (FSType, MountPoint, ...)
}
// NewDisk - instantiate a new disk rooted at diskPath with position
// diskOrder within its node. The path must be an existing directory on a
// recognized filesystem type.
func NewDisk(diskPath string, diskOrder int) (Disk, error) {
	if diskPath == "" || diskOrder < 0 {
		return nil, errors.New("invalid argument")
	}
	// Statfs yields the filesystem type; Stat confirms a directory.
	var fsStat syscall.Statfs_t
	if err := syscall.Statfs(diskPath, &fsStat); err != nil {
		return nil, err
	}
	info, err := os.Stat(diskPath)
	if err != nil {
		return nil, err
	}
	if !info.IsDir() {
		return nil, syscall.ENOTDIR
	}
	d := disk{
		root:       diskPath,
		order:      diskOrder,
		filesystem: make(map[string]string),
	}
	fsType := d.getFSType(fsStat.Type)
	if fsType == "UNKNOWN" {
		return nil, errors.New("unsupported filesystem")
	}
	d.filesystem["FSType"] = fsType
	d.filesystem["MountPoint"] = d.root
	return d, nil
}
// GetPath - the root directory this disk operates on.
func (d disk) GetPath() string {
	return d.root
}
// GetOrder - the disk's stable position within its node.
func (d disk) GetOrder() int {
	return d.order
}
// GetFSInfo - filesystem details for this disk, refreshed with live
// Total/Free sizes; returns nil when statfs fails.
func (d disk) GetFSInfo() map[string]string {
	var fsStat syscall.Statfs_t
	if err := syscall.Statfs(d.root, &fsStat); err != nil {
		return nil
	}
	d.filesystem["Total"] = d.formatBytes(fsStat.Bsize * int64(fsStat.Blocks))
	d.filesystem["Free"] = d.formatBytes(fsStat.Bsize * int64(fsStat.Bfree))
	return d.filesystem
}
// MakeDir - create dirname (and any missing parents) under the disk root,
// with mode 0700.
func (d disk) MakeDir(dirname string) error {
	return os.MkdirAll(path.Join(d.root, dirname), 0700)
}
// ListDir - list only the sub-directories of dirname under the disk root.
func (d disk) ListDir(dirname string) ([]os.FileInfo, error) {
	entries, err := ioutil.ReadDir(path.Join(d.root, dirname))
	if err != nil {
		return nil, err
	}
	var dirs []os.FileInfo
	for _, entry := range entries {
		// Include only directories, ignore everything else
		if !entry.IsDir() {
			continue
		}
		dirs = append(dirs, entry)
	}
	return dirs, nil
}
// ListFiles - list only the regular files of dirname under the disk root.
func (d disk) ListFiles(dirname string) ([]os.FileInfo, error) {
	entries, err := ioutil.ReadDir(path.Join(d.root, dirname))
	if err != nil {
		return nil, err
	}
	var files []os.FileInfo
	for _, entry := range entries {
		// Include only regular files, ignore everything else
		if !entry.Mode().IsRegular() {
			continue
		}
		files = append(files, entry)
	}
	return files, nil
}
// MakeFile - create filename (parent directories included) under the disk
// root, opened for writing. O_EXCL makes creation fail if the file exists.
func (d disk) MakeFile(filename string) (*os.File, error) {
	if filename == "" {
		// lowercased for consistency with the package's other error strings
		// and Go convention (error strings should not be capitalized)
		return nil, errors.New("invalid argument")
	}
	filePath := path.Join(d.root, filename)
	// Create directories if they don't exist
	if err := os.MkdirAll(path.Dir(filePath), 0700); err != nil {
		return nil, err
	}
	dataFile, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
	if err != nil {
		return nil, err
	}
	return dataFile, nil
}
// OpenFile - open filename under the disk root for reading.
func (d disk) OpenFile(filename string) (*os.File, error) {
	if filename == "" {
		// lowercased for consistency with the package's other error strings
		// and Go convention (error strings should not be capitalized)
		return nil, errors.New("invalid argument")
	}
	dataFile, err := os.Open(path.Join(d.root, filename))
	if err != nil {
		return nil, err
	}
	return dataFile, nil
}
// SaveConfig - persist per-disk configuration; not implemented yet.
func (d disk) SaveConfig() error {
	return errors.New("Not Implemented")
}
// LoadConfig - load per-disk configuration; not implemented yet.
func (d disk) LoadConfig() error {
	return errors.New("Not Implemented")
}

View File

@ -0,0 +1,63 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"fmt"
"strconv"
"strings"
)
// Convert bytes to a human readable string, e.g. 2.00 MB, 64.20 KB, 52 B.
// Values at exactly a unit boundary stay in the smaller unit (strict >),
// matching the original threshold behavior.
func (d disk) formatBytes(i int64) string {
	value := float64(i)
	unit := ""
	threshold := int64(1024)
	for _, candidate := range []string{"KB", "MB", "GB", "TB"} {
		if i <= threshold {
			break
		}
		// Divide step-by-step, exactly as the original chained divisions did.
		value /= 1024
		unit = candidate
		threshold *= 1024
	}
	if unit == "" {
		return fmt.Sprintf("%d B", i)
	}
	return strings.Trim(fmt.Sprintf("%.02f %s", value, unit), " ")
}
// fsType2StringMap - statfs f_type values (lowercase hex) mapped to
// filesystem names; only these filesystems are accepted by NewDisk.
var fsType2StringMap = map[string]string{
	"137d":     "EXT",
	"ef51":     "EXT2OLD",
	"ef53":     "EXT4",
	"4244":     "HFS",
	"5346544e": "NTFS",
	"4d44":     "MSDOS",
	"52654973": "REISERFS",
	"6969":     "NFS",
	"01021994": "TMPFS",
	"58465342": "XFS",
}
// getFSType - map a statfs f_type value to a human readable filesystem
// name, or "UNKNOWN" for unrecognized filesystems.
func (d disk) getFSType(fsType int64) string {
	fsTypeHex := strconv.FormatInt(fsType, 16)
	fsTypeString, ok := fsType2StringMap[fsTypeHex]
	// was `ok == false` — flagged by staticcheck (S1002)
	if !ok {
		return "UNKNOWN"
	}
	return fsTypeString
}

View File

@ -0,0 +1,200 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"encoding/json"
"errors"
"fmt"
"path"
"strings"
)
// donut - top level donut abstraction: a named collection of nodes and the
// buckets spread across their disks.
type donut struct {
	name    string
	buckets map[string]Bucket // cached bucket handles, keyed by bucket name
	nodes   map[string]Node   // attached nodes, keyed by hostname
}
// attachDonutNode - wrapper function to instantiate a new node for the
// associated donut based on the configuration: each disk is created with
// its list position as order, given the donut's top-level directory, and
// attached to the node before the node joins the donut.
func (d donut) attachDonutNode(hostname string, disks []string) error {
	node, err := NewNode(hostname)
	if err != nil {
		return err
	}
	for order, diskPath := range disks {
		// Order is necessary for maps, keep order number separately
		newDisk, err := NewDisk(diskPath, order)
		if err != nil {
			return err
		}
		if err := newDisk.MakeDir(d.name); err != nil {
			return err
		}
		if err := node.AttachDisk(newDisk); err != nil {
			return err
		}
	}
	return d.AttachNode(node)
}
// NewDonut - instantiate a new donut from a hostname -> disk-paths map.
// Every node must contribute at least one disk.
func NewDonut(donutName string, nodeDiskMap map[string][]string) (Donut, error) {
	if donutName == "" || len(nodeDiskMap) == 0 {
		return nil, errors.New("invalid argument")
	}
	d := donut{
		name:    donutName,
		nodes:   make(map[string]Node),
		buckets: make(map[string]Bucket),
	}
	for hostname, disks := range nodeDiskMap {
		if len(disks) == 0 {
			return nil, errors.New("invalid number of disks per node")
		}
		if err := d.attachDonutNode(hostname, disks); err != nil {
			return nil, err
		}
	}
	return d, nil
}
// MakeBucket - create a new bucket and its backing slice directory on
// every disk of every node.
func (d donut) MakeBucket(bucketName string) error {
	// TrimSpace covers the empty string as well, so a single check
	// suffices (the original also tested bucketName == "" redundantly).
	if strings.TrimSpace(bucketName) == "" {
		return errors.New("invalid argument")
	}
	if _, ok := d.buckets[bucketName]; ok {
		return errors.New("bucket exists")
	}
	bucket, err := NewBucket(bucketName, d.name, d.nodes)
	if err != nil {
		return err
	}
	d.buckets[bucketName] = bucket
	// Slice directory name encodes bucket, node number and disk order.
	// NOTE(review): nodeNumber depends on Go's randomized map iteration
	// order, so the same node may get different numbers across runs —
	// confirm this is intended.
	nodeNumber := 0
	for _, node := range d.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return err
		}
		for _, disk := range disks {
			bucketSlice := fmt.Sprintf("%s$%d$%d", bucketName, nodeNumber, disk.GetOrder())
			if err := disk.MakeDir(path.Join(d.name, bucketSlice)); err != nil {
				return err
			}
		}
		nodeNumber++
	}
	return nil
}
// ListBuckets - discover buckets by scanning the donut directory on every
// disk of every node; refreshes and returns the cached bucket map.
func (d donut) ListBuckets() (map[string]Bucket, error) {
	for _, node := range d.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return nil, err
		}
		for _, disk := range disks {
			dirs, err := disk.ListDir(d.name)
			if err != nil {
				return nil, err
			}
			for _, dir := range dirs {
				// Slice directories are named "<bucket>$<node>$<order>".
				parts := strings.Split(dir.Name(), "$")
				if len(parts) < 3 {
					return nil, errors.New("corrupted backend")
				}
				bucketName := parts[0]
				// we dont need this NewBucket once we cache these
				bucket, err := NewBucket(bucketName, d.name, d.nodes)
				if err != nil {
					return nil, err
				}
				d.buckets[bucketName] = bucket
			}
		}
	}
	return d.buckets, nil
}
// Heal - repair missing or corrupted bucket slices; not implemented yet.
func (d donut) Heal() error {
	return errors.New("Not Implemented")
}
// Info - describe the donut as hostname -> list of disk names ordered by
// each disk's order number.
func (d donut) Info() (nodeDiskMap map[string][]string, err error) {
	nodeDiskMap = make(map[string][]string)
	for nodeName, node := range d.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return nil, err
		}
		// NOTE(review): assumes disk orders are exactly 0..len(disks)-1;
		// an out-of-range GetOrder() would panic below — confirm invariant.
		diskList := make([]string, len(disks))
		for diskName, disk := range disks {
			diskList[disk.GetOrder()] = diskName
		}
		nodeDiskMap[nodeName] = diskList
	}
	return nodeDiskMap, nil
}
// AttachNode - register node under its hostname; nil nodes are rejected.
func (d donut) AttachNode(node Node) error {
	if node == nil {
		return errors.New("invalid argument")
	}
	d.nodes[node.GetNodeName()] = node
	return nil
}
// DetachNode - remove node from the donut; detaching an unknown node is a
// no-op.
func (d donut) DetachNode(node Node) error {
	// Guard against nil, matching AttachNode; the original would panic
	// calling GetNodeName on a nil interface value.
	if node == nil {
		return errors.New("invalid argument")
	}
	delete(d.nodes, node.GetNodeName())
	return nil
}
// SaveConfig - serialize the donut's hostname -> disk-paths map as JSON
// into the donut config file on every disk.
func (d donut) SaveConfig() error {
	nodeDiskMap := make(map[string][]string)
	donutConfigPath := path.Join(d.name, donutConfig) // invariant, hoisted out of the loops
	for hostname, node := range d.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return err
		}
		// Build this node's ordered disk-path list first. The original
		// indexed into nodeDiskMap[hostname] while it was still a nil
		// slice, which panics at runtime.
		diskPaths := make([]string, len(disks))
		for _, disk := range disks {
			diskPaths[disk.GetOrder()] = disk.GetPath()
		}
		nodeDiskMap[hostname] = diskPaths
		for _, disk := range disks {
			donutConfigWriter, err := disk.MakeFile(donutConfigPath)
			// Check the error before touching the writer; the original
			// deferred Close() on a possibly-nil file first (and deferred
			// inside a loop, delaying all closes to function return).
			if err != nil {
				return err
			}
			if err := json.NewEncoder(donutConfigWriter).Encode(nodeDiskMap); err != nil {
				donutConfigWriter.Close()
				return err
			}
			if err := donutConfigWriter.Close(); err != nil {
				return err
			}
		}
	}
	return nil
}
// LoadConfig - load the donut configuration from disk; not implemented yet.
func (d donut) LoadConfig() error {
	return errors.New("Not Implemented")
}

View File

@ -0,0 +1,85 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"errors"
encoding "github.com/minio-io/erasure"
)
// encoder - wraps the erasure coding engine together with its k (data) and
// m (parity) parameters and the selected coding technique.
type encoder struct {
	encoder   *encoding.Erasure
	k, m      uint8
	technique encoding.Technique
}
// getErasureTechnique - convert technique string into Technique type.
// Accepted values are "Cauchy" and "Vandermonde".
func getErasureTechnique(technique string) (encoding.Technique, error) {
	switch technique {
	case "Cauchy":
		return encoding.Cauchy, nil
	case "Vandermonde":
		// BUG FIX: the original returned encoding.Cauchy here, silently
		// substituting the wrong coding matrix for "Vandermonde".
		return encoding.Vandermonde, nil
	default:
		return encoding.None, errors.New("Invalid erasure technique")
	}
}
// NewEncoder - instantiate a new encoder for k data and m parity blocks
// using the named erasure technique.
func NewEncoder(k, m uint8, technique string) (Encoder, error) {
	t, err := getErasureTechnique(technique)
	if err != nil {
		return nil, err
	}
	params, err := encoding.ValidateParams(k, m, t)
	if err != nil {
		return nil, err
	}
	return encoder{
		encoder:   encoding.NewErasure(params),
		k:         k,
		m:         m,
		technique: t,
	}, nil
}
// GetEncodedBlockLen - length of one encoded block when dataLength bytes
// are split across e.k data blocks; zero-length input is invalid.
func (e encoder) GetEncodedBlockLen(dataLength int) (int, error) {
	if dataLength == 0 {
		return 0, errors.New("invalid argument")
	}
	return encoding.GetEncodedBlockLen(dataLength, e.k), nil
}
// Encode - erasure code data into k+m blocks; nil input is rejected.
func (e encoder) Encode(data []byte) ([][]byte, error) {
	if data == nil {
		return nil, errors.New("invalid argument")
	}
	blocks, err := e.encoder.Encode(data)
	if err != nil {
		return nil, err
	}
	return blocks, nil
}
// Decode - reconstruct the original dataLength bytes from erasure coded
// blocks.
func (e encoder) Decode(encodedData [][]byte, dataLength int) ([]byte, error) {
	decoded, err := e.encoder.Decode(encodedData, dataLength)
	if err != nil {
		return nil, err
	}
	return decoded, nil
}

View File

@ -0,0 +1,106 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"io"
"os"
)
// Collection of Donut specification interfaces

// Donut interface - the full donut API: object storage plus management.
type Donut interface {
	Storage
	Management
}

// Storage object storage interface
type Storage interface {
	// MakeBucket - create a new bucket across all nodes.
	MakeBucket(bucket string) error
	// ListBuckets - enumerate every bucket known to the donut.
	ListBuckets() (map[string]Bucket, error)
}

// Management is a donut management system interface
type Management interface {
	Heal() error
	Rebalance() error
	// Info - hostname -> ordered disk list for every node.
	Info() (map[string][]string, error)
	AttachNode(node Node) error
	DetachNode(node Node) error
	SaveConfig() error
	LoadConfig() error
}

// Encoder interface - erasure coding of byte slices.
type Encoder interface {
	GetEncodedBlockLen(dataLength int) (int, error)
	Encode(data []byte) (encodedData [][]byte, err error)
	Decode(encodedData [][]byte, dataLength int) (data []byte, err error)
}

// Bucket interface - object access and metadata within one bucket.
type Bucket interface {
	ListNodes() (map[string]Node, error)
	ListObjects() (map[string]Object, error)
	// GetObject - reader over the object's contents plus its size.
	GetObject(object string) (io.ReadCloser, int64, error)
	PutObject(object, contentType string, contents io.Reader) error
	WriteDonutObjectMetadata(object string, donutMetadata map[string]string) error
	WriteObjectMetadata(object string, objectMetadata map[string]string) error
}

// Object interface - access to an object's metadata files.
type Object interface {
	GetObjectMetadata() (map[string]string, error)
	GetDonutObjectMetadata() (map[string]string, error)
}

// Node interface - a host holding a set of disks.
type Node interface {
	ListDisks() (map[string]Disk, error)
	AttachDisk(disk Disk) error
	DetachDisk(disk Disk) error
	GetNodeName() string
	SaveConfig() error
	LoadConfig() error
}

// Disk interface - filesystem operations on a single disk root.
type Disk interface {
	MakeDir(dirname string) error
	ListDir(dirname string) ([]os.FileInfo, error)
	ListFiles(dirname string) ([]os.FileInfo, error)
	MakeFile(path string) (*os.File, error)
	OpenFile(path string) (*os.File, error)
	GetPath() string
	GetOrder() int
	GetFSInfo() map[string]string
}

// Well-known file names used by the donut on-disk layout.
const (
	donutObjectMetadataConfig = "donutObjectMetadata.json"
	objectMetadataConfig      = "objectMetadata.json"
	donutConfig               = "donutMetadata.json"
)

View File

@ -0,0 +1,68 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"errors"
)
// node - a single host participating in a donut, holding disks keyed by
// their path.
type node struct {
	hostname string
	disks    map[string]Disk
}
// NewNode - instantiate a new node for the given hostname with an empty
// disk set.
func NewNode(hostname string) (Node, error) {
	if hostname == "" {
		return nil, errors.New("invalid argument")
	}
	return node{
		hostname: hostname,
		disks:    make(map[string]Disk),
	}, nil
}
// GetNodeName - the hostname this node was created with.
func (n node) GetNodeName() string {
	return n.hostname
}
// ListDisks - all disks attached to this node, keyed by disk path.
func (n node) ListDisks() (map[string]Disk, error) {
	return n.disks, nil
}
// AttachDisk - register disk under its path; nil disks are rejected.
func (n node) AttachDisk(disk Disk) error {
	if disk == nil {
		// lowercased for consistency with the package's other error strings
		// and Go convention (error strings should not be capitalized)
		return errors.New("invalid argument")
	}
	n.disks[disk.GetPath()] = disk
	return nil
}
// DetachDisk - remove disk from the node; unknown disks are a no-op.
func (n node) DetachDisk(disk Disk) error {
	// Guard against nil, matching AttachDisk; the original would panic
	// calling GetPath on a nil interface value.
	if disk == nil {
		return errors.New("invalid argument")
	}
	delete(n.disks, disk.GetPath())
	return nil
}
// SaveConfig - persist per-node configuration; not implemented yet.
func (n node) SaveConfig() error {
	return errors.New("Not Implemented")
}
// LoadConfig - load per-node configuration; not implemented yet.
func (n node) LoadConfig() error {
	return errors.New("Not Implemented")
}

View File

@ -0,0 +1,69 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"errors"
"path"
"encoding/json"
"io/ioutil"
)
// object - a stored object addressed by name under a parent path.
type object struct {
	name                string
	objectPath          string            // directory holding the object's data and metadata files
	objectMetadata      map[string]string // last metadata read by GetObjectMetadata
	donutObjectMetadata map[string]string // last metadata read by GetDonutObjectMetadata
}
// NewObject - instantiate a new object named objectName located under
// parent path p.
func NewObject(objectName, p string) (Object, error) {
	if objectName == "" {
		return nil, errors.New("invalid argument")
	}
	return object{
		name:       objectName,
		objectPath: path.Join(p, objectName),
	}, nil
}
// GetObjectMetadata reads and decodes the object's user metadata file
// (objectMetadataConfig) from disk and returns it.
func (o object) GetObjectMetadata() (map[string]string, error) {
	objectMetadata := make(map[string]string)
	objectMetadataBytes, err := ioutil.ReadFile(path.Join(o.objectPath, objectMetadataConfig))
	if err != nil {
		return nil, err
	}
	if err := json.Unmarshal(objectMetadataBytes, &objectMetadata); err != nil {
		return nil, err
	}
	// NOTE: the previous assignment to o.objectMetadata was a dead store —
	// the value receiver is a copy, so the cached field never escaped this
	// call. Removed rather than switching to a pointer receiver.
	return objectMetadata, nil
}
// GetDonutObjectMetadata reads and decodes the object's donut system
// metadata file (donutObjectMetadataConfig) from disk and returns it.
func (o object) GetDonutObjectMetadata() (map[string]string, error) {
	donutObjectMetadata := make(map[string]string)
	donutObjectMetadataBytes, err := ioutil.ReadFile(path.Join(o.objectPath, donutObjectMetadataConfig))
	if err != nil {
		return nil, err
	}
	if err := json.Unmarshal(donutObjectMetadataBytes, &donutObjectMetadata); err != nil {
		return nil, err
	}
	// The old write to o.donutObjectMetadata was lost on return (value
	// receiver); removed as a dead store.
	return donutObjectMetadata, nil
}

View File

@ -0,0 +1,37 @@
package donut
import (
"fmt"
"os"
"strings"
)
// Rebalance scans every disk of every node for this donut's directory,
// collecting disks with no data ("new" disks, candidates to receive
// objects) and the bucket/segment directories found on populated disks.
//
// NOTE(review): work in progress — the fmt.Println calls are debug output
// and no data is actually moved yet.
func (d donut) Rebalance() error {
	var totalOffSetLength int
	var newDisks []Disk
	var existingDirs []os.FileInfo
	for _, node := range d.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return err
		}
		// NOTE(review): assigned, not accumulated — if a total across all
		// nodes is intended this should probably be "+="; confirm.
		totalOffSetLength = len(disks)
		fmt.Println(totalOffSetLength)
		for _, disk := range disks {
			dirs, err := disk.ListDir(d.name)
			if err != nil {
				return err
			}
			if len(dirs) == 0 {
				newDisks = append(newDisks, disk)
			}
			existingDirs = append(existingDirs, dirs...)
		}
	}
	for _, dir := range existingDirs {
		// Directory names are assumed to be "<bucket>$<segment>$<offset>";
		// a name with fewer than three "$"-separated parts would panic on
		// the indexing below — TODO confirm naming invariant.
		splits := strings.Split(dir.Name(), "$")
		bucketName, segment, offset := splits[0], splits[1], splits[2]
		fmt.Println(bucketName, segment, offset)
	}
	return nil
}

View File

@ -19,6 +19,8 @@ package donut
import (
"errors"
"io"
"os"
"path"
"sort"
"strconv"
"strings"
@ -26,9 +28,9 @@ import (
"io/ioutil"
"github.com/minio-io/donut"
"github.com/minio-io/iodine"
"github.com/minio-io/minio/pkg/drivers"
"github.com/minio-io/minio/pkg/storage/donut"
"github.com/minio-io/minio/pkg/utils/log"
)
@ -41,20 +43,44 @@ const (
blockSize = 10 * 1024 * 1024
)
// createNodeDiskMap builds a dummy single-node ("localhost") layout of 16
// disk directories under p, creating each directory that does not yet
// exist. This is a stand-in for a real donut configuration file until the
// Management API is standardized; useful for now to exercise multi-disk
// API behavior.
func createNodeDiskMap(p string) map[string][]string {
	disks := make([]string, 16)
	for i := range disks {
		diskPath := path.Join(p, strconv.Itoa(i))
		if _, err := os.Stat(diskPath); os.IsNotExist(err) {
			// Best-effort: a creation failure surfaces later when the
			// disk path is actually used.
			os.MkdirAll(diskPath, 0700)
		}
		disks[i] = diskPath
	}
	return map[string][]string{"localhost": disks}
}
// Start a single disk subsystem
func Start(path string) (chan<- string, <-chan error, drivers.Driver) {
ctrlChannel := make(chan string)
errorChannel := make(chan error)
s := new(donutDriver)
errParams := map[string]string{"path": path}
// TODO donut driver should be passed in as Start param and driven by config
var err error
s.donut, err = donut.NewDonut(path)
err = iodine.New(err, map[string]string{"path": path})
// Soon to be user configurable, when Management API
// is finished we remove "default" to something
// which is passed down from configuration
donut, err := donut.NewDonut("default", createNodeDiskMap(path))
if err != nil {
err = iodine.New(err, errParams)
log.Error.Println(err)
}
s := new(donutDriver)
s.donut = donut
go start(ctrlChannel, errorChannel, s)
return ctrlChannel, errorChannel, s
}
@ -63,26 +89,37 @@ func start(ctrlChannel <-chan string, errorChannel chan<- error, s *donutDriver)
close(errorChannel)
}
// byBucketName implements sort.Interface over bucket metadata, ordering
// lexically by bucket name.
type byBucketName []drivers.BucketMetadata

func (b byBucketName) Len() int           { return len(b) }
func (b byBucketName) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }
// ListBuckets returns a list of buckets
func (d donutDriver) ListBuckets() (results []drivers.BucketMetadata, err error) {
buckets, err := d.donut.ListBuckets()
if err != nil {
return nil, err
}
for _, bucket := range buckets {
for name := range buckets {
result := drivers.BucketMetadata{
Name: bucket,
Name: name,
// TODO Add real created date
Created: time.Now(),
}
results = append(results, result)
}
sort.Sort(byBucketName(results))
return results, nil
}
// CreateBucket creates a new bucket
func (d donutDriver) CreateBucket(bucket string) error {
return d.donut.CreateBucket(bucket)
func (d donutDriver) CreateBucket(bucketName string) error {
if drivers.IsValidBucket(bucketName) && !strings.Contains(bucketName, ".") {
return d.donut.MakeBucket(bucketName)
}
return errors.New("Invalid bucket")
}
// GetBucketMetadata retrieves an bucket's metadata
@ -101,30 +138,75 @@ func (d donutDriver) GetBucketPolicy(bucket string) (drivers.BucketPolicy, error
}
// GetObject retrieves an object and writes it to a writer
func (d donutDriver) GetObject(target io.Writer, bucket, key string) (int64, error) {
reader, err := d.donut.GetObjectReader(bucket, key)
if err != nil {
return 0, drivers.ObjectNotFound{
Bucket: bucket,
Object: key,
}
func (d donutDriver) GetObject(target io.Writer, bucketName, objectName string) (int64, error) {
errParams := map[string]string{
"bucketName": bucketName,
"objectName": objectName,
}
return io.Copy(target, reader)
if bucketName == "" || strings.TrimSpace(bucketName) == "" {
return 0, iodine.New(errors.New("invalid argument"), errParams)
}
if objectName == "" || strings.TrimSpace(objectName) == "" {
return 0, iodine.New(errors.New("invalid argument"), errParams)
}
buckets, err := d.donut.ListBuckets()
if err != nil {
return 0, iodine.New(err, nil)
}
if _, ok := buckets[bucketName]; !ok {
return 0, iodine.New(errors.New("bucket does not exist"), errParams)
}
reader, size, err := buckets[bucketName].GetObject(objectName)
if os.IsNotExist(err) {
return 0, drivers.ObjectNotFound{
Bucket: bucketName,
Object: objectName,
}
} else if err != nil {
return 0, iodine.New(err, errParams)
}
n, err := io.CopyN(target, reader, size)
return n, iodine.New(err, errParams)
}
// GetPartialObject retrieves an object range and writes it to a writer
func (d donutDriver) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) {
func (d donutDriver) GetPartialObject(w io.Writer, bucketName, objectName string, start, length int64) (int64, error) {
// TODO more efficient get partial object with proper donut support
errParams := map[string]string{
"bucket": bucket,
"object": object,
"start": strconv.FormatInt(start, 10),
"length": strconv.FormatInt(length, 10),
"bucketName": bucketName,
"objectName": objectName,
"start": strconv.FormatInt(start, 10),
"length": strconv.FormatInt(length, 10),
}
reader, err := d.donut.GetObjectReader(bucket, object)
if bucketName == "" || strings.TrimSpace(bucketName) == "" {
return 0, iodine.New(errors.New("invalid argument"), errParams)
}
if objectName == "" || strings.TrimSpace(objectName) == "" {
return 0, iodine.New(errors.New("invalid argument"), errParams)
}
if start < 0 {
return 0, iodine.New(errors.New("invalid argument"), errParams)
}
buckets, err := d.donut.ListBuckets()
if err != nil {
return 0, iodine.New(err, nil)
}
if _, ok := buckets[bucketName]; !ok {
return 0, iodine.New(errors.New("bucket does not exist"), errParams)
}
reader, size, err := buckets[bucketName].GetObject(objectName)
if os.IsNotExist(err) {
return 0, drivers.ObjectNotFound{
Bucket: bucketName,
Object: objectName,
}
} else if err != nil {
return 0, iodine.New(err, errParams)
}
if start > size || (start+length-1) > size {
return 0, iodine.New(errors.New("invalid range"), errParams)
}
_, err = io.CopyN(ioutil.Discard, reader, start)
if err != nil {
return 0, iodine.New(err, errParams)
@ -134,156 +216,160 @@ func (d donutDriver) GetPartialObject(w io.Writer, bucket, object string, start,
}
// GetObjectMetadata retrieves an object's metadata
func (d donutDriver) GetObjectMetadata(bucket, key string, prefix string) (drivers.ObjectMetadata, error) {
metadata, err := d.donut.GetObjectMetadata(bucket, key)
func (d donutDriver) GetObjectMetadata(bucketName, objectName, prefixName string) (drivers.ObjectMetadata, error) {
errParams := map[string]string{
"bucketName": bucketName,
"objectName": objectName,
"prefixName": prefixName,
}
buckets, err := d.donut.ListBuckets()
if err != nil {
return drivers.ObjectMetadata{}, iodine.New(err, errParams)
}
if _, ok := buckets[bucketName]; !ok {
return drivers.ObjectMetadata{}, iodine.New(errors.New("bucket does not exist"), errParams)
}
objectList, err := buckets[bucketName].ListObjects()
if err != nil {
return drivers.ObjectMetadata{}, iodine.New(err, errParams)
}
donutObjectMetadata, err := objectList[objectName].GetDonutObjectMetadata()
if os.IsNotExist(err) {
// return ObjectNotFound quickly on an error, API needs this to handle invalid requests
return drivers.ObjectMetadata{}, drivers.ObjectNotFound{
Bucket: bucket,
Object: key,
Bucket: bucketName,
Object: objectName,
}
} else if err != nil {
return drivers.ObjectMetadata{}, iodine.New(err, errParams)
}
created, err := time.Parse(time.RFC3339Nano, metadata["sys.created"])
objectMetadata, err := objectList[objectName].GetObjectMetadata()
if os.IsNotExist(err) {
// return ObjectNotFound quickly on an error, API needs this to handle invalid requests
return drivers.ObjectMetadata{}, drivers.ObjectNotFound{
Bucket: bucketName,
Object: objectName,
}
} else if err != nil {
return drivers.ObjectMetadata{}, iodine.New(err, errParams)
}
created, err := time.Parse(time.RFC3339Nano, donutObjectMetadata["created"])
if err != nil {
return drivers.ObjectMetadata{}, err
return drivers.ObjectMetadata{}, iodine.New(err, nil)
}
size, err := strconv.ParseInt(metadata["sys.size"], 10, 64)
size, err := strconv.ParseInt(donutObjectMetadata["size"], 10, 64)
if err != nil {
return drivers.ObjectMetadata{}, err
return drivers.ObjectMetadata{}, iodine.New(err, nil)
}
objectMetadata := drivers.ObjectMetadata{
Bucket: bucket,
Key: key,
driversObjectMetadata := drivers.ObjectMetadata{
Bucket: bucketName,
Key: objectName,
ContentType: metadata["contentType"],
ContentType: objectMetadata["contentType"],
Created: created,
Md5: metadata["sys.md5"],
Md5: donutObjectMetadata["md5"],
Size: size,
}
return objectMetadata, nil
return driversObjectMetadata, nil
}
// byObjectKey implements sort.Interface over object metadata, ordering
// lexically by object key.
type byObjectKey []drivers.ObjectMetadata

func (b byObjectKey) Len() int           { return len(b) }
func (b byObjectKey) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byObjectKey) Less(i, j int) bool { return b[i].Key < b[j].Key }
// ListObjects - returns list of objects
func (d donutDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
// TODO Fix IsPrefixSet && IsDelimiterSet and use them
objects, err := d.donut.ListObjects(bucket)
func (d donutDriver) ListObjects(bucketName string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
errParams := map[string]string{
"bucketName": bucketName,
}
buckets, err := d.donut.ListBuckets()
if err != nil {
return nil, drivers.BucketResourcesMetadata{}, err
return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, errParams)
}
if _, ok := buckets[bucketName]; !ok {
return nil, drivers.BucketResourcesMetadata{}, iodine.New(errors.New("bucket does not exist"), errParams)
}
objectList, err := buckets[bucketName].ListObjects()
if err != nil {
return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, errParams)
}
var objects []string
for key := range objectList {
objects = append(objects, key)
}
sort.Strings(objects)
if resources.Prefix != "" {
objects = filterPrefix(objects, resources.Prefix)
objects = removePrefix(objects, resources.Prefix)
}
if resources.Maxkeys <= 0 || resources.Maxkeys > 1000 {
resources.Maxkeys = 1000
}
var actualObjects []string
var commonPrefixes []string
if strings.TrimSpace(resources.Delimiter) != "" {
actualObjects = filterDelimited(objects, resources.Delimiter)
commonPrefixes = filterNotDelimited(objects, resources.Delimiter)
commonPrefixes = extractDir(commonPrefixes, resources.Delimiter)
commonPrefixes = uniqueObjects(commonPrefixes)
resources.CommonPrefixes = commonPrefixes
} else {
actualObjects = objects
}
// Populate filtering mode
resources.Mode = drivers.GetMode(resources)
// filter objects based on resources.Prefix and resources.Delimiter
actualObjects, commonPrefixes := d.filter(objects, resources)
resources.CommonPrefixes = commonPrefixes
var results []drivers.ObjectMetadata
for _, object := range actualObjects {
for _, objectName := range actualObjects {
if len(results) >= resources.Maxkeys {
resources.IsTruncated = true
break
}
metadata, err := d.GetObjectMetadata(bucket, resources.Prefix+object, "")
if _, ok := objectList[objectName]; !ok {
return nil, drivers.BucketResourcesMetadata{}, iodine.New(errors.New("object corrupted"), errParams)
}
objectMetadata, err := objectList[objectName].GetDonutObjectMetadata()
if err != nil {
return nil, drivers.BucketResourcesMetadata{}, err
return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, errParams)
}
t, err := time.Parse(time.RFC3339Nano, objectMetadata["created"])
if err != nil {
return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, nil)
}
size, err := strconv.ParseInt(objectMetadata["size"], 10, 64)
if err != nil {
return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, nil)
}
metadata := drivers.ObjectMetadata{
Key: objectName,
Created: t,
Size: size,
}
results = append(results, metadata)
}
sort.Sort(byObjectKey(results))
return results, resources, nil
}
// filterPrefix returns only those object names that begin with prefix,
// preserving their order.
func filterPrefix(objects []string, prefix string) []string {
	var matched []string
	for _, name := range objects {
		if !strings.HasPrefix(name, prefix) {
			continue
		}
		matched = append(matched, name)
	}
	return matched
}
// removePrefix strips prefix from every object name; names that do not
// carry the prefix pass through unchanged.
func removePrefix(objects []string, prefix string) []string {
	var trimmed []string
	for _, name := range objects {
		trimmed = append(trimmed, strings.TrimPrefix(name, prefix))
	}
	return trimmed
}
// filterDelimited keeps only names that do NOT contain the delimiter,
// i.e. the "leaf" objects at the current listing level.
func filterDelimited(objects []string, delim string) []string {
	var leaves []string
	for _, name := range objects {
		if strings.Contains(name, delim) {
			continue
		}
		leaves = append(leaves, name)
	}
	return leaves
}
// filterNotDelimited keeps only names that DO contain the delimiter,
// i.e. objects living under a deeper "directory" level.
func filterNotDelimited(objects []string, delim string) []string {
	var nested []string
	for i := range objects {
		if strings.Contains(objects[i], delim) {
			nested = append(nested, objects[i])
		}
	}
	return nested
}
// extractDir maps each name to its leading component — everything before
// the first delimiter — with "/" appended.
func extractDir(objects []string, delim string) []string {
	var dirs []string
	for _, name := range objects {
		dirs = append(dirs, strings.SplitN(name, delim, 2)[0]+"/")
	}
	return dirs
}
// uniqueObjects de-duplicates the given names and returns them sorted.
func uniqueObjects(objects []string) []string {
	seen := make(map[string]struct{})
	var unique []string
	for _, name := range objects {
		if _, ok := seen[name]; ok {
			continue
		}
		seen[name] = struct{}{}
		unique = append(unique, name)
	}
	sort.Strings(unique)
	return unique
}
// CreateObject creates a new object
func (d donutDriver) CreateObject(bucketKey, objectKey, contentType, expectedMd5sum string, reader io.Reader) error {
writer, err := d.donut.GetObjectWriter(bucketKey, objectKey)
if err != nil {
return err
func (d donutDriver) CreateObject(bucketName, objectName, contentType, expectedMd5sum string, reader io.Reader) error {
errParams := map[string]string{
"bucketName": bucketName,
"objectName": objectName,
"contentType": contentType,
}
if _, err := io.Copy(writer, reader); err != nil {
return err
if bucketName == "" || strings.TrimSpace(bucketName) == "" {
return iodine.New(errors.New("invalid argument"), errParams)
}
if objectName == "" || strings.TrimSpace(objectName) == "" {
return iodine.New(errors.New("invalid argument"), errParams)
}
buckets, err := d.donut.ListBuckets()
if err != nil {
return iodine.New(err, errParams)
}
if _, ok := buckets[bucketName]; !ok {
return iodine.New(errors.New("bucket does not exist"), errParams)
}
if contentType == "" {
contentType = "application/octet-stream"
}
contentType = strings.TrimSpace(contentType)
metadata := make(map[string]string)
metadata["bucket"] = bucketKey
metadata["object"] = objectKey
metadata["contentType"] = contentType
if err = writer.SetMetadata(metadata); err != nil {
return err
err = buckets[bucketName].PutObject(objectName, contentType, reader)
if err != nil {
return iodine.New(err, errParams)
}
return writer.Close()
// handle expectedMd5sum
return nil
}

View File

@ -0,0 +1,68 @@
package donut
import (
"bufio"
"bytes"
"strings"
"github.com/minio-io/minio/pkg/drivers"
)
// delimiter returns the leading portion of object up to and including the
// first occurrence of the delimiter's first byte; when that byte is not
// present the whole object name is returned.
func delimiter(object, delim string) string {
	buffered := bufio.NewReader(bytes.NewBufferString(object))
	sep, _ := strings.NewReader(delim).ReadByte()
	head, _ := buffered.ReadString(sep)
	return head
}
// appendUniq appends i to slice only when i is not already present.
func appendUniq(slice []string, i string) []string {
	found := false
	for k := 0; k < len(slice) && !found; k++ {
		found = slice[k] == i
	}
	if found {
		return slice
	}
	return append(slice, i)
}
// filter partitions an object listing into the actual object names to
// return and the set of common prefixes ("directories"), depending on
// which of Prefix/Delimiter are set on the listing request.
func (d donutDriver) filter(objects []string, resources drivers.BucketResourcesMetadata) ([]string, []string) {
	var actualObjects []string
	var commonPrefixes []string
	for _, name := range objects {
		switch true {
		// Both delimiter and Prefix is present
		case resources.IsDelimiterPrefixSet():
			if strings.HasPrefix(name, resources.Prefix) {
				trimmedName := strings.TrimPrefix(name, resources.Prefix)
				delimitedName := delimiter(trimmedName, resources.Delimiter)
				if delimitedName != "" {
					// A trimmed name that is exactly the delimiter groups
					// under the prefix itself; otherwise it groups under
					// its delimited leading component.
					if delimitedName == resources.Delimiter {
						commonPrefixes = appendUniq(commonPrefixes, resources.Prefix+delimitedName)
					} else {
						commonPrefixes = appendUniq(commonPrefixes, delimitedName)
					}
					// delimiter() returns the whole string when the
					// delimiter byte is absent, so equality here means the
					// object is a leaf at this listing level.
					if trimmedName == delimitedName {
						actualObjects = appendUniq(actualObjects, name)
					}
				}
			}
		// Delimiter present and Prefix is absent
		case resources.IsDelimiterSet():
			delimitedName := delimiter(name, resources.Delimiter)
			switch true {
			case delimitedName == name:
				actualObjects = appendUniq(actualObjects, name)
			case delimitedName != "":
				commonPrefixes = appendUniq(commonPrefixes, delimitedName)
			}
		case resources.IsPrefixSet():
			if strings.HasPrefix(name, resources.Prefix) {
				actualObjects = appendUniq(actualObjects, name)
			}
		case resources.IsDefault():
			// No filtering requested: everything is an actual object.
			return objects, nil
		}
	}
	return actualObjects, commonPrefixes
}

View File

@ -17,6 +17,7 @@
package server
import (
"fmt"
"os/user"
"path"
"reflect"
@ -187,7 +188,8 @@ func Start(configs []Config) {
for _, ch := range ctrlChans {
close(ch)
}
log.Fatal(value.Interface())
msg := fmt.Sprintf("%q", value.Interface())
log.Fatal(iodine.New(errors.New(msg), nil))
}
case false:
// Channel closed, remove from list

View File

@ -1,42 +0,0 @@
package donut
import (
"errors"
"strconv"
"strings"
"github.com/minio-io/iodine"
)
// donutBucket - a bucket's node assignment: nodes[i] holds the node ID
// serving part i. The objects field presumably caches raw object bytes by
// name — not exercised in this file; confirm against callers.
type donutBucket struct {
	nodes   []string
	objects map[string][]byte
}
// GetNodes - get list of associated nodes for a given bucket. A copy of
// the internal slice is returned so callers cannot mutate bucket state.
func (b donutBucket) GetNodes() ([]string, error) {
	var nodes []string
	nodes = append(nodes, b.nodes...)
	return nodes, nil
}
// AddNode - adds a node to a bucket. bucketID must have the form
// "<bucketName>:<aggregate>:<part>"; the node is recorded as the owner of
// that part index.
func (b donutBucket) AddNode(nodeID, bucketID string) error {
	errParams := map[string]string{"node": nodeID, "bucketID": bucketID}
	tokens := strings.Split(bucketID, ":")
	if len(tokens) != 3 {
		// Fixed typo: was "Bucket ID malformeD".
		return iodine.New(errors.New("Bucket ID malformed: "+bucketID), errParams)
	}
	// bucketName := tokens[0]
	// aggregate := tokens[1]
	part, err := strconv.Atoi(tokens[2])
	if err != nil {
		return iodine.New(errors.New("Part malformed: "+tokens[2]), errParams)
	}
	// Guard the slice write instead of panicking on an out-of-range part
	// index coming from a malformed bucket ID.
	if part < 0 || part >= len(b.nodes) {
		return iodine.New(errors.New("Part out of range: "+tokens[2]), errParams)
	}
	b.nodes[part] = nodeID
	return nil
}

View File

@ -1,196 +0,0 @@
package donut
import (
"errors"
"io"
"sort"
"strconv"
"strings"
"github.com/minio-io/iodine"
)
// donut - driver state for the single-root donut: buckets by bucket name
// and storage nodes by node ID.
type donut struct {
	buckets map[string]Bucket
	nodes   map[string]Node
}
// NewDonut - instantiate new donut driver rooted at the given directory.
// A single local node ("localhost") is created, its existing buckets are
// discovered, and each bucket ID ("<name>:<aggregate>:<part>") is wired
// back to the node that holds that part.
func NewDonut(root string) (Donut, error) {
	nodes := make(map[string]Node)
	nodes["localhost"] = &localDirectoryNode{root: root}
	driver := &donut{
		buckets: make(map[string]Bucket),
		nodes:   nodes,
	}
	for nodeID, node := range nodes {
		bucketIDs, err := node.GetBuckets()
		if err != nil {
			return nil, iodine.New(err, map[string]string{"root": root})
		}
		for _, bucketID := range bucketIDs {
			// tokens[0] is the bucket name; create its entry on first sight
			// with a 16-part node table.
			tokens := strings.Split(bucketID, ":")
			if _, ok := driver.buckets[tokens[0]]; !ok {
				bucket := donutBucket{
					nodes: make([]string, 16),
				}
				// TODO catch errors
				driver.buckets[tokens[0]] = bucket
			}
			if err = driver.buckets[tokens[0]].AddNode(nodeID, bucketID); err != nil {
				return nil, iodine.New(err, map[string]string{"root": root})
			}
		}
	}
	return driver, nil
}
// CreateBucket - create a new bucket, registering all 16 parts of the
// bucket on the localhost node. The name is trimmed of surrounding
// whitespace before creation; empty and duplicate names are rejected.
func (d donut) CreateBucket(bucketName string) error {
	if _, ok := d.buckets[bucketName]; ok {
		return iodine.New(errors.New("Bucket exists"), map[string]string{"bucket": bucketName})
	}
	bucketName = strings.TrimSpace(bucketName)
	if bucketName == "" {
		return iodine.New(errors.New("Cannot create bucket with no name"), map[string]string{"bucket": bucketName})
	}
	// assign nodes
	// TODO assign other nodes
	nodes := make([]string, 16)
	for i := range nodes {
		nodes[i] = "localhost"
		node, ok := d.nodes["localhost"]
		if !ok {
			continue
		}
		if err := node.CreateBucket(bucketName + ":0:" + strconv.Itoa(i)); err != nil {
			return iodine.New(err, map[string]string{"node": nodes[i], "bucket": bucketName})
		}
	}
	d.buckets[bucketName] = donutBucket{nodes: nodes}
	return nil
}
// ListBuckets - list all bucket names, sorted lexically. Returns nil (not
// an empty slice) when no buckets exist.
func (d donut) ListBuckets() ([]string, error) {
	var buckets []string
	for name := range d.buckets {
		buckets = append(buckets, name)
	}
	sort.Strings(buckets)
	return buckets, nil
}
// GetObjectWriter - get a new writer interface for a new object. One part
// writer is opened per entry of the bucket's node table (16 parts). If any
// part writer fails to open, every writer opened so far is closed with the
// error before returning, so no half-open part writers leak.
func (d donut) GetObjectWriter(bucketName, objectName string) (ObjectWriter, error) {
	if bucket, ok := d.buckets[bucketName]; ok == true {
		writers := make([]Writer, 16)
		nodes, err := bucket.GetNodes()
		if err != nil {
			return nil, iodine.New(err, map[string]string{"bucket": bucketName, "object": objectName})
		}
		for i, nodeID := range nodes {
			if node, ok := d.nodes[nodeID]; ok == true {
				// Part i of the object lives under bucket ID "<name>:0:<i>".
				bucketID := bucketName + ":0:" + strconv.Itoa(i)
				writer, err := node.GetWriter(bucketID, objectName)
				if err != nil {
					// Unwind: close any writers already opened so their
					// readers observe the failure.
					for _, writerToClose := range writers {
						if writerToClose != nil {
							writerToClose.CloseWithError(iodine.New(err, nil))
						}
					}
					return nil, iodine.New(err, map[string]string{"bucketid": bucketID})
				}
				writers[i] = writer
			}
		}
		// newErasureWriter presumably stripes incoming data across the 16
		// part writers — confirm in its definition.
		return newErasureWriter(writers), nil
	}
	return nil, iodine.New(errors.New("Bucket not found"), map[string]string{"bucket": bucketName})
}
// GetObjectReader - get a new reader interface for a new object. A part
// reader is opened per entry of the bucket's node table, the donut
// metadata is fetched once (from the first reachable part), and decoding
// runs in a background goroutine feeding the returned pipe reader.
func (d donut) GetObjectReader(bucketName, objectName string) (io.ReadCloser, error) {
	errParams := map[string]string{"bucket": bucketName, "object": objectName}
	r, w := io.Pipe()
	if bucket, ok := d.buckets[bucketName]; ok == true {
		readers := make([]io.ReadCloser, 16)
		nodes, err := bucket.GetNodes()
		if err != nil {
			return nil, iodine.New(err, errParams)
		}
		var metadata map[string]string
		for i, nodeID := range nodes {
			if node, ok := d.nodes[nodeID]; ok == true {
				bucketID := bucketName + ":0:" + strconv.Itoa(i)
				reader, err := node.GetReader(bucketID, objectName)
				if err != nil {
					errParams["node"] = nodeID
					// NOTE(review): readers already opened are not closed
					// on this error path — potential fd leak; confirm.
					return nil, iodine.New(err, errParams)
				}
				readers[i] = reader
				// Metadata is identical across parts; fetch it only once.
				if metadata == nil {
					metadata, err = node.GetDonutMetadata(bucketID, objectName)
					if err != nil {
						errParams["node"] = nodeID
						return nil, iodine.New(err, errParams)
					}
				}
			}
		}
		go erasureReader(readers, metadata, w)
		return r, nil
	}
	return nil, iodine.New(errors.New("Bucket not found"), errParams)
}
// GetObjectMetadata returns the merged user and system metadata for an
// object in a bucket. System fields from the donut metadata are exposed
// under a "sys." prefix (sys.created, sys.md5, sys.size). Metadata is read
// from the bucket's first node only.
func (d donut) GetObjectMetadata(bucketName, object string) (map[string]string, error) {
	errParams := map[string]string{"bucket": bucketName, "object": object}
	if bucket, ok := d.buckets[bucketName]; ok {
		nodes, err := bucket.GetNodes()
		if err != nil {
			return nil, iodine.New(err, errParams)
		}
		if node, ok := d.nodes[nodes[0]]; ok {
			bucketID := bucketName + ":0:0"
			metadata, err := node.GetMetadata(bucketID, object)
			if err != nil {
				errParams["bucketID"] = bucketID
				return nil, iodine.New(err, errParams)
			}
			donutMetadata, err := node.GetDonutMetadata(bucketID, object)
			if err != nil {
				errParams["bucketID"] = bucketID
				return nil, iodine.New(err, errParams)
			}
			metadata["sys.created"] = donutMetadata["created"]
			metadata["sys.md5"] = donutMetadata["md5"]
			metadata["sys.size"] = donutMetadata["size"]
			return metadata, nil
		}
		errParams["node"] = nodes[0]
		return nil, iodine.New(errors.New("Cannot connect to node: "+nodes[0]), errParams)
	}
	// Consistency fix: wrap with iodine and attach errParams like every
	// other error path in this file (was a bare errors.New).
	return nil, iodine.New(errors.New("Bucket not found"), errParams)
}
// ListObjects - list all the available objects in a bucket, read from the
// bucket's first node only.
func (d donut) ListObjects(bucketName string) ([]string, error) {
	errParams := map[string]string{"bucket": bucketName}
	if bucket, ok := d.buckets[bucketName]; ok {
		nodes, err := bucket.GetNodes()
		if err != nil {
			return nil, iodine.New(err, errParams)
		}
		if node, ok := d.nodes[nodes[0]]; ok {
			bucketID := bucketName + ":0:0"
			objects, err := node.ListObjects(bucketID)
			errParams["bucketID"] = bucketID
			// NOTE(review): iodine.New is invoked even when err is nil —
			// verify it returns nil in that case, otherwise every success
			// is reported as an error.
			return objects, iodine.New(err, errParams)
		}
	}
	// Also reached when the first node's lookup fails, not only when the
	// bucket is missing — the message is slightly misleading.
	return nil, iodine.New(errors.New("Bucket not found"), errParams)
}

View File

@ -1,240 +0,0 @@
package donut
import (
"bytes"
"io"
"io/ioutil"
"os"
"testing"
"time"
. "github.com/minio-io/check"
)
// Test hooks the check (gocheck) suites into the standard "go test" runner.
func Test(t *testing.T) { TestingT(t) }

// MySuite groups the donut driver tests.
type MySuite struct{}

var _ = Suite(&MySuite{})
// TestEmptyBucket verifies a freshly created donut lists no buckets
// (ListBuckets returns nil, not an empty slice).
func (s *MySuite) TestEmptyBucket(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut(root)
	c.Assert(err, IsNil)
	// check buckets are empty
	buckets, err := donut.ListBuckets()
	c.Assert(err, IsNil)
	c.Assert(buckets, IsNil)
}
// TestBucketWithoutNameFails verifies empty and whitespace-only bucket
// names are rejected by CreateBucket.
func (s *MySuite) TestBucketWithoutNameFails(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut(root)
	c.Assert(err, IsNil)
	// fail to create new bucket without a name
	err = donut.CreateBucket("")
	c.Assert(err, Not(IsNil))
	err = donut.CreateBucket(" ")
	c.Assert(err, Not(IsNil))
}
// TestCreateBucketAndList verifies a newly created bucket shows up in
// ListBuckets.
func (s *MySuite) TestCreateBucketAndList(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut(root)
	c.Assert(err, IsNil)
	// create bucket
	err = donut.CreateBucket("foo")
	c.Assert(err, IsNil)
	// check bucket exists
	buckets, err := donut.ListBuckets()
	c.Assert(err, IsNil)
	c.Assert(buckets, DeepEquals, []string{"foo"})
}

// TestCreateBucketWithSameNameFails verifies creating a bucket twice under
// the same name fails the second time.
func (s *MySuite) TestCreateBucketWithSameNameFails(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut(root)
	c.Assert(err, IsNil)
	err = donut.CreateBucket("foo")
	c.Assert(err, IsNil)
	err = donut.CreateBucket("foo")
	c.Assert(err, Not(IsNil))
}
// TestCreateMultipleBucketsAndList verifies ListBuckets returns all created
// buckets in sorted order.
func (s *MySuite) TestCreateMultipleBucketsAndList(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut(root)
	c.Assert(err, IsNil)
	// add a second bucket
	err = donut.CreateBucket("foo")
	c.Assert(err, IsNil)
	err = donut.CreateBucket("bar")
	c.Assert(err, IsNil)
	buckets, err := donut.ListBuckets()
	c.Assert(err, IsNil)
	c.Assert(buckets, DeepEquals, []string{"bar", "foo"})
	err = donut.CreateBucket("foobar")
	c.Assert(err, IsNil)
	buckets, err = donut.ListBuckets()
	c.Assert(err, IsNil)
	c.Assert(buckets, DeepEquals, []string{"bar", "foo", "foobar"})
}

// TestNewObjectFailsWithoutBucket verifies a writer cannot be opened
// against a bucket that does not exist.
func (s *MySuite) TestNewObjectFailsWithoutBucket(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut(root)
	c.Assert(err, IsNil)
	writer, err := donut.GetObjectWriter("foo", "obj")
	c.Assert(err, Not(IsNil))
	c.Assert(writer, IsNil)
}

// TestNewObjectFailsWithEmptyName verifies empty and whitespace-only
// object names are rejected when opening a writer.
func (s *MySuite) TestNewObjectFailsWithEmptyName(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut(root)
	c.Assert(err, IsNil)
	writer, err := donut.GetObjectWriter("foo", "")
	c.Assert(err, Not(IsNil))
	c.Assert(writer, IsNil)
	writer, err = donut.GetObjectWriter("foo", " ")
	c.Assert(err, Not(IsNil))
	c.Assert(writer, IsNil)
}
// TestNewObjectCanBeWritten round-trips a small object: write data plus
// user metadata, then read the data back and check the merged metadata
// (user keys plus sys.md5 / sys.size / sys.created).
func (s *MySuite) TestNewObjectCanBeWritten(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut(root)
	c.Assert(err, IsNil)
	err = donut.CreateBucket("foo")
	c.Assert(err, IsNil)
	writer, err := donut.GetObjectWriter("foo", "obj")
	c.Assert(err, IsNil)
	data := "Hello World"
	length, err := writer.Write([]byte(data))
	c.Assert(length, Equals, len(data))
	expectedMetadata := map[string]string{
		"foo":     "bar",
		"created": "one",
		"hello":   "world",
	}
	err = writer.SetMetadata(expectedMetadata)
	c.Assert(err, IsNil)
	err = writer.Close()
	c.Assert(err, IsNil)
	actualWriterMetadata, err := writer.GetMetadata()
	c.Assert(err, IsNil)
	c.Assert(actualWriterMetadata, DeepEquals, expectedMetadata)
	c.Assert(err, IsNil)
	reader, err := donut.GetObjectReader("foo", "obj")
	c.Assert(err, IsNil)
	var actualData bytes.Buffer
	_, err = io.Copy(&actualData, reader)
	c.Assert(err, IsNil)
	c.Assert(actualData.Bytes(), DeepEquals, []byte(data))
	actualMetadata, err := donut.GetObjectMetadata("foo", "obj")
	c.Assert(err, IsNil)
	// md5 of "Hello World"; size is its byte length.
	expectedMetadata["sys.md5"] = "b10a8db164e0754105b7a99be72e3fe5"
	expectedMetadata["sys.size"] = "11"
	// sys.created is time-dependent; only check it parses, then fold it
	// into the expected map for the deep comparison.
	_, err = time.Parse(time.RFC3339Nano, actualMetadata["sys.created"])
	c.Assert(err, IsNil)
	expectedMetadata["sys.created"] = actualMetadata["sys.created"]
	c.Assert(actualMetadata, DeepEquals, expectedMetadata)
}
// TestMultipleNewObjects verifies two objects in the same bucket keep
// independent contents and that ListObjects returns both keys sorted.
func (s *MySuite) TestMultipleNewObjects(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut(root)
	c.Assert(err, IsNil)
	c.Assert(donut.CreateBucket("foo"), IsNil)
	writer, err := donut.GetObjectWriter("foo", "obj1")
	c.Assert(err, IsNil)
	writer.Write([]byte("one"))
	writer.Close()
	writer, err = donut.GetObjectWriter("foo", "obj2")
	c.Assert(err, IsNil)
	writer.Write([]byte("two"))
	writer.Close()
	// c.Skip("not complete")
	reader, err := donut.GetObjectReader("foo", "obj1")
	c.Assert(err, IsNil)
	var readerBuffer1 bytes.Buffer
	_, err = io.Copy(&readerBuffer1, reader)
	c.Assert(err, IsNil)
	// c.Skip("Not Implemented")
	c.Assert(readerBuffer1.Bytes(), DeepEquals, []byte("one"))
	reader, err = donut.GetObjectReader("foo", "obj2")
	c.Assert(err, IsNil)
	var readerBuffer2 bytes.Buffer
	_, err = io.Copy(&readerBuffer2, reader)
	c.Assert(err, IsNil)
	c.Assert(readerBuffer2.Bytes(), DeepEquals, []byte("two"))
	// test list objects
	listObjects, err := donut.ListObjects("foo")
	c.Assert(err, IsNil)
	c.Assert(listObjects, DeepEquals, []string{"obj1", "obj2"})
}

// TestSysPrefixShouldFail verifies user metadata keys in the reserved
// "sys." namespace are rejected by SetMetadata.
func (s *MySuite) TestSysPrefixShouldFail(c *C) {
	root, err := ioutil.TempDir(os.TempDir(), "donut-")
	c.Assert(err, IsNil)
	defer os.RemoveAll(root)
	donut, err := NewDonut(root)
	c.Assert(err, IsNil)
	c.Assert(donut.CreateBucket("foo"), IsNil)
	writer, err := donut.GetObjectWriter("foo", "obj1")
	c.Assert(err, IsNil)
	writer.Write([]byte("one"))
	metadata := make(map[string]string)
	metadata["foo"] = "bar"
	metadata["sys.hello"] = "world"
	err = writer.SetMetadata(metadata)
	c.Assert(err, Not(IsNil))
	writer.Close()
}

View File

@ -1,245 +0,0 @@
package donut
import (
"bytes"
"errors"
"hash"
"io"
"strconv"
"strings"
"time"
"crypto/md5"
"encoding/hex"
encoding "github.com/minio-io/erasure"
"github.com/minio-io/iodine"
"github.com/minio-io/minio/pkg/utils/split"
)
// getErasureTechnique - convert technique string into Technique type.
// Unknown techniques yield encoding.None and an error.
func getErasureTechnique(technique string) (encoding.Technique, error) {
	switch technique {
	case "Cauchy":
		return encoding.Cauchy, nil
	case "Vandermonde":
		// BUG FIX: previously returned encoding.Cauchy for the
		// "Vandermonde" string; return the matching constant instead.
		return encoding.Vandermonde, nil
	default:
		return encoding.None, iodine.New(errors.New("Invalid erasure technique: "+technique), nil)
	}
}
// erasureReader - reassembles an erasure-encoded object from its part
// readers and streams the decoded bytes into writer chunk by chunk,
// verifying the md5sum recorded in the donut metadata at the end. Any
// failure closes the pipe with an annotated error so the reading side
// unblocks, and decoding stops immediately.
func erasureReader(readers []io.ReadCloser, donutMetadata map[string]string, writer *io.PipeWriter) {
	totalChunks, err := strconv.Atoi(donutMetadata["chunkCount"])
	if err != nil {
		writer.CloseWithError(iodine.New(err, donutMetadata))
		return
	}
	totalLeft, err := strconv.ParseInt(donutMetadata["size"], 10, 64)
	if err != nil {
		writer.CloseWithError(iodine.New(err, donutMetadata))
		return
	}
	blockSize, err := strconv.Atoi(donutMetadata["blockSize"])
	if err != nil {
		writer.CloseWithError(iodine.New(err, donutMetadata))
		return
	}
	parsedk, err := strconv.ParseUint(donutMetadata["erasureK"], 10, 8)
	if err != nil {
		writer.CloseWithError(iodine.New(err, donutMetadata))
		return
	}
	k := uint8(parsedk)
	parsedm, err := strconv.ParseUint(donutMetadata["erasureM"], 10, 8)
	if err != nil {
		writer.CloseWithError(iodine.New(err, donutMetadata))
		return
	}
	m := uint8(parsedm)
	expectedMd5sum, err := hex.DecodeString(donutMetadata["md5"])
	if err != nil {
		writer.CloseWithError(iodine.New(err, donutMetadata))
		return
	}
	technique, err := getErasureTechnique(donutMetadata["erasureTechnique"])
	if err != nil {
		writer.CloseWithError(iodine.New(err, donutMetadata))
		return
	}
	hasher := md5.New()
	params, err := encoding.ValidateParams(k, m, technique)
	if err != nil {
		writer.CloseWithError(iodine.New(err, donutMetadata))
		// BUG FIX: previously fell through and called NewErasure with
		// invalid params after the pipe was already closed.
		return
	}
	encoder := encoding.NewErasure(params)
	for i := 0; i < totalChunks; i++ {
		totalLeft, err = decodeChunk(writer, readers, encoder, hasher, k, m, totalLeft, blockSize)
		if err != nil {
			errParams := map[string]string{
				"totalLeft": strconv.FormatInt(totalLeft, 10),
			}
			for k, v := range donutMetadata {
				errParams[k] = v
			}
			writer.CloseWithError(iodine.New(err, errParams))
			// BUG FIX: stop once the pipe is closed instead of continuing
			// the loop and closing it again on subsequent failures.
			return
		}
	}
	actualMd5sum := hasher.Sum(nil)
	if !bytes.Equal(expectedMd5sum, actualMd5sum) {
		writer.CloseWithError(iodine.New(errors.New("decoded md5sum did not match. expected: "+string(expectedMd5sum)+" actual: "+string(actualMd5sum)), donutMetadata))
		return
	}
	writer.Close()
}
// decodeChunk - reads one encoded chunk from every reader, erasure
// decodes it, folds the plain data into hasher and streams it into
// writer. Returns the number of object bytes still to be produced
// after this chunk, or the unchanged totalLeft on error.
func decodeChunk(writer *io.PipeWriter, readers []io.ReadCloser, encoder *encoding.Erasure, hasher hash.Hash, k, m uint8, totalLeft int64, blockSize int) (int64, error) {
	curBlockSize := 0
	if int64(blockSize) < totalLeft {
		curBlockSize = blockSize
	} else {
		curBlockSize = int(totalLeft) // cast is safe, blockSize in if protects
	}
	curChunkSize := encoding.GetEncodedBlockLen(curBlockSize, uint8(k))
	// bug fix: size by the actual block count (k+m) instead of the
	// hard-coded 16, which only worked for the default 8+8 config
	encodedBytes := make([][]byte, int(k)+int(m))
	for i, reader := range readers {
		var bytesBuffer bytes.Buffer
		written, err := io.CopyN(&bytesBuffer, reader, int64(curChunkSize))
		if err != nil {
			errParams := map[string]string{}
			// bug fix: "part" previously duplicated the written byte
			// count instead of identifying the failing part index
			errParams["part"] = strconv.Itoa(i)
			errParams["block.written"] = strconv.FormatInt(written, 10)
			errParams["block.length"] = strconv.Itoa(curChunkSize)
			return totalLeft, iodine.New(err, errParams)
		}
		encodedBytes[i] = bytesBuffer.Bytes()
	}
	decodedData, err := encoder.Decode(encodedBytes, curBlockSize)
	if err != nil {
		errParams := map[string]string{}
		errParams["block.length"] = strconv.Itoa(curChunkSize)
		return totalLeft, iodine.New(err, errParams)
	}
	// not expecting errors from hash; an md5 mismatch is also caught by
	// the caller on .Sum comparison
	if _, err := hasher.Write(decodedData); err != nil {
		errParams := map[string]string{}
		errParams["block.length"] = strconv.Itoa(curChunkSize)
		return totalLeft, iodine.New(err, errParams)
	}
	if _, err := io.Copy(writer, bytes.NewBuffer(decodedData)); err != nil {
		errParams := map[string]string{}
		errParams["block.length"] = strconv.Itoa(curChunkSize)
		return totalLeft, iodine.New(err, errParams)
	}
	// subtract what was actually produced; the old code subtracted the
	// full blockSize and let totalLeft go negative on the final chunk
	return totalLeft - int64(curBlockSize), nil
}
// erasure writer
//
// erasureWriter pipes user data to a background encoding goroutine
// (erasureGoroutine) which fans the encoded blocks out to the
// per-node writers.
type erasureWriter struct {
	writers       []Writer          // one destination per encoded block
	metadata      map[string]string // user metadata, set via SetMetadata
	donutMetadata map[string]string // not exposed
	erasureWriter *io.PipeWriter    // Write() feeds this pipe
	isClosed      <-chan bool       // signalled when the goroutine has flushed all writers
}
// newErasureWriter - get a new writer that erasure-encodes everything
// written to it across the given per-node writers.
func newErasureWriter(writers []Writer) ObjectWriter {
	pipeReader, pipeWriter := io.Pipe()
	closed := make(chan bool)
	eWriter := erasureWriter{
		writers:       writers,
		metadata:      make(map[string]string),
		erasureWriter: pipeWriter,
		isClosed:      closed,
	}
	go erasureGoroutine(pipeReader, eWriter, closed)
	return eWriter
}
// erasureGoroutine - drains the pipe in 10MB chunks, erasure-encodes
// each chunk (fixed k=8, m=8, Cauchy) across the node writers, then
// persists the accumulated donut metadata on every writer and signals
// completion on isClosed.
func erasureGoroutine(r *io.PipeReader, eWriter erasureWriter, isClosed chan<- bool) {
	chunks := split.Stream(r, 10*1024*1024)
	// NOTE(review): ValidateParams error is discarded; the constants
	// 8/8/Cauchy are assumed always valid — confirm against the
	// erasure package.
	params, _ := encoding.ValidateParams(8, 8, encoding.Cauchy)
	encoder := encoding.NewErasure(params)
	chunkCount := 0
	totalLength := 0
	summer := md5.New()
	for chunk := range chunks {
		if chunk.Err == nil {
			totalLength = totalLength + len(chunk.Data)
			// NOTE(review): Encode and io.Copy errors are silently
			// dropped here; a failed block leaves the object partially
			// written with no error surfaced to the caller.
			encodedBlocks, _ := encoder.Encode(chunk.Data)
			summer.Write(chunk.Data)
			for blockIndex, block := range encodedBlocks {
				io.Copy(eWriter.writers[blockIndex], bytes.NewBuffer(block))
			}
		}
		// chunkCount is incremented even for errored chunks —
		// TODO(review): confirm this is intended
		chunkCount = chunkCount + 1
	}
	dataMd5sum := summer.Sum(nil)
	// record the parameters the reader side needs to decode the object
	metadata := make(map[string]string)
	metadata["blockSize"] = strconv.Itoa(10 * 1024 * 1024)
	metadata["chunkCount"] = strconv.Itoa(chunkCount)
	metadata["created"] = time.Now().Format(time.RFC3339Nano)
	metadata["erasureK"] = "8"
	metadata["erasureM"] = "8"
	metadata["erasureTechnique"] = "Cauchy"
	metadata["md5"] = hex.EncodeToString(dataMd5sum)
	metadata["size"] = strconv.Itoa(totalLength)
	for _, nodeWriter := range eWriter.writers {
		if nodeWriter != nil {
			nodeWriter.SetMetadata(eWriter.metadata)
			nodeWriter.SetDonutMetadata(metadata)
			nodeWriter.Close()
		}
	}
	// unblock erasureWriter.Close
	isClosed <- true
}
// Write - feeds data into the erasure encoding pipeline via the pipe.
// Bug fix: the io.Copy result was previously discarded and len(data)
// returned unconditionally, hiding pipe write failures from callers.
func (eWriter erasureWriter) Write(data []byte) (int, error) {
	written, err := eWriter.erasureWriter.Write(data)
	if err != nil {
		return written, iodine.New(err, nil)
	}
	return written, nil
}
// Close - closes the write side of the pipe and blocks until the
// encoding goroutine signals (via isClosed) that all node writers
// have been flushed and closed.
func (eWriter erasureWriter) Close() error {
	eWriter.erasureWriter.Close()
	<-eWriter.isClosed
	return nil
}
// CloseWithError - propagates err to every underlying node writer.
func (eWriter erasureWriter) CloseWithError(err error) error {
	for _, nodeWriter := range eWriter.writers {
		if nodeWriter == nil {
			continue
		}
		nodeWriter.CloseWithError(err)
	}
	return nil
}
// SetMetadata - replaces the user metadata with a copy of metadata.
// Keys carrying the reserved "sys." prefix are rejected.
func (eWriter erasureWriter) SetMetadata(metadata map[string]string) error {
	for k := range metadata {
		if strings.HasPrefix(k, "sys.") {
			// bug fix: the message previously read "cannot start with
			// sys.'" with mismatched quoting
			return errors.New("Invalid key '" + k + "', cannot start with 'sys.'")
		}
	}
	for k := range eWriter.metadata {
		delete(eWriter.metadata, k)
	}
	for k, v := range metadata {
		eWriter.metadata[k] = v
	}
	return nil
}
// GetMetadata - returns a defensive copy of the user metadata.
func (eWriter erasureWriter) GetMetadata() (map[string]string, error) {
	result := make(map[string]string, len(eWriter.metadata))
	for key, value := range eWriter.metadata {
		result[key] = value
	}
	return result, nil
}

View File

@ -1,51 +0,0 @@
package donut
import (
"io"
)
// Collection of Donut specification interfaces

// Donut interface - top level donut object store: bucket creation,
// object read/write access, metadata access and listing.
type Donut interface {
	CreateBucket(bucket string) error
	GetObjectReader(bucket, object string) (io.ReadCloser, error)
	GetObjectWriter(bucket, object string) (ObjectWriter, error)
	GetObjectMetadata(bucket, object string) (map[string]string, error)
	ListBuckets() ([]string, error)
	ListObjects(bucket string) ([]string, error)
}
// Bucket interface - node membership management for a bucket.
type Bucket interface {
	GetNodes() ([]string, error)
	AddNode(nodeID, bucketID string) error
}
// Node interface - a single storage node holding object blocks plus
// their user and internal donut metadata.
type Node interface {
	CreateBucket(bucket string) error
	GetBuckets() ([]string, error)
	GetDonutMetadata(bucket, object string) (map[string]string, error)
	GetMetadata(bucket, object string) (map[string]string, error)
	GetReader(bucket, object string) (io.ReadCloser, error)
	GetWriter(bucket, object string) (Writer, error)
	ListObjects(bucket string) ([]string, error)
}
// ObjectWriter interface - write an object and its user metadata;
// Close commits the object, CloseWithError aborts it.
type ObjectWriter interface {
	Close() error
	CloseWithError(error) error
	GetMetadata() (map[string]string, error)
	SetMetadata(map[string]string) error
	Write([]byte) (int, error)
}
// Writer interface - an ObjectWriter that additionally carries
// internal donut metadata.
type Writer interface {
	ObjectWriter
	GetDonutMetadata() (map[string]string, error)
	SetDonutMetadata(map[string]string) error
}

View File

@ -1,100 +0,0 @@
package donut
import (
"io"
"os"
"path"
"sort"
"strings"
"encoding/json"
"io/ioutil"
"path/filepath"
"github.com/minio-io/iodine"
)
// localDirectoryNode - Node implementation backed by a directory tree
// on the local filesystem, rooted at root.
type localDirectoryNode struct {
	root string // base directory under which buckets are created
}
// CreateBucket - creates the bucket directory under the node root.
func (d localDirectoryNode) CreateBucket(bucket string) error {
	bucketPath := path.Join(d.root, bucket)
	return iodine.New(os.MkdirAll(bucketPath, 0700), map[string]string{"bucket": bucket})
}
// GetBuckets - lists the bucket directories under the node root.
func (d localDirectoryNode) GetBuckets() ([]string, error) {
	entries, err := ioutil.ReadDir(d.root)
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	var buckets []string
	for _, entry := range entries {
		if !entry.IsDir() {
			continue
		}
		buckets = append(buckets, entry.Name())
	}
	return buckets, nil
}
// GetWriter - creates the object directory and returns a Writer for
// its data and metadata files.
func (d localDirectoryNode) GetWriter(bucket, object string) (Writer, error) {
	errParams := map[string]string{"bucket": bucket, "object": object}
	objectPath := path.Join(d.root, bucket, object)
	if err := os.MkdirAll(objectPath, 0700); err != nil {
		return nil, iodine.New(err, errParams)
	}
	writer, err := newDonutObjectWriter(objectPath)
	return writer, iodine.New(err, errParams)
}
// GetReader - opens the object's "data" file for reading.
func (d localDirectoryNode) GetReader(bucket, object string) (io.ReadCloser, error) {
	errParams := map[string]string{"bucket": bucket, "object": object}
	dataFile, err := os.Open(path.Join(d.root, bucket, object, "data"))
	return dataFile, iodine.New(err, errParams)
}
// GetMetadata - loads the object's user metadata from metadata.json.
func (d localDirectoryNode) GetMetadata(bucket, object string) (map[string]string, error) {
	errParams := map[string]string{"bucket": bucket, "object": object}
	metadata, err := d.getMetadata(bucket, object, "metadata.json")
	return metadata, iodine.New(err, errParams)
}
// GetDonutMetadata - loads the object's internal donut metadata from
// donutMetadata.json.
func (d localDirectoryNode) GetDonutMetadata(bucket, object string) (map[string]string, error) {
	errParams := map[string]string{"bucket": bucket, "object": object}
	metadata, err := d.getMetadata(bucket, object, "donutMetadata.json")
	return metadata, iodine.New(err, errParams)
}
// getMetadata - decodes one of the object's json metadata files into a
// string map.
// Bug fix: Close was deferred before the Open error check, so a failed
// Open still invoked Close on a nil *os.File.
func (d localDirectoryNode) getMetadata(bucket, object, fileName string) (map[string]string, error) {
	errParams := map[string]string{"bucket": bucket, "object": object, "file": fileName}
	file, err := os.Open(path.Join(d.root, bucket, object, fileName))
	if err != nil {
		return nil, iodine.New(err, errParams)
	}
	defer file.Close()
	metadata := make(map[string]string)
	if err := json.NewDecoder(file).Decode(&metadata); err != nil {
		return nil, iodine.New(err, errParams)
	}
	return metadata, nil
}
// ListObjects - walks the bucket tree and returns the sorted names of
// every object (directories containing a "data" file).
func (d localDirectoryNode) ListObjects(bucketName string) ([]string, error) {
	errParams := map[string]string{"bucket": bucketName}
	prefix := path.Join(d.root, bucketName)
	var objects []string
	walkFn := func(walkPath string, info os.FileInfo, err error) error {
		if err != nil {
			return iodine.New(err, errParams)
		}
		if info.IsDir() || !strings.HasSuffix(walkPath, "data") {
			return nil
		}
		// strip the bucket prefix and trailing "/data" to recover the
		// object name
		name := strings.TrimPrefix(walkPath, prefix+"/")
		objects = append(objects, strings.TrimSuffix(name, "/data"))
		return nil
	}
	if err := filepath.Walk(prefix, walkFn); err != nil {
		return nil, iodine.New(err, errParams)
	}
	sort.Strings(objects)
	return objects, nil
}

View File

@ -1,91 +0,0 @@
package donut
import (
"encoding/json"
"io/ioutil"
"os"
"path"
"github.com/minio-io/iodine"
)
// newDonutObjectWriter - creates the object's "data" file (exclusive
// create, so an existing object is not overwritten) and returns a
// Writer wrapping it.
func newDonutObjectWriter(objectDir string) (Writer, error) {
	dataFile, err := os.OpenFile(path.Join(objectDir, "data"), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
	if err != nil {
		return nil, iodine.New(err, map[string]string{"objectDir": objectDir})
	}
	writer := donutObjectWriter{
		root:          objectDir,
		file:          dataFile,
		metadata:      make(map[string]string),
		donutMetadata: make(map[string]string),
	}
	return writer, nil
}
// donutObjectWriter - writes an object's data file and buffers its
// metadata maps until Close persists them alongside the data.
type donutObjectWriter struct {
	root          string            // object directory on disk
	file          *os.File          // open "data" file
	metadata      map[string]string // user metadata, persisted as metadata.json
	donutMetadata map[string]string // internal metadata, persisted as donutMetadata.json
	err           error             // error recorded via CloseWithError
}
// Write - appends data to the object's on-disk "data" file.
func (d donutObjectWriter) Write(data []byte) (int, error) {
	written, err := d.file.Write(data)
	return written, iodine.New(err, nil)
}
// Close - persists the metadata json files and closes the data file.
// Bug fix: json.Marshal and ioutil.WriteFile errors were previously
// discarded, silently committing objects with missing metadata.
func (d donutObjectWriter) Close() error {
	if d.err != nil {
		return iodine.New(d.err, nil)
	}
	metadata, err := json.Marshal(d.metadata)
	if err != nil {
		return iodine.New(err, nil)
	}
	if err := ioutil.WriteFile(path.Join(d.root, "metadata.json"), metadata, 0600); err != nil {
		return iodine.New(err, nil)
	}
	donutMetadata, err := json.Marshal(d.donutMetadata)
	if err != nil {
		return iodine.New(err, nil)
	}
	if err := ioutil.WriteFile(path.Join(d.root, "donutMetadata.json"), donutMetadata, 0600); err != nil {
		return iodine.New(err, nil)
	}
	return iodine.New(d.file.Close(), nil)
}
// CloseWithError - records err as the writer's failure cause and closes.
// Bug fix: the guard was inverted (`if d.err != nil { d.err = err }`),
// so the incoming error was only stored when one was already present
// and dropped in the normal case, letting Close commit the object.
// NOTE(review): value receiver — the recorded error is visible to this
// call's d.Close() but does not persist on the caller's copy; confirm
// whether a pointer receiver was intended.
func (d donutObjectWriter) CloseWithError(err error) error {
	if d.err == nil {
		d.err = err
	}
	return iodine.New(d.Close(), nil)
}
// SetMetadata - replaces the user metadata with a copy of metadata.
func (d donutObjectWriter) SetMetadata(metadata map[string]string) error {
	for key := range d.metadata {
		delete(d.metadata, key)
	}
	for key, value := range metadata {
		d.metadata[key] = value
	}
	return nil
}
// GetMetadata - returns a defensive copy of the user metadata.
func (d donutObjectWriter) GetMetadata() (map[string]string, error) {
	result := make(map[string]string, len(d.metadata))
	for key, value := range d.metadata {
		result[key] = value
	}
	return result, nil
}
// SetDonutMetadata - replaces the internal donut metadata with a copy
// of metadata.
func (d donutObjectWriter) SetDonutMetadata(metadata map[string]string) error {
	for key := range d.donutMetadata {
		delete(d.donutMetadata, key)
	}
	for key, value := range metadata {
		d.donutMetadata[key] = value
	}
	return nil
}
// GetDonutMetadata - returns a defensive copy of the internal donut
// metadata.
func (d donutObjectWriter) GetDonutMetadata() (map[string]string, error) {
	result := make(map[string]string, len(d.donutMetadata))
	for key, value := range d.donutMetadata {
		result[key] = value
	}
	return result, nil
}