diff --git a/pkg/storage/donut/donut_bucket_internal.go b/pkg/storage/donut/bucket.go
similarity index 59%
rename from pkg/storage/donut/donut_bucket_internal.go
rename to pkg/storage/donut/bucket.go
index b383545a3..419b0a581 100644
--- a/pkg/storage/donut/donut_bucket_internal.go
+++ b/pkg/storage/donut/bucket.go
@@ -18,21 +18,212 @@ package donut
 
 import (
 	"bytes"
-	"crypto/md5"
-	"encoding/hex"
-	"encoding/json"
 	"fmt"
 	"hash"
 	"io"
 	"path/filepath"
 	"strconv"
 	"strings"
+	"time"
+
+	"crypto/md5"
+	"encoding/hex"
+	"encoding/json"
 
 	"github.com/minio/minio/pkg/iodine"
 	"github.com/minio/minio/pkg/utils/split"
 )
 
-/// This file contains all the internal functions
+// internal struct carrying bucket-specific information
+type bucket struct {
+	name      string
+	acl       string
+	time      time.Time
+	donutName string
+	nodes     map[string]Node
+	objects   map[string]object
+}
+
+// NewBucket - instantiate a new bucket
+func NewBucket(bucketName, aclType, donutName string, nodes map[string]Node) (bucket, map[string]string, error) {
+	errParams := map[string]string{
+		"bucketName": bucketName,
+		"donutName":  donutName,
+		"aclType":    aclType,
+	}
+	if strings.TrimSpace(bucketName) == "" || strings.TrimSpace(donutName) == "" {
+		return bucket{}, nil, iodine.New(InvalidArgument{}, errParams)
+	}
+	bucketMetadata := make(map[string]string)
+	bucketMetadata["acl"] = aclType
+	t := time.Now().UTC()
+	bucketMetadata["created"] = t.Format(time.RFC3339Nano)
+	b := bucket{}
+	b.name = bucketName
+	b.acl = aclType
+	b.time = t
+	b.donutName = donutName
+	b.objects = make(map[string]object)
+	b.nodes = nodes
+	return b, bucketMetadata, nil
+}
+
+// ListObjects - list all objects
+func (b bucket) ListObjects() (map[string]object, error) {
+	nodeSlice := 0
+	for _, node := range b.nodes {
+		disks, err := node.ListDisks()
+		if err != nil {
+			return nil, iodine.New(err, nil)
+		}
+		for order, disk := range disks {
+			bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, order)
+			bucketPath := filepath.Join(b.donutName, bucketSlice)
+			objects, err := disk.ListDir(bucketPath)
+			if err != nil {
+				return nil, iodine.New(err, nil)
+			}
+			for _, object := range objects {
+				newObject, err := NewObject(object.Name(), filepath.Join(disk.GetPath(), bucketPath))
+				if err != nil {
+					return nil, iodine.New(err, nil)
+				}
+				newObjectMetadata, err := newObject.GetObjectMetadata()
+				if err != nil {
+					return nil, iodine.New(err, nil)
+				}
+				objectName, ok := newObjectMetadata["object"]
+				if !ok {
+					return nil, iodine.New(ObjectCorrupted{Object: object.Name()}, nil)
+				}
+				b.objects[objectName] = newObject
+			}
+		}
+		nodeSlice = nodeSlice + 1
+	}
+	return b.objects, nil
+}
+
+// ReadObject - open an object to read
+func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64, err error) {
+	reader, writer := io.Pipe()
+	// get list of objects
+	objects, err := b.ListObjects()
+	if err != nil {
+		return nil, 0, iodine.New(err, nil)
+	}
+	// check if object exists
+	object, ok := objects[objectName]
+	if !ok {
+		return nil, 0, iodine.New(ObjectNotFound{Object: objectName}, nil)
+	}
+	// verify if objectMetadata is readable, before we serve the request
+	objectMetadata, err := object.GetObjectMetadata()
+	if err != nil {
+		return nil, 0, iodine.New(err, nil)
+	}
+	if objectName == "" || writer == nil || len(objectMetadata) == 0 {
+		return nil, 0, iodine.New(InvalidArgument{}, nil)
+	}
+	size, err = strconv.ParseInt(objectMetadata["size"], 10, 64)
+	if err != nil {
+		return nil, 0, iodine.New(err, nil)
+	}
+	// verify if donutObjectMetadata is readable, before we serve the request
+	donutObjectMetadata, err := object.GetDonutObjectMetadata()
+	if err != nil {
+		return nil, 0, iodine.New(err, nil)
+	}
+	// read and reply back to GetObject() request in a goroutine
+	go b.readEncodedData(b.normalizeObjectName(objectName), writer, donutObjectMetadata)
+	return reader, size, nil
+}
+
+// WriteObject - write a new object into bucket
+func (b bucket) WriteObject(objectName string, objectData io.Reader, expectedMD5Sum string, metadata map[string]string) (string, error) {
+	if objectName == "" || objectData == nil {
+		return "", iodine.New(InvalidArgument{}, nil)
+	}
+	writers, err := b.getDiskWriters(b.normalizeObjectName(objectName), "data")
+	if err != nil {
+		return "", iodine.New(err, nil)
+	}
+	summer := md5.New()
+	objectMetadata := make(map[string]string)
+	donutObjectMetadata := make(map[string]string)
+	objectMetadata["version"] = objectMetadataVersion
+	donutObjectMetadata["version"] = donutObjectMetadataVersion
+	size := metadata["contentLength"]
+	sizeInt, err := strconv.ParseInt(size, 10, 64)
+	if err != nil {
+		return "", iodine.New(err, nil)
+	}
+
+	// if there is only one writer, do not compute erasure
+	switch len(writers) == 1 {
+	case true:
+		mw := io.MultiWriter(writers[0], summer)
+		totalLength, err := io.CopyN(mw, objectData, sizeInt)
+		if err != nil {
+			return "", iodine.New(err, nil)
+		}
+		donutObjectMetadata["sys.size"] = strconv.FormatInt(totalLength, 10)
+		objectMetadata["size"] = strconv.FormatInt(totalLength, 10)
+	case false:
+		// calculate data and parity blocks dictated by the total number of writers
+		k, m, err := b.getDataAndParity(len(writers))
+		if err != nil {
+			return "", iodine.New(err, nil)
+		}
+		// encode data with k, m and write
+		chunkCount, totalLength, err := b.writeEncodedData(k, m, writers, objectData, summer)
+		if err != nil {
+			return "", iodine.New(err, nil)
+		}
+		/// donutMetadata section
+		donutObjectMetadata["sys.blockSize"] = strconv.Itoa(10 * 1024 * 1024)
+		donutObjectMetadata["sys.chunkCount"] = strconv.Itoa(chunkCount)
+		donutObjectMetadata["sys.erasureK"] = strconv.FormatUint(uint64(k), 10)
+		donutObjectMetadata["sys.erasureM"] = strconv.FormatUint(uint64(m), 10)
+		donutObjectMetadata["sys.erasureTechnique"] = "Cauchy"
+		donutObjectMetadata["sys.size"] = strconv.Itoa(totalLength)
+		// keep size inside objectMetadata as well for Object API requests
+		objectMetadata["size"] = strconv.Itoa(totalLength)
+	}
+	objectMetadata["bucket"] = b.name
+	objectMetadata["object"] = objectName
+	// store all user-provided metadata
+	for k, v := range metadata {
+		objectMetadata[k] = v
+	}
+	dataMd5sum := summer.Sum(nil)
+	objectMetadata["created"] = time.Now().UTC().Format(time.RFC3339Nano)
+
+	// keeping md5sum for the object in two different places
+	// one for object storage and another for internal use
+	objectMetadata["md5"] = hex.EncodeToString(dataMd5sum)
+	donutObjectMetadata["sys.md5"] = hex.EncodeToString(dataMd5sum)
+
+	// verify the written object against the expected md5sum, only if one was provided
+	if strings.TrimSpace(expectedMD5Sum) != "" {
+		if err := b.isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), objectMetadata["md5"]); err != nil {
+			return "", iodine.New(err, nil)
+		}
+	}
+	// write donut-specific metadata
+	if err := b.writeDonutObjectMetadata(b.normalizeObjectName(objectName), donutObjectMetadata); err != nil {
+		return "", iodine.New(err, nil)
+	}
+	// write object-specific metadata
+	if err := b.writeObjectMetadata(b.normalizeObjectName(objectName), objectMetadata); err != nil {
+		return "", iodine.New(err, nil)
+	}
+	// close all writers, when control flow reaches here
+	for _, writer := range writers {
+		writer.Close()
+	}
+	return objectMetadata["md5"], nil
+}
 
 // isMD5SumEqual - returns error if md5sum mismatches, other its `nil`
 func (b bucket) isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error {
diff --git a/pkg/storage/donut/donut_common.go b/pkg/storage/donut/common.go
similarity index 100%
rename from pkg/storage/donut/donut_common.go
rename to pkg/storage/donut/common.go
diff --git a/pkg/storage/donut/donut_bucket.go b/pkg/storage/donut/donut_bucket.go
deleted file mode 100644
index f12d6fe18..000000000
--- a/pkg/storage/donut/donut_bucket.go
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Minimalist Object Storage, (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package donut
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
-	"time"
-
-	"crypto/md5"
-	"encoding/hex"
-
-	"github.com/minio/minio/pkg/iodine"
-)
-
-// internal struct carrying bucket specific information
-type bucket struct {
-	name      string
-	acl       string
-	time      time.Time
-	donutName string
-	nodes     map[string]Node
-	objects   map[string]object
-}
-
-// NewBucket - instantiate a new bucket
-func NewBucket(bucketName, aclType, donutName string, nodes map[string]Node) (bucket, map[string]string, error) {
-	errParams := map[string]string{
-		"bucketName": bucketName,
-		"donutName":  donutName,
-		"aclType":    aclType,
-	}
-	if strings.TrimSpace(bucketName) == "" || strings.TrimSpace(donutName) == "" {
-		return bucket{}, nil, iodine.New(InvalidArgument{}, errParams)
-	}
-	bucketMetadata := make(map[string]string)
-	bucketMetadata["acl"] = aclType
-	t := time.Now().UTC()
-	bucketMetadata["created"] = t.Format(time.RFC3339Nano)
-	b := bucket{}
-	b.name = bucketName
-	b.acl = aclType
-	b.time = t
-	b.donutName = donutName
-	b.objects = make(map[string]object)
-	b.nodes = nodes
-	return b, bucketMetadata, nil
-}
-
-// ListObjects - list all objects
-func (b bucket) ListObjects() (map[string]object, error) {
-	nodeSlice := 0
-	for _, node := range b.nodes {
-		disks, err := node.ListDisks()
-		if err != nil {
-			return nil, iodine.New(err, nil)
-		}
-		for order, disk := range disks {
-			bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, order)
-			bucketPath := filepath.Join(b.donutName, bucketSlice)
-			objects, err := disk.ListDir(bucketPath)
-			if err != nil {
-				return nil, iodine.New(err, nil)
-			}
-			for _, object := range objects {
-				newObject, err := NewObject(object.Name(), filepath.Join(disk.GetPath(), bucketPath))
-				if err != nil {
-					return nil, iodine.New(err, nil)
-				}
-				newObjectMetadata, err := newObject.GetObjectMetadata()
-				if err != nil {
-					return nil, iodine.New(err, nil)
-				}
-				objectName, ok := newObjectMetadata["object"]
-				if !ok {
-					return nil, iodine.New(ObjectCorrupted{Object: object.Name()}, nil)
-				}
-				b.objects[objectName] = newObject
-			}
-		}
-		nodeSlice = nodeSlice + 1
-	}
-	return b.objects, nil
-}
-
-// GetObject - get object
-func (b bucket) GetObject(objectName string) (reader io.ReadCloser, size int64, err error) {
-	reader, writer := io.Pipe()
-	// get list of objects
-	objects, err := b.ListObjects()
-	if err != nil {
-		return nil, 0, iodine.New(err, nil)
-	}
-	// check if object exists
-	object, ok := objects[objectName]
-	if !ok {
-		return nil, 0, iodine.New(os.ErrNotExist, nil)
-	}
-	// verify if objectMetadata is readable, before we serve the request
-	objectMetadata, err := object.GetObjectMetadata()
-	if err != nil {
-		return nil, 0, iodine.New(err, nil)
-	}
-	if objectName == "" || writer == nil || len(objectMetadata) == 0 {
-		return nil, 0, iodine.New(InvalidArgument{}, nil)
-	}
-	size, err = strconv.ParseInt(objectMetadata["size"], 10, 64)
-	if err != nil {
-		return nil, 0, iodine.New(err, nil)
-	}
-	// verify if donutObjectMetadata is readable, before we server the request
-	donutObjectMetadata, err := object.GetDonutObjectMetadata()
-	if err != nil {
-		return nil, 0, iodine.New(err, nil)
-	}
-	// read and reply back to GetObject() request in a go-routine
-	go b.readEncodedData(b.normalizeObjectName(objectName), writer, donutObjectMetadata)
-	return reader, size, nil
-}
-
-// PutObject - put a new object
-func (b bucket) PutObject(objectName string, objectData io.Reader, expectedMD5Sum string, metadata map[string]string) (string, error) {
-	if objectName == "" || objectData == nil {
-		return "", iodine.New(InvalidArgument{}, nil)
-	}
-	writers, err := b.getDiskWriters(b.normalizeObjectName(objectName), "data")
-	if err != nil {
-		return "", iodine.New(err, nil)
-	}
-	summer := md5.New()
-	objectMetadata := make(map[string]string)
-	donutObjectMetadata := make(map[string]string)
-	objectMetadata["version"] = objectMetadataVersion
-	donutObjectMetadata["version"] = donutObjectMetadataVersion
-	size := metadata["contentLength"]
-	sizeInt, err := strconv.ParseInt(size, 10, 64)
-	if err != nil {
-		return "", iodine.New(err, nil)
-	}
-
-	// if total writers are only '1' do not compute erasure
-	switch len(writers) == 1 {
-	case true:
-		mw := io.MultiWriter(writers[0], summer)
-		totalLength, err := io.CopyN(mw, objectData, sizeInt)
-		if err != nil {
-			return "", iodine.New(err, nil)
-		}
-		donutObjectMetadata["sys.size"] = strconv.FormatInt(totalLength, 10)
-		objectMetadata["size"] = strconv.FormatInt(totalLength, 10)
-	case false:
-		// calculate data and parity dictated by total number of writers
-		k, m, err := b.getDataAndParity(len(writers))
-		if err != nil {
-			return "", iodine.New(err, nil)
-		}
-		// encoded data with k, m and write
-		chunkCount, totalLength, err := b.writeEncodedData(k, m, writers, objectData, summer)
-		if err != nil {
-			return "", iodine.New(err, nil)
-		}
-		/// donutMetadata section
-		donutObjectMetadata["sys.blockSize"] = strconv.Itoa(10 * 1024 * 1024)
-		donutObjectMetadata["sys.chunkCount"] = strconv.Itoa(chunkCount)
-		donutObjectMetadata["sys.erasureK"] = strconv.FormatUint(uint64(k), 10)
-		donutObjectMetadata["sys.erasureM"] = strconv.FormatUint(uint64(m), 10)
-		donutObjectMetadata["sys.erasureTechnique"] = "Cauchy"
-		donutObjectMetadata["sys.size"] = strconv.Itoa(totalLength)
-		// keep size inside objectMetadata as well for Object API requests
-		objectMetadata["size"] = strconv.Itoa(totalLength)
-	}
-	objectMetadata["bucket"] = b.name
-	objectMetadata["object"] = objectName
-	// store all user provided metadata
-	for k, v := range metadata {
-		objectMetadata[k] = v
-	}
-	dataMd5sum := summer.Sum(nil)
-	objectMetadata["created"] = time.Now().UTC().Format(time.RFC3339Nano)
-
-	// keeping md5sum for the object in two different places
-	// one for object storage and another is for internal use
-	objectMetadata["md5"] = hex.EncodeToString(dataMd5sum)
-	donutObjectMetadata["sys.md5"] = hex.EncodeToString(dataMd5sum)
-
-	// Verify if the written object is equal to what is expected, only if it is requested as such
-	if strings.TrimSpace(expectedMD5Sum) != "" {
-		if err := b.isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), objectMetadata["md5"]); err != nil {
-			return "", iodine.New(err, nil)
-		}
-	}
-	// write donut specific metadata
-	if err := b.writeDonutObjectMetadata(b.normalizeObjectName(objectName), donutObjectMetadata); err != nil {
-		return "", iodine.New(err, nil)
-	}
-	// write object specific metadata
-	if err := b.writeObjectMetadata(b.normalizeObjectName(objectName), objectMetadata); err != nil {
-		return "", iodine.New(err, nil)
-	}
-	// close all writers, when control flow reaches here
-	for _, writer := range writers {
-		writer.Close()
-	}
-	return objectMetadata["md5"], nil
-}
diff --git a/pkg/storage/donut/donut_encoder.go b/pkg/storage/donut/encoder.go
similarity index 100%
rename from pkg/storage/donut/donut_encoder.go
rename to pkg/storage/donut/encoder.go
diff --git a/pkg/storage/donut/donut_errors.go b/pkg/storage/donut/errors.go
similarity index 100%
rename from pkg/storage/donut/donut_errors.go
rename to pkg/storage/donut/errors.go
diff --git a/pkg/storage/donut/donut_public_interfaces.go b/pkg/storage/donut/interfaces.go
similarity index 100%
rename from pkg/storage/donut/donut_public_interfaces.go
rename to pkg/storage/donut/interfaces.go
diff --git a/pkg/storage/donut/donut_node.go b/pkg/storage/donut/node.go
similarity index 100%
rename from pkg/storage/donut/donut_node.go
rename to pkg/storage/donut/node.go
diff --git a/pkg/storage/donut/donut_object.go b/pkg/storage/donut/object.go
similarity index 94%
rename from pkg/storage/donut/donut_object.go
rename to pkg/storage/donut/object.go
index d394c132f..567ef56ce 100644
--- a/pkg/storage/donut/donut_object.go
+++ b/pkg/storage/donut/object.go
@@ -47,7 +47,7 @@ func (o object) GetObjectMetadata() (map[string]string, error) {
 	objectMetadata := make(map[string]string)
 	objectMetadataBytes, err := ioutil.ReadFile(filepath.Join(o.objectPath, objectMetadataConfig))
 	if err != nil {
-		return nil, iodine.New(err, nil)
+		return nil, iodine.New(ObjectNotFound{Object: o.name}, nil)
 	}
 	if err := json.Unmarshal(objectMetadataBytes, &objectMetadata); err != nil {
 		return nil, iodine.New(err, nil)
@@ -60,7 +60,7 @@ func (o object) GetDonutObjectMetadata() (map[string]string, error) {
 	donutObjectMetadata := make(map[string]string)
 	donutObjectMetadataBytes, err := ioutil.ReadFile(filepath.Join(o.objectPath, donutObjectMetadataConfig))
 	if err != nil {
-		return nil, iodine.New(err, nil)
+		return nil, iodine.New(ObjectNotFound{Object: o.name}, nil)
 	}
 	if err := json.Unmarshal(donutObjectMetadataBytes, &donutObjectMetadata); err != nil {
 		return nil, iodine.New(err, nil)
diff --git a/pkg/storage/donut/objectstorage.go b/pkg/storage/donut/objectstorage.go
index b9640d9b2..a3b62506f 100644
--- a/pkg/storage/donut/objectstorage.go
+++ b/pkg/storage/donut/objectstorage.go
@@ -17,7 +17,11 @@
 package donut
 
 import (
+	"encoding/json"
+	"fmt"
 	"io"
+	"os"
+	"path/filepath"
 	"sort"
 	"strconv"
 	"strings"
@@ -185,7 +189,7 @@ func (d donut) PutObject(bucket, object, expectedMD5Sum string, reader io.ReadCl
 			return "", iodine.New(ObjectExists{Object: object}, nil)
 		}
 	}
-	md5sum, err := d.buckets[bucket].PutObject(object, reader, expectedMD5Sum, metadata)
+	md5sum, err := d.buckets[bucket].WriteObject(object, reader, expectedMD5Sum, metadata)
 	if err != nil {
 		return "", iodine.New(err, errParams)
 	}
@@ -211,16 +215,7 @@ func (d donut) GetObject(bucket, object string) (reader io.ReadCloser, size int6
 	if _, ok := d.buckets[bucket]; !ok {
 		return nil, 0, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
 	}
-	objectList, err := d.buckets[bucket].ListObjects()
-	if err != nil {
-		return nil, 0, iodine.New(err, nil)
-	}
-	for objectName := range objectList {
-		if objectName == object {
-			return d.buckets[bucket].GetObject(object)
-		}
-	}
-	return nil, 0, iodine.New(ObjectNotFound{Object: object}, nil)
+	return d.buckets[bucket].ReadObject(object)
 }
 
 // GetObjectMetadata - get object metadata
@@ -246,3 +241,157 @@ func (d donut) GetObjectMetadata(bucket, object string) (map[string]string, erro
 	}
 	return donutObject.GetObjectMetadata()
 }
+
+// getBucketMetadataWriters - get writers for the bucket metadata file on all disks
+func (d donut) getBucketMetadataWriters() ([]io.WriteCloser, error) {
+	var writers []io.WriteCloser
+	for _, node := range d.nodes {
+		disks, err := node.ListDisks()
+		if err != nil {
+			return nil, iodine.New(err, nil)
+		}
+		writers = make([]io.WriteCloser, len(disks))
+		for order, disk := range disks {
+			bucketMetaDataWriter, err := disk.CreateFile(filepath.Join(d.name, bucketMetadataConfig))
+			if err != nil {
+				return nil, iodine.New(err, nil)
+			}
+			writers[order] = bucketMetaDataWriter
+		}
+	}
+	return writers, nil
+}
+
+func (d donut) getBucketMetadataReaders() ([]io.ReadCloser, error) {
+	var readers []io.ReadCloser
+	for _, node := range d.nodes {
+		disks, err := node.ListDisks()
+		if err != nil {
+			return nil, iodine.New(err, nil)
+		}
+		readers = make([]io.ReadCloser, len(disks))
+		for order, disk := range disks {
+			bucketMetaDataReader, err := disk.OpenFile(filepath.Join(d.name, bucketMetadataConfig))
+			if err != nil {
+				return nil, iodine.New(err, nil)
+			}
+			readers[order] = bucketMetaDataReader
+		}
+	}
+	return readers, nil
+}
+
+// setDonutBucketMetadata - save bucket metadata on all disks
+func (d donut) setDonutBucketMetadata(metadata map[string]map[string]string) error {
+	writers, err := d.getBucketMetadataWriters()
+	if err != nil {
+		return iodine.New(err, nil)
+	}
+	for _, writer := range writers {
+		defer writer.Close()
+	}
+	for _, writer := range writers {
+		jenc := json.NewEncoder(writer)
+		if err := jenc.Encode(metadata); err != nil {
+			return iodine.New(err, nil)
+		}
+	}
+	return nil
+}
+
+func (d donut) getDonutBucketMetadata() (map[string]map[string]string, error) {
+	metadata := make(map[string]map[string]string)
+	readers, err := d.getBucketMetadataReaders()
+	if err != nil {
+		return nil, iodine.New(err, nil)
+	}
+	for _, reader := range readers {
+		defer reader.Close()
+	}
+	for _, reader := range readers {
+		jenc := json.NewDecoder(reader)
+		if err := jenc.Decode(&metadata); err != nil {
+			return nil, iodine.New(err, nil)
+		}
+	}
+	return metadata, nil
+}
+
+func (d donut) makeDonutBucket(bucketName, acl string) error {
+	err := d.getDonutBuckets()
+	if err != nil {
+		return iodine.New(err, nil)
+	}
+	if _, ok := d.buckets[bucketName]; ok {
+		return iodine.New(BucketExists{Bucket: bucketName}, nil)
+	}
+	bucket, bucketMetadata, err := NewBucket(bucketName, acl, d.name, d.nodes)
+	if err != nil {
+		return iodine.New(err, nil)
+	}
+	nodeNumber := 0
+	d.buckets[bucketName] = bucket
+	for _, node := range d.nodes {
+		disks, err := node.ListDisks()
+		if err != nil {
+			return iodine.New(err, nil)
+		}
+		for order, disk := range disks {
+			bucketSlice := fmt.Sprintf("%s$%d$%d", bucketName, nodeNumber, order)
fmt.Sprintf("%s$%d$%d", bucketName, nodeNumber, order) + err := disk.MakeDir(filepath.Join(d.name, bucketSlice)) + if err != nil { + return iodine.New(err, nil) + } + } + nodeNumber = nodeNumber + 1 + } + metadata, err := d.getDonutBucketMetadata() + if err != nil { + err = iodine.ToError(err) + if os.IsNotExist(err) { + metadata := make(map[string]map[string]string) + metadata[bucketName] = bucketMetadata + err = d.setDonutBucketMetadata(metadata) + if err != nil { + return iodine.New(err, nil) + } + return nil + } + return iodine.New(err, nil) + } + metadata[bucketName] = bucketMetadata + err = d.setDonutBucketMetadata(metadata) + if err != nil { + return iodine.New(err, nil) + } + return nil +} + +func (d donut) getDonutBuckets() error { + for _, node := range d.nodes { + disks, err := node.ListDisks() + if err != nil { + return iodine.New(err, nil) + } + for _, disk := range disks { + dirs, err := disk.ListDir(d.name) + if err != nil { + return iodine.New(err, nil) + } + for _, dir := range dirs { + splitDir := strings.Split(dir.Name(), "$") + if len(splitDir) < 3 { + return iodine.New(CorruptedBackend{Backend: dir.Name()}, nil) + } + bucketName := splitDir[0] + // we dont need this NewBucket once we cache from makeDonutBucket() + bucket, _, err := NewBucket(bucketName, "private", d.name, d.nodes) + if err != nil { + return iodine.New(err, nil) + } + d.buckets[bucketName] = bucket + } + } + } + return nil +} diff --git a/pkg/storage/donut/objectstorage_internal.go b/pkg/storage/donut/objectstorage_internal.go deleted file mode 100644 index 107f271b9..000000000 --- a/pkg/storage/donut/objectstorage_internal.go +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Minimalist Object Storage, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package donut
-
-import (
-	"encoding/json"
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/minio/minio/pkg/iodine"
-)
-
-/// This file contains all the internal functions used by Object interface
-
-// getDiskWriters -
-func (d donut) getBucketMetadataWriters() ([]io.WriteCloser, error) {
-	var writers []io.WriteCloser
-	for _, node := range d.nodes {
-		disks, err := node.ListDisks()
-		if err != nil {
-			return nil, iodine.New(err, nil)
-		}
-		writers = make([]io.WriteCloser, len(disks))
-		for order, disk := range disks {
-			bucketMetaDataWriter, err := disk.CreateFile(filepath.Join(d.name, bucketMetadataConfig))
-			if err != nil {
-				return nil, iodine.New(err, nil)
-			}
-			writers[order] = bucketMetaDataWriter
-		}
-	}
-	return writers, nil
-}
-
-func (d donut) getBucketMetadataReaders() ([]io.ReadCloser, error) {
-	var readers []io.ReadCloser
-	for _, node := range d.nodes {
-		disks, err := node.ListDisks()
-		if err != nil {
-			return nil, iodine.New(err, nil)
-		}
-		readers = make([]io.ReadCloser, len(disks))
-		for order, disk := range disks {
-			bucketMetaDataReader, err := disk.OpenFile(filepath.Join(d.name, bucketMetadataConfig))
-			if err != nil {
-				return nil, iodine.New(err, nil)
-			}
-			readers[order] = bucketMetaDataReader
-		}
-	}
-	return readers, nil
-}
-
-//
-func (d donut) setDonutBucketMetadata(metadata map[string]map[string]string) error {
-	writers, err := d.getBucketMetadataWriters()
-	if err != nil {
-		return iodine.New(err, nil)
-	}
-	for _, writer := range writers {
-		defer writer.Close()
-	}
-	for _, writer := range writers {
-		jenc := json.NewEncoder(writer)
-		if err := jenc.Encode(metadata); err != nil {
-			return iodine.New(err, nil)
-		}
-	}
-	return nil
-}
-
-func (d donut) getDonutBucketMetadata() (map[string]map[string]string, error) {
-	metadata := make(map[string]map[string]string)
-	readers, err := d.getBucketMetadataReaders()
-	if err != nil {
-		return nil, iodine.New(err, nil)
-	}
-	for _, reader := range readers {
-		defer reader.Close()
-	}
-	for _, reader := range readers {
-		jenc := json.NewDecoder(reader)
-		if err := jenc.Decode(&metadata); err != nil {
-			return nil, iodine.New(err, nil)
-		}
-	}
-	return metadata, nil
-}
-
-func (d donut) makeDonutBucket(bucketName, acl string) error {
-	err := d.getDonutBuckets()
-	if err != nil {
-		return iodine.New(err, nil)
-	}
-	if _, ok := d.buckets[bucketName]; ok {
-		return iodine.New(BucketExists{Bucket: bucketName}, nil)
-	}
-	bucket, bucketMetadata, err := NewBucket(bucketName, acl, d.name, d.nodes)
-	if err != nil {
-		return iodine.New(err, nil)
-	}
-	nodeNumber := 0
-	d.buckets[bucketName] = bucket
-	for _, node := range d.nodes {
-		disks, err := node.ListDisks()
-		if err != nil {
-			return iodine.New(err, nil)
-		}
-		for order, disk := range disks {
-			bucketSlice := fmt.Sprintf("%s$%d$%d", bucketName, nodeNumber, order)
-			err := disk.MakeDir(filepath.Join(d.name, bucketSlice))
-			if err != nil {
-				return iodine.New(err, nil)
-			}
-		}
-		nodeNumber = nodeNumber + 1
-	}
-	metadata, err := d.getDonutBucketMetadata()
-	if err != nil {
-		err = iodine.ToError(err)
-		if os.IsNotExist(err) {
-			metadata := make(map[string]map[string]string)
-			metadata[bucketName] = bucketMetadata
-			err = d.setDonutBucketMetadata(metadata)
-			if err != nil {
-				return iodine.New(err, nil)
-			}
-			return nil
-		}
-		return iodine.New(err, nil)
-	}
-	metadata[bucketName] = bucketMetadata
-	err = d.setDonutBucketMetadata(metadata)
-	if err != nil {
-		return iodine.New(err, nil)
-	}
-	return nil
-}
-
-func (d donut) getDonutBuckets() error {
-	for _, node := range d.nodes {
-		disks, err := node.ListDisks()
-		if err != nil {
-			return iodine.New(err, nil)
-		}
-		for _, disk := range disks {
-			dirs, err := disk.ListDir(d.name)
-			if err != nil {
-				return iodine.New(err, nil)
-			}
-			for _, dir := range dirs {
-				splitDir := strings.Split(dir.Name(), "$")
-				if len(splitDir) < 3 {
-					return iodine.New(CorruptedBackend{Backend: dir.Name()}, nil)
-				}
-				bucketName := splitDir[0]
-				// we dont need this NewBucket once we cache from makeDonutBucket()
-				bucket, _, err := NewBucket(bucketName, "private", d.name, d.nodes)
-				if err != nil {
-					return iodine.New(err, nil)
-				}
-				d.buckets[bucketName] = bucket
-			}
-		}
-	}
-	return nil
-}
diff --git a/pkg/storage/donut/donut_rebalance.go b/pkg/storage/donut/rebalance.go
similarity index 100%
rename from pkg/storage/donut/donut_rebalance.go
rename to pkg/storage/donut/rebalance.go
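Reviewer note, not part of the patch: the bucket-level API is renamed from `GetObject`/`PutObject` to `ReadObject`/`WriteObject`, while the donut-level `GetObject`/`PutObject` keep their names and now delegate directly. Below is a minimal in-package sketch of the new call shapes; the function `exampleRoundTrip`, the bucket name, donut name, and object name are all hypothetical, and it assumes a `nodes` map already populated with disks so `getDiskWriters` has somewhere to write.

```go
package donut

import (
	"bytes"
	"io/ioutil"
	"strconv"
)

// exampleRoundTrip - illustrative sketch (not part of this patch) of the
// renamed bucket API: WriteObject replaces PutObject, ReadObject replaces
// GetObject. Assumes `nodes` was set up elsewhere via the Node interface.
func exampleRoundTrip(nodes map[string]Node) error {
	// NewBucket also returns the bucket's initial metadata map.
	b, _, err := NewBucket("photos", "private", "mydonut", nodes)
	if err != nil {
		return err
	}
	data := []byte("hello, donut")
	metadata := map[string]string{
		// WriteObject reads exactly contentLength bytes from the reader.
		"contentLength": strconv.Itoa(len(data)),
	}
	// An empty expectedMD5Sum skips the md5 verification step.
	md5sum, err := b.WriteObject("greeting.txt", bytes.NewReader(data), "", metadata)
	if err != nil {
		return err
	}
	_ = md5sum // hex-encoded md5 of the stored bytes
	// ReadObject streams the (possibly erasure-coded) object back via an io.Pipe.
	reader, size, err := b.ReadObject("greeting.txt")
	if err != nil {
		return err
	}
	defer reader.Close()
	got, err := ioutil.ReadAll(reader)
	if err != nil {
		return err
	}
	_, _ = got, size
	return nil
}
```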