/*
 * Mini Object Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import (
	"bytes"
	"encoding/json"
	"io"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"sort"
	"strings"
	"sync"

	mstorage "github.com/minio-io/minio/pkg/storage"
	"github.com/minio-io/minio/pkg/utils/policy"
)

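// storage - a filesystem-backed object store rooted at a single directory.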
type storage struct {
	root string
	lock *sync.Mutex
}

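// SerializedMetadata - object metadata persisted as a JSON "$metadata" file
// alongside each stored object.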
type SerializedMetadata struct {
	ContentType string
}

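// Start - create a filesystem storage rooted at root and spawn its control
// loop, returning a control channel, an error channel and the storage itself.
//
// A minimal usage sketch (the root path here is only an example):
//
//	_, errCh, store := Start("/tmp/minio-fs")
//	if err := <-errCh; err != nil {
//		// handle startup failure
//	}
//	_ = store.StoreBucket("mybucket")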
func Start(root string) (chan<- string, <-chan error, *storage) {
	ctrlChannel := make(chan string)
	errorChannel := make(chan error)
	s := storage{}
	s.root = root
	s.lock = new(sync.Mutex)
	go start(ctrlChannel, errorChannel, &s)
	return ctrlChannel, errorChannel, &s
}

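// start - control loop; creates the root directory, reports the result (or
// nil) on the error channel and closes it.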
func start(ctrlChannel <-chan string, errorChannel chan<- error, s *storage) {
	err := os.MkdirAll(s.root, 0700)
	errorChannel <- err
	close(errorChannel)
}

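// appendUniq - append i to slice only if it is not already present.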
func appendUniq(slice []string, i string) []string {
	for _, ele := range slice {
		if ele == i {
			return slice
		}
	}
	return append(slice, i)
}

// Bucket Operations

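// ListBuckets - list every top-level directory under root as a bucket,
// skipping "_policy.json" files; a stray regular file marks the backend as
// corrupted.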
func (storage *storage) ListBuckets() ([]mstorage.BucketMetadata, error) {
	files, err := ioutil.ReadDir(storage.root)
	if err != nil {
		return []mstorage.BucketMetadata{}, mstorage.EmbedError("bucket", "", err)
	}

	var metadataList []mstorage.BucketMetadata
	for _, file := range files {
		// Skip policy files
		if strings.HasSuffix(file.Name(), "_policy.json") {
			continue
		}
		if !file.IsDir() {
			return []mstorage.BucketMetadata{}, mstorage.BackendCorrupted{Path: storage.root}
		}
		metadata := mstorage.BucketMetadata{
			Name:    file.Name(),
			Created: file.ModTime(), // TODO - provide real created time
		}
		metadataList = append(metadataList, metadata)
	}
	return metadataList, nil
}

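// StoreBucket - create a new bucket directory under root after validating the
// bucket name and checking that the bucket does not already exist.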
func (storage *storage) StoreBucket(bucket string) error {
	storage.lock.Lock()
	defer storage.lock.Unlock()

	// verify bucket path legal
	if mstorage.IsValidBucket(bucket) == false {
		return mstorage.BucketNameInvalid{Bucket: bucket}
	}

	// get bucket path
	bucketDir := path.Join(storage.root, bucket)

	// check if bucket exists
	if _, err := os.Stat(bucketDir); err == nil {
		return mstorage.BucketExists{
			Bucket: bucket,
		}
	}

	// make bucket
	err := os.Mkdir(bucketDir, 0700)
	if err != nil {
		return mstorage.EmbedError(bucket, "", err)
	}
	return nil
}

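// GetBucketPolicy - read "<bucket>_policy.json" and decode it into a
// policy.BucketPolicy.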
func (storage *storage) GetBucketPolicy(bucket string) (interface{}, error) {
	storage.lock.Lock()
	defer storage.lock.Unlock()

	var p policy.BucketPolicy
	// verify bucket path legal
	if mstorage.IsValidBucket(bucket) == false {
		return policy.BucketPolicy{}, mstorage.BucketNameInvalid{Bucket: bucket}
	}

	// get bucket path
	bucketDir := path.Join(storage.root, bucket)
	// check if bucket exists
	if _, err := os.Stat(bucketDir); err != nil {
		return policy.BucketPolicy{}, mstorage.BucketNotFound{Bucket: bucket}
	}

	// get policy path
	bucketPolicy := path.Join(storage.root, bucket+"_policy.json")
	filestat, err := os.Stat(bucketPolicy)

	if os.IsNotExist(err) {
		return policy.BucketPolicy{}, mstorage.BucketPolicyNotFound{Bucket: bucket}
	}

	if filestat.IsDir() {
		return policy.BucketPolicy{}, mstorage.BackendCorrupted{Path: bucketPolicy}
	}

	file, err := os.OpenFile(bucketPolicy, os.O_RDONLY, 0666)
	defer file.Close()
	if err != nil {
		return policy.BucketPolicy{}, mstorage.EmbedError(bucket, "", err)
	}
	decoder := json.NewDecoder(file)
	err = decoder.Decode(&p)
	if err != nil {
		return policy.BucketPolicy{}, mstorage.EmbedError(bucket, "", err)
	}

	return p, nil
}

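// StoreBucketPolicy - encode the given policy as JSON and write it to
// "<bucket>_policy.json".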
func (storage *storage) StoreBucketPolicy(bucket string, policy interface{}) error {
	storage.lock.Lock()
	defer storage.lock.Unlock()

	// verify bucket path legal
	if mstorage.IsValidBucket(bucket) == false {
		return mstorage.BucketNameInvalid{Bucket: bucket}
	}

	// get bucket path
	bucketDir := path.Join(storage.root, bucket)
	// check if bucket exists
	if _, err := os.Stat(bucketDir); err != nil {
		return mstorage.BucketNotFound{
			Bucket: bucket,
		}
	}

	// get policy path
	bucketPolicy := path.Join(storage.root, bucket+"_policy.json")
	filestat, ret := os.Stat(bucketPolicy)
	if !os.IsNotExist(ret) {
		if filestat.IsDir() {
			return mstorage.BackendCorrupted{Path: bucketPolicy}
		}
	}

	file, err := os.OpenFile(bucketPolicy, os.O_WRONLY|os.O_CREATE, 0600)
	defer file.Close()
	if err != nil {
		return mstorage.EmbedError(bucket, "", err)
	}
	encoder := json.NewEncoder(file)
	err = encoder.Encode(policy)
	if err != nil {
		return mstorage.EmbedError(bucket, "", err)
	}
	return nil
}

// Object Operations

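// CopyObjectToWriter - stream the named object into w, returning the number
// of bytes copied.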
func (storage *storage) CopyObjectToWriter(w io.Writer, bucket string, object string) (int64, error) {
	// validate bucket
	if mstorage.IsValidBucket(bucket) == false {
		return 0, mstorage.BucketNameInvalid{Bucket: bucket}
	}

	// validate object
	if mstorage.IsValidObject(object) == false {
		return 0, mstorage.ObjectNameInvalid{Bucket: bucket, Object: object}
	}

	objectPath := path.Join(storage.root, bucket, object)

	filestat, err := os.Stat(objectPath)
	switch err := err.(type) {
	case nil:
		{
			if filestat.IsDir() {
				return 0, mstorage.ObjectNotFound{Bucket: bucket, Object: object}
			}
		}
	default:
		{
			if os.IsNotExist(err) {
				return 0, mstorage.ObjectNotFound{Bucket: bucket, Object: object}
			} else {
				return 0, mstorage.EmbedError(bucket, object, err)
			}
		}
	}
	file, err := os.Open(objectPath)
	defer file.Close()
	if err != nil {
		return 0, mstorage.EmbedError(bucket, object, err)
	}
	count, err := io.Copy(w, file)
	if err != nil {
		return count, mstorage.EmbedError(bucket, object, err)
	}
	return count, nil
}

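// GetObjectMetadata - stat the object and read its "$metadata" sidecar file
// to build an ObjectMetadata record.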
func (storage *storage) GetObjectMetadata(bucket string, object string) (mstorage.ObjectMetadata, error) {
	if mstorage.IsValidBucket(bucket) == false {
		return mstorage.ObjectMetadata{}, mstorage.BucketNameInvalid{Bucket: bucket}
	}

	if mstorage.IsValidObject(object) == false {
		return mstorage.ObjectMetadata{}, mstorage.ObjectNameInvalid{Bucket: bucket, Object: object}
	}

	objectPath := path.Join(storage.root, bucket, object)

	stat, err := os.Stat(objectPath)
	if os.IsNotExist(err) {
		return mstorage.ObjectMetadata{}, mstorage.ObjectNotFound{Bucket: bucket, Object: object}
	}

	_, err = os.Stat(objectPath + "$metadata")
	if os.IsNotExist(err) {
		return mstorage.ObjectMetadata{}, mstorage.ObjectNotFound{Bucket: bucket, Object: object}
	}

	file, err := os.Open(objectPath + "$metadata")
	defer file.Close()
	if err != nil {
		return mstorage.ObjectMetadata{}, mstorage.EmbedError(bucket, object, err)
	}

	metadataBuffer, err := ioutil.ReadAll(file)
	if err != nil {
		return mstorage.ObjectMetadata{}, mstorage.EmbedError(bucket, object, err)
	}

	var deserializedMetadata SerializedMetadata
	err = json.Unmarshal(metadataBuffer, &deserializedMetadata)
	if err != nil {
		return mstorage.ObjectMetadata{}, mstorage.EmbedError(bucket, object, err)
	}

	contentType := "application/octet-stream"
	if deserializedMetadata.ContentType != "" {
		contentType = deserializedMetadata.ContentType
	}
	contentType = strings.TrimSpace(contentType)

	metadata := mstorage.ObjectMetadata{
		Bucket:      bucket,
		Key:         object,
		Created:     stat.ModTime(),
		Size:        stat.Size(),
		ETag:        bucket + "#" + object,
		ContentType: contentType,
	}

	return metadata, nil
}

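// Path - collects the regular files found while walking a bucket, keyed by
// their path relative to root.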
type Path struct {
	files map[string]os.FileInfo
	root  string
}

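// getAllFiles - filepath.Walk callback; records every regular file except
// "$metadata" sidecars, keyed by its path relative to p.root.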
func (p *Path) getAllFiles(path string, fl os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	if fl.Mode().IsRegular() {
		if strings.HasSuffix(path, "$metadata") {
			return nil
		}
		_p := strings.Split(path, p.root+"/")
		if len(_p) > 1 {
			p.files[_p[1]] = fl
		}
	}
	return nil
}

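// delimiter - return the prefix of path up to and including the first
// occurrence of the delimiter, or "" when path does not contain it.
// For example, delimiter("photos/2015/jan.jpg", "/") returns "photos/".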
func delimiter(path, delimiter string) string {
	delimited := ""
	if !strings.Contains(path, delimiter) {
		return delimited
	}
	index := strings.Index(path, delimiter)
	if index == -1 {
		return delimited
	}
	delimitedIndex := index + len(delimiter)
	delimited = path[:delimitedIndex]
	return delimited
}

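// ByObjectKey - sort.Interface that orders object metadata by key.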
type ByObjectKey []mstorage.ObjectMetadata

func (b ByObjectKey) Len() int           { return len(b) }
func (b ByObjectKey) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b ByObjectKey) Less(i, j int) bool { return b[i].Key < b[j].Key }

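// ListObjects - walk the bucket and return object metadata matching the
// requested prefix and delimiter, truncated at resources.Maxkeys, collecting
// common prefixes for delimited listings.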
func (storage *storage) ListObjects(bucket string, resources mstorage.BucketResourcesMetadata) ([]mstorage.ObjectMetadata, mstorage.BucketResourcesMetadata, error) {
	p := Path{}
	p.files = make(map[string]os.FileInfo)

	if mstorage.IsValidBucket(bucket) == false {
		return []mstorage.ObjectMetadata{}, resources, mstorage.BucketNameInvalid{Bucket: bucket}
	}
	if resources.Prefix != "" && mstorage.IsValidObject(resources.Prefix) == false {
		return []mstorage.ObjectMetadata{}, resources, mstorage.ObjectNameInvalid{Bucket: bucket, Object: resources.Prefix}
	}

	rootPrefix := path.Join(storage.root, bucket)
	// check bucket exists
	if _, err := os.Stat(rootPrefix); os.IsNotExist(err) {
		return []mstorage.ObjectMetadata{}, resources, mstorage.BucketNotFound{Bucket: bucket}
	}

	p.root = rootPrefix
	err := filepath.Walk(rootPrefix, p.getAllFiles)
	if err != nil {
		return []mstorage.ObjectMetadata{}, resources, mstorage.EmbedError(bucket, "", err)
	}

	var metadataList []mstorage.ObjectMetadata
	for name, file := range p.files {
		if len(metadataList) >= resources.Maxkeys {
			resources.IsTruncated = true
			goto ret
		}
		// TODO handle resources.Marker
		switch {
		case resources.Delimiter != "" && resources.Prefix == "":
			delimited := delimiter(name, resources.Delimiter)
			switch {
			case delimited == "":
				metadata := mstorage.ObjectMetadata{
					Bucket:  bucket,
					Key:     name,
					Created: file.ModTime(),
					Size:    file.Size(),
					ETag:    bucket + "#" + name,
				}
				metadataList = append(metadataList, metadata)
			case delimited != "":
				resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, delimited)
			}
		case resources.Delimiter != "" && strings.HasPrefix(name, resources.Prefix):
			delimited := delimiter(name, resources.Delimiter)
			switch {
			case delimited == "":
				metadata := mstorage.ObjectMetadata{
					Bucket:  bucket,
					Key:     name,
					Created: file.ModTime(),
					Size:    file.Size(),
					ETag:    bucket + "#" + name,
				}
				metadataList = append(metadataList, metadata)
			case delimited != "":
				resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, delimited)
			}
		case strings.HasPrefix(name, resources.Prefix):
			metadata := mstorage.ObjectMetadata{
				Bucket:  bucket,
				Key:     name,
				Created: file.ModTime(),
				Size:    file.Size(),
				ETag:    bucket + "#" + name,
			}
			metadataList = append(metadataList, metadata)
		}
	}

ret:
	sort.Sort(ByObjectKey(metadataList))
	return metadataList, resources, nil
}

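// StoreObject - write the object data under the bucket and record its content
// type in a "$metadata" sidecar file.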
func (storage *storage) StoreObject(bucket, key, contentType string, data io.Reader) error {
	// TODO Commits should stage then move instead of writing directly
	storage.lock.Lock()
	defer storage.lock.Unlock()

	// check bucket name valid
	if mstorage.IsValidBucket(bucket) == false {
		return mstorage.BucketNameInvalid{Bucket: bucket}
	}

	// check bucket exists
	if _, err := os.Stat(path.Join(storage.root, bucket)); os.IsNotExist(err) {
		return mstorage.BucketNotFound{Bucket: bucket}
	}

	// verify object path legal
	if mstorage.IsValidObject(key) == false {
		return mstorage.ObjectNameInvalid{Bucket: bucket, Object: key}
	}

	// verify content type
	if contentType == "" {
		contentType = "application/octet-stream"
	}
	contentType = strings.TrimSpace(contentType)

	// get object path
	objectPath := path.Join(storage.root, bucket, key)
	objectDir := path.Dir(objectPath)
	if _, err := os.Stat(objectDir); os.IsNotExist(err) {
		err = os.MkdirAll(objectDir, 0700)
		if err != nil {
			return mstorage.EmbedError(bucket, key, err)
		}
	}

	// check if object exists
	if _, err := os.Stat(objectPath); !os.IsNotExist(err) {
		return mstorage.ObjectExists{
			Bucket: bucket,
			Key:    key,
		}
	}

	// write object
	file, err := os.OpenFile(objectPath, os.O_WRONLY|os.O_CREATE, 0600)
	defer file.Close()
	if err != nil {
		return mstorage.EmbedError(bucket, key, err)
	}

	_, err = io.Copy(file, data)
	if err != nil {
		return mstorage.EmbedError(bucket, key, err)
	}

	// serialize metadata to json
	metadataBuffer, err := json.Marshal(SerializedMetadata{ContentType: contentType})
	if err != nil {
		return mstorage.EmbedError(bucket, key, err)
	}

	// write metadata sidecar file
	file, err = os.OpenFile(objectPath+"$metadata", os.O_WRONLY|os.O_CREATE, 0600)
	defer file.Close()
	if err != nil {
		return mstorage.EmbedError(bucket, key, err)
	}

	_, err = io.Copy(file, bytes.NewBuffer(metadataBuffer))
	if err != nil {
		return mstorage.EmbedError(bucket, key, err)
	}
	return nil
}