/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"io"
	"os"
	slashpath "path"
	"path/filepath"
	"strings"
	"sync"
	"syscall"

	"path"

	"github.com/Sirupsen/logrus"
	"github.com/minio/minio/pkg/disk"
	"github.com/minio/minio/pkg/safe"
)

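// fsListLimit - maximum number of file entries returned by a single ListFiles call.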
const (
	fsListLimit = 1000
)

// listParams - list object params used for list object map
type listParams struct {
	bucket    string
	recursive bool
	marker    string
	prefix    string
}

// fsStorage - implements StorageAPI interface.
type fsStorage struct {
	diskPath           string
	minFreeDisk        int64
	listObjectMap      map[listParams][]*treeWalker
	listObjectMapMutex *sync.Mutex
}

// isDirEmpty - returns whether given directory is empty or not.
func isDirEmpty(dirname string) (status bool, err error) {
	f, err := os.Open(dirname)
	if err == nil {
		defer f.Close()
		if _, err = f.Readdirnames(1); err == io.EOF {
			status = true
			err = nil
		}
	}
	return status, err
}

// isDirExist - returns whether given directory exists or not.
func isDirExist(dirname string) (bool, error) {
	fi, e := os.Stat(dirname)
	if e != nil {
		if os.IsNotExist(e) {
			return false, nil
		}
		return false, e
	}
	return fi.IsDir(), nil
}

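// newPosix - instantiate the FS StorageAPI for a single local disk path.
// A minimal usage sketch (the path below is only an example):
//
//	storage, err := newPosix("/mnt/disk1")
//	if err != nil {
//		// handle the unusable disk
//	}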
// Initialize a new storage disk.
func newPosix(diskPath string) (StorageAPI, error) {
	if diskPath == "" {
		log.Debug("Disk cannot be empty")
		return nil, errInvalidArgument
	}
	st, err := os.Stat(diskPath)
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": diskPath,
		}).Debugf("Stat failed, with error %s.", err)
		return nil, err
	}
	if !st.IsDir() {
		log.WithFields(logrus.Fields{
			"diskPath": diskPath,
		}).Debugf("Disk %s.", syscall.ENOTDIR)
		return nil, syscall.ENOTDIR
	}
	fs := fsStorage{
		diskPath:           diskPath,
		minFreeDisk:        5, // Minimum 5% disk should be free.
		listObjectMap:      make(map[listParams][]*treeWalker),
		listObjectMapMutex: &sync.Mutex{},
	}
	log.WithFields(logrus.Fields{
		"diskPath":    diskPath,
		"minFreeDisk": 5,
	}).Debugf("Successfully configured FS storage API.")
	return fs, nil
}

// checkDiskFree verifies if disk path has sufficient minimum free disk
// space.
func checkDiskFree(diskPath string, minFreeDisk int64) (err error) {
	di, err := disk.GetInfo(diskPath)
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": diskPath,
		}).Debugf("Failed to get disk info, %s", err)
		return err
	}

	// Remove 5% from total space for cumulative disk
	// space used for journalling, inodes etc.
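	// Worked example (figures assumed): with Total = 100GiB and Free = 10GiB,
	// availableDiskSpace = 10 / (100 - 5) * 100 ~ 10.5%, which is then
	// compared against minFreeDisk (5% by default).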
	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
	if int64(availableDiskSpace) <= minFreeDisk {
		log.WithFields(logrus.Fields{
			"availableDiskSpace": int64(availableDiskSpace),
			"minFreeDiskSpace":   minFreeDisk,
		}).Debugf("Disk free space has reached its limit.")
		return errDiskFull
	}

	// Success.
	return nil
}

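// removeDuplicateVols - returns the given volumes with duplicate names removed.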
func removeDuplicateVols(volsInfo []VolInfo) []VolInfo {
	// Use map to record duplicates as we find them.
	result := []VolInfo{}

	m := make(map[string]VolInfo)
	for _, v := range volsInfo {
		if _, found := m[v.Name]; !found {
			m[v.Name] = v
		}
	}

	result = make([]VolInfo, 0, len(m))
	for _, v := range m {
		result = append(result, v)
	}

	// Return the new slice.
	return result
}

// getAllUniqueVols - gets all the unique volume directories under dirPath.
func getAllUniqueVols(dirPath string) ([]VolInfo, error) {
	volumeFn := func(dirent fsDirent) bool {
		// Return all directories.
		return dirent.IsDir() && isValidVolname(filepath.Clean(dirent.name))
	}
	namesOnly := true // Returned are only names.
	dirents, err := scandir(dirPath, volumeFn, namesOnly)
	if err != nil {
		log.WithFields(logrus.Fields{
			"dirPath":   dirPath,
			"namesOnly": true,
		}).Debugf("Scandir failed with error %s", err)
		return nil, err
	}
	var volsInfo []VolInfo
	for _, dirent := range dirents {
		fi, err := os.Stat(filepath.Join(dirPath, dirent.name))
		if err != nil {
			log.WithFields(logrus.Fields{
				"path": filepath.Join(dirPath, dirent.name),
			}).Debugf("Stat failed with error %s", err)
			return nil, err
		}
		volsInfo = append(volsInfo, VolInfo{
			Name: fi.Name(),
			// As os.Stat() doesn't carry other than ModTime(), use
			// ModTime() as CreatedTime.
			Created: fi.ModTime(),
		})
	}
	return removeDuplicateVols(volsInfo), nil
}

// getVolumeDir - will convert incoming volume names to
// corresponding valid volume names on the backend in a platform
// compatible way for all operating systems. If volume is not found
// an error is generated.
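// For example (hypothetical names), a volume created as "Photos" on a
// case-sensitive filesystem is still resolved when the caller asks for
// "photos".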
func (s fsStorage) getVolumeDir(volume string) (string, error) {
	if !isValidVolname(volume) {
		return "", errInvalidArgument
	}
	volumeDir := filepath.Join(s.diskPath, volume)
	_, err := os.Stat(volumeDir)
	if err == nil {
		return volumeDir, nil
	}
	if os.IsNotExist(err) {
		var volsInfo []VolInfo
		volsInfo, err = getAllUniqueVols(s.diskPath)
		if err != nil {
			return volumeDir, errVolumeNotFound
		}
		for _, vol := range volsInfo {
			// Verify if lowercase version of the volume is equal
			// to the incoming volume, then use the proper name.
			if strings.ToLower(vol.Name) == volume {
				volumeDir = filepath.Join(s.diskPath, vol.Name)
				return volumeDir, nil
			}
		}
		return volumeDir, errVolumeNotFound
	} else if os.IsPermission(err) {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
		}).Debugf("Stat failed with error %s", err)
		return volumeDir, errVolumeAccessDenied
	}
	log.WithFields(logrus.Fields{
		"diskPath": s.diskPath,
	}).Debugf("Stat failed with error %s", err)
	return volumeDir, err
}

// Make a volume entry.
func (s fsStorage) MakeVol(volume string) (err error) {
	// Validate if disk is free.
	if err = checkDiskFree(s.diskPath, s.minFreeDisk); err != nil {
		return err
	}

	volumeDir, err := s.getVolumeDir(volume)
	if err == nil {
		// Volume already exists, return error.
		return errVolumeExists
	}

	// If volume not found create it.
	if err == errVolumeNotFound {
		// Make a volume entry.
		return os.Mkdir(volumeDir, 0700)
	}

	log.WithFields(logrus.Fields{
		"diskPath": s.diskPath,
		"volume":   volume,
	}).Debugf("MakeVol failed with %s", err)

	// For all other errors return here.
	return err
}

// ListVols - list volumes.
func (s fsStorage) ListVols() (volsInfo []VolInfo, err error) {
	// Get disk info to be populated for VolInfo.
	var diskInfo disk.Info
	diskInfo, err = disk.GetInfo(s.diskPath)
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
		}).Debugf("Failed to get disk info, %s", err)
		return nil, err
	}
	volsInfo, err = getAllUniqueVols(s.diskPath)
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
		}).Debugf("getAllUniqueVols failed with %s", err)
		return nil, err
	}
	for i, vol := range volsInfo {
		// Volname on case sensitive fs backends can come in as
		// capitalized, but object layer cannot consume it
		// directly. Convert it as we see fit.
		volName := strings.ToLower(vol.Name)
		volInfo := VolInfo{
			Name:    volName,
			Created: vol.Created,
			Total:   diskInfo.Total,
			Free:    diskInfo.Free,
			FSType:  diskInfo.FSType,
		}
		volsInfo[i] = volInfo
	}
	return volsInfo, nil
}

// StatVol - get volume info.
func (s fsStorage) StatVol(volume string) (volInfo VolInfo, err error) {
	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
			"volume":   volume,
		}).Debugf("getVolumeDir failed with %s", err)
		return VolInfo{}, err
	}
	// Stat a volume entry.
	var st os.FileInfo
	st, err = os.Stat(volumeDir)
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
			"volume":   volume,
		}).Debugf("Stat on the volume failed with %s", err)
		if os.IsNotExist(err) {
			return VolInfo{}, errVolumeNotFound
		}
		return VolInfo{}, err
	}
	// Get disk info, to be returned back along with volume info.
	var diskInfo disk.Info
	diskInfo, err = disk.GetInfo(s.diskPath)
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
			"volume":   volume,
		}).Debugf("Failed to get disk info, %s", err)
		return VolInfo{}, err
	}
	// As os.Stat() doesn't carry other than ModTime(), use ModTime()
	// as CreatedTime.
	createdTime := st.ModTime()
	return VolInfo{
		Name:    volume,
		Created: createdTime,
		Free:    diskInfo.Free,
		Total:   diskInfo.Total,
		FSType:  diskInfo.FSType,
	}, nil
}

// DeleteVol - delete a volume.
func (s fsStorage) DeleteVol(volume string) error {
	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
			"volume":   volume,
		}).Debugf("getVolumeDir failed with %s", err)
		return err
	}
	err = os.Remove(volumeDir)
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
			"volume":   volume,
		}).Debugf("Volume remove failed with %s", err)
		if os.IsNotExist(err) {
			return errVolumeNotFound
		} else if strings.Contains(err.Error(), "directory is not empty") {
			// On windows the string is slightly different, handle it here.
			return errVolumeNotEmpty
		} else if strings.Contains(err.Error(), "directory not empty") {
			// Hopefully for all other operating systems, this is
			// assumed to be consistent.
			return errVolumeNotEmpty
		}
		return err
	}
	return nil
}

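// saveTreeWalk and lookupTreeWalk together implement list continuation:
// a walker started for one ListFiles call is parked here, keyed by its
// listParams, and picked up by a later call carrying the same params.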
// Save the goroutine reference in the map
func (s *fsStorage) saveTreeWalk(params listParams, walker *treeWalker) {
	s.listObjectMapMutex.Lock()
	defer s.listObjectMapMutex.Unlock()

	log.WithFields(logrus.Fields{
		"bucket":    params.bucket,
		"recursive": params.recursive,
		"marker":    params.marker,
		"prefix":    params.prefix,
	}).Debugf("saveTreeWalk has been invoked.")

	walkers, _ := s.listObjectMap[params]
	walkers = append(walkers, walker)

	s.listObjectMap[params] = walkers
	log.Debugf("Successfully saved in listObjectMap.")
}

// Lookup the goroutine reference from map
func (s *fsStorage) lookupTreeWalk(params listParams) *treeWalker {
	s.listObjectMapMutex.Lock()
	defer s.listObjectMapMutex.Unlock()

	log.WithFields(logrus.Fields{
		"bucket":    params.bucket,
		"recursive": params.recursive,
		"marker":    params.marker,
		"prefix":    params.prefix,
	}).Debugf("lookupTreeWalk has been invoked.")
	if walkChs, ok := s.listObjectMap[params]; ok {
		for i, walkCh := range walkChs {
			if !walkCh.timedOut {
				newWalkChs := walkChs[i+1:]
				if len(newWalkChs) > 0 {
					s.listObjectMap[params] = newWalkChs
				} else {
					delete(s.listObjectMap, params)
				}
				log.WithFields(logrus.Fields{
					"bucket":    params.bucket,
					"recursive": params.recursive,
					"marker":    params.marker,
					"prefix":    params.prefix,
				}).Debugf("Found the previously saved listObjects params.")
				return walkCh
			}
		}
		// As all channels are timed out, delete the map entry
		delete(s.listObjectMap, params)
	}
	return nil
}

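// ListFiles returns up to count entries per call along with an eof flag;
// a hypothetical caller pages through a volume like this (names assumed):
//
//	marker := ""
//	for {
//		infos, eof, err := storage.ListFiles("bucket", "prefix/", marker, true, fsListLimit)
//		if err != nil || eof || len(infos) == 0 {
//			break
//		}
//		marker = infos[len(infos)-1].Name
//	}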
// List operation.
func (s fsStorage) ListFiles(volume, prefix, marker string, recursive bool, count int) ([]FileInfo, bool, error) {
	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
			"volume":   volume,
		}).Debugf("getVolumeDir failed with %s", err)
		return nil, true, err
	}
	var fileInfos []FileInfo

	if marker != "" {
		// Verify if marker has prefix.
		if marker != "" && !strings.HasPrefix(marker, prefix) {
			log.WithFields(logrus.Fields{
				"diskPath": s.diskPath,
				"marker":   marker,
				"prefix":   prefix,
			}).Debugf("Marker doesn't have prefix in common.")
			return nil, true, errInvalidArgument
		}
	}

	// Return empty response for a valid request when count is 0.
	if count == 0 {
		return nil, true, nil
	}

	// Overflowing count - reset to fsListLimit.
	if count < 0 || count > fsListLimit {
		count = fsListLimit
	}

	// Verify if prefix exists.
	prefixDir := slashpath.Dir(prefix)
	prefixRootDir := slashpath.Join(volumeDir, prefixDir)
	if status, err := isDirExist(prefixRootDir); !status {
		if err == nil {
			// Prefix does not exist, not an error just respond empty list response.
			return nil, true, nil
		} else if err.Error() == syscall.ENOTDIR.Error() {
			// Prefix exists as a file.
			return nil, true, nil
		}

		log.WithFields(logrus.Fields{
			"volumeDir":     volumeDir,
			"prefixRootDir": prefixRootDir,
		}).Debugf("isDirExist returned an unhandled error %s", err)

		// Rest errors should be treated as failure.
		return nil, true, err
	}

	// Maximum 1000 files returned in a single call.
	// Further calls will set right marker value to continue reading the rest of the files.
	// lookupTreeWalk returns nil if the call to ListFiles is done for the first time.
	// On further calls to ListFiles to retrieve more files within the timeout period,
	// lookupTreeWalk returns the channel from which rest of the objects can be retrieved.
	walker := s.lookupTreeWalk(listParams{volume, recursive, marker, prefix})
	if walker == nil {
		walker = startTreeWalk(filepath.ToSlash(s.diskPath), volume, prefix, marker, recursive)
	}
	nextMarker := ""
	log.Debugf("Reading from the tree walk channel has begun.")
	for i := 0; i < count; {
		walkResult, ok := <-walker.ch
		if !ok {
			// Closed channel.
			return fileInfos, true, nil
		}
		// For any walk error return right away.
		if walkResult.err != nil {
			log.WithFields(logrus.Fields{
				"diskPath":  s.diskPath,
				"volume":    volume,
				"prefix":    prefix,
				"marker":    marker,
				"recursive": recursive,
			}).Debugf("Walk resulted in an error %s", walkResult.err)
			return nil, true, walkResult.err
		}
		fileInfo := walkResult.fileInfo
		fileInfo.Name = filepath.ToSlash(fileInfo.Name)
		fileInfos = append(fileInfos, fileInfo)
		// We have listed everything return.
		if walkResult.end {
			return fileInfos, true, nil
		}
		nextMarker = fileInfo.Name
		i++
	}
	params := listParams{volume, recursive, nextMarker, prefix}
	log.WithFields(logrus.Fields{
		"bucket":    params.bucket,
		"recursive": params.recursive,
		"marker":    params.marker,
		"prefix":    params.prefix,
	}).Debugf("Save the tree walk into map for subsequent requests.")
	s.saveTreeWalk(params, walker)
	return fileInfos, false, nil
}

// ReadFile - read a file at a given offset.
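// The returned ReadCloser is already positioned at offset; a hypothetical
// caller streams the data and closes it when done:
//
//	rc, err := storage.ReadFile("bucket", "object", 0)
//	if err == nil {
//		defer rc.Close()
//		_, _ = io.Copy(dst, rc) // dst is an assumed io.Writer
//	}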
func (s fsStorage) ReadFile(volume string, path string, offset int64) (readCloser io.ReadCloser, err error) {
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
			"volume":   volume,
		}).Debugf("getVolumeDir failed with %s", err)
		return nil, err
	}

	filePath := filepath.Join(volumeDir, filepath.FromSlash(path))
	file, err := os.Open(filePath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, errFileNotFound
		} else if os.IsPermission(err) {
			return nil, errFileAccessDenied
		}
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
			"filePath": filePath,
		}).Debugf("Opening a file failed with %s", err)
		return nil, err
	}
	st, err := file.Stat()
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
			"filePath": filePath,
		}).Debugf("Stat failed with %s", err)
		return nil, err
	}
	// Verify it is a regular file, since a subsequent Seek is undefined otherwise.
	if !st.Mode().IsRegular() {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
			"filePath": filePath,
		}).Debugf("Unexpected type %s", errIsNotRegular)
		return nil, errFileNotFound
	}
	// Seek to requested offset.
	_, err = file.Seek(offset, os.SEEK_SET)
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
			"filePath": filePath,
			"offset":   offset,
		}).Debugf("Seek failed with %s", err)
		return nil, err
	}
	return file, nil
}

// CreateFile - create a file at path.
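// The write goes through the safe package and is not considered complete
// until the caller closes the returned WriteCloser.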
func (s fsStorage) CreateFile(volume, path string) (writeCloser io.WriteCloser, err error) {
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
			"volume":   volume,
		}).Debugf("getVolumeDir failed with %s", err)
		return nil, err
	}
	if err = checkDiskFree(s.diskPath, s.minFreeDisk); err != nil {
		return nil, err
	}
	filePath := filepath.Join(volumeDir, path)
	// Verify if the file already exists and is not of regular type.
	var st os.FileInfo
	if st, err = os.Stat(filePath); err == nil {
		if st.IsDir() {
			log.WithFields(logrus.Fields{
				"diskPath": s.diskPath,
				"filePath": filePath,
			}).Debugf("Unexpected type %s", errIsNotRegular)
			return nil, errIsNotRegular
		}
	}
	w, err := safe.CreateFileWithPrefix(filePath, "$tmpfile")
	if err != nil {
		// File path cannot be verified since one of the parents is a file.
		if strings.Contains(err.Error(), "not a directory") {
			return nil, errFileAccessDenied
		}
		return nil, err
	}
	return w, nil
}

// StatFile - get file info.
func (s fsStorage) StatFile(volume, path string) (file FileInfo, err error) {
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
			"volume":   volume,
		}).Debugf("getVolumeDir failed with %s", err)
		return FileInfo{}, err
	}

	filePath := filepath.Join(volumeDir, filepath.FromSlash(path))
	st, err := os.Stat(filePath)
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
			"filePath": filePath,
		}).Debugf("Stat failed with %s", err)

		// File is really not found.
		if os.IsNotExist(err) {
			return FileInfo{}, errFileNotFound
		}

		// File path cannot be verified since one of the parents is a file.
		if strings.Contains(err.Error(), "not a directory") {
			return FileInfo{}, errFileNotFound
		}

		// Return all errors here.
		return FileInfo{}, err
	}

	// If it is a directory, it is not a regular file.
	if st.Mode().IsDir() {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
			"filePath": filePath,
		}).Debugf("File is %s.", errIsNotRegular)
		return FileInfo{}, errFileNotFound
	}
	return FileInfo{
		Volume:  volume,
		Name:    path,
		ModTime: st.ModTime(),
		Size:    st.Size(),
		Mode:    st.Mode(),
	}, nil
}

// deleteFile - delete the given path; a directory is removed only when
// empty, and empty parent directories are pruned recursively up to
// (but not including) basePath.
func deleteFile(basePath, deletePath string) error {
	if basePath == deletePath {
		return nil
	}
	// Verify if the path exists.
	pathSt, err := os.Stat(deletePath)
	if err != nil {
		log.WithFields(logrus.Fields{
			"deletePath": deletePath,
		}).Debugf("Stat failed with %s", err)
		if os.IsNotExist(err) {
			return errFileNotFound
		} else if os.IsPermission(err) {
			return errFileAccessDenied
		}
		return err
	}
	if pathSt.IsDir() {
		// Verify if directory is empty.
		empty, err := isDirEmpty(deletePath)
		if err != nil {
			log.WithFields(logrus.Fields{
				"deletePath": deletePath,
			}).Debugf("isDirEmpty failed with %s", err)
			return err
		}
		if !empty {
			return nil
		}
	}
	// Attempt to remove path.
	if err := os.Remove(deletePath); err != nil {
		log.WithFields(logrus.Fields{
			"deletePath": deletePath,
		}).Debugf("Remove failed with %s", err)
		return err
	}
	// Recursively go up the path, deleting parent directories that turn out empty.
	if err := deleteFile(basePath, filepath.Dir(deletePath)); err != nil {
		log.WithFields(logrus.Fields{
			"basePath":  basePath,
			"deleteDir": filepath.Dir(deletePath),
		}).Debugf("deleteFile failed with %s", err)
		return err
	}
	return nil
}

// DeleteFile - delete a file at path.
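// Any parent directories left empty by the delete are pruned up to the
// volume root (see deleteFile above).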
func (s fsStorage) DeleteFile(volume, path string) error {
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
			"volume":   volume,
		}).Debugf("getVolumeDir failed with %s", err)
		return err
	}

	// Following code is needed so that we retain "/" suffix if any in
	// path argument.
	filePath := filepath.Join(volumeDir, filepath.FromSlash(path))
	if strings.HasSuffix(filepath.FromSlash(path), string(os.PathSeparator)) {
		filePath = filePath + string(os.PathSeparator)
	}

	// Delete file and delete parent directory as well if it is empty.
	return deleteFile(volumeDir, filePath)
}

// RenameFile - rename file.
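// Missing parent directories under the destination volume are created
// before the rename is attempted.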
func (s fsStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error {
	srcVolumeDir, err := s.getVolumeDir(srcVolume)
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
			"volume":   srcVolume,
		}).Errorf("getVolumeDir failed with %s", err)
		return err
	}
	dstVolumeDir, err := s.getVolumeDir(dstVolume)
	if err != nil {
		log.WithFields(logrus.Fields{
			"diskPath": s.diskPath,
			"volume":   dstVolume,
		}).Errorf("getVolumeDir failed with %s", err)
		return err
	}
	if err = os.MkdirAll(path.Join(dstVolumeDir, path.Dir(dstPath)), 0755); err != nil {
		// File path cannot be verified since one of the parents is a file.
		if strings.Contains(err.Error(), "not a directory") {
			return errFileAccessDenied
		}
		log.Errorf("os.MkdirAll failed with %s", err)
		return err
	}
	err = os.Rename(path.Join(srcVolumeDir, srcPath), path.Join(dstVolumeDir, dstPath))
	if err != nil {
		if os.IsNotExist(err) {
			return errFileNotFound
		}
		log.Errorf("os.Rename failed with %s", err)
		return err
	}
	return nil
}