/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"io"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"syscall"

	"github.com/minio/minio/pkg/disk"
	"github.com/minio/minio/pkg/safe"
)

const (
	// fsListLimit - maximum number of entries returned by a single ListFiles call.
	fsListLimit = 1000
)

// listParams - list object params used for the list object map.
type listParams struct {
	bucket    string
	recursive bool
	marker    string
	prefix    string
}

// fsStorage - implements StorageAPI interface.
type fsStorage struct {
	diskPath           string
	diskInfo           disk.Info
	minFreeDisk        int64
	listObjectMap      map[listParams][]*treeWalker
	listObjectMapMutex *sync.Mutex
}
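
// Note (added for clarity): listObjectMap caches in-progress tree walkers
// keyed by their listing parameters, so that a follow-up ListFiles call for
// the next page can resume the same walk instead of starting over;
// listObjectMapMutex guards this map. See saveTreeWalk and lookupTreeWalk below.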

// isDirEmpty - returns whether given directory is empty or not.
func isDirEmpty(dirname string) (status bool, err error) {
	f, err := os.Open(dirname)
	if err == nil {
		defer f.Close()
		if _, err = f.Readdirnames(1); err == io.EOF {
			status = true
			err = nil
		}
	}
	return status, err
}

// isDirExist - returns whether given directory exists or not.
func isDirExist(dirname string) (bool, error) {
	fi, e := os.Stat(dirname)
	if e != nil {
		if os.IsNotExist(e) {
			return false, nil
		}
		return false, e
	}
	return fi.IsDir(), nil
}

// Initialize a new storage disk.
func newFS(diskPath string) (StorageAPI, error) {
	if diskPath == "" {
		return nil, errInvalidArgument
	}
	st, e := os.Stat(diskPath)
	if e != nil {
		return nil, e
	}
	if !st.IsDir() {
		return nil, syscall.ENOTDIR
	}
	diskInfo, e := disk.GetInfo(diskPath)
	if e != nil {
		return nil, e
	}
	fs := fsStorage{
		diskPath:           diskPath,
		diskInfo:           diskInfo,
		minFreeDisk:        5, // Minimum 5% disk should be free.
		listObjectMap:      make(map[listParams][]*treeWalker),
		listObjectMapMutex: &sync.Mutex{},
	}
	return fs, nil
}
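
// Illustrative sketch (not part of the original code; the "/export" path and
// "mybucket" volume name are hypothetical): a caller is expected to construct
// the storage layer once and then drive it through the StorageAPI interface.
//
//	storage, err := newFS("/export")
//	if err != nil {
//		// handle initialization failure
//	}
//	err = storage.MakeVol("mybucket")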

// checkDiskFree verifies if disk path has sufficient minimum free disk space.
func checkDiskFree(diskPath string, minFreeDisk int64) (err error) {
	di, err := disk.GetInfo(diskPath)
	if err != nil {
		return err
	}

	// Remove 5% from total space for cumulative disk
	// space used for journalling, inodes etc.
	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
	if int64(availableDiskSpace) <= minFreeDisk {
		return errDiskPathFull
	}

	// Success.
	return nil
}
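
// Worked example (illustration only, numbers are hypothetical): with
// di.Total = 100GiB and di.Free = 10GiB, the adjusted total is 95GiB, so
// availableDiskSpace = (10 / 95) * 100 ≈ 10.5 and the check passes with the
// default minFreeDisk of 5. Because the computed percentage is truncated to
// an integer before the comparison, errDiskPathFull is returned once free
// space drops below 5.7GiB (≈6% of the adjusted total) in this example.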

// removeDuplicateVols - remove duplicate volumes.
func removeDuplicateVols(vols []VolInfo) []VolInfo {
	length := len(vols) - 1
	for i := 0; i < length; i++ {
		for j := i + 1; j <= length; j++ {
			if vols[i].Name == vols[j].Name {
				// Pick the latest volume, if there is a duplicate.
				if vols[i].Created.Sub(vols[j].Created) > 0 {
					vols[i] = vols[length]
				} else {
					vols[j] = vols[length]
				}
				vols = vols[0:length]
				length--
				j--
			}
		}
	}
	return vols
}
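
// Illustration (hypothetical names): given
//
//	[]VolInfo{{Name: "photos"}, {Name: "photos"}, {Name: "docs"}}
//
// removeDuplicateVols collapses the two "photos" entries into one, so the
// result carries a single entry per unique volume name; when timestamps
// differ, the comparison on Created decides which duplicate survives.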

// getAllUniqueVols - returns all the unique volume directories found in dirPath.
func getAllUniqueVols(dirPath string) ([]VolInfo, error) {
	volumeFn := func(dirent fsDirent) bool {
		// Return all directories.
		return dirent.IsDir() && isValidVolname(filepath.Clean(dirent.name))
	}
	namesOnly := true // Returned are only names.
	dirents, err := scandir(dirPath, volumeFn, namesOnly)
	if err != nil {
		return nil, err
	}
	var volsInfo []VolInfo
	for _, dirent := range dirents {
		fi, err := os.Stat(filepath.Join(dirPath, dirent.name))
		if err != nil {
			return nil, err
		}
		volsInfo = append(volsInfo, VolInfo{
			Name: fi.Name(),
			// As os.Stat() doesn't carry anything other than ModTime(),
			// use ModTime() as CreatedTime.
			Created: fi.ModTime(),
		})
	}
	volsInfo = removeDuplicateVols(volsInfo)
	return volsInfo, nil
}

// getVolumeDir - converts an incoming volume name to the corresponding
// valid volume directory on the backend in a platform-compatible way for
// all operating systems. If the volume is not found an error is returned.
func (s fsStorage) getVolumeDir(volume string) (string, error) {
	if !isValidVolname(volume) {
		return "", errInvalidArgument
	}
	volumeDir := filepath.Join(s.diskPath, volume)
	_, err := os.Stat(volumeDir)
	if err == nil {
		return volumeDir, nil
	}
	if os.IsNotExist(err) {
		var volsInfo []VolInfo
		volsInfo, err = getAllUniqueVols(s.diskPath)
		if err != nil {
			return volumeDir, errVolumeNotFound
		}
		for _, vol := range volsInfo {
			// Verify if the lowercase version of the volume name
			// is equal to the incoming volume, then use the proper name.
			if strings.ToLower(vol.Name) == volume {
				volumeDir = filepath.Join(s.diskPath, vol.Name)
				return volumeDir, nil
			}
		}
		return volumeDir, errVolumeNotFound
	} else if os.IsPermission(err) {
		return volumeDir, errVolumeAccessDenied
	}
	return volumeDir, err
}
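
// Illustration (hypothetical layout): with s.diskPath = "/export" and an
// on-disk directory "/export/Photos", getVolumeDir("photos") first misses
// the exact path on a case-sensitive filesystem, then matches "Photos"
// case-insensitively via getAllUniqueVols and returns "/export/Photos".
// A volume name that matches nothing yields errVolumeNotFound.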

// MakeVol - make a volume entry.
func (s fsStorage) MakeVol(volume string) (err error) {
	volumeDir, err := s.getVolumeDir(volume)
	if err == nil {
		// Volume already exists, return error.
		return errVolumeExists
	}

	// Validate if disk has enough free space.
	if e := checkDiskFree(s.diskPath, s.minFreeDisk); e != nil {
		return e
	}

	// If volume is not found, create it.
	if err == errVolumeNotFound {
		// Make a volume entry.
		return os.Mkdir(volumeDir, 0700)
	}

	// For all other errors return here.
	return err
}

// ListVols - list volumes.
func (s fsStorage) ListVols() (volsInfo []VolInfo, err error) {
	volsInfo, err = getAllUniqueVols(s.diskPath)
	if err != nil {
		return nil, err
	}
	for i, vol := range volsInfo {
		// Volume names on case-sensitive filesystem backends can come
		// in capitalized, but the object layer cannot consume them
		// directly. Convert them to lowercase.
		volName := strings.ToLower(vol.Name)
		volInfo := VolInfo{
			Name:    volName,
			Created: vol.Created,
		}
		volsInfo[i] = volInfo
	}
	return volsInfo, nil
}

// StatVol - get volume info.
func (s fsStorage) StatVol(volume string) (volInfo VolInfo, err error) {
	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		return VolInfo{}, err
	}
	// Stat a volume entry.
	var st os.FileInfo
	st, err = os.Stat(volumeDir)
	if err != nil {
		if os.IsNotExist(err) {
			return VolInfo{}, errVolumeNotFound
		}
		return VolInfo{}, err
	}
	// As os.Stat() doesn't carry anything other than ModTime(),
	// use ModTime() as CreatedTime.
	createdTime := st.ModTime()
	return VolInfo{
		Name:    volume,
		Created: createdTime,
	}, nil
}

// DeleteVol - delete a volume.
func (s fsStorage) DeleteVol(volume string) error {
	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		return err
	}
	err = os.Remove(volumeDir)
	if err != nil {
		if os.IsNotExist(err) {
			return errVolumeNotFound
		} else if strings.Contains(err.Error(), "directory is not empty") {
			// On Windows the error string is slightly different,
			// handle it here.
			return errVolumeNotEmpty
		} else if strings.Contains(err.Error(), "directory not empty") {
			// For all other operating systems this error string is
			// assumed to be consistent.
			return errVolumeNotEmpty
		}
		return err
	}
	return err
}

// saveTreeWalk - save the tree walker (goroutine reference) in the map.
func (s *fsStorage) saveTreeWalk(params listParams, walker *treeWalker) {
	s.listObjectMapMutex.Lock()
	defer s.listObjectMapMutex.Unlock()

	walkers := s.listObjectMap[params]
	walkers = append(walkers, walker)

	s.listObjectMap[params] = walkers
}

// lookupTreeWalk - look up the tree walker (goroutine reference) from the map.
func (s *fsStorage) lookupTreeWalk(params listParams) *treeWalker {
	s.listObjectMapMutex.Lock()
	defer s.listObjectMapMutex.Unlock()

	if walkChs, ok := s.listObjectMap[params]; ok {
		for i, walkCh := range walkChs {
			if !walkCh.timedOut {
				newWalkChs := walkChs[i+1:]
				if len(newWalkChs) > 0 {
					s.listObjectMap[params] = newWalkChs
				} else {
					delete(s.listObjectMap, params)
				}
				return walkCh
			}
		}
		// As all channels have timed out, delete the map entry.
		delete(s.listObjectMap, params)
	}
	return nil
}
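
// How the two helpers fit together (illustrative sketch, hypothetical
// values): a paginated listing saves its walker under the parameters of the
// *next* page, and the follow-up request looks it up instead of starting a
// new walk.
//
//	params := listParams{"mybucket", false, "photos/2016.jpg", "photos/"}
//	s.saveTreeWalk(params, walker)
//	// ...next ListFiles call arrives with marker "photos/2016.jpg"...
//	if w := s.lookupTreeWalk(params); w != nil {
//		// resume draining w.ch instead of walking the tree again
//	}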

// ListFiles - list files at a prefix within a volume.
func (s fsStorage) ListFiles(volume, prefix, marker string, recursive bool, count int) ([]FileInfo, bool, error) {
	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		return nil, true, err
	}
	var fileInfos []FileInfo

	if marker != "" {
		// Verify if marker has the prefix.
		if !strings.HasPrefix(marker, prefix) {
			return nil, true, errInvalidArgument
		}
	}

	// Return empty response for a valid request when count is 0.
	if count == 0 {
		return nil, true, nil
	}

	// Overflowing count - reset to fsListLimit.
	if count < 0 || count > fsListLimit {
		count = fsListLimit
	}

	// Verify if prefix exists.
	prefixDir := filepath.Dir(filepath.FromSlash(prefix))
	prefixRootDir := filepath.Join(volumeDir, prefixDir)
	if status, err := isDirExist(prefixRootDir); !status {
		if err == nil {
			// Prefix does not exist; this is not an error, just respond with an empty list.
			return nil, true, nil
		} else if strings.Contains(err.Error(), "not a directory") {
			// Prefix exists as a file.
			return nil, true, nil
		}
		// All other errors are treated as failure.
		return nil, true, err
	}

	// Maximum fsListLimit (1000) files are returned in a single call.
	// Further calls will set the right marker value to continue reading the rest of the files.
	// lookupTreeWalk returns nil if this is the first ListFiles call for these parameters.
	// On further calls to ListFiles to retrieve more files within the timeout period,
	// lookupTreeWalk returns the channel from which the rest of the objects can be retrieved.
	walker := s.lookupTreeWalk(listParams{volume, recursive, marker, prefix})
	if walker == nil {
		walker = startTreeWalk(s.diskPath, volume, filepath.FromSlash(prefix), filepath.FromSlash(marker), recursive)
	}
	nextMarker := ""
	for i := 0; i < count; {
		walkResult, ok := <-walker.ch
		if !ok {
			// Closed channel.
			return fileInfos, true, nil
		}
		// For any walk error return right away.
		if walkResult.err != nil {
			return nil, true, walkResult.err
		}
		fileInfo := walkResult.fileInfo
		fileInfo.Name = filepath.ToSlash(fileInfo.Name)
		fileInfos = append(fileInfos, fileInfo)
		// We have listed everything, return.
		if walkResult.end {
			return fileInfos, true, nil
		}
		nextMarker = fileInfo.Name
		i++
	}
	s.saveTreeWalk(listParams{volume, recursive, nextMarker, prefix}, walker)
	return fileInfos, false, nil
}
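
// Pagination sketch (illustration only; "mybucket" and "photos/" are
// hypothetical): callers page through a listing by feeding the last returned
// name back in as the marker until the second return value (eof) is true.
//
//	marker := ""
//	for {
//		fileInfos, eof, err := s.ListFiles("mybucket", "photos/", marker, true, 1000)
//		if err != nil {
//			break
//		}
//		// ...consume fileInfos...
//		if eof {
//			break
//		}
//		marker = fileInfos[len(fileInfos)-1].Name
//	}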

// ReadFile - read a file at a given offset.
func (s fsStorage) ReadFile(volume string, path string, offset int64) (readCloser io.ReadCloser, err error) {
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		return nil, err
	}

	filePath := filepath.Join(volumeDir, filepath.FromSlash(path))
	file, err := os.Open(filePath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, errFileNotFound
		} else if os.IsPermission(err) {
			return nil, errFileAccessDenied
		}
		return nil, err
	}
	st, err := file.Stat()
	if err != nil {
		return nil, err
	}
	// Verify that it is a regular file, since a subsequent Seek is otherwise undefined.
	if !st.Mode().IsRegular() {
		return nil, errIsNotRegular
	}
	// Seek to the requested offset.
	_, err = file.Seek(offset, os.SEEK_SET)
	if err != nil {
		return nil, err
	}
	return file, nil
}
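
// Offset-read sketch (illustrative; names are hypothetical): resuming a
// download from byte 1024 of an object opens the file already seeked to that
// position and streams the remainder.
//
//	rc, err := s.ReadFile("mybucket", "photos/2016.jpg", 1024)
//	if err == nil {
//		defer rc.Close()
//		// io.Copy(w, rc) streams everything from offset 1024 onwards.
//	}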

// CreateFile - create a file at path.
func (s fsStorage) CreateFile(volume, path string) (writeCloser io.WriteCloser, err error) {
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		return nil, err
	}
	if err := checkDiskFree(s.diskPath, s.minFreeDisk); err != nil {
		return nil, err
	}
	filePath := filepath.Join(volumeDir, path)
	// Verify if the file already exists and is not of regular type.
	if st, err := os.Stat(filePath); err == nil {
		if st.IsDir() {
			return nil, errIsNotRegular
		}
	}
	return safe.CreateFileWithPrefix(filePath, "$tmpfile")
}
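
// Write sketch (illustrative; names are hypothetical): a caller is expected
// to stream the object body into the returned WriteCloser and close it when
// done.
//
//	wc, err := s.CreateFile("mybucket", "photos/2016.jpg")
//	if err == nil {
//		// io.Copy(wc, body) streams the object contents.
//		wc.Close()
//	}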

// StatFile - get file info.
func (s fsStorage) StatFile(volume, path string) (file FileInfo, err error) {
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		return FileInfo{}, err
	}

	filePath := filepath.Join(volumeDir, filepath.FromSlash(path))
	st, err := os.Stat(filePath)
	if err != nil {
		// File is really not found.
		if os.IsNotExist(err) {
			return FileInfo{}, errFileNotFound
		}
		// File path cannot be verified since one of the parents is a file.
		if strings.Contains(err.Error(), "not a directory") {
			return FileInfo{}, errIsNotRegular
		}
		// Return all other errors here.
		return FileInfo{}, err
	}
	// If it is a directory, it is not a regular file.
	if st.Mode().IsDir() {
		return FileInfo{}, errIsNotRegular
	}
	file = FileInfo{
		Volume:  volume,
		Name:    path,
		ModTime: st.ModTime(),
		Size:    st.Size(),
		Mode:    st.Mode(),
	}
	return file, nil
}

// deleteFile - delete path if it is a file or an empty directory, then
// recursively prune empty parent directories up to (but not including) basePath.
func deleteFile(basePath, deletePath string) error {
	if basePath == deletePath {
		return nil
	}
	// Verify if the path exists.
	pathSt, err := os.Stat(deletePath)
	if err != nil {
		if os.IsNotExist(err) {
			return errFileNotFound
		} else if os.IsPermission(err) {
			return errFileAccessDenied
		}
		return err
	}
	if pathSt.IsDir() {
		// Verify if the directory is empty.
		empty, err := isDirEmpty(deletePath)
		if err != nil {
			return err
		}
		if !empty {
			return nil
		}
	}
	// Attempt to remove the path.
	if err := os.Remove(deletePath); err != nil {
		return err
	}
	// Recursively go up the path and delete empty parents.
	if err := deleteFile(basePath, filepath.Dir(deletePath)); err != nil {
		return err
	}
	return nil
}
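
// Illustration (hypothetical paths): deleting the last object under a prefix
// also prunes the now-empty directories above it.
//
//	deleteFile("/export/mybucket", "/export/mybucket/a/b/object.txt")
//
// removes object.txt, then "b" and "a" if they become empty, and stops at
// the volume directory "/export/mybucket" itself.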

// DeleteFile - delete a file at path.
func (s fsStorage) DeleteFile(volume, path string) error {
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		return err
	}

	// The following code is needed so that we retain the "/" suffix, if any,
	// in the path argument.
	filePath := filepath.Join(volumeDir, filepath.FromSlash(path))
	if strings.HasSuffix(filepath.FromSlash(path), string(os.PathSeparator)) {
		filePath = filePath + string(os.PathSeparator)
	}

	// Delete the file, and also delete the parent directory if it is empty.
	return deleteFile(volumeDir, filePath)
}