/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"net"
	"strings"
	"sync"
)

const (
	// Block size used for all internal operations version 1.
	blockSizeV1 = 10 * 1024 * 1024 // 10MiB.

	// Staging buffer read size for all internal operations version 1.
	readSizeV1 = 128 * 1024 // 128KiB.

	// Buckets meta prefix.
	bucketMetaPrefix = "buckets"
)

// Global object layer mutex, used for safely updating object layer.
var globalObjLayerMutex *sync.Mutex

// Global object layer, only accessed by newObjectLayerFn().
var globalObjectAPI ObjectLayer

func init() {
	// Initialize this once per server initialization.
	globalObjLayerMutex = &sync.Mutex{}
}
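
// The snippet below is an illustrative sketch, not part of the original file:
// it shows how the two globals above are meant to be used together, with every
// read or write of globalObjectAPI guarded by globalObjLayerMutex. The helper
// name exampleSetObjectLayer is hypothetical and exists only for illustration.
func exampleSetObjectLayer(objAPI ObjectLayer) {
	globalObjLayerMutex.Lock()
	// Swap in the new object layer while holding the lock so concurrent
	// readers never observe a partially updated value.
	globalObjectAPI = objAPI
	globalObjLayerMutex.Unlock()
}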

// isErrIgnored returns whether err should be ignored, given a list of errors
// which can be ignored.
func isErrIgnored(err error, ignoredErrs []error) bool {
	err = errorCause(err)
	for _, ignoredErr := range ignoredErrs {
		if ignoredErr == err {
			return true
		}
	}
	return false
}
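
// Illustrative sketch, not part of the original file: callers pass their own
// whitelist of harmless errors to isErrIgnored. The helper name and the chosen
// whitelist below are hypothetical and only demonstrate the calling convention.
func exampleSkipUnavailableDisk(err error) bool {
	// Treat missing or faulty disks as non-fatal for this hypothetical caller.
	ignoredErrs := []error{errDiskNotFound, errFaultyDisk}
	return isErrIgnored(err, ignoredErrs)
}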

// House keeping code needed for FS.
func fsHouseKeeping(storageDisk StorageAPI) error {
	// Cleanup all temp entries upon start.
	err := cleanupDir(storageDisk, minioMetaBucket, tmpMetaPrefix)
	if err != nil {
		return toObjectErr(err, minioMetaBucket, tmpMetaPrefix)
	}
	return nil
}
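
// Illustrative sketch, not part of the original file, of how fsHouseKeeping
// fits into single-disk (FS) startup: initialize a storage backend for the
// disk path, then clear out stale temporary entries. The helper name and the
// "/mnt/disk1" path are hypothetical.
func exampleFSStartup() error {
	storageDisk, err := newStorageAPI("/mnt/disk1")
	if err != nil {
		return err
	}
	// Remove leftover temporary files from any previous run.
	return fsHouseKeeping(storageDisk)
}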

// Check if a network path is local to this node.
func isLocalStorage(networkPath string) bool {
	if idx := strings.LastIndex(networkPath, ":"); idx != -1 {
		// e.g. 10.0.0.1:/mnt/networkPath
		netAddr, _, err := splitNetPath(networkPath)
		if err != nil {
			errorIf(err, "Splitting into ip and path failed")
			return false
		}
		// netAddr is only set if this is not a local path; an empty
		// netAddr means the path is local.
		if netAddr == "" {
			return true
		}
		// Resolve host to address to check if the IP is loopback.
		// If address resolution fails, assume it's a non-local host.
		addrs, err := net.LookupHost(netAddr)
		if err != nil {
			errorIf(err, "Failed to lookup host")
			return false
		}
		for _, addr := range addrs {
			if ip := net.ParseIP(addr); ip.IsLoopback() {
				return true
			}
		}
		iaddrs, err := net.InterfaceAddrs()
		if err != nil {
			errorIf(err, "Unable to list interface addresses")
			return false
		}
		for _, addr := range addrs {
			for _, iaddr := range iaddrs {
				ip, _, err := net.ParseCIDR(iaddr.String())
				if err != nil {
					errorIf(err, "Unable to parse CIDR")
					return false
				}
				if ip.String() == addr {
					return true
				}
			}
		}
		return false
	}
	return true
}
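
// Illustrative sketch, not part of the original file, of the kinds of inputs
// isLocalStorage distinguishes. The sample paths are hypothetical; results for
// host-qualified paths depend on whether the host resolves to this node.
func exampleClassifyDiskPaths() {
	// A plain filesystem path has no "host:" prefix and is always local.
	_ = isLocalStorage("/mnt/disk1") // true
	// A host-qualified path is local only if the host resolves to a loopback
	// or to one of this node's interface addresses.
	_ = isLocalStorage("localhost:/mnt/disk1") // typically true (loopback)
	_ = isLocalStorage("10.0.0.1:/mnt/disk1")  // true only on the 10.0.0.1 node
}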

// Depending on the disk type, network or local, initialize the storage API.
func newStorageAPI(disk string) (storage StorageAPI, err error) {
	if isLocalStorage(disk) {
		return newPosix(disk)
	}
	return newRPCClient(disk)
}
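
// Illustrative sketch, not part of the original file: newStorageAPI is the
// single entry point for turning disk endpoints, local or remote, into
// StorageAPI backends. The helper name and endpoint strings are hypothetical.
func exampleInitStorageDisks() ([]StorageAPI, error) {
	endpoints := []string{"/mnt/disk1", "10.0.0.2:/mnt/disk1"}
	storageDisks := make([]StorageAPI, len(endpoints))
	for index, endpoint := range endpoints {
		// Local endpoints get a posix backend, remote ones an RPC client.
		storage, err := newStorageAPI(endpoint)
		if err != nil {
			return nil, err
		}
		storageDisks[index] = storage
	}
	return storageDisks, nil
}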

// Initializes meta volume on all input storage disks.
func initMetaVolume(storageDisks []StorageAPI) error {
	// This happens only the first time, but keep it here since this is the
	// only place where it can be made expensive, keeping all other calls
	// optimized. Create the minio meta volume if it doesn't exist yet.
	var wg = &sync.WaitGroup{}

	// Initialize errs to collect errors inside the go-routines.
	var errs = make([]error, len(storageDisks))

	// Initialize all disks in parallel.
	for index, disk := range storageDisks {
		if disk == nil {
			// Skip creating the meta volume on disks which are not found.
			continue
		}
		wg.Add(1)
		go func(index int, disk StorageAPI) {
			// Indicate this wait group is done.
			defer wg.Done()

			// Attempt to create `.minio.sys`.
			err := disk.MakeVol(minioMetaBucket)
			if err != nil {
				switch err {
				// Ignored errors.
				case errVolumeExists, errDiskNotFound, errFaultyDisk:
				default:
					errs[index] = err
				}
			}
		}(index, disk)
	}

	// Wait for all volume creation to finish.
	wg.Wait()

	// Return upon the first error.
	for _, err := range errs {
		if err == nil {
			continue
		}
		return toObjectErr(err, minioMetaBucket)
	}

	// Return success here.
	return nil
}

// House keeping code needed for XL.
func xlHouseKeeping(storageDisks []StorageAPI) error {
	// This happens only the first time, but keep it here since this is the
	// only place where it can be made expensive, keeping all other calls
	// optimized. Cleanup leftover temporary entries on all disks.
	var wg = &sync.WaitGroup{}

	// Initialize errs to collect errors inside the go-routines.
	var errs = make([]error, len(storageDisks))

	// Initialize all disks in parallel.
	for index, disk := range storageDisks {
		if disk == nil {
			continue
		}
		wg.Add(1)
		go func(index int, disk StorageAPI) {
			// Indicate this wait group is done.
			defer wg.Done()

			// Cleanup all temp entries upon start.
			err := cleanupDir(disk, minioMetaBucket, tmpMetaPrefix)
			if err != nil {
				switch errorCause(err) {
				case errDiskNotFound, errVolumeNotFound, errFileNotFound:
				default:
					errs[index] = err
				}
			}
		}(index, disk)
	}

	// Wait for all cleanup to finish.
	wg.Wait()

	// Return upon the first error.
	for _, err := range errs {
		if err == nil {
			continue
		}
		return toObjectErr(err, minioMetaBucket, tmpMetaPrefix)
	}

	// Return success here.
	return nil
}
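
// Illustrative sketch, not part of the original file, of the startup order the
// two helpers above are built for: obtain StorageAPI backends, create the meta
// volume everywhere, then clean up stale temporary entries. The helper name is
// hypothetical; exampleInitStorageDisks is the sketch defined earlier.
func exampleXLStartup() error {
	storageDisks, err := exampleInitStorageDisks()
	if err != nil {
		return err
	}
	// Ensure `.minio.sys` exists on every available disk.
	if err = initMetaVolume(storageDisks); err != nil {
		return err
	}
	// Remove leftover temporary entries from any previous run.
	return xlHouseKeeping(storageDisks)
}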

// Cleanup a directory recursively.
func cleanupDir(storage StorageAPI, volume, dirPath string) error {
	var delFunc func(string) error
	// Function to delete entries recursively.
	delFunc = func(entryPath string) error {
		if !strings.HasSuffix(entryPath, slashSeparator) {
			// Delete the file entry.
			err := storage.DeleteFile(volume, entryPath)
			return traceError(err)
		}

		// If it's a directory, list and call delFunc() for each entry.
		entries, err := storage.ListDir(volume, entryPath)
		// If entryPath prefix never existed, safe to ignore.
		if err == errFileNotFound {
			return nil
		} else if err != nil { // For any other errors fail.
			return traceError(err)
		} // else on success..

		// Recurse and delete all other entries.
		for _, entry := range entries {
			if err = delFunc(pathJoin(entryPath, entry)); err != nil {
				return err
			}
		}
		return nil
	}
	err := delFunc(retainSlash(pathJoin(dirPath)))
	return err
}
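
// Illustrative sketch, not part of the original file: cleanupDir relies on the
// convention that directory entries carry a trailing slash, which is why the
// top-level call wraps dirPath in retainSlash. The helper name and the
// "tmp-scratch" prefix are hypothetical.
func exampleCleanupScratchSpace(storage StorageAPI) error {
	// Recursively removes minioMetaBucket/tmp-scratch/ and everything below it.
	return cleanupDir(storage, minioMetaBucket, "tmp-scratch")
}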

// Checks whether bucket exists.
func isBucketExist(bucket string, obj ObjectLayer) error {
	if !IsValidBucketName(bucket) {
		return BucketNameInvalid{Bucket: bucket}
	}
	_, err := obj.GetBucketInfo(bucket)
	if err != nil {
		return BucketNotFound{Bucket: bucket}
	}
	return nil
}
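
// Illustrative sketch, not part of the original file: request handlers can use
// isBucketExist as a guard before operating on a bucket, returning its typed
// errors straight to the caller. The helper name and bucket name are
// hypothetical.
func exampleGuardBucket(obj ObjectLayer) error {
	if err := isBucketExist("my-bucket", obj); err != nil {
		// err is either BucketNameInvalid or BucketNotFound.
		return err
	}
	// Safe to operate on the bucket here.
	return nil
}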