Mirror of https://github.com/minio/minio.git, synced 2024-12-25 22:55:54 -05:00
fb96779a8a
This PR implements an object layer which combines input erasure sets of XL layers into a unified namespace. This object layer extends the existing erasure coded implementation. The design also assumes that a deployment with more than 16 disks is a static configuration, i.e. if you started the setup with 32 disks as 4 sets of 8 disks each, then you would always need to provide those 4 sets.

Some design details and restrictions:

- Objects are distributed using consistent ordering to a unique erasure coded set.
- Each set has its own dsync, so locks are synchronized properly at the set (erasure layer).
- Each set still has a maximum of 16 disks; you can start with multiple such sets statically.
- Sets are a static collection of disks and cannot be changed; there is no elastic expansion allowed.
- Sets are a static collection of disks and cannot be changed; there is no elastic removal allowed.
- ListObjects() across sets can be noticeably slower, since listing happens on all servers and is merged at this sets layer.

Fixes #5465
Fixes #5464
Fixes #5461
Fixes #5460
Fixes #5459
Fixes #5458
Fixes #5488
Fixes #5489
Fixes #5497
Fixes #5496
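To make the "consistent ordering" placement concrete, here is a minimal, hypothetical sketch of how an object key could be hashed deterministically to one of the static erasure sets. The pickSetIndex helper and the crc32 choice are illustrative assumptions, not the PR's actual placement code.

// Illustrative sketch only: one possible way to map an object key to one of
// N erasure sets. MinIO's real placement logic may differ.
package main

import (
	"fmt"
	"hash/crc32"
)

// pickSetIndex returns a stable set index in [0, setCount) for a given object key.
func pickSetIndex(object string, setCount int) int {
	return int(crc32.ChecksumIEEE([]byte(object)) % uint32(setCount))
}

func main() {
	// With 4 sets of 8 disks each (32 disks total), every object name always
	// resolves to the same set, so no lookup table is needed.
	for _, object := range []string{"photos/2018/a.jpg", "logs/app.log"} {
		fmt.Printf("%s -> set %d\n", object, pickSetIndex(object, 4))
	}
}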
86 lines
2.7 KiB
Go
/*
 * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"path"

	"github.com/minio/minio/pkg/errors"
)

// getLoadBalancedDisks - fetches load balanced (sufficiently randomized) disk slice.
func (xl xlObjects) getLoadBalancedDisks() (disks []StorageAPI) {
	// Based on the random shuffling return back randomized disks.
	for _, i := range hashOrder(UTCNow().String(), len(xl.getDisks())) {
		disks = append(disks, xl.getDisks()[i-1])
	}
	return disks
}

// This function does the following check, suppose
// object is "a/b/c/d", stat makes sure that objects "a/b/c",
// "a/b" and "a" do not exist.
func (xl xlObjects) parentDirIsObject(bucket, parent string) bool {
	var isParentDirObject func(string) bool
	isParentDirObject = func(p string) bool {
		if p == "." || p == "/" {
			return false
		}
		if xl.isObject(bucket, p) {
			// If there is already a file at prefix "p", return true.
			return true
		}
		// Check if there is a file as one of the parent paths.
		return isParentDirObject(path.Dir(p))
	}
	return isParentDirObject(parent)
}

// isObject - returns `true` if the prefix is an object i.e. if
// `xl.json` exists at the leaf, false otherwise.
func (xl xlObjects) isObject(bucket, prefix string) (ok bool) {
	for _, disk := range xl.getLoadBalancedDisks() {
		if disk == nil {
			continue
		}
		// Check if 'prefix' is an object on this 'disk', else continue to check the next disk.
		_, err := disk.StatFile(bucket, path.Join(prefix, xlMetaJSONFile))
		if err == nil {
			return true
		}
		// Ignore for file not found, disk not found or faulty disk.
		if errors.IsErrIgnored(err, xlTreeWalkIgnoredErrs...) {
			continue
		}
		errorIf(err, "Unable to stat a file %s/%s/%s", bucket, prefix, xlMetaJSONFile)
	} // Exhausted all disks - return false.
	return false
}

// Calculate the space occupied by an object in a single disk
func (xl xlObjects) sizeOnDisk(fileSize int64, blockSize int64, dataBlocks int) int64 {
	numBlocks := fileSize / blockSize
	chunkSize := getChunkSize(blockSize, dataBlocks)
	sizeInDisk := numBlocks * chunkSize
	remaining := fileSize % blockSize
	if remaining > 0 {
		sizeInDisk += getChunkSize(remaining, dataBlocks)
	}

	return sizeInDisk
}
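As a rough, standalone illustration of the arithmetic in sizeOnDisk above: assuming getChunkSize behaves like a ceiling division of the block size by the number of data blocks (an assumption for this sketch, not taken from this file), a 25 MiB object erasure coded with a 10 MiB block size and 8 data blocks would be accounted for per disk as follows.

// Standalone sketch of the per-disk size arithmetic used by sizeOnDisk.
// The ceiling-division chunk size is an assumption for illustration only.
package main

import "fmt"

func main() {
	const (
		fileSize   int64 = 25 * 1024 * 1024 // 25 MiB object
		blockSize  int64 = 10 * 1024 * 1024 // 10 MiB erasure block
		dataBlocks int64 = 8                // data shards per block
	)

	chunk := (blockSize + dataBlocks - 1) / dataBlocks // per-disk chunk of one full block
	size := (fileSize / blockSize) * chunk             // two full blocks worth of chunks

	if rem := fileSize % blockSize; rem > 0 {
		size += (rem + dataBlocks - 1) / dataBlocks // the trailing 5 MiB partial block
	}
	// Prints roughly 3.125 MiB: 25 MiB spread across 8 data disks.
	fmt.Printf("each disk stores about %d bytes for this object\n", size)
}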