Add multipart combination to xl-meta (#19780)

Add support for combining multiple object parts.

Parts will be reconstructed and saved separately, and can then be manually combined into the complete object.

Parts will be named `(version_id)-(filename).(partnum).(in)complete`.
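
As a sketch of the intended workflow (the archive name below is made up; the `-combine` and `-xver` flags correspond to the `c.Bool("combine")` and `c.Bool("xver")` lookups in the diff), running something like

    xl-meta -combine -xver inspect.zip

on a hypothetical two-part object would write one file per part, e.g. `(version_id)-inspect.zip.00002.complete` for a fully reconstructed part 2, or the same name with an `.incomplete` suffix when bytes are missing.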
commit b92ac55250
parent 7981509cc8
Author: Klaus Post
Date:   2024-05-23 09:37:31 -07:00 (committed by GitHub)

docs/debugging/xl-meta/main.go

@@ -30,6 +30,7 @@ import (
 	"path/filepath"
 	"regexp"
 	"sort"
+	"strconv"
 	"strings"
 	"time"
@@ -185,6 +186,9 @@ FLAGS:
 			EcN      int
 			DDir     []byte
 			PartNums []int
+			MetaSys  struct {
+				Inline []byte `json:"x-minio-internal-inline-data"`
+			}
 		}
 	}
 	var ei erasureInfo
@@ -199,7 +203,17 @@ FLAGS:
 				filemap[file][verID] = fmt.Sprintf("%s/shard-%02d-of-%02d", verID, idx, ei.V2Obj.EcN+ei.V2Obj.EcM)
 				filemap[file][verID+".json"] = buf.String()
 				for _, i := range ei.V2Obj.PartNums {
+					if len(ei.V2Obj.MetaSys.Inline) != 0 {
+						break
+					}
+					file := file
 					dataFile := fmt.Sprintf("%s%s/part.%d", strings.TrimSuffix(file, "xl.meta"), uuid.UUID(ei.V2Obj.DDir).String(), i)
+					if i > 1 {
+						file = fmt.Sprintf("%s/part.%d", file, i)
+						filemap[file] = make(map[string]string)
+						filemap[file][verID] = fmt.Sprintf("%s/part.%d/shard-%02d-of-%02d", verID, i, idx, ei.V2Obj.EcN+ei.V2Obj.EcM)
+						filemap[file][verID+".json"] = buf.String()
+					}
 					partDataToVerID[dataFile] = [2]string{file, verID}
 				}
 			} else if err != nil {
@@ -415,6 +429,7 @@ FLAGS:
 			}
 			sort.Strings(toPrint)
 			fmt.Printf("{\n%s\n}\n", strings.Join(toPrint, ",\n"))
+			if c.Bool("combine") {
 				for partName, data := range foundData {
 					if verid := partDataToVerID[partName]; verid != [2]string{} {
 						file := verid[0]
@@ -438,15 +453,25 @@ FLAGS:
 						}
 						fmt.Println("Remapped", partName, "to", fn)
 					}
+					delete(partDataToVerID, partName)
 					err := os.WriteFile(fn, data, os.ModePerm)
 					if err != nil {
 						fmt.Println("WriteFile:", err)
 					}
 				}
 			}
+
+			if len(partDataToVerID) > 0 {
+				fmt.Println("MISSING PART FILES:")
+				for k := range partDataToVerID {
+					fmt.Println(k)
+				}
+				fmt.Println("END MISSING PART FILES")
+			}
+		}
 		if len(combineFiles) > 0 {
 			if c.Bool("xver") {
-				if err := combineCrossVer(combineFiles, baseName, foundData); err != nil {
+				if err := combineCrossVer(combineFiles, baseName); err != nil {
 					fmt.Println("ERROR:", err)
 				}
 			} else {
@@ -788,18 +813,14 @@ type mappedData struct {
 	parityData  map[int]map[int][]byte
 	blockOffset int // Offset in bytes to start of block.
 	blocks      int // 0 = one block.
-	objSize     int
+	objSize, partSize int
 }

-func readAndMap(files []string, blockNum int) (*mappedData, error) {
+func readAndMap(files []string, partNum, blockNum int) (*mappedData, error) {
 	var m mappedData
 	sort.Strings(files)
 	m.parityData = make(map[int]map[int][]byte)
 	for _, file := range files {
-		b, err := os.ReadFile(file)
-		if err != nil {
-			return nil, err
-		}
 		meta, err := os.ReadFile(file + ".json")
 		if err != nil {
 			return nil, err
@@ -812,6 +833,8 @@ func readAndMap(files []string, blockNum int) (*mappedData, error) {
 			EcN     int
 			Size    int
 			EcBSize int
+			PartNums  []int
+			PartSizes []int
 		}
 	}
 	var ei erasureInfo
@@ -830,25 +853,38 @@ func readAndMap(files []string, blockNum int) (*mappedData, error) {
 			if ei.V2Obj.Size != m.objSize {
 				return nil, fmt.Errorf("size mismatch. Meta size: %d, Prev: %d", ei.V2Obj.Size, m.objSize)
 			}
+			for i, s := range ei.V2Obj.PartNums {
+				if s == partNum {
+					m.size = ei.V2Obj.PartSizes[i]
+					m.partSize = ei.V2Obj.PartSizes[i]
+					break
+				}
+			}
 		} else {
 			return nil, err
 		}
-		if len(b) < 32 {
-			return nil, fmt.Errorf("file %s too short", file)
-		}
 		offset := ei.V2Obj.EcBSize * blockNum
-		if offset > ei.V2Obj.Size {
-			return nil, fmt.Errorf("block %d out of range. offset %d > size %d", blockNum, offset, ei.V2Obj.Size)
+		if offset >= m.size {
+			return nil, fmt.Errorf("block %d out of range. offset %d > size %d", blockNum, offset, m.size)
 		}
 		m.blockOffset = offset
-		m.blocks = (ei.V2Obj.Size + ei.V2Obj.EcBSize - 1) / ei.V2Obj.EcBSize
+		m.blocks = (m.size + ei.V2Obj.EcBSize - 1) / ei.V2Obj.EcBSize
 		if m.blocks > 0 {
 			m.blocks--
 		}
 		if blockNum < m.blocks {
 			m.size = ei.V2Obj.EcBSize
 		} else {
-			m.size = ei.V2Obj.Size - offset
+			m.size -= offset
 		}
+
+		b, err := os.ReadFile(file)
+		if err != nil {
+			return nil, err
+		}
+
+		if len(b) < 32 {
+			return nil, fmt.Errorf("file %s too short", file)
+		}

 		// Extract block data.
@@ -889,7 +925,7 @@ func readAndMap(files []string, blockNum int) (*mappedData, error) {

 func combine(files []string, out string) error {
 	fmt.Printf("Attempting to combine version %q.\n", out)
-	m, err := readAndMap(files, 0)
+	m, err := readAndMap(files, 1, 0)
 	if err != nil {
 		return err
 	}
@@ -938,7 +974,7 @@ func combine(files []string, out string) error {
 		hasParity := 0
 		for idx, sh := range v {
 			split[idx] = sh
-			if idx >= k && len(v) > 0 {
+			if idx >= k && len(sh) > 0 {
 				hasParity++
 			}
 		}
@@ -976,42 +1012,63 @@ func combine(files []string, out string) error {
 	return nil
 }

-func combineCrossVer(all map[string][]string, baseName string, additional map[string][]byte) error {
-	names := make([]string, 0, len(all))
-	files := make([][]string, 0, len(all))
+func combineCrossVer(all map[string][]string, baseName string) error {
+	names := make([][]string, 0)
+	/// part, verID, file
+	files := make([]map[string][]string, 0)
+	partNums := make(map[int]int)
 	for k, v := range all {
-		names = append(names, k)
-		files = append(files, v)
+		for _, file := range v {
+			part := getPartNum(file)
+			partIdx, ok := partNums[part]
+			if !ok {
+				partIdx = len(names)
+				partNums[part] = partIdx
+				names = append(names, nil)
+				files = append(files, make(map[string][]string))
+			}
+			names[partIdx] = append(names[partIdx], k)
+			files[partIdx][k] = append(files[partIdx][k], file)
+		}
 	}
-	if len(files) <= 1 {
 	if len(files) == 0 {
 		return nil
 	}
-	return combine(files[0], names[0])
+	for part, partIdx := range partNums {
+		if len(files[partIdx]) == 0 {
+			continue
 		}
 		exportedSizes := make(map[int]bool)
 	nextFile:
-	for i, file := range files {
+		for key, file := range files[partIdx] {
+			fmt.Println("Reading base version", file[0], "part", part)
 			var combined []byte
 			var missingAll int
 			var lastValidAll int
 			for block := 0; ; block++ {
-				fmt.Printf("Block %d, Base version %q.\n", block+1, names[i])
-				m, err := readAndMap(file, block)
+				fmt.Printf("Block %d, Base version %q. Part %d. Files %d\n", block+1, key, part, len(file))
+				m, err := readAndMap(file, part, block)
 				if err != nil {
 					return err
 				}
 				if exportedSizes[m.objSize] {
-					fmt.Println("Skipping version", names[i], "as it has already been exported.")
+					fmt.Println("Skipping version", key, "as it has already been exported.")
 					continue nextFile
 				}
 			compareFile:
-				for j, other := range files {
-					if i == j {
+				for otherKey, other := range files[partIdx] {
+					if key == otherKey {
 						continue
 					}
-					fmt.Printf("Reading version %q.\n", names[j])
-					otherM, err := readAndMap(other, block)
+					otherPart := getPartNum(other[0])
+					if part != otherPart {
+						fmt.Println("part ", part, " != other part", otherPart, other[0])
+						continue
+					}
+					// fmt.Println("part ", part, "other part", otherPart, other[0])
+					fmt.Printf("Reading version %q Part %d.\n", otherKey, otherPart)
+					// os.Exit(0)
+					otherM, err := readAndMap(other, part, block)
 					if err != nil {
 						fmt.Println(err)
 						continue
@@ -1023,7 +1080,7 @@ nextFile:
 					for i, filled := range otherM.filled[:m.size] {
 						if filled == 1 && m.filled[i] == 1 {
 							if m.mapped[i] != otherM.mapped[i] {
-								fmt.Println("Data mismatch at byte", i, "- Disregarding version", names[j])
+								fmt.Println("Data mismatch at byte", i, "- Disregarding version", otherKey)
 								continue compareFile
 							}
 							ok++
@@ -1045,7 +1102,7 @@ nextFile:
 						}
 					}
-					fmt.Printf("Data overlaps (%d bytes). Combining with %q.\n", ok, names[j])
+					fmt.Printf("Data overlaps (%d bytes). Combining with %q.\n", ok, otherKey)
 					for i := range otherM.filled {
 						if otherM.filled[i] == 1 {
 							m.filled[i] = 1
@@ -1094,7 +1151,7 @@ nextFile:
 				hasParity := 0
 				for idx, sh := range v {
 					split[idx] = sh
-					if idx >= k && len(v) > 0 {
+					if idx >= k && len(sh) > 0 {
 						hasParity++
 					}
 				}
@@ -1122,17 +1179,20 @@ nextFile:
 					lastValidAll = lastValid + m.blockOffset
 				}
 				if m.blocks == block {
-					if len(combined) != m.objSize {
-						fmt.Println("Combined size mismatch. Expected", m.objSize, "got", len(combined))
+					if len(combined) != m.partSize {
+						fmt.Println("Combined size mismatch. Expected", m.partSize, "got", len(combined))
 					}
-					fmt.Println("Reached block", block+1, "of", m.blocks+1, "for", names[i], ". Done.")
+					fmt.Println("Reached block", block+1, "of", m.blocks+1, "for", key, ". Done.")
 					break
 				}
 			}
 			if lastValidAll == 0 {
 				return errors.New("no valid data found")
 			}
-			out := names[i] + "-" + baseName
+			out := fmt.Sprintf("%s-%s.%05d", key, baseName, part)
+			if len(files) == 1 {
+				out = fmt.Sprintf("%s-%s", key, baseName)
+			}
 			if missingAll > 0 {
 				out += ".incomplete"
 				fmt.Println(missingAll, "bytes missing. Truncating", len(combined)-lastValidAll-1, "from end.")
@@ -1150,6 +1210,7 @@ nextFile:
 			}
 			fmt.Println("Wrote output to", out)
 		}
+	}
 	return nil
 }
@@ -1196,3 +1257,13 @@ func shardSize(blockSize, dataBlocks int) (sz int) {
 	}
 	return
 }
+
+var rePartNum = regexp.MustCompile("/part\\.([0-9]+)/")
+
+func getPartNum(s string) int {
+	if m := rePartNum.FindStringSubmatch(s); len(m) > 1 {
+		n, _ := strconv.Atoi(m[1])
+		return n
+	}
+	return 1
+}
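
For reference, a minimal standalone sketch of how the new getPartNum helper resolves part numbers (the shard paths below are hypothetical; the helper itself is copied from the hunk above):

    package main

    import (
    	"fmt"
    	"regexp"
    	"strconv"
    )

    // Same pattern as above: pull N out of a "/part.N/" path segment.
    var rePartNum = regexp.MustCompile("/part\\.([0-9]+)/")

    func getPartNum(s string) int {
    	if m := rePartNum.FindStringSubmatch(s); len(m) > 1 {
    		n, _ := strconv.Atoi(m[1])
    		return n
    	}
    	return 1 // no "/part.N/" segment: single-part object, so part 1
    }

    func main() {
    	// Hypothetical paths, shaped like the shard names built above.
    	fmt.Println(getPartNum("4ec8e051/part.3/shard-01-of-04")) // 3
    	fmt.Println(getPartNum("bucket/obj/xl.meta"))             // 1
    }

This is also why parts whose paths carry no part directory fall back to part 1: single-part objects keep their original layout.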