mirror of https://github.com/minio/minio.git
Avoid using jsoniter, move to fastjson (#8063)
This is to avoid a code dependency on the unsafe.Pointer type in MinIO, which causes crashes on ARM64 platforms. Refer to #8005, a collection of runtime crashes caused by incorrect unsafe.Pointer usage. We have seen issues like this before when using the jsoniter library in the past. This PR hopes to fix this by using fastjson instead.
This commit is contained in:
parent b3ca304c01
commit 9ca7470ccc
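For readers skimming the diff, the pattern this PR applies in each of the touched parsers is sketched below. This is an illustrative, hypothetical example (the type and field names are made up; the real code lives in the fs, gateway and xl metadata files changed below): borrow a fastjson.Parser from a ParserPool, parse the buffer once, then read individual fields off the resulting *fastjson.Value, with no reflection or unsafe.Pointer-based shortcuts involved.

    package main

    import (
        "fmt"

        "github.com/valyala/fastjson"
    )

    // A pool of reusable fastjson parsers, mirroring the fsParserPool /
    // gwParserPool / xlParserPool variables introduced by this PR.
    var metaParserPool fastjson.ParserPool

    // parseMeta parses a hypothetical metadata blob field by field.
    func parseMeta(buf []byte) (version string, meta map[string]string, err error) {
        parser := metaParserPool.Get()
        defer metaParserPool.Put(parser)

        v, err := parser.ParseBytes(buf)
        if err != nil {
            return "", nil, err
        }

        // Scalar fields are read directly off the parsed value.
        version = string(v.GetStringBytes("version"))

        // Object fields are walked with Visit, the same way parseFSMetaMap /
        // parseXLMetaMap do in this diff.
        meta = make(map[string]string)
        v.GetObject("meta").Visit(func(k []byte, kv *fastjson.Value) {
            meta[string(k)] = string(kv.GetStringBytes())
        })
        return version, meta, nil
    }

    func main() {
        version, meta, err := parseMeta([]byte(`{"version":"1.0.1","meta":{"etag":"abc"}}`))
        fmt.Println(version, meta, err)
    }

Pooling the parsers avoids re-allocating parser state on every metadata read, which is why each changed file gains its own package-level ParserPool.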
@@ -30,7 +30,7 @@ import (
 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/lock"
 	"github.com/minio/minio/pkg/mimedb"
-	"github.com/tidwall/gjson"
+	"github.com/valyala/fastjson"
 )

 // FS format, and object metadata.
@@ -203,44 +203,37 @@ func (m *fsMetaV1) WriteTo(lk *lock.LockedFile) (n int64, err error) {
 	return fi.Size(), nil
 }

-func parseFSVersion(fsMetaBuf []byte) string {
-	return gjson.GetBytes(fsMetaBuf, "version").String()
+func parseFSVersion(v *fastjson.Value) string {
+	return string(v.GetStringBytes("version"))
 }

-func parseFSMetaMap(fsMetaBuf []byte) map[string]string {
-	// Get xlMetaV1.Meta map.
-	metaMapResult := gjson.GetBytes(fsMetaBuf, "meta").Map()
+func parseFSMetaMap(v *fastjson.Value) map[string]string {
 	metaMap := make(map[string]string)
-	for key, valResult := range metaMapResult {
-		metaMap[key] = valResult.String()
-	}
+	// Get fsMetaV1.Meta map.
+	v.GetObject("meta").Visit(func(k []byte, kv *fastjson.Value) {
+		metaMap[string(k)] = string(kv.GetStringBytes())
+	})
 	return metaMap
 }

-func parseFSPartsArray(fsMetaBuf []byte) []ObjectPartInfo {
+func parseFSPartsArray(v *fastjson.Value) []ObjectPartInfo {
 	// Get xlMetaV1.Parts array
 	var partsArray []ObjectPartInfo

-	partsArrayResult := gjson.GetBytes(fsMetaBuf, "parts")
-	partsArrayResult.ForEach(func(key, part gjson.Result) bool {
-		partJSON := part.String()
-		number := gjson.Get(partJSON, "number").Int()
-		name := gjson.Get(partJSON, "name").String()
-		etag := gjson.Get(partJSON, "etag").String()
-		size := gjson.Get(partJSON, "size").Int()
-		actualSize := gjson.Get(partJSON, "actualSize").Int()
+	for _, result := range v.GetArray("parts") {
 		partsArray = append(partsArray, ObjectPartInfo{
-			Number:     int(number),
-			Name:       name,
-			ETag:       etag,
-			Size:       size,
-			ActualSize: int64(actualSize),
+			Number:     result.GetInt("number"),
+			Name:       string(result.GetStringBytes("name")),
+			ETag:       string(result.GetStringBytes("etag")),
+			Size:       result.GetInt64("size"),
+			ActualSize: result.GetInt64("actualSize"),
 		})
-		return true
-	})
+	}
 	return partsArray
 }

+// fs.json parser pool
+var fsParserPool fastjson.ParserPool
+
 func (m *fsMetaV1) ReadFrom(ctx context.Context, lk *lock.LockedFile) (n int64, err error) {
 	var fsMetaBuf []byte
 	fi, err := lk.Stat()
@@ -260,8 +253,17 @@ func (m *fsMetaV1) ReadFrom(ctx context.Context, lk *lock.LockedFile) (n int64,
 		return 0, io.EOF
 	}

+	parser := fsParserPool.Get()
+	defer fsParserPool.Put(parser)
+
+	var v *fastjson.Value
+	v, err = parser.ParseBytes(fsMetaBuf)
+	if err != nil {
+		return 0, err
+	}
+
 	// obtain version.
-	m.Version = parseFSVersion(fsMetaBuf)
+	m.Version = parseFSVersion(v)

 	// Verify if the format is valid, return corrupted format
 	// for unrecognized formats.
@@ -272,10 +274,10 @@ func (m *fsMetaV1) ReadFrom(ctx context.Context, lk *lock.LockedFile) (n int64,
 	}

 	// obtain parts information
-	m.Parts = parseFSPartsArray(fsMetaBuf)
+	m.Parts = parseFSPartsArray(v)

 	// obtain metadata.
-	m.Meta = parseFSMetaMap(fsMetaBuf)
+	m.Meta = parseFSMetaMap(v)

 	// Success.
 	return int64(len(fsMetaBuf)), nil

@@ -30,6 +30,7 @@ import (

 	"github.com/minio/minio/cmd/logger"
 	mioutil "github.com/minio/minio/pkg/ioutil"
+	"github.com/valyala/fastjson"
 )

 // Returns EXPORT/.minio.sys/multipart/SHA256/UPLOADID
@@ -456,7 +457,8 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
 	}
 	for i, part := range result.Parts {
 		var stat os.FileInfo
-		stat, err = fsStatFile(ctx, pathJoin(uploadIDDir, fs.encodePartFile(part.PartNumber, part.ETag, part.ActualSize)))
+		stat, err = fsStatFile(ctx, pathJoin(uploadIDDir,
+			fs.encodePartFile(part.PartNumber, part.ETag, part.ActualSize)))
 		if err != nil {
 			return result, toObjectErr(err)
 		}
@@ -470,7 +472,16 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
 		return result, err
 	}

-	result.UserDefined = parseFSMetaMap(fsMetaBytes)
+	parser := fsParserPool.Get()
+	defer fsParserPool.Put(parser)
+
+	var v *fastjson.Value
+	v, err = parser.ParseBytes(fsMetaBytes)
+	if err != nil {
+		return result, err
+	}
+
+	result.UserDefined = parseFSMetaMap(v)
 	return result, nil
 }

cmd/fs-v1.go (14 changed lines)
@@ -38,6 +38,7 @@ import (
 	"github.com/minio/minio/pkg/mimedb"
 	"github.com/minio/minio/pkg/mountinfo"
 	"github.com/minio/minio/pkg/policy"
+	"github.com/valyala/fastjson"
 )

 // Default etag is used for pre-existing objects.
@@ -1092,13 +1093,22 @@ func (fs *FSObjects) getObjectETag(ctx context.Context, bucket, entry string, lo
 		return "", toObjectErr(err, bucket, entry)
 	}

+	parser := fsParserPool.Get()
+	defer fsParserPool.Put(parser)
+
+	var v *fastjson.Value
+	v, err = parser.ParseBytes(fsMetaBuf)
+	if err != nil {
+		return "", toObjectErr(err, bucket, entry)
+	}
+
 	// Check if FS metadata is valid, if not return error.
-	if !isFSMetaValid(parseFSVersion(fsMetaBuf)) {
+	if !isFSMetaValid(parseFSVersion(v)) {
 		logger.LogIf(ctx, errCorruptedFormat)
 		return "", toObjectErr(errCorruptedFormat, bucket, entry)
 	}

-	return extractETag(parseFSMetaMap(fsMetaBuf)), nil
+	return extractETag(parseFSMetaMap(v)), nil
 }

 // ListObjects - list all objects at prefix upto maxKeys., optionally delimited by '/'. Maintains the list pool

@@ -27,7 +27,7 @@ import (
 	minio "github.com/minio/minio/cmd"
 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/hash"
-	"github.com/tidwall/gjson"
+	"github.com/valyala/fastjson"
 )

 var (
@@ -138,81 +138,98 @@ func (m gwMetaV1) ObjectToPartOffset(ctx context.Context, offset int64) (partInd
 }

 // parses gateway metadata stat info from metadata json
-func parseGWStat(gwMetaBuf []byte) (si minio.StatInfo, e error) {
+func parseGWStat(v *fastjson.Value) (si minio.StatInfo, err error) {
 	// obtain stat info.
-	stat := minio.StatInfo{}
-	// fetching modTime.
-	modTime, err := time.Parse(time.RFC3339, gjson.GetBytes(gwMetaBuf, "stat.modTime").String())
+	st := v.GetObject("stat")
+	var mb []byte
+	mb, err = st.Get("modTime").StringBytes()
 	if err != nil {
 		return si, err
 	}
+	// fetching modTime.
+	si.ModTime, err = time.Parse(time.RFC3339, string(mb))
+	if err != nil {
+		return si, err
+	}
-	stat.ModTime = modTime
-	// obtain Stat.Size .
-	stat.Size = gjson.GetBytes(gwMetaBuf, "stat.size").Int()
-	return stat, nil
+	si.Size, err = st.Get("size").Int64()
+	if err != nil {
+		return si, err
+	}
+	return si, nil
 }

 // parses gateway metadata version from metadata json
-func parseGWVersion(gwMetaBuf []byte) string {
-	return gjson.GetBytes(gwMetaBuf, "version").String()
+func parseGWVersion(v *fastjson.Value) string {
+	return string(v.GetStringBytes("version"))
 }

 // parses gateway ETag from metadata json
-func parseGWETag(gwMetaBuf []byte) string {
-	return gjson.GetBytes(gwMetaBuf, "etag").String()
+func parseGWETag(v *fastjson.Value) string {
+	return string(v.GetStringBytes("etag"))
 }

 // parses gateway metadata format from metadata json
-func parseGWFormat(gwMetaBuf []byte) string {
-	return gjson.GetBytes(gwMetaBuf, "format").String()
+func parseGWFormat(v *fastjson.Value) string {
+	return string(v.GetStringBytes("format"))
 }

 // parses gateway metadata json to get list of ObjectPartInfo
-func parseGWParts(gwMetaBuf []byte) []minio.ObjectPartInfo {
+func parseGWParts(v *fastjson.Value) []minio.ObjectPartInfo {
 	// Parse the GW Parts.
-	partsResult := gjson.GetBytes(gwMetaBuf, "parts").Array()
+	partsResult := v.GetArray("parts")
 	partInfo := make([]minio.ObjectPartInfo, len(partsResult))
 	for i, p := range partsResult {
-		info := minio.ObjectPartInfo{}
-		info.Number = int(p.Get("number").Int())
-		info.Name = p.Get("name").String()
-		info.ETag = p.Get("etag").String()
-		info.Size = p.Get("size").Int()
-		partInfo[i] = info
+		partInfo[i] = minio.ObjectPartInfo{
+			Number: p.GetInt("number"),
+			Name:   string(p.GetStringBytes("name")),
+			ETag:   string(p.GetStringBytes("etag")),
+			Size:   p.GetInt64("size"),
+		}
 	}
 	return partInfo
 }

 // parses gateway metadata json to get the metadata map
-func parseGWMetaMap(gwMetaBuf []byte) map[string]string {
-	// Get gwMetaV1.Meta map.
-	metaMapResult := gjson.GetBytes(gwMetaBuf, "meta").Map()
+func parseGWMetaMap(v *fastjson.Value) map[string]string {
 	metaMap := make(map[string]string)
-	for key, valResult := range metaMapResult {
-		metaMap[key] = valResult.String()
-	}
+	// Get gwMetaV1.Meta map.
+	v.GetObject("meta").Visit(func(k []byte, kv *fastjson.Value) {
+		metaMap[string(k)] = string(kv.GetStringBytes())
+	})
 	return metaMap
 }

-// Constructs GWMetaV1 using `gjson` lib to retrieve each field.
-func gwMetaUnmarshalJSON(ctx context.Context, gwMetaBuf []byte) (gwMeta gwMetaV1, e error) {
+var gwParserPool fastjson.ParserPool
+
+// Constructs GWMetaV1 using `fastjson` lib to retrieve each field.
+func gwMetaUnmarshalJSON(ctx context.Context, gwMetaBuf []byte) (gwMeta gwMetaV1, err error) {
+	parser := gwParserPool.Get()
+	defer gwParserPool.Put(parser)
+
+	var v *fastjson.Value
+	v, err = parser.ParseBytes(gwMetaBuf)
+	if err != nil {
+		return gwMeta, err
+	}
+
 	// obtain version.
-	gwMeta.Version = parseGWVersion(gwMetaBuf)
+	gwMeta.Version = parseGWVersion(v)
 	// obtain format.
-	gwMeta.Format = parseGWFormat(gwMetaBuf)
+	gwMeta.Format = parseGWFormat(v)
 	// Parse gwMetaV1.Stat .
-	stat, err := parseGWStat(gwMetaBuf)
+	stat, err := parseGWStat(v)
 	if err != nil {
 		logger.LogIf(ctx, err)
 		return gwMeta, err
 	}
-	gwMeta.ETag = parseGWETag(gwMetaBuf)
+	gwMeta.ETag = parseGWETag(v)
 	gwMeta.Stat = stat

 	// Parse the GW Parts.
-	gwMeta.Parts = parseGWParts(gwMetaBuf)
+	gwMeta.Parts = parseGWParts(v)
 	// parse gwMetaV1.
-	gwMeta.Meta = parseGWMetaMap(gwMetaBuf)
+	gwMeta.Meta = parseGWMetaMap(v)

 	return gwMeta, nil
 }

@@ -53,25 +53,25 @@ func TestReadGWMetadata(t *testing.T) {
 		metaStr string
 		pass    bool
 	}{
-		{`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", {"stat": {"size": "132", "modTime": "2018-08-31T22:25:39.23626461Z" }}}`, true},
-		{`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", {"stat": {"size": "132", "modTime": "0000-00-00T00:00:00.00000000Z" }}}`, false},
-		{`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", {"stat": {"size": "5242880", "modTime": "2018-08-31T22:25:39.23626461Z" }},"meta":{"content-type":"application/octet-stream","etag":"57c743902b2fc8eea6ba3bb4fc58c8e8"},"parts":[{"number":1,"name":"part.1","etag":"","size":5242880}]}}`, true},
-		{`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", {"stat": {"size": "68190720", "modTime": "2018-08-31T22:25:39.23626461Z" }},"meta":{"X-Minio-Internal-Encrypted-Multipart":"","X-Minio-Internal-Server-Side-Encryption-Iv":"kdbOcKdXD3Sew8tOiHe5eI9xkX1oQ2W9JURz0oslCZA=","X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm":"DAREv2-HMAC-SHA256","X-Minio-Internal-Server-Side-Encryption-Sealed-Key":"IAAfAMfqKrxMXC9LuiI7ENP+p0xArepzAiIeB/MftFp7Xmq2OzDkKlmNbj5RKI89RrjiAbOVLSSEMvqQsrIrTQ==","content-type":"text/plain; charset=utf-8","etag":"2b137fa4ab80126af54623b010c98de6-2"},"parts":[{"number":1,"name":"part.1","etag":"c5cac075eefdab801a5198812f51b36e","size":67141632},{"number":2,"name":"part.2","etag":"ccdf4b774bc3be8eef9a8987309e8171","size":1049088}]}`, true},
-		{`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", {"stat": {"size": "68190720", "modTime": "2018-08-31T22:25:39.23626461Z" }},"meta":{"X-Minio-Internal-Encrypted-Multipart":"","X-Minio-Internal-Server-Side-Encryption-Iv":"kdbOcKdXD3Sew8tOiHe5eI9xkX1oQ2W9JURz0oslCZA=","X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm":"DAREv2-HMAC-SHA256","X-Minio-Internal-Server-Side-Encryption-Sealed-Key":"IAAfAMfqKrxMXC9LuiI7ENP+p0xArepzAiIeB/MftFp7Xmq2OzDkKlmNbj5RKI89RrjiAbOVLSSEMvqQsrIrTQ==","content-type":"text/plain; charset=utf-8","etag":"2b137fa4ab80126af54623b010c98de6-2"},"parts":"123"}`, true},
+		{`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", "stat": {"size": 132, "modTime": "2018-08-31T22:25:39.23626461Z" }}`, true},
+		{`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", "stat": {"size": 132, "modTime": "0000-00-00T00:00:00.00000000Z" }}`, false},
+		{`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", "stat": {"size": 5242880, "modTime": "2018-08-31T22:25:39.23626461Z" },"meta":{"content-type":"application/octet-stream","etag":"57c743902b2fc8eea6ba3bb4fc58c8e8"},"parts":[{"number":1,"name":"part.1","etag":"","size":5242880}]}`, true},
+		{`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", "stat": {"size": 68190720, "modTime": "2018-08-31T22:25:39.23626461Z" },"meta":{"X-Minio-Internal-Encrypted-Multipart":"","X-Minio-Internal-Server-Side-Encryption-Iv":"kdbOcKdXD3Sew8tOiHe5eI9xkX1oQ2W9JURz0oslCZA=","X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm":"DAREv2-HMAC-SHA256","X-Minio-Internal-Server-Side-Encryption-Sealed-Key":"IAAfAMfqKrxMXC9LuiI7ENP+p0xArepzAiIeB/MftFp7Xmq2OzDkKlmNbj5RKI89RrjiAbOVLSSEMvqQsrIrTQ==","content-type":"text/plain; charset=utf-8","etag":"2b137fa4ab80126af54623b010c98de6-2"},"parts":[{"number":1,"name":"part.1","etag":"c5cac075eefdab801a5198812f51b36e","size":67141632},{"number":2,"name":"part.2","etag":"ccdf4b774bc3be8eef9a8987309e8171","size":1049088}]}`, true},
+		{`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", "stat": {"size": "68190720", "modTime": "2018-08-31T22:25:39.23626461Z" },"meta":{"X-Minio-Internal-Encrypted-Multipart":"","X-Minio-Internal-Server-Side-Encryption-Iv":"kdbOcKdXD3Sew8tOiHe5eI9xkX1oQ2W9JURz0oslCZA=","X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm":"DAREv2-HMAC-SHA256","X-Minio-Internal-Server-Side-Encryption-Sealed-Key":"IAAfAMfqKrxMXC9LuiI7ENP+p0xArepzAiIeB/MftFp7Xmq2OzDkKlmNbj5RKI89RrjiAbOVLSSEMvqQsrIrTQ==","content-type":"text/plain; charset=utf-8","etag":"2b137fa4ab80126af54623b010c98de6-2"},"parts":"123"}`, false},
 	}

 	for i, tt := range tests {
 		buf := bytes.NewBufferString(tt.metaStr)
 		m, err := readGWMetadata(context.Background(), *buf)
 		if err != nil && tt.pass {
-			t.Errorf("Test %d: Expected parse gw metadata to succeed, but failed", i)
+			t.Errorf("Test %d: Expected parse gw metadata to succeed, but failed, %s", i+1, err)
 		}
 		if err == nil && !tt.pass {
-			t.Errorf("Test %d: Expected parse gw metadata to succeed, but failed", i)
+			t.Errorf("Test %d: Expected parse gw metadata to succeed, but failed", i+1)
 		}
 		if err == nil {
 			if m.Version != gwMetaVersion {
-				t.Errorf("Test %d: Expected version %s, but failed with %s", i, gwMetaVersion, m.Version)
+				t.Errorf("Test %d: Expected version %s, but failed with %s", i+1, gwMetaVersion, m.Version)
 			}
 		}
 	}

@@ -28,7 +28,6 @@ import (
 	"sync"
 	"time"

-	jsoniter "github.com/json-iterator/go"
 	"github.com/minio/minio/cmd/logger"
 )

@@ -76,8 +75,9 @@ func (c ChecksumInfo) MarshalJSON() ([]byte, error) {

 // UnmarshalJSON - should never be called, instead xlMetaV1UnmarshalJSON() should be used.
 func (c *ChecksumInfo) UnmarshalJSON(data []byte) error {
+	logger.LogIf(context.Background(), errUnexpected)
+
 	var info checksumInfoJSON
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
 	if err := json.Unmarshal(data, &info); err != nil {
 		return err
 	}

@@ -1,5 +1,5 @@
 /*
- * MinIO Cloud Storage, (C) 2016 MinIO, Inc.
+ * MinIO Cloud Storage, (C) 2016-2019 MinIO, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,15 +18,15 @@ package cmd

 import (
 	"context"
+	"encoding/hex"
 	"errors"
 	"hash/crc32"
 	"path"
 	"sync"
 	"time"

-	jsoniter "github.com/json-iterator/go"
 	"github.com/minio/minio/cmd/logger"
-	"github.com/tidwall/gjson"
+	"github.com/valyala/fastjson"
 )

 // Returns number of errors that occurred the most (incl. nil) and the
@@ -117,59 +117,167 @@ func hashOrder(key string, cardinality int) []int {
 	return nums
 }

-func parseXLStat(xlMetaBuf []byte) (si statInfo, e error) {
+func parseXLStat(v *fastjson.Value) (si statInfo, err error) {
 	// obtain stat info.
-	stat := statInfo{}
-	// fetching modTime.
-	modTime, err := time.Parse(time.RFC3339, gjson.GetBytes(xlMetaBuf, "stat.modTime").String())
+	st := v.GetObject("stat")
+	var mb []byte
+	mb, err = st.Get("modTime").StringBytes()
 	if err != nil {
 		return si, err
 	}
+	// fetching modTime.
+	si.ModTime, err = time.Parse(time.RFC3339, string(mb))
+	if err != nil {
+		return si, err
+	}
-	stat.ModTime = modTime
-	// obtain Stat.Size .
-	stat.Size = gjson.GetBytes(xlMetaBuf, "stat.size").Int()
-	return stat, nil
+	si.Size, err = st.Get("size").Int64()
+	if err != nil {
+		return si, err
+	}
+	return si, nil
 }

-func parseXLVersion(xlMetaBuf []byte) string {
-	return gjson.GetBytes(xlMetaBuf, "version").String()
+func parseXLVersion(v *fastjson.Value) string {
+	return string(v.GetStringBytes("version"))
 }

-func parseXLFormat(xlMetaBuf []byte) string {
-	return gjson.GetBytes(xlMetaBuf, "format").String()
+func parseXLFormat(v *fastjson.Value) string {
+	return string(v.GetStringBytes("format"))
 }

-func parseXLParts(xlMetaBuf []byte) []ObjectPartInfo {
+func parseXLRelease(v *fastjson.Value) string {
+	return string(v.GetStringBytes("minio", "release"))
+}
+
+func parseXLErasureInfo(ctx context.Context, v *fastjson.Value) (ErasureInfo, error) {
+	erasure := ErasureInfo{}
+	// parse the xlV1Meta.Erasure.Distribution.
+	er := v.GetObject("erasure")
+	disResult := er.Get("distribution").GetArray()
+	distribution := make([]int, len(disResult))
+	var err error
+	for i, dis := range disResult {
+		distribution[i], err = dis.Int()
+		if err != nil {
+			return erasure, err
+		}
+	}
+	erasure.Distribution = distribution
+
+	erasure.Algorithm = string(er.Get("algorithm").GetStringBytes())
+	erasure.DataBlocks = er.Get("data").GetInt()
+	erasure.ParityBlocks = er.Get("parity").GetInt()
+	erasure.BlockSize = er.Get("blockSize").GetInt64()
+	erasure.Index = er.Get("index").GetInt()
+	checkSumsResult := er.Get("checksum").GetArray()
+
+	// Parse xlMetaV1.Erasure.Checksum array.
+	checkSums := make([]ChecksumInfo, len(checkSumsResult))
+	for i, ck := range checkSumsResult {
+		algorithm := BitrotAlgorithmFromString(string(ck.GetStringBytes("algorithm")))
+		if !algorithm.Available() {
+			logger.LogIf(ctx, errBitrotHashAlgoInvalid)
+			return erasure, errBitrotHashAlgoInvalid
+		}
+		srcHash := ck.GetStringBytes("hash")
+		n, err := hex.Decode(srcHash, srcHash)
+		if err != nil {
+			logger.LogIf(ctx, err)
+			return erasure, err
+		}
+		nmb := ck.GetStringBytes("name")
+		if nmb == nil {
+			return erasure, errCorruptedFormat
+		}
+		checkSums[i] = ChecksumInfo{
+			Name:      string(nmb),
+			Algorithm: algorithm,
+			Hash:      srcHash[:n],
+		}
+	}
+	erasure.Checksums = checkSums
+	return erasure, nil
+}
+
+func parseXLParts(partsResult []*fastjson.Value) []ObjectPartInfo {
 	// Parse the XL Parts.
-	partsResult := gjson.GetBytes(xlMetaBuf, "parts").Array()
 	partInfo := make([]ObjectPartInfo, len(partsResult))
 	for i, p := range partsResult {
-		info := ObjectPartInfo{}
-		info.Number = int(p.Get("number").Int())
-		info.Name = p.Get("name").String()
-		info.ETag = p.Get("etag").String()
-		info.Size = p.Get("size").Int()
-		info.ActualSize = p.Get("actualSize").Int()
-		partInfo[i] = info
+		partInfo[i] = ObjectPartInfo{
+			Number:     p.GetInt("number"),
+			Name:       string(p.GetStringBytes("name")),
+			ETag:       string(p.GetStringBytes("etag")),
+			Size:       p.GetInt64("size"),
+			ActualSize: p.GetInt64("actualSize"),
+		}
 	}
 	return partInfo
 }

-func parseXLMetaMap(xlMetaBuf []byte) map[string]string {
-	// Get xlMetaV1.Meta map.
-	metaMapResult := gjson.GetBytes(xlMetaBuf, "meta").Map()
+func parseXLMetaMap(v *fastjson.Value) map[string]string {
 	metaMap := make(map[string]string)
-	for key, valResult := range metaMapResult {
-		metaMap[key] = valResult.String()
-	}
+	// Get xlMetaV1.Meta map.
+	v.GetObject("meta").Visit(func(k []byte, kv *fastjson.Value) {
+		metaMap[string(k)] = string(kv.GetStringBytes())
+	})
 	return metaMap
 }

-// Constructs XLMetaV1 using `gjson` lib to retrieve each field.
-func xlMetaV1UnmarshalJSON(ctx context.Context, xlMetaBuf []byte) (xlMeta xlMetaV1, e error) {
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
-	e = json.Unmarshal(xlMetaBuf, &xlMeta)
-	return xlMeta, e
+// xl.json Parser pool
+var xlParserPool fastjson.ParserPool
+
+// Constructs XLMetaV1 using `fastjson` lib to retrieve each field.
+func xlMetaV1UnmarshalJSON(ctx context.Context, xlMetaBuf []byte) (xlMeta xlMetaV1, err error) {
+	parser := xlParserPool.Get()
+	defer xlParserPool.Put(parser)
+
+	var v *fastjson.Value
+	v, err = parser.ParseBytes(xlMetaBuf)
+	if err != nil {
+		return xlMeta, err
+	}
+
+	// obtain version.
+	xlMeta.Version = parseXLVersion(v)
+	// obtain format.
+	xlMeta.Format = parseXLFormat(v)
+
+	// Validate if the xl.json we read is sane, return corrupted format.
+	if !isXLMetaFormatValid(xlMeta.Version, xlMeta.Format) {
+		// For version mismatchs and unrecognized format, return corrupted format.
+		logger.LogIf(ctx, errCorruptedFormat)
+		return xlMeta, errCorruptedFormat
+	}
+
+	// Parse xlMetaV1.Stat .
+	stat, err := parseXLStat(v)
+	if err != nil {
+		logger.LogIf(ctx, err)
+		return xlMeta, err
+	}
+
+	xlMeta.Stat = stat
+	// parse the xlV1Meta.Erasure fields.
+	xlMeta.Erasure, err = parseXLErasureInfo(ctx, v)
+	if err != nil {
+		return xlMeta, err
+	}
+
+	// Check for scenario where checksum information missing for some parts.
+	partsResult := v.Get("parts").GetArray()
+	if len(xlMeta.Erasure.Checksums) != len(partsResult) {
+		return xlMeta, errCorruptedFormat
+	}
+
+	// Parse the XL Parts.
+	xlMeta.Parts = parseXLParts(partsResult)
+	// Get the xlMetaV1.Realse field.
+	xlMeta.Minio.Release = parseXLRelease(v)
+	// parse xlMetaV1.
+	xlMeta.Meta = parseXLMetaMap(v)
+
+	return xlMeta, nil
 }

 // read xl.json from the given disk, parse and return xlV1MetaV1.Parts.
@@ -181,15 +289,18 @@ func readXLMetaParts(ctx context.Context, disk StorageAPI, bucket string, object
 		return nil, nil, err
 	}

-	// obtain xlMetaV1{}.Partsusing `github.com/tidwall/gjson`.
-	xlMetaParts := parseXLParts(xlMetaBuf)
-	xlMetaMap := parseXLMetaMap(xlMetaBuf)
+	var xlMeta xlMetaV1
+	xlMeta, err = xlMetaV1UnmarshalJSON(ctx, xlMetaBuf)
+	if err != nil {
+		return nil, nil, err
+	}

-	return xlMetaParts, xlMetaMap, nil
+	return xlMeta.Parts, xlMeta.Meta, nil
 }

-// read xl.json from the given disk and parse xlV1Meta.Stat and xlV1Meta.Meta using gjson.
-func readXLMetaStat(ctx context.Context, disk StorageAPI, bucket string, object string) (si statInfo, mp map[string]string, e error) {
+// read xl.json from the given disk and parse xlV1Meta.Stat and xlV1Meta.Meta using fastjson.
+func readXLMetaStat(ctx context.Context, disk StorageAPI, bucket string, object string) (si statInfo,
+	mp map[string]string, e error) {
 	// Reads entire `xl.json`.
 	xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
 	if err != nil {
@@ -197,31 +308,14 @@ func readXLMetaStat(ctx context.Context, disk StorageAPI, bucket string, object
 		return si, nil, err
 	}

-	// obtain version.
-	xlVersion := parseXLVersion(xlMetaBuf)
-
-	// obtain format.
-	xlFormat := parseXLFormat(xlMetaBuf)
-
-	// Validate if the xl.json we read is sane, return corrupted format.
-	if !isXLMetaFormatValid(xlVersion, xlFormat) {
-		// For version mismatchs and unrecognized format, return corrupted format.
-		logger.LogIf(ctx, errCorruptedFormat)
-		return si, nil, errCorruptedFormat
-	}
-
-	// obtain xlMetaV1{}.Meta using `github.com/tidwall/gjson`.
-	xlMetaMap := parseXLMetaMap(xlMetaBuf)
-
-	// obtain xlMetaV1{}.Stat using `github.com/tidwall/gjson`.
-	xlStat, err := parseXLStat(xlMetaBuf)
+	var xlMeta xlMetaV1
+	xlMeta, err = xlMetaV1UnmarshalJSON(ctx, xlMetaBuf)
 	if err != nil {
-		logger.LogIf(ctx, err)
-		return si, nil, err
+		return si, mp, err
 	}

 	// Return structured `xl.json`.
-	return xlStat, xlMetaMap, nil
+	return xlMeta.Stat, xlMeta.Meta, nil
 }

 // readXLMeta reads `xl.json` and returns back XL metadata structure.
@@ -238,7 +332,6 @@ func readXLMeta(ctx context.Context, disk StorageAPI, bucket string, object stri
 	if len(xlMetaBuf) == 0 {
 		return xlMetaV1{}, errFileNotFound
 	}
-	// obtain xlMetaV1{} using `github.com/tidwall/gjson`.
 	xlMeta, err = xlMetaV1UnmarshalJSON(ctx, xlMetaBuf)
 	if err != nil {
 		logger.GetReqInfo(ctx).AppendTags("disk", disk.String())

@@ -212,99 +212,99 @@ func getSampleXLMeta(totalParts int) xlMetaV1 {
 	return xlMeta
 }

-// Compare the unmarshaled XLMetaV1 with the one obtained from gjson parsing.
-func compareXLMetaV1(t *testing.T, unMarshalXLMeta, gjsonXLMeta xlMetaV1) {
-	// Start comparing the fields of xlMetaV1 obtained from gjson parsing with one parsed using json unmarshaling.
-	if unMarshalXLMeta.Version != gjsonXLMeta.Version {
-		t.Errorf("Expected the Version to be \"%s\", but got \"%s\".", unMarshalXLMeta.Version, gjsonXLMeta.Version)
+// Compare the unmarshaled XLMetaV1 with the one obtained from fastjson parsing.
+func compareXLMetaV1(t *testing.T, unMarshalXLMeta, fastjsonXLMeta xlMetaV1) {
+	// Start comparing the fields of xlMetaV1 obtained from fastjson parsing with one parsed using json unmarshaling.
+	if unMarshalXLMeta.Version != fastjsonXLMeta.Version {
+		t.Errorf("Expected the Version to be \"%s\", but got \"%s\".", unMarshalXLMeta.Version, fastjsonXLMeta.Version)
 	}
-	if unMarshalXLMeta.Format != gjsonXLMeta.Format {
-		t.Errorf("Expected the format to be \"%s\", but got \"%s\".", unMarshalXLMeta.Format, gjsonXLMeta.Format)
+	if unMarshalXLMeta.Format != fastjsonXLMeta.Format {
+		t.Errorf("Expected the format to be \"%s\", but got \"%s\".", unMarshalXLMeta.Format, fastjsonXLMeta.Format)
 	}
-	if unMarshalXLMeta.Stat.Size != gjsonXLMeta.Stat.Size {
-		t.Errorf("Expected the stat size to be %v, but got %v.", unMarshalXLMeta.Stat.Size, gjsonXLMeta.Stat.Size)
+	if unMarshalXLMeta.Stat.Size != fastjsonXLMeta.Stat.Size {
+		t.Errorf("Expected the stat size to be %v, but got %v.", unMarshalXLMeta.Stat.Size, fastjsonXLMeta.Stat.Size)
 	}
-	if !unMarshalXLMeta.Stat.ModTime.Equal(gjsonXLMeta.Stat.ModTime) {
-		t.Errorf("Expected the modTime to be \"%v\", but got \"%v\".", unMarshalXLMeta.Stat.ModTime, gjsonXLMeta.Stat.ModTime)
+	if !unMarshalXLMeta.Stat.ModTime.Equal(fastjsonXLMeta.Stat.ModTime) {
+		t.Errorf("Expected the modTime to be \"%v\", but got \"%v\".", unMarshalXLMeta.Stat.ModTime, fastjsonXLMeta.Stat.ModTime)
 	}
-	if unMarshalXLMeta.Erasure.Algorithm != gjsonXLMeta.Erasure.Algorithm {
-		t.Errorf("Expected the erasure algorithm to be \"%v\", but got \"%v\".", unMarshalXLMeta.Erasure.Algorithm, gjsonXLMeta.Erasure.Algorithm)
+	if unMarshalXLMeta.Erasure.Algorithm != fastjsonXLMeta.Erasure.Algorithm {
+		t.Errorf("Expected the erasure algorithm to be \"%v\", but got \"%v\".", unMarshalXLMeta.Erasure.Algorithm, fastjsonXLMeta.Erasure.Algorithm)
 	}
-	if unMarshalXLMeta.Erasure.DataBlocks != gjsonXLMeta.Erasure.DataBlocks {
-		t.Errorf("Expected the erasure data blocks to be %v, but got %v.", unMarshalXLMeta.Erasure.DataBlocks, gjsonXLMeta.Erasure.DataBlocks)
+	if unMarshalXLMeta.Erasure.DataBlocks != fastjsonXLMeta.Erasure.DataBlocks {
+		t.Errorf("Expected the erasure data blocks to be %v, but got %v.", unMarshalXLMeta.Erasure.DataBlocks, fastjsonXLMeta.Erasure.DataBlocks)
 	}
-	if unMarshalXLMeta.Erasure.ParityBlocks != gjsonXLMeta.Erasure.ParityBlocks {
-		t.Errorf("Expected the erasure parity blocks to be %v, but got %v.", unMarshalXLMeta.Erasure.ParityBlocks, gjsonXLMeta.Erasure.ParityBlocks)
+	if unMarshalXLMeta.Erasure.ParityBlocks != fastjsonXLMeta.Erasure.ParityBlocks {
+		t.Errorf("Expected the erasure parity blocks to be %v, but got %v.", unMarshalXLMeta.Erasure.ParityBlocks, fastjsonXLMeta.Erasure.ParityBlocks)
 	}
-	if unMarshalXLMeta.Erasure.BlockSize != gjsonXLMeta.Erasure.BlockSize {
-		t.Errorf("Expected the erasure block size to be %v, but got %v.", unMarshalXLMeta.Erasure.BlockSize, gjsonXLMeta.Erasure.BlockSize)
+	if unMarshalXLMeta.Erasure.BlockSize != fastjsonXLMeta.Erasure.BlockSize {
+		t.Errorf("Expected the erasure block size to be %v, but got %v.", unMarshalXLMeta.Erasure.BlockSize, fastjsonXLMeta.Erasure.BlockSize)
 	}
-	if unMarshalXLMeta.Erasure.Index != gjsonXLMeta.Erasure.Index {
-		t.Errorf("Expected the erasure index to be %v, but got %v.", unMarshalXLMeta.Erasure.Index, gjsonXLMeta.Erasure.Index)
+	if unMarshalXLMeta.Erasure.Index != fastjsonXLMeta.Erasure.Index {
+		t.Errorf("Expected the erasure index to be %v, but got %v.", unMarshalXLMeta.Erasure.Index, fastjsonXLMeta.Erasure.Index)
 	}
-	if len(unMarshalXLMeta.Erasure.Distribution) != len(gjsonXLMeta.Erasure.Distribution) {
-		t.Errorf("Expected the size of Erasure Distribution to be %d, but got %d.", len(unMarshalXLMeta.Erasure.Distribution), len(gjsonXLMeta.Erasure.Distribution))
+	if len(unMarshalXLMeta.Erasure.Distribution) != len(fastjsonXLMeta.Erasure.Distribution) {
+		t.Errorf("Expected the size of Erasure Distribution to be %d, but got %d.", len(unMarshalXLMeta.Erasure.Distribution), len(fastjsonXLMeta.Erasure.Distribution))
 	} else {
 		for i := 0; i < len(unMarshalXLMeta.Erasure.Distribution); i++ {
-			if unMarshalXLMeta.Erasure.Distribution[i] != gjsonXLMeta.Erasure.Distribution[i] {
-				t.Errorf("Expected the Erasure Distribution to be %d, got %d.", unMarshalXLMeta.Erasure.Distribution[i], gjsonXLMeta.Erasure.Distribution[i])
+			if unMarshalXLMeta.Erasure.Distribution[i] != fastjsonXLMeta.Erasure.Distribution[i] {
+				t.Errorf("Expected the Erasure Distribution to be %d, got %d.", unMarshalXLMeta.Erasure.Distribution[i], fastjsonXLMeta.Erasure.Distribution[i])
 			}
 		}
 	}

-	if len(unMarshalXLMeta.Erasure.Checksums) != len(gjsonXLMeta.Erasure.Checksums) {
-		t.Errorf("Expected the size of Erasure Checksums to be %d, but got %d.", len(unMarshalXLMeta.Erasure.Checksums), len(gjsonXLMeta.Erasure.Checksums))
+	if len(unMarshalXLMeta.Erasure.Checksums) != len(fastjsonXLMeta.Erasure.Checksums) {
+		t.Errorf("Expected the size of Erasure Checksums to be %d, but got %d.", len(unMarshalXLMeta.Erasure.Checksums), len(fastjsonXLMeta.Erasure.Checksums))
 	} else {
 		for i := 0; i < len(unMarshalXLMeta.Erasure.Checksums); i++ {
-			if unMarshalXLMeta.Erasure.Checksums[i].Name != gjsonXLMeta.Erasure.Checksums[i].Name {
-				t.Errorf("Expected the Erasure Checksum Name to be \"%s\", got \"%s\".", unMarshalXLMeta.Erasure.Checksums[i].Name, gjsonXLMeta.Erasure.Checksums[i].Name)
+			if unMarshalXLMeta.Erasure.Checksums[i].Name != fastjsonXLMeta.Erasure.Checksums[i].Name {
+				t.Errorf("Expected the Erasure Checksum Name to be \"%s\", got \"%s\".", unMarshalXLMeta.Erasure.Checksums[i].Name, fastjsonXLMeta.Erasure.Checksums[i].Name)
 			}
-			if unMarshalXLMeta.Erasure.Checksums[i].Algorithm != gjsonXLMeta.Erasure.Checksums[i].Algorithm {
-				t.Errorf("Expected the Erasure Checksum Algorithm to be \"%s\", got \"%s\".", unMarshalXLMeta.Erasure.Checksums[i].Algorithm, gjsonXLMeta.Erasure.Checksums[i].Algorithm)
+			if unMarshalXLMeta.Erasure.Checksums[i].Algorithm != fastjsonXLMeta.Erasure.Checksums[i].Algorithm {
+				t.Errorf("Expected the Erasure Checksum Algorithm to be \"%s\", got \"%s\".", unMarshalXLMeta.Erasure.Checksums[i].Algorithm, fastjsonXLMeta.Erasure.Checksums[i].Algorithm)
 			}
-			if !bytes.Equal(unMarshalXLMeta.Erasure.Checksums[i].Hash, gjsonXLMeta.Erasure.Checksums[i].Hash) {
-				t.Errorf("Expected the Erasure Checksum Hash to be \"%s\", got \"%s\".", unMarshalXLMeta.Erasure.Checksums[i].Hash, gjsonXLMeta.Erasure.Checksums[i].Hash)
+			if !bytes.Equal(unMarshalXLMeta.Erasure.Checksums[i].Hash, fastjsonXLMeta.Erasure.Checksums[i].Hash) {
+				t.Errorf("Expected the Erasure Checksum Hash to be \"%s\", got \"%s\".", unMarshalXLMeta.Erasure.Checksums[i].Hash, fastjsonXLMeta.Erasure.Checksums[i].Hash)
 			}
 		}
 	}

-	if unMarshalXLMeta.Minio.Release != gjsonXLMeta.Minio.Release {
-		t.Errorf("Expected the Release string to be \"%s\", but got \"%s\".", unMarshalXLMeta.Minio.Release, gjsonXLMeta.Minio.Release)
+	if unMarshalXLMeta.Minio.Release != fastjsonXLMeta.Minio.Release {
+		t.Errorf("Expected the Release string to be \"%s\", but got \"%s\".", unMarshalXLMeta.Minio.Release, fastjsonXLMeta.Minio.Release)
 	}
-	if len(unMarshalXLMeta.Parts) != len(gjsonXLMeta.Parts) {
-		t.Errorf("Expected info of %d parts to be present, but got %d instead.", len(unMarshalXLMeta.Parts), len(gjsonXLMeta.Parts))
+	if len(unMarshalXLMeta.Parts) != len(fastjsonXLMeta.Parts) {
+		t.Errorf("Expected info of %d parts to be present, but got %d instead.", len(unMarshalXLMeta.Parts), len(fastjsonXLMeta.Parts))
 	} else {
 		for i := 0; i < len(unMarshalXLMeta.Parts); i++ {
-			if unMarshalXLMeta.Parts[i].Name != gjsonXLMeta.Parts[i].Name {
-				t.Errorf("Expected the name of part %d to be \"%s\", got \"%s\".", i+1, unMarshalXLMeta.Parts[i].Name, gjsonXLMeta.Parts[i].Name)
+			if unMarshalXLMeta.Parts[i].Name != fastjsonXLMeta.Parts[i].Name {
+				t.Errorf("Expected the name of part %d to be \"%s\", got \"%s\".", i+1, unMarshalXLMeta.Parts[i].Name, fastjsonXLMeta.Parts[i].Name)
 			}
-			if unMarshalXLMeta.Parts[i].ETag != gjsonXLMeta.Parts[i].ETag {
-				t.Errorf("Expected the ETag of part %d to be \"%s\", got \"%s\".", i+1, unMarshalXLMeta.Parts[i].ETag, gjsonXLMeta.Parts[i].ETag)
+			if unMarshalXLMeta.Parts[i].ETag != fastjsonXLMeta.Parts[i].ETag {
+				t.Errorf("Expected the ETag of part %d to be \"%s\", got \"%s\".", i+1, unMarshalXLMeta.Parts[i].ETag, fastjsonXLMeta.Parts[i].ETag)
 			}
-			if unMarshalXLMeta.Parts[i].Number != gjsonXLMeta.Parts[i].Number {
-				t.Errorf("Expected the number of part %d to be \"%d\", got \"%d\".", i+1, unMarshalXLMeta.Parts[i].Number, gjsonXLMeta.Parts[i].Number)
+			if unMarshalXLMeta.Parts[i].Number != fastjsonXLMeta.Parts[i].Number {
+				t.Errorf("Expected the number of part %d to be \"%d\", got \"%d\".", i+1, unMarshalXLMeta.Parts[i].Number, fastjsonXLMeta.Parts[i].Number)
 			}
-			if unMarshalXLMeta.Parts[i].Size != gjsonXLMeta.Parts[i].Size {
-				t.Errorf("Expected the size of part %d to be %v, got %v.", i+1, unMarshalXLMeta.Parts[i].Size, gjsonXLMeta.Parts[i].Size)
+			if unMarshalXLMeta.Parts[i].Size != fastjsonXLMeta.Parts[i].Size {
+				t.Errorf("Expected the size of part %d to be %v, got %v.", i+1, unMarshalXLMeta.Parts[i].Size, fastjsonXLMeta.Parts[i].Size)
 			}
 		}
 	}

 	for key, val := range unMarshalXLMeta.Meta {
-		gjsonVal, exists := gjsonXLMeta.Meta[key]
+		fastjsonVal, exists := fastjsonXLMeta.Meta[key]
 		if !exists {
 			t.Errorf("No meta data entry for Key \"%s\" exists.", key)
 		}
-		if val != gjsonVal {
-			t.Errorf("Expected the value for Meta data key \"%s\" to be \"%s\", but got \"%s\".", key, val, gjsonVal)
+		if val != fastjsonVal {
+			t.Errorf("Expected the value for Meta data key \"%s\" to be \"%s\", but got \"%s\".", key, val, fastjsonVal)
 		}

 	}
 }

-// Tests the correctness of constructing XLMetaV1 using gjson lib.
+// Tests the correctness of constructing XLMetaV1 using fastjson lib.
 // The result will be compared with the result obtained from json.unMarshal of the byte data.
-func TestGetXLMetaV1GJson1(t *testing.T) {
+func TestGetXLMetaV1Fastjson1(t *testing.T) {
 	xlMetaJSON := getXLMetaBytes(1)

 	var unMarshalXLMeta xlMetaV1
@@ -312,16 +312,16 @@ func TestGetXLMetaV1GJson1(t *testing.T) {
 		t.Errorf("Unmarshalling failed: %v", err)
 	}

-	gjsonXLMeta, err := xlMetaV1UnmarshalJSON(context.Background(), xlMetaJSON)
+	fastjsonXLMeta, err := xlMetaV1UnmarshalJSON(context.Background(), xlMetaJSON)
 	if err != nil {
-		t.Errorf("gjson parsing of XLMeta failed: %v", err)
+		t.Errorf("fastjson parsing of XLMeta failed: %v", err)
 	}
-	compareXLMetaV1(t, unMarshalXLMeta, gjsonXLMeta)
+	compareXLMetaV1(t, unMarshalXLMeta, fastjsonXLMeta)
 }

-// Tests the correctness of constructing XLMetaV1 using gjson lib for XLMetaV1 of size 10 parts.
+// Tests the correctness of constructing XLMetaV1 using fastjson lib for XLMetaV1 of size 10 parts.
 // The result will be compared with the result obtained from json.unMarshal of the byte data.
-func TestGetXLMetaV1GJson10(t *testing.T) {
+func TestGetXLMetaV1Fastjson10(t *testing.T) {

 	xlMetaJSON := getXLMetaBytes(10)

@@ -329,11 +329,11 @@ func TestGetXLMetaV1GJson10(t *testing.T) {
 	if err := json.Unmarshal(xlMetaJSON, &unMarshalXLMeta); err != nil {
 		t.Errorf("Unmarshalling failed: %v", err)
 	}
-	gjsonXLMeta, err := xlMetaV1UnmarshalJSON(context.Background(), xlMetaJSON)
+	fastjsonXLMeta, err := xlMetaV1UnmarshalJSON(context.Background(), xlMetaJSON)
 	if err != nil {
-		t.Errorf("gjson parsing of XLMeta failed: %v", err)
+		t.Errorf("fastjson parsing of XLMeta failed: %v", err)
 	}
-	compareXLMetaV1(t, unMarshalXLMeta, gjsonXLMeta)
+	compareXLMetaV1(t, unMarshalXLMeta, fastjsonXLMeta)
 }

 // Test the predicted part size from the part index

go.mod (4 changed lines)
@@ -42,7 +42,6 @@ require (
 	github.com/hashicorp/raft v1.1.0 // indirect
 	github.com/hashicorp/vault v1.1.0
 	github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf
-	github.com/json-iterator/go v1.1.6
 	github.com/klauspost/compress v1.5.0 // indirect
 	github.com/klauspost/cpuid v1.2.1 // indirect
 	github.com/klauspost/pgzip v1.2.1
@@ -66,8 +65,6 @@ require (
 	github.com/minio/sha256-simd v0.1.0
 	github.com/minio/sio v0.2.0
 	github.com/mitchellh/go-homedir v1.1.0
-	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v1.0.1 // indirect
 	github.com/nats-io/gnatsd v1.4.1 // indirect
 	github.com/nats-io/go-nats-streaming v0.4.4 // indirect
 	github.com/nats-io/nats-server v1.4.1 // indirect
@@ -91,6 +88,7 @@ require (
 	github.com/tidwall/match v1.0.1 // indirect
 	github.com/tidwall/pretty v1.0.0 // indirect
 	github.com/tidwall/sjson v1.0.4
+	github.com/valyala/fastjson v1.4.1
 	github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a
 	go.etcd.io/bbolt v1.3.3 // indirect
 	go.uber.org/atomic v1.3.2
go.sum (2 changed lines)
@@ -610,6 +610,8 @@ github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJ
 github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=
 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
 github.com/ugorji/go/codec v0.0.0-20190320090025-2dc34c0b8780/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA=
+github.com/valyala/fastjson v1.4.1 h1:hrltpHpIpkaxll8QltMU8c3QZ5+qIiCL8yKqPFJI/yE=
+github.com/valyala/fastjson v1.4.1/go.mod h1:nV6MsjxL2IMJQUoHDIrjEI7oLyeqK6aBD7EFWPsvP8o=
 github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc=
 github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=