Refactor s3select to support parquet. (#7023)

Also handle pretty formatted JSON documents.
This commit is contained in:
Bala FA
2019-01-09 06:23:04 +05:30
committed by kannappanr
parent e98d89274f
commit b0deea27df
124 changed files with 27376 additions and 4152 deletions

View File

@@ -1,87 +0,0 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ioutil
import (
"bufio"
"io"
)
// Byte values the reader substitutes/recognizes while normalizing
// record delimiters to newline.
var (
	nByte byte = 10 // the byte that corresponds to the '\n' rune.
	rByte byte = 13 // the byte that corresponds to the '\r' rune.
)
// DelimitedReader reduces the custom delimiter to `\n`.
type DelimitedReader struct {
r *bufio.Reader
delimiter []rune // Select can have upto 2 characters as delimiter.
assignEmpty bool // Decides whether the next read byte should be discarded.
}
// NewDelimitedReader detects the custom delimiter and replaces with `\n`.
func NewDelimitedReader(r io.Reader, delimiter []rune) *DelimitedReader {
return &DelimitedReader{r: bufio.NewReader(r), delimiter: delimiter, assignEmpty: false}
}
// Read reads from the wrapped reader and rewrites occurrences of the
// configured record delimiter to `\n` in place, so downstream CSV
// parsing can always assume newline-delimited records.
//
// NOTE(review): the loop below walks ALL of p, not just the n bytes
// actually read — bytes past n may also be rewritten, and removed
// delimiter bytes leave NUL padding at the end of the chunk. Callers
// appear to compensate by passing freshly zeroed chunks and stripping
// NULs afterwards (see the companion test); confirm before reusing
// buffers with this reader.
func (r *DelimitedReader) Read(p []byte) (n int, err error) {
	n, err = r.r.Read(p)
	if err != nil {
		return
	}
	for i, b := range p {
		if r.assignEmpty {
			// Previous byte was the first half of a two-rune delimiter
			// that straddled a chunk boundary: discard this byte.
			swapAndNullify(p, i)
			r.assignEmpty = false
			continue
		}
		if b == rByte && rune(b) != r.delimiter[0] {
			// Replace the carriage returns with `\n`.
			// Mac styled csv will have `\r` as their record delimiter.
			p[i] = nByte
		} else if rune(b) == r.delimiter[0] { // Eg, `\r\n`,`ab`,`a` are valid delimiters
			if i+1 == len(p) && len(r.delimiter) > 1 {
				// If the first delimiter match falls on the boundary,
				// peek the next byte and if it matches, discard it in the next byte read.
				if nextByte, nerr := r.r.Peek(1); nerr == nil {
					if rune(nextByte[0]) == r.delimiter[1] {
						p[i] = nByte
						// To discard in the next read.
						r.assignEmpty = true
					}
				}
			} else if len(r.delimiter) > 1 && rune(p[i+1]) == r.delimiter[1] {
				// The second delimiter falls in the same chunk.
				p[i] = nByte
				r.assignEmpty = true
			} else if len(r.delimiter) == 1 {
				// Replace with `\n` in case of a single-character delimiter match.
				p[i] = nByte
			}
		}
	}
	return
}
// swapAndNullify shifts every byte after position n one slot to the
// left (overwriting p[n]) and zeroes the final byte of p.
func swapAndNullify(p []byte, n int) {
	if n < len(p) {
		// copy is safe here: n < len(p) implies n+1 <= len(p).
		copy(p[n:], p[n+1:])
	}
	p[len(p)-1] = 0
}

View File

@@ -1,83 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ioutil
import (
"bytes"
"io"
"strings"
"testing"
)
// Test for DelimitedCSVReader.
// Each case feeds the same logical CSV through a different record
// delimiter and chunk size; the delimiter-reduced output must match
// the newline-delimited expectation after NUL padding is stripped.
func TestDelimitedReader(t *testing.T) {
	expected := "username,age\nbanana,12\ncarrot,23\napple,34\nbrinjal,90\nraddish,45"
	inputs := []struct {
		inputcsv  string
		delimiter string
		chunkSize int
	}{
		// case 1 - with default `\n` delimiter.
		{"username,age\nbanana,12\ncarrot,23\napple,34\nbrinjal,90\nraddish,45", "\n", 10},
		// case 2 - with carriage return `\r` which should be replaced with `\n` by default.
		{"username,age\rbanana,12\rcarrot,23\rapple,34\rbrinjal,90\rraddish,45", "\n", 10},
		// case 3 - with a double character delimiter (octals).
		{"username,age\r\nbanana,12\r\ncarrot,23\r\napple,34\r\nbrinjal,90\r\nraddish,45", "\r\n", 10},
		// case 4 - with a double character delimiter.
		{"username,agexvbanana,12xvcarrot,23xvapple,34xvbrinjal,90xvraddish,45", "xv", 10},
		// case 5 - with a double character delimiter `\t `
		{"username,age\t banana,12\t carrot,23\t apple,34\t brinjal,90\t raddish,45", "\t ", 10},
		// case 6 - This is a special case where the first delimiter match falls in the 13'th byte space
		// ie, the last byte space of the read chunk, In this case the reader should peek in the next byte
		// and replace with `\n`.
		{"username,agexxbanana,12xxcarrot,23xxapple,34xxbrinjal,90xxraddish,45", "xx", 13},
	}
	for c, input := range inputs {
		var readcsv []byte
		var err error
		delimitedReader := NewDelimitedReader(strings.NewReader(input.inputcsv), []rune(input.delimiter))
		for err == nil {
			// A fresh zeroed chunk every iteration: delimiter reduction
			// leaves NUL padding that removeNulls strips below.
			chunk := make([]byte, input.chunkSize)
			_, err = delimitedReader.Read(chunk)
			readcsv = append(readcsv, chunk...)
		}
		if err != io.EOF {
			t.Fatalf("Case %d: Error in delimited read", c+1)
		}
		expected := []byte(expected)
		cleanCsv := removeNulls(readcsv)
		if !bytes.Equal(cleanCsv, expected) {
			t.Fatalf("Case %d: Expected the delimited csv to be `%s`, but instead found `%s`", c+1, string(expected), string(cleanCsv))
		}
	}
}
// removeNulls drops every NUL byte from csv and returns the result.
// Null bytes are padding left behind when a multi-byte delimiter is
// reduced to a single `\n` (e.g. `xv` -> `\n` nullifies the last byte).
func removeNulls(csv []byte) []byte {
	cleaned := make([]byte, 0, len(csv))
	for _, b := range csv {
		if b == 0 {
			continue
		}
		cleaned = append(cleaned, b)
	}
	return cleaned
}

190
pkg/s3select/csv/args.go Normal file
View File

@@ -0,0 +1,190 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package csv
import (
"encoding/xml"
"fmt"
"strings"
)
const (
	// Recognized FileHeaderInfo values (compared lower-cased).
	none   = "none"
	use    = "use"
	ignore = "ignore"

	// Defaults applied when the corresponding XML element is empty.
	defaultRecordDelimiter      = "\n"
	defaultFieldDelimiter       = ","
	defaultQuoteCharacter       = `"`
	defaultQuoteEscapeCharacter = `"`
	defaultCommentCharacter     = "#"

	// Recognized QuoteFields values (compared lower-cased).
	always   = "always"
	asneeded = "asneeded"
)
// ReaderArgs - represents elements inside <InputSerialization><CSV> in request XML.
type ReaderArgs struct {
	FileHeaderInfo             string `xml:"FileHeaderInfo"`       // none, use or ignore (lower-cased on unmarshal)
	RecordDelimiter            string `xml:"RecordDelimiter"`      // 1-2 characters; defaults to "\n"
	FieldDelimiter             string `xml:"FieldDelimiter"`       // single character; defaults to ","
	QuoteCharacter             string `xml:"QuoteCharacter"`       // only `"` is supported
	QuoteEscapeCharacter       string `xml:"QuoteEscapeCharacter"` // only `"` is supported
	CommentCharacter           string `xml:"Comments"`             // only "#" is supported
	AllowQuotedRecordDelimiter bool   `xml:"AllowQuotedRecordDelimiter"`
	unmarshaled                bool // set by UnmarshalXML on success; drives IsEmpty
}
// IsEmpty - returns whether reader args is empty or not.
// It reports true until UnmarshalXML has successfully populated args.
func (args *ReaderArgs) IsEmpty() bool {
	if args.unmarshaled {
		return false
	}
	return true
}
// UnmarshalXML - decodes XML data.
//
// Validates and normalizes each element of <InputSerialization><CSV>:
// empty elements receive their defaults, FileHeaderInfo is lower-cased,
// and unsupported values are rejected with an error. On success args is
// marked unmarshaled so IsEmpty() reports false.
func (args *ReaderArgs) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	// Make subtype to avoid recursive UnmarshalXML().
	type subReaderArgs ReaderArgs
	parsedArgs := subReaderArgs{}
	if err := d.DecodeElement(&parsedArgs, &start); err != nil {
		return err
	}

	// FileHeaderInfo must be one of NONE, USE or IGNORE (case-insensitive).
	parsedArgs.FileHeaderInfo = strings.ToLower(parsedArgs.FileHeaderInfo)
	switch parsedArgs.FileHeaderInfo {
	case none, use, ignore:
	default:
		return errInvalidFileHeaderInfo(fmt.Errorf("invalid FileHeaderInfo '%v'", parsedArgs.FileHeaderInfo))
	}

	// Record delimiter may be one or two characters (e.g. "\r\n").
	switch len(parsedArgs.RecordDelimiter) {
	case 0:
		parsedArgs.RecordDelimiter = defaultRecordDelimiter
	case 1, 2:
	default:
		return fmt.Errorf("invalid RecordDelimiter '%v'", parsedArgs.RecordDelimiter)
	}

	// Field delimiter must be a single character.
	switch len(parsedArgs.FieldDelimiter) {
	case 0:
		parsedArgs.FieldDelimiter = defaultFieldDelimiter
	case 1:
	default:
		return fmt.Errorf("invalid FieldDelimiter '%v'", parsedArgs.FieldDelimiter)
	}

	// Only the default double-quote character is supported.
	switch parsedArgs.QuoteCharacter {
	case "":
		parsedArgs.QuoteCharacter = defaultQuoteCharacter
	case defaultQuoteCharacter:
	default:
		return fmt.Errorf("unsupported QuoteCharacter '%v'", parsedArgs.QuoteCharacter)
	}

	// Only the default double-quote escape character is supported.
	switch parsedArgs.QuoteEscapeCharacter {
	case "":
		parsedArgs.QuoteEscapeCharacter = defaultQuoteEscapeCharacter
	case defaultQuoteEscapeCharacter:
	default:
		return fmt.Errorf("unsupported QuoteEscapeCharacter '%v'", parsedArgs.QuoteEscapeCharacter)
	}

	// Only "#" is supported as comment character.
	switch parsedArgs.CommentCharacter {
	case "":
		parsedArgs.CommentCharacter = defaultCommentCharacter
	case defaultCommentCharacter:
	default:
		return fmt.Errorf("unsupported Comments '%v'", parsedArgs.CommentCharacter)
	}

	if parsedArgs.AllowQuotedRecordDelimiter {
		return fmt.Errorf("flag AllowQuotedRecordDelimiter is unsupported at the moment")
	}

	*args = ReaderArgs(parsedArgs)
	args.unmarshaled = true
	return nil
}
// WriterArgs - represents elements inside <OutputSerialization><CSV/> in request XML.
type WriterArgs struct {
	QuoteFields          string `xml:"QuoteFields"`          // always or asneeded (lower-cased on unmarshal)
	RecordDelimiter      string `xml:"RecordDelimiter"`      // 1-2 characters; defaults to "\n"
	FieldDelimiter       string `xml:"FieldDelimiter"`       // single character; defaults to ","
	QuoteCharacter       string `xml:"QuoteCharacter"`       // only `"` is supported
	QuoteEscapeCharacter string `xml:"QuoteEscapeCharacter"` // only `"` is supported
	unmarshaled          bool // set by UnmarshalXML on success; drives IsEmpty
}
// IsEmpty - returns whether writer args is empty or not.
// It reports true until UnmarshalXML has successfully populated args.
func (args *WriterArgs) IsEmpty() bool {
	if args.unmarshaled {
		return false
	}
	return true
}
// UnmarshalXML - decodes XML data.
//
// Validates and normalizes each element of <OutputSerialization><CSV>:
// empty elements receive their defaults, QuoteFields is lower-cased,
// and unsupported values are rejected with an error. On success args is
// marked unmarshaled so IsEmpty() reports false.
func (args *WriterArgs) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	// Decode into an alias type so this method is not invoked recursively.
	type rawWriterArgs WriterArgs
	var raw rawWriterArgs
	if err := d.DecodeElement(&raw, &start); err != nil {
		return err
	}

	// QuoteFields is matched case-insensitively; defaults to asneeded.
	raw.QuoteFields = strings.ToLower(raw.QuoteFields)
	if raw.QuoteFields == "" {
		raw.QuoteFields = asneeded
	} else if raw.QuoteFields != always && raw.QuoteFields != asneeded {
		return errInvalidQuoteFields(fmt.Errorf("invalid QuoteFields '%v'", raw.QuoteFields))
	}

	// Record delimiter may be one or two characters (e.g. "\r\n").
	if len(raw.RecordDelimiter) == 0 {
		raw.RecordDelimiter = defaultRecordDelimiter
	} else if len(raw.RecordDelimiter) > 2 {
		return fmt.Errorf("invalid RecordDelimiter '%v'", raw.RecordDelimiter)
	}

	// Field delimiter must be a single character.
	if len(raw.FieldDelimiter) == 0 {
		raw.FieldDelimiter = defaultFieldDelimiter
	} else if len(raw.FieldDelimiter) > 1 {
		return fmt.Errorf("invalid FieldDelimiter '%v'", raw.FieldDelimiter)
	}

	// Only the default double-quote character is supported.
	if raw.QuoteCharacter == "" {
		raw.QuoteCharacter = defaultQuoteCharacter
	} else if raw.QuoteCharacter != defaultQuoteCharacter {
		return fmt.Errorf("unsupported QuoteCharacter '%v'", raw.QuoteCharacter)
	}

	// Only the default double-quote escape character is supported.
	if raw.QuoteEscapeCharacter == "" {
		raw.QuoteEscapeCharacter = defaultQuoteEscapeCharacter
	} else if raw.QuoteEscapeCharacter != defaultQuoteEscapeCharacter {
		return fmt.Errorf("unsupported QuoteEscapeCharacter '%v'", raw.QuoteEscapeCharacter)
	}

	*args = WriterArgs(raw)
	args.unmarshaled = true
	return nil
}

View File

@@ -0,0 +1,71 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package csv
// s3Error is a typed select error carrying the S3 error code, a
// human-readable message, the HTTP status to respond with, and the
// underlying cause.
type s3Error struct {
	code       string
	message    string
	statusCode int
	cause      error
}

// Cause returns the wrapped underlying error.
func (e *s3Error) Cause() error { return e.cause }

// ErrorCode returns the S3 error code.
func (e *s3Error) ErrorCode() string { return e.code }

// ErrorMessage returns the human-readable message.
func (e *s3Error) ErrorMessage() string { return e.message }

// HTTPStatusCode returns the HTTP status code to respond with.
func (e *s3Error) HTTPStatusCode() int { return e.statusCode }

// Error implements the error interface.
func (e *s3Error) Error() string { return e.message }

// errInvalidFileHeaderInfo wraps err as an InvalidFileHeaderInfo error.
func errInvalidFileHeaderInfo(err error) *s3Error {
	e := s3Error{
		code:       "InvalidFileHeaderInfo",
		message:    "The FileHeaderInfo is invalid. Only NONE, USE, and IGNORE are supported.",
		statusCode: 400,
		cause:      err,
	}
	return &e
}

// errInvalidQuoteFields wraps err as an InvalidQuoteFields error.
func errInvalidQuoteFields(err error) *s3Error {
	e := s3Error{
		code:       "InvalidQuoteFields",
		message:    "The QuoteFields is invalid. Only ALWAYS and ASNEEDED are supported.",
		statusCode: 400,
		cause:      err,
	}
	return &e
}

// errCSVParsingError wraps err as a CSVParsingError error.
func errCSVParsingError(err error) *s3Error {
	e := s3Error{
		code:       "CSVParsingError",
		message:    "Encountered an error parsing the CSV file. Check the file and try again.",
		statusCode: 400,
		cause:      err,
	}
	return &e
}

166
pkg/s3select/csv/reader.go Normal file
View File

@@ -0,0 +1,166 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package csv
import (
"bytes"
"encoding/csv"
"fmt"
"io"
"github.com/minio/minio/pkg/s3select/sql"
)
type recordReader struct {
reader io.Reader
recordDelimiter []byte
oneByte []byte
useOneByte bool
}
func (rr *recordReader) Read(p []byte) (n int, err error) {
if rr.useOneByte {
p[0] = rr.oneByte[0]
rr.useOneByte = false
n, err = rr.reader.Read(p[1:])
n++
} else {
n, err = rr.reader.Read(p)
}
if err != nil {
return 0, err
}
if string(rr.recordDelimiter) == "\n" {
return n, nil
}
for {
i := bytes.Index(p, rr.recordDelimiter)
if i < 0 {
break
}
p[i] = '\n'
if len(rr.recordDelimiter) > 1 {
p = append(p[:i+1], p[i+len(rr.recordDelimiter):]...)
}
}
n = len(p)
if len(rr.recordDelimiter) == 1 || p[n-1] != rr.recordDelimiter[0] {
return n, nil
}
if _, err = rr.reader.Read(rr.oneByte); err != nil {
return 0, err
}
if rr.oneByte[0] == rr.recordDelimiter[1] {
p[n-1] = '\n'
return n, nil
}
rr.useOneByte = true
return n, nil
}
// Reader - CSV record reader for S3Select.
type Reader struct {
	args        *ReaderArgs   // validated request arguments
	readCloser  io.ReadCloser // underlying object stream; closed by Close()
	csvReader   *csv.Reader   // parser over the delimiter-normalized stream
	columnNames []string      // header names when FileHeaderInfo is USE, else nil
}
// Read - reads single record.
// Parse errors are wrapped as CSVParsingError; io.EOF is passed through
// unchanged so callers can detect end of stream. When the file carries
// no header, positional names _1, _2, ... are synthesized.
func (r *Reader) Read() (sql.Record, error) {
	fields, err := r.csvReader.Read()
	if err != nil {
		if err == io.EOF {
			return nil, err
		}
		return nil, errCSVParsingError(err)
	}

	names := r.columnNames
	if names == nil {
		// Header-less CSV: synthesize positional column names.
		names = make([]string, len(fields))
		for i := range fields {
			names[i] = fmt.Sprintf("_%v", i+1)
		}
	}

	indexByName := make(map[string]int64, len(names))
	for i, name := range names {
		indexByName[name] = int64(i)
	}

	return &Record{
		columnNames:  names,
		csvRecord:    fields,
		nameIndexMap: indexByName,
	}, nil
}
// Close - closes underlaying reader.
// Only the wrapped source stream needs closing; the csv.Reader itself
// holds no resources.
func (r *Reader) Close() error {
	return r.readCloser.Close()
}
// NewReader - creates new CSV reader using readCloser.
//
// args must be non-nil and already unmarshaled; otherwise an error is
// returned (previously this panicked, which is inappropriate for a
// library entry point reachable from request handling — the signature
// already returns an error).
//
// When FileHeaderInfo is USE or IGNORE the first record is consumed
// here; with USE it also supplies the column names for Record.Get.
func NewReader(readCloser io.ReadCloser, args *ReaderArgs) (*Reader, error) {
	if args == nil || args.IsEmpty() {
		return nil, fmt.Errorf("empty args passed %v", args)
	}

	csvReader := csv.NewReader(&recordReader{
		reader:          readCloser,
		recordDelimiter: []byte(args.RecordDelimiter),
		oneByte:         []byte{0},
	})
	csvReader.Comma = []rune(args.FieldDelimiter)[0]
	csvReader.Comment = []rune(args.CommentCharacter)[0]
	// Rows may carry varying numbers of fields.
	csvReader.FieldsPerRecord = -1

	r := &Reader{
		args:       args,
		readCloser: readCloser,
		csvReader:  csvReader,
	}

	if args.FileHeaderInfo == none {
		return r, nil
	}

	// Consume the header line (USE or IGNORE).
	record, err := csvReader.Read()
	if err != nil {
		if err != io.EOF {
			return nil, errCSVParsingError(err)
		}
		return nil, err
	}
	if args.FileHeaderInfo == use {
		r.columnNames = record
	}
	return r, nil
}

View File

@@ -0,0 +1,95 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package csv
import (
"bytes"
"encoding/csv"
"fmt"
"github.com/minio/minio/pkg/s3select/sql"
"github.com/tidwall/sjson"
)
// Record - is CSV record.
type Record struct {
	columnNames  []string         // column names, parallel to csvRecord
	csvRecord    []string         // raw field values of this row
	nameIndexMap map[string]int64 // column name -> index into csvRecord
}
// Get - gets the value for a column name.
// An unknown column name is an error; a known name whose row is short
// (fewer fields than columns) yields an empty string for compatibility.
func (r *Record) Get(name string) (*sql.Value, error) {
	idx, ok := r.nameIndexMap[name]
	if !ok {
		return nil, fmt.Errorf("column %v not found", name)
	}
	if idx >= int64(len(r.csvRecord)) {
		// No value found for column 'name', hence return empty string for compatibility.
		return sql.NewString(""), nil
	}
	return sql.NewString(r.csvRecord[idx]), nil
}
// Set - sets the value for a column name.
//
// The column is appended and also registered in nameIndexMap so a
// subsequent Get for the same name finds the newly added value
// (previously the index map was never updated, so Get could not see a
// column added via Set). The map is lazily allocated because records
// created by NewRecord() start with a nil map.
func (r *Record) Set(name string, value *sql.Value) error {
	r.columnNames = append(r.columnNames, name)
	r.csvRecord = append(r.csvRecord, value.CSVString())
	if r.nameIndexMap == nil {
		r.nameIndexMap = make(map[string]int64)
	}
	r.nameIndexMap[name] = int64(len(r.csvRecord) - 1)
	return nil
}
// MarshalCSV - encodes to CSV data.
// The trailing newline appended by csv.Writer is stripped from the result.
func (r *Record) MarshalCSV(fieldDelimiter rune) ([]byte, error) {
	var buf bytes.Buffer
	writer := csv.NewWriter(&buf)
	writer.Comma = fieldDelimiter
	if err := writer.Write(r.csvRecord); err != nil {
		return nil, err
	}
	writer.Flush()
	if err := writer.Error(); err != nil {
		return nil, err
	}
	out := buf.Bytes()
	// Drop the record terminator csv.Writer always appends.
	return out[:len(out)-1], nil
}
// MarshalJSON - encodes to JSON data.
// Columns are applied in reverse order; since later sjson.Set calls
// overwrite earlier ones, the first occurrence of a duplicate column
// name presumably wins — behavior preserved from the original.
func (r *Record) MarshalJSON() ([]byte, error) {
	doc := "{}"
	for i := len(r.columnNames) - 1; i >= 0; i-- {
		if i >= len(r.csvRecord) {
			// Short row: no value for this column.
			continue
		}
		var err error
		doc, err = sjson.Set(doc, r.columnNames[i], r.csvRecord[i])
		if err != nil {
			return nil, err
		}
	}
	return []byte(doc), nil
}
// NewRecord - creates new CSV record.
// The zero Record is returned; columns are added later via Set.
func NewRecord() *Record {
	var record Record
	return &record
}

View File

@@ -1,110 +0,0 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3select
import (
"encoding/xml"
)
// CSVFileHeaderInfo - Can be either USE, IGNORE or NONE; defines what to do with
// the first row.
type CSVFileHeaderInfo string

// Constants for file header info.
// Every constant carries an explicit type: in the previous version only
// the first constant in each block was typed, leaving the rest as plain
// untyped string constants (a classic Go const-block pitfall).
const (
	CSVFileHeaderInfoNone   CSVFileHeaderInfo = "NONE"
	CSVFileHeaderInfoIgnore CSVFileHeaderInfo = "IGNORE"
	CSVFileHeaderInfoUse    CSVFileHeaderInfo = "USE"
)

// The maximum character per record is set to be 1 MB.
const (
	MaxCharsPerRecord = 1000000
)

// SelectCompressionType - ONLY GZIP is supported
type SelectCompressionType string

// JSONType determines json input serialization type.
type JSONType string

// Constants for compression types under select API.
const (
	SelectCompressionNONE SelectCompressionType = "NONE"
	SelectCompressionGZIP SelectCompressionType = "GZIP"
	SelectCompressionBZIP SelectCompressionType = "BZIP2"
)

// CSVQuoteFields - Can be either Always or AsNeeded
type CSVQuoteFields string

// Constants for csv quote styles.
const (
	CSVQuoteFieldsAlways   CSVQuoteFields = "Always"
	CSVQuoteFieldsAsNeeded CSVQuoteFields = "AsNeeded"
)

// QueryExpressionType - Currently can only be SQL
type QueryExpressionType string

// Constants for expression type.
const (
	QueryExpressionTypeSQL QueryExpressionType = "SQL"
)

// Constants for JSONTypes.
const (
	JSONTypeDocument JSONType = "DOCUMENT"
	JSONLinesType    JSONType = "LINES"
)
// ObjectSelectRequest - represents the input select body
type ObjectSelectRequest struct {
	XMLName        xml.Name `xml:"SelectObjectContentRequest" json:"-"`
	Expression     string              // SQL expression to evaluate
	ExpressionType QueryExpressionType // only "SQL" is supported
	// InputSerialization describes the stored object's format; only one
	// of Parquet, CSV or JSON may be set (see ErrObjectSerializationConflict).
	InputSerialization struct {
		CompressionType SelectCompressionType
		Parquet         *struct{} // presence selects parquet input
		CSV             *struct {
			FileHeaderInfo       CSVFileHeaderInfo
			RecordDelimiter      string
			FieldDelimiter       string
			QuoteCharacter       string
			QuoteEscapeCharacter string
			Comments             string
		}
		JSON *struct {
			Type JSONType // DOCUMENT or LINES
		}
	}
	// OutputSerialization describes how matched records are returned.
	OutputSerialization struct {
		CSV *struct {
			QuoteFields          CSVQuoteFields
			RecordDelimiter      string
			FieldDelimiter       string
			QuoteCharacter       string
			QuoteEscapeCharacter string
		}
		JSON *struct {
			RecordDelimiter string
		}
	}
	// RequestProgress enables periodic progress events when true.
	RequestProgress struct {
		Enabled bool
	}
}

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,456 +16,111 @@
package s3select
import (
"errors"
"github.com/minio/minio/pkg/s3select/format"
)
//S3 errors below
// ErrBusy is an error if the service is too busy.
var ErrBusy = errors.New("The service is unavailable. Please retry")
// ErrUnauthorizedAccess is an error if you lack the appropriate credentials to
// access the object.
var ErrUnauthorizedAccess = errors.New("You are not authorized to perform this operation")
// ErrExpressionTooLong is an error if your SQL expression too long for
// processing.
var ErrExpressionTooLong = errors.New("The SQL expression is too long: The maximum byte-length for the SQL expression is 256 KB")
// ErrIllegalSQLFunctionArgument is an error if you provide an illegal argument
// in the SQL function.
var ErrIllegalSQLFunctionArgument = errors.New("Illegal argument was used in the SQL function")
// ErrInvalidKeyPath is an error if you provide a key in the SQL expression that
// is invalid.
var ErrInvalidKeyPath = errors.New("Key path in the SQL expression is invalid")
// ErrColumnTooLong is an error if your query results in a column that is
// greater than the max amount of characters per column of 1mb
var ErrColumnTooLong = errors.New("The length of a column in the result is greater than maxCharsPerColumn of 1 MB")
// ErrOverMaxColumn is an error if the number of columns from the resulting
// query is greater than 1Mb.
var ErrOverMaxColumn = errors.New("The number of columns in the result is greater than maxColumnNumber of 1 MB")
// ErrOverMaxRecordSize is an error if the length of a record in the result is
// greater than 1 Mb.
var ErrOverMaxRecordSize = errors.New("The length of a record in the result is greater than maxCharsPerRecord of 1 MB")
// ErrMissingHeaders is an error if some of the headers that are requested in
// the Select Query are not present in the file.
var ErrMissingHeaders = errors.New("Some headers in the query are missing from the file. Check the file and try again")
// ErrInvalidCompressionFormat is an error if an unsupported compression type is
// utilized with the select object query.
var ErrInvalidCompressionFormat = errors.New("The file is not in a supported compression format. Only GZIP is supported at this time")
// ErrInvalidFileHeaderInfo is an error if the argument provided to the
// FileHeader Argument is incorrect.
var ErrInvalidFileHeaderInfo = errors.New("The FileHeaderInfo is invalid. Only NONE, USE, and IGNORE are supported")
// ErrInvalidJSONType is an error if the json format provided as an argument is
// invalid.
var ErrInvalidJSONType = errors.New("The JsonType is invalid. Only DOCUMENT and LINES are supported at this time")
// ErrInvalidQuoteFields is an error if the arguments provided to the
// QuoteFields options are not valid.
var ErrInvalidQuoteFields = errors.New("The QuoteFields is invalid. Only ALWAYS and ASNEEDED are supported")
// ErrInvalidRequestParameter is an error if the value of a parameter in the
// request element is not valid.
var ErrInvalidRequestParameter = errors.New("The value of a parameter in Request element is invalid. Check the service API documentation and try again")
// ErrExternalEvalException is an error that arises if the query can not be
// evaluated.
var ErrExternalEvalException = errors.New("The query cannot be evaluated. Check the file and try again")
// ErrInvalidDataType is an error that occurs if the SQL expression contains an
// invalid data type.
var ErrInvalidDataType = errors.New("The SQL expression contains an invalid data type")
// ErrUnrecognizedFormatException is an error that arises if there is an invalid
// record type.
var ErrUnrecognizedFormatException = errors.New("Encountered an invalid record type")
// ErrInvalidTextEncoding is an error if the text encoding is not valid.
var ErrInvalidTextEncoding = errors.New("Invalid encoding type. Only UTF-8 encoding is supported at this time")
// ErrInvalidTableAlias is an error that arises if the table alias provided in
// the SQL expression is invalid.
var ErrInvalidTableAlias = errors.New("The SQL expression contains an invalid table alias")
// ErrMultipleDataSourcesUnsupported is an error that arises if multiple data
// sources are provided.
var ErrMultipleDataSourcesUnsupported = errors.New("Multiple data sources are not supported")
// ErrMissingRequiredParameter is an error that arises if a required argument
// is omitted from the Request.
var ErrMissingRequiredParameter = errors.New("The Request entity is missing a required parameter. Check the service documentation and try again")
// ErrObjectSerializationConflict is an error that arises if an unsupported
// output seralization is provided.
var ErrObjectSerializationConflict = errors.New("The Request entity can only contain one of CSV or JSON. Check the service documentation and try again")
// ErrUnsupportedSQLOperation is an error that arises if an unsupported SQL
// operation is used.
var ErrUnsupportedSQLOperation = errors.New("Encountered an unsupported SQL operation")
// ErrUnsupportedSQLStructure is an error that occurs if an unsupported SQL
// structure is used.
var ErrUnsupportedSQLStructure = errors.New("Encountered an unsupported SQL structure. Check the SQL Reference")
// ErrUnsupportedStorageClass is an error that occurs if an invalid storace
// class is present.
var ErrUnsupportedStorageClass = errors.New("Encountered an invalid storage class. Only STANDARD, STANDARD_IA, and ONEZONE_IA storage classes are supported at this time")
// ErrUnsupportedSyntax is an error that occurs if invalid syntax is present in
// the query.
var ErrUnsupportedSyntax = errors.New("Encountered invalid syntax")
// ErrUnsupportedRangeHeader is an error that occurs if a range header is
// provided.
var ErrUnsupportedRangeHeader = errors.New("Range header is not supported for this operation")
// ErrLexerInvalidChar is an error that occurs if the SQL expression contains an
// invalid character.
var ErrLexerInvalidChar = errors.New("The SQL expression contains an invalid character")
// ErrLexerInvalidOperator is an error that occurs if an invalid operator is
// used.
var ErrLexerInvalidOperator = errors.New("The SQL expression contains an invalid operator")
// ErrLexerInvalidLiteral is an error that occurs if an invalid literal is used.
var ErrLexerInvalidLiteral = errors.New("The SQL expression contains an invalid literal")
// ErrLexerInvalidIONLiteral is an error that occurs if an invalid operator is
// used
var ErrLexerInvalidIONLiteral = errors.New("The SQL expression contains an invalid operator")
// ErrParseExpectedDatePart is an error that occurs if the date part is not
// found in the SQL expression.
var ErrParseExpectedDatePart = errors.New("Did not find the expected date part in the SQL expression")
// ErrParseExpectedKeyword is an error that occurs if the expected keyword was
// not found in the expression.
var ErrParseExpectedKeyword = errors.New("Did not find the expected keyword in the SQL expression")
// ErrParseExpectedTokenType is an error that occurs if the expected token is
// not found in the SQL expression.
var ErrParseExpectedTokenType = errors.New("Did not find the expected token in the SQL expression")
// ErrParseExpected2TokenTypes is an error that occurs if 2 token types are not
// found.
var ErrParseExpected2TokenTypes = errors.New("Did not find the expected token in the SQL expression")
// ErrParseExpectedNumber is an error that occurs if a number is expected but
// not found in the expression.
var ErrParseExpectedNumber = errors.New("Did not find the expected number in the SQL expression")
// ErrParseExpectedRightParenBuiltinFunctionCall is an error that occurs if a
// right parenthesis is missing.
var ErrParseExpectedRightParenBuiltinFunctionCall = errors.New("Did not find the expected right parenthesis character in the SQL expression")
// ErrParseExpectedTypeName is an error that occurs if a type name is expected
// but not found.
var ErrParseExpectedTypeName = errors.New("Did not find the expected type name in the SQL expression")
// ErrParseExpectedWhenClause is an error that occurs if a When clause is
// expected but not found.
var ErrParseExpectedWhenClause = errors.New("Did not find the expected WHEN clause in the SQL expression. CASE is not supported")
// ErrParseUnsupportedToken is an error that occurs if the SQL expression
// contains an unsupported token.
var ErrParseUnsupportedToken = errors.New("The SQL expression contains an unsupported token")
// ErrParseUnsupportedLiteralsGroupBy is an error that occurs if the SQL
// expression has an unsupported use of Group By.
var ErrParseUnsupportedLiteralsGroupBy = errors.New("The SQL expression contains an unsupported use of GROUP BY")
// ErrParseExpectedMember is an error that occurs if there is an unsupported use
// of member in the SQL expression.
var ErrParseExpectedMember = errors.New("The SQL expression contains an unsupported use of MEMBER")
// Sentinel errors returned while parsing and evaluating S3 Select SQL
// expressions. The message texts mirror the AWS S3 Select API error
// descriptions and must not be altered.
var (
	// ErrParseUnsupportedSelect - unsupported use of SELECT.
	ErrParseUnsupportedSelect = errors.New("The SQL expression contains an unsupported use of SELECT")
	// ErrParseUnsupportedCase - unsupported use of CASE.
	ErrParseUnsupportedCase = errors.New("The SQL expression contains an unsupported use of CASE")
	// ErrParseUnsupportedCaseClause - unsupported use of a CASE clause.
	ErrParseUnsupportedCaseClause = errors.New("The SQL expression contains an unsupported use of CASE")
	// ErrParseUnsupportedAlias - unsupported use of ALIAS.
	ErrParseUnsupportedAlias = errors.New("The SQL expression contains an unsupported use of ALIAS")
	// ErrParseUnsupportedSyntax - unsupported syntax in the expression.
	ErrParseUnsupportedSyntax = errors.New("The SQL expression contains unsupported syntax")
	// ErrParseUnknownOperator - invalid operator in the expression.
	ErrParseUnknownOperator = errors.New("The SQL expression contains an invalid operator")
	// ErrParseMissingIdentAfterAt - wrong symbol following "@".
	ErrParseMissingIdentAfterAt = errors.New("Did not find the expected identifier after the @ symbol in the SQL expression")
	// ErrParseUnexpectedOperator - unexpected operator in the expression.
	ErrParseUnexpectedOperator = errors.New("The SQL expression contains an unexpected operator")
	// ErrParseUnexpectedTerm - unexpected term in the expression.
	ErrParseUnexpectedTerm = errors.New("The SQL expression contains an unexpected term")
	// ErrParseUnexpectedToken - unexpected token in the expression.
	ErrParseUnexpectedToken = errors.New("The SQL expression contains an unexpected token")
	// ErrParseUnexpectedKeyword - unexpected keyword in the expression.
	ErrParseUnexpectedKeyword = errors.New("The SQL expression contains an unexpected keyword")
	// ErrParseExpectedExpression - expected SQL expression not found.
	ErrParseExpectedExpression = errors.New("Did not find the expected SQL expression")
	// ErrParseExpectedLeftParenAfterCast - missing "(" after CAST.
	ErrParseExpectedLeftParenAfterCast = errors.New("Did not find the expected left parenthesis after CAST in the SQL expression")
	// ErrParseExpectedLeftParenValueConstructor - missing "(" in a value constructor.
	ErrParseExpectedLeftParenValueConstructor = errors.New("Did not find expected the left parenthesis in the SQL expression")
	// ErrParseExpectedLeftParenBuiltinFunctionCall - missing "(" in a function call.
	ErrParseExpectedLeftParenBuiltinFunctionCall = errors.New("Did not find the expected left parenthesis in the SQL expression")
	// ErrParseExpectedArgumentDelimiter - missing argument delimiter.
	ErrParseExpectedArgumentDelimiter = errors.New("Did not find the expected argument delimiter in the SQL expression")
	// ErrParseCastArity - CAST called with the wrong number of arguments.
	ErrParseCastArity = errors.New("The SQL expression CAST has incorrect arity")
	// ErrParseInvalidTypeParam - invalid parameter value.
	ErrParseInvalidTypeParam = errors.New("The SQL expression contains an invalid parameter value")
	// ErrParseEmptySelect - SELECT with an empty select list.
	ErrParseEmptySelect = errors.New("The SQL expression contains an empty SELECT")
	// ErrParseSelectMissingFrom - missing FROM after the SELECT list.
	ErrParseSelectMissingFrom = errors.New("The SQL expression contains a missing FROM after SELECT list")
	// ErrParseExpectedIdentForGroupName - GROUP is not supported.
	ErrParseExpectedIdentForGroupName = errors.New("GROUP is not supported in the SQL expression")
	// ErrParseExpectedIdentForAlias - missing identifier for an alias.
	ErrParseExpectedIdentForAlias = errors.New("Did not find the expected identifier for the alias in the SQL expression")
	// ErrParseUnsupportedCallWithStar - COUNT used with an argument other than "*".
	ErrParseUnsupportedCallWithStar = errors.New("Only COUNT with (*) as a parameter is supported in the SQL expression")
	// ErrParseNonUnaryAgregateFunctionCall - aggregate called with more than one argument.
	ErrParseNonUnaryAgregateFunctionCall = errors.New("Only one argument is supported for aggregate functions in the SQL expression")
	// ErrParseMalformedJoin - JOIN is not supported.
	ErrParseMalformedJoin = errors.New("JOIN is not supported in the SQL expression")
	// ErrParseExpectedIdentForAt - missing identifier after AT.
	ErrParseExpectedIdentForAt = errors.New("Did not find the expected identifier for AT name in the SQL expression")
	// ErrParseAsteriskIsNotAloneInSelectList - "*" mixed with other select-list expressions.
	ErrParseAsteriskIsNotAloneInSelectList = errors.New("Other expressions are not allowed in the SELECT list when '*' is used without dot notation in the SQL expression")
	// ErrParseCannotMixSqbAndWildcardInSelectList - "[]" indexing mixed with "*".
	ErrParseCannotMixSqbAndWildcardInSelectList = errors.New("Cannot mix [] and * in the same expression in a SELECT list in SQL expression")
	// ErrParseInvalidContextForWildcardInSelectList - improper use of "*".
	ErrParseInvalidContextForWildcardInSelectList = errors.New("Invalid use of * in SELECT list in the SQL expression")
	// ErrEvaluatorBindingDoesNotExist - referenced column name or path does not exist.
	ErrEvaluatorBindingDoesNotExist = errors.New("A column name or a path provided does not exist in the SQL expression")
	// ErrIncorrectSQLFunctionArgumentType - wrong argument type in a function call.
	ErrIncorrectSQLFunctionArgumentType = errors.New("Incorrect type of arguments in function call in the SQL expression")
	// ErrAmbiguousFieldName - case-insensitive column name matches multiple fields.
	ErrAmbiguousFieldName = errors.New("Field name matches to multiple fields in the file. Check the SQL expression and the file, and try again")
	// ErrEvaluatorInvalidArguments - wrong number of arguments in a function call.
	ErrEvaluatorInvalidArguments = errors.New("Incorrect number of arguments in the function call in the SQL expression")
	// ErrValueParseFailure - timestamp could not be parsed.
	ErrValueParseFailure = errors.New("Time stamp parse failure in the SQL expression")
	// ErrIntegerOverflow - integer overflow or underflow.
	ErrIntegerOverflow = errors.New("Int overflow or underflow in the SQL expression")
	// ErrLikeInvalidInputs - invalid argument to the LIKE clause.
	ErrLikeInvalidInputs = errors.New("Invalid argument given to the LIKE clause in the SQL expression")
	// ErrCastFailed - CAST conversion failed.
	ErrCastFailed = errors.New("Attempt to convert from one data type to another using CAST failed in the SQL expression")
	// ErrInvalidCast - CAST conversion attempted improperly.
	ErrInvalidCast = errors.New("Attempt to convert from one data type to another using CAST failed in the SQL expression")
	// ErrEvaluatorInvalidTimestampFormatPattern - timestamp format needs more fields.
	ErrEvaluatorInvalidTimestampFormatPattern = errors.New("Time stamp format pattern requires additional fields in the SQL expression")
	// ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing - valid format symbol that cannot be used for parsing.
	ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing = errors.New("Time stamp format pattern contains a valid format symbol that cannot be applied to time stamp parsing in the SQL expression")
	// ErrEvaluatorTimestampFormatPatternDuplicateFields - duplicate format specifiers for the same field.
	ErrEvaluatorTimestampFormatPatternDuplicateFields = errors.New("Time stamp format pattern contains multiple format specifiers representing the time stamp field in the SQL expression")
	// ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch - 12/24-hour format inconsistent with AM/PM field.
	ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch = errors.New("Time stamp format pattern contains a 12-hour hour of day format symbol but doesn't also contain an AM/PM field, or it contains a 24-hour hour of day format specifier and contains an AM/PM field in the SQL expression")
	// ErrEvaluatorUnterminatedTimestampFormatPatternToken - unterminated token in the timestamp format.
	ErrEvaluatorUnterminatedTimestampFormatPatternToken = errors.New("Time stamp format pattern contains unterminated token in the SQL expression")
	// ErrEvaluatorInvalidTimestampFormatPatternToken - invalid token in the timestamp format.
	ErrEvaluatorInvalidTimestampFormatPatternToken = errors.New("Time stamp format pattern contains an invalid token in the SQL expression")
	// ErrEvaluatorInvalidTimestampFormatPatternSymbol - invalid symbol in the timestamp format.
	ErrEvaluatorInvalidTimestampFormatPatternSymbol = errors.New("Time stamp format pattern contains an invalid symbol in the SQL expression")
)
// S3 select API errors - TODO fix the errors.
var errorCodeResponse = map[error]string{
ErrBusy: "Busy",
ErrUnauthorizedAccess: "UnauthorizedAccess",
ErrExpressionTooLong: "ExpressionTooLong",
ErrIllegalSQLFunctionArgument: "IllegalSqlFunctionArgument",
format.ErrInvalidColumnIndex: "InvalidColumnIndex",
ErrInvalidKeyPath: "InvalidKeyPath",
ErrColumnTooLong: "ColumnTooLong",
ErrOverMaxColumn: "OverMaxColumn",
ErrOverMaxRecordSize: "OverMaxRecordSize",
ErrMissingHeaders: "MissingHeaders",
ErrInvalidCompressionFormat: "InvalidCompressionFormat",
format.ErrTruncatedInput: "TruncatedInput",
ErrInvalidFileHeaderInfo: "InvalidFileHeaderInfo",
ErrInvalidJSONType: "InvalidJsonType",
ErrInvalidQuoteFields: "InvalidQuoteFields",
ErrInvalidRequestParameter: "InvalidRequestParameter",
format.ErrCSVParsingError: "CSVParsingError",
format.ErrJSONParsingError: "JSONParsingError",
ErrExternalEvalException: "ExternalEvalException",
ErrInvalidDataType: "InvalidDataType",
ErrUnrecognizedFormatException: "UnrecognizedFormatException",
ErrInvalidTextEncoding: "InvalidTextEncoding",
ErrInvalidTableAlias: "InvalidTableAlias",
ErrMultipleDataSourcesUnsupported: "MultipleDataSourcesUnsupported",
ErrMissingRequiredParameter: "MissingRequiredParameter",
ErrObjectSerializationConflict: "ObjectSerializationConflict",
ErrUnsupportedSQLOperation: "UnsupportedSqlOperation",
ErrUnsupportedSQLStructure: "UnsupportedSqlStructure",
ErrUnsupportedStorageClass: "UnsupportedStorageClass",
ErrUnsupportedSyntax: "UnsupportedSyntax",
ErrUnsupportedRangeHeader: "UnsupportedRangeHeader",
ErrLexerInvalidChar: "LexerInvalidChar",
ErrLexerInvalidOperator: "LexerInvalidOperator",
ErrLexerInvalidLiteral: "LexerInvalidLiteral",
ErrLexerInvalidIONLiteral: "LexerInvalidIONLiteral",
ErrParseExpectedDatePart: "ParseExpectedDatePart",
ErrParseExpectedKeyword: "ParseExpectedKeyword",
ErrParseExpectedTokenType: "ParseExpectedTokenType",
ErrParseExpected2TokenTypes: "ParseExpected2TokenTypes",
ErrParseExpectedNumber: "ParseExpectedNumber",
ErrParseExpectedRightParenBuiltinFunctionCall: "ParseExpectedRightParenBuiltinFunctionCall",
ErrParseExpectedTypeName: "ParseExpectedTypeName",
ErrParseExpectedWhenClause: "ParseExpectedWhenClause",
ErrParseUnsupportedToken: "ParseUnsupportedToken",
ErrParseUnsupportedLiteralsGroupBy: "ParseUnsupportedLiteralsGroupBy",
ErrParseExpectedMember: "ParseExpectedMember",
ErrParseUnsupportedSelect: "ParseUnsupportedSelect",
ErrParseUnsupportedCase: "ParseUnsupportedCase:",
ErrParseUnsupportedCaseClause: "ParseUnsupportedCaseClause",
ErrParseUnsupportedAlias: "ParseUnsupportedAlias",
ErrParseUnsupportedSyntax: "ParseUnsupportedSyntax",
ErrParseUnknownOperator: "ParseUnknownOperator",
format.ErrParseInvalidPathComponent: "ParseInvalidPathComponent",
ErrParseMissingIdentAfterAt: "ParseMissingIdentAfterAt",
ErrParseUnexpectedOperator: "ParseUnexpectedOperator",
ErrParseUnexpectedTerm: "ParseUnexpectedTerm",
ErrParseUnexpectedToken: "ParseUnexpectedToken",
ErrParseUnexpectedKeyword: "ParseUnexpectedKeyword",
ErrParseExpectedExpression: "ParseExpectedExpression",
ErrParseExpectedLeftParenAfterCast: "ParseExpectedLeftParenAfterCast",
ErrParseExpectedLeftParenValueConstructor: "ParseExpectedLeftParenValueConstructor",
ErrParseExpectedLeftParenBuiltinFunctionCall: "ParseExpectedLeftParenBuiltinFunctionCall",
ErrParseExpectedArgumentDelimiter: "ParseExpectedArgumentDelimiter",
ErrParseCastArity: "ParseCastArity",
ErrParseInvalidTypeParam: "ParseInvalidTypeParam",
ErrParseEmptySelect: "ParseEmptySelect",
ErrParseSelectMissingFrom: "ParseSelectMissingFrom",
ErrParseExpectedIdentForGroupName: "ParseExpectedIdentForGroupName",
ErrParseExpectedIdentForAlias: "ParseExpectedIdentForAlias",
ErrParseUnsupportedCallWithStar: "ParseUnsupportedCallWithStar",
ErrParseNonUnaryAgregateFunctionCall: "ParseNonUnaryAgregateFunctionCall",
ErrParseMalformedJoin: "ParseMalformedJoin",
ErrParseExpectedIdentForAt: "ParseExpectedIdentForAt",
ErrParseAsteriskIsNotAloneInSelectList: "ParseAsteriskIsNotAloneInSelectList",
ErrParseCannotMixSqbAndWildcardInSelectList: "ParseCannotMixSqbAndWildcardInSelectList",
ErrParseInvalidContextForWildcardInSelectList: "ParseInvalidContextForWildcardInSelectList",
ErrEvaluatorBindingDoesNotExist: "EvaluatorBindingDoesNotExist",
ErrIncorrectSQLFunctionArgumentType: "IncorrectSqlFunctionArgumentType",
ErrAmbiguousFieldName: "AmbiguousFieldName",
ErrEvaluatorInvalidArguments: "EvaluatorInvalidArguments",
ErrValueParseFailure: "ValueParseFailure",
ErrIntegerOverflow: "IntegerOverflow",
ErrLikeInvalidInputs: "LikeInvalidInputs",
ErrCastFailed: "CastFailed",
ErrInvalidCast: "Attempt to convert from one data type to another using CAST failed in the SQL expression.",
ErrEvaluatorInvalidTimestampFormatPattern: "EvaluatorInvalidTimestampFormatPattern",
ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing: "EvaluatorInvalidTimestampFormatPatternSymbolForParsing",
ErrEvaluatorTimestampFormatPatternDuplicateFields: "EvaluatorTimestampFormatPatternDuplicateFields",
ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch: "EvaluatorTimestampFormatPatternHourClockAmPmMismatch",
ErrEvaluatorUnterminatedTimestampFormatPatternToken: "EvaluatorUnterminatedTimestampFormatPatternToken",
ErrEvaluatorInvalidTimestampFormatPatternToken: "EvaluatorInvalidTimestampFormatPatternToken",
ErrEvaluatorInvalidTimestampFormatPatternSymbol: "EvaluatorInvalidTimestampFormatPatternSymbol",
// SelectError - represents s3 select error specified in
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html#RESTObjectSELECTContent-responses-special-errors.
type SelectError interface {
	// Cause returns the underlying error, if any.
	Cause() error
	// ErrorCode returns the S3 Select API error code string.
	ErrorCode() string
	// ErrorMessage returns the human readable message for the response.
	ErrorMessage() string
	// HTTPStatusCode returns the HTTP status code to respond with.
	HTTPStatusCode() int
	// Error satisfies the builtin error interface.
	Error() string
}
// s3Error is the concrete SelectError implementation used to report
// S3 Select request failures back to the client.
type s3Error struct {
	code       string // S3 Select API error code, e.g. "MalformedXML"
	message    string // human readable description sent in the response
	statusCode int    // HTTP status code for the response
	cause      error  // underlying error; may be nil
}
// Cause returns the underlying error wrapped by this s3Error (may be nil).
func (err *s3Error) Cause() error {
	return err.cause
}
// ErrorCode returns the S3 Select API error code string.
func (err *s3Error) ErrorCode() string {
	return err.code
}
// ErrorMessage returns the human readable message for the response.
func (err *s3Error) ErrorMessage() string {
	return err.message
}
// HTTPStatusCode returns the HTTP status code to respond with.
func (err *s3Error) HTTPStatusCode() int {
	return err.statusCode
}
// Error satisfies the builtin error interface; it returns the same text
// as ErrorMessage.
func (err *s3Error) Error() string {
	return err.message
}
// newClientS3Error builds an s3Error with HTTP status 400 (all the S3
// Select request-validation errors below are client errors), the given
// API error code and message, and the underlying cause.
func newClientS3Error(code, message string, err error) *s3Error {
	return &s3Error{
		code:       code,
		message:    message,
		statusCode: 400,
		cause:      err,
	}
}

// errMalformedXML - the request XML did not validate against the schema.
func errMalformedXML(err error) *s3Error {
	return newClientS3Error("MalformedXML", "The XML provided was not well-formed or did not validate against our published schema. Check the service documentation and try again.", err)
}

// errInvalidCompressionFormat - the object uses an unsupported compression format.
func errInvalidCompressionFormat(err error) *s3Error {
	return newClientS3Error("InvalidCompressionFormat", "The file is not in a supported compression format. Only GZIP and BZIP2 are supported.", err)
}

// errInvalidDataSource - the input serialization names an unsupported format.
func errInvalidDataSource(err error) *s3Error {
	return newClientS3Error("InvalidDataSource", "Invalid data source type. Only CSV, JSON, and Parquet are supported.", err)
}

// errInvalidRequestParameter - a SelectRequest parameter value is invalid.
func errInvalidRequestParameter(err error) *s3Error {
	return newClientS3Error("InvalidRequestParameter", "The value of a parameter in SelectRequest element is invalid. Check the service API documentation and try again.", err)
}

// errObjectSerializationConflict - more than one input or output format was specified.
func errObjectSerializationConflict(err error) *s3Error {
	return newClientS3Error("ObjectSerializationConflict", "InputSerialization specifies more than one format (CSV, JSON, or Parquet), or OutputSerialization specifies more than one format (CSV or JSON). InputSerialization and OutputSerialization can only specify one format each.", err)
}

// errInvalidExpressionType - the ExpressionType is not SQL.
func errInvalidExpressionType(err error) *s3Error {
	return newClientS3Error("InvalidExpressionType", "The ExpressionType is invalid. Only SQL expressions are supported.", err)
}

// errMissingRequiredParameter - a required SelectRequest parameter is absent.
func errMissingRequiredParameter(err error) *s3Error {
	return newClientS3Error("MissingRequiredParameter", "The SelectRequest entity is missing a required parameter. Check the service documentation and try again.", err)
}

// errTruncatedInput - object decompression failed.
func errTruncatedInput(err error) *s3Error {
	return newClientS3Error("TruncatedInput", "Object decompression failed. Check that the object is properly compressed using the format specified in the request.", err)
}

View File

@@ -1,223 +0,0 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3select
import (
"strings"
"github.com/tidwall/gjson"
"github.com/xwb1989/sqlparser"
"github.com/minio/minio/pkg/s3select/format"
)
// stringOps evaluates a string function expression (TRIM, SUBSTRING,
// CHAR_LENGTH, CHARACTER_LENGTH, LOWER, UPPER) against a single record.
// myReturnVal carries the already-evaluated result of a nested function
// expression (the tail recursive value threaded by evaluateFuncExpr) and
// is used when the argument is itself a function call.
func stringOps(myFunc *sqlparser.FuncExpr, record []byte, myReturnVal string) string {
	var value string
	funcName := myFunc.Name.CompliantName()
	// Only the first argument of the call is inspected.
	switch tempArg := myFunc.Exprs[0].(type) {
	case *sqlparser.AliasedExpr:
		switch col := tempArg.Expr.(type) {
		case *sqlparser.FuncExpr:
			// Nested function call: myReturnVal is actually the tail
			// recursive value being used in the eval func.
			return applyStrFunc(gjson.Parse(myReturnVal), funcName)
		case *sqlparser.ColName:
			// Column reference: look the value up in the JSON record.
			value = applyStrFunc(gjson.GetBytes(record, col.Name.CompliantName()), funcName)
		case *sqlparser.SQLVal:
			// Literal value.
			value = applyStrFunc(gjson.ParseBytes(col.Val), funcName)
		}
	}
	return value
}
// coalOps decomposes a COALESCE func expr: each argument is resolved to
// its string value and processCoalNoIndex picks the first non-null,
// non-missing, non-empty one. myReturnVal carries the already-evaluated
// result of a nested function expression (tail recursion in
// evaluateFuncExpr) and is returned directly in that case.
func coalOps(myFunc *sqlparser.FuncExpr, record []byte, myReturnVal string) string {
	myArgs := make([]string, len(myFunc.Exprs))
	for i, expr := range myFunc.Exprs {
		switch tempArg := expr.(type) {
		case *sqlparser.AliasedExpr:
			switch col := tempArg.Expr.(type) {
			case *sqlparser.FuncExpr:
				// myReturnVal is actually the tail recursive value
				// being used in the eval func.
				return myReturnVal
			case *sqlparser.ColName:
				// Column reference: resolve against the record.
				myArgs[i] = gjson.GetBytes(record, col.Name.CompliantName()).String()
			case *sqlparser.SQLVal:
				// Literal value.
				myArgs[i] = string(col.Val)
			}
		}
	}
	return processCoalNoIndex(myArgs)
}
// nullOps decomposes a NULLIF(a, b) func expr: it yields "" when both
// resolved arguments compare equal as strings, and the first argument
// otherwise. myReturnVal carries the already-evaluated result of a
// nested function expression (tail recursion in evaluateFuncExpr) and is
// returned directly in that case.
func nullOps(myFunc *sqlparser.FuncExpr, record []byte, myReturnVal string) string {
	myArgs := make([]string, 2)
	for i, expr := range myFunc.Exprs {
		switch tempArg := expr.(type) {
		case *sqlparser.AliasedExpr:
			switch col := tempArg.Expr.(type) {
			case *sqlparser.FuncExpr:
				// Nested function call: the tail recursive value wins.
				return myReturnVal
			case *sqlparser.ColName:
				// Column reference: resolve against the record.
				myArgs[i] = gjson.GetBytes(record, col.Name.CompliantName()).String()
			case *sqlparser.SQLVal:
				// Literal value.
				myArgs[i] = string(col.Val)
			}
		}
	}
	if myArgs[0] == myArgs[1] {
		return ""
	}
	return myArgs[0]
}
// isValidFunc reports whether index is present in myList, i.e. whether
// the current select-list index is one that carries a string function.
// A nil list never matches.
func isValidFunc(myList []int, index int) bool {
	for _, candidate := range myList {
		if candidate == index {
			return true
		}
	}
	return false
}
// processCoalNoIndex evaluates a COALESCE argument list: the first value
// that is neither empty, "null" nor "missing" is returned; when every
// argument is null-like the literal string "null" is returned.
func processCoalNoIndex(coalStore []string) string {
	for _, candidate := range coalStore {
		switch candidate {
		case "", "null", "missing":
			continue
		}
		return candidate
	}
	return "null"
}
// evaluateFuncExpr evaluates a (possibly nested) function expression
// against a single JSON record using tail recursion: myReturnVal holds
// the value produced by the inner call and is threaded back into the
// outer string/NULLIF/COALESCE handler. A nil expression simply yields
// myReturnVal unchanged.
func evaluateFuncExpr(myVal *sqlparser.FuncExpr, myReturnVal string, record []byte) string {
	if myVal == nil {
		return myReturnVal
	}
	// retrieve all the relevant arguments of the function; entries stay
	// nil when the argument is not itself a function call.
	var mySubFunc []*sqlparser.FuncExpr
	mySubFunc = make([]*sqlparser.FuncExpr, len(myVal.Exprs))
	for i, expr := range myVal.Exprs {
		switch col := expr.(type) {
		case *sqlparser.AliasedExpr:
			switch temp := col.Expr.(type) {
			case *sqlparser.FuncExpr:
				mySubFunc[i] = temp
			}
		}
	}
	// Need to do tree recursion so as to explore all possible directions of the
	// nested function recursion.
	// NOTE(review): mySubFunc is never nil here (created by make above),
	// so the recursive branch is always taken and every matching case
	// returns on the loop's first iteration — confirm whether iterating
	// i over all sub-expressions is actually intended.
	for i := 0; i < len(mySubFunc); i++ {
		if supportedString(myVal.Name.CompliantName()) {
			if mySubFunc != nil {
				return stringOps(myVal, record, evaluateFuncExpr(mySubFunc[i], myReturnVal, record))
			}
			return stringOps(myVal, record, myReturnVal)
		} else if strings.ToUpper(myVal.Name.CompliantName()) == "NULLIF" {
			if mySubFunc != nil {
				return nullOps(myVal, record, evaluateFuncExpr(mySubFunc[i], myReturnVal, record))
			}
			return nullOps(myVal, record, myReturnVal)
		} else if strings.ToUpper(myVal.Name.CompliantName()) == "COALESCE" {
			if mySubFunc != nil {
				return coalOps(myVal, record, evaluateFuncExpr(mySubFunc[i], myReturnVal, record))
			}
			return coalOps(myVal, record, myReturnVal)
		}
	}
	return ""
}
// evaluateFuncErr validates a (possibly nested) function expression:
// it rejects unsupported function names, "*" arguments, and column
// references the reader cannot resolve. A nil expression is valid.
func evaluateFuncErr(myVal *sqlparser.FuncExpr, reader format.Select) error {
	if myVal == nil {
		return nil
	}
	if !supportedFunc(myVal.Name.CompliantName()) {
		return ErrUnsupportedSQLOperation
	}
	for _, expr := range myVal.Exprs {
		switch tempArg := expr.(type) {
		case *sqlparser.StarExpr:
			// "*" is not a valid argument to a scalar function.
			return ErrParseUnsupportedCallWithStar
		case *sqlparser.AliasedExpr:
			switch col := tempArg.Expr.(type) {
			case *sqlparser.FuncExpr:
				// Recurse into nested function calls.
				if err := evaluateFuncErr(col, reader); err != nil {
					return err
				}
			case *sqlparser.ColName:
				// Ensure the referenced column exists in the input.
				if err := reader.ColNameErrs([]string{col.Name.CompliantName()}); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// evaluateIsExpr evaluates expressions of the form "column IS NULL" /
// "column IS NOT NULL" against a single record. The operand may be a
// literal, a nested function expression, or a column reference; a value
// is treated as null when it resolves to the empty string. Any other IS
// operator yields ErrUnsupportedSQLOperation.
// NOTE(review): the alias parameter is unused in this body — presumably
// kept for call-site compatibility; confirm before removing.
func evaluateIsExpr(myFunc *sqlparser.IsExpr, row []byte, alias string) (bool, error) {
	// getMyVal lazily resolves the operand to its string value.
	getMyVal := func() (myVal string) {
		switch myIs := myFunc.Expr.(type) {
		// case for literal val
		case *sqlparser.SQLVal:
			myVal = string(myIs.Val)
		// case for nested func val
		case *sqlparser.FuncExpr:
			myVal = evaluateFuncExpr(myIs, "", row)
		// case for col val
		case *sqlparser.ColName:
			myVal = gjson.GetBytes(row, myIs.Name.CompliantName()).String()
		}
		return myVal
	}
	operator := strings.ToLower(myFunc.Operator)
	switch operator {
	case "is null":
		return getMyVal() == "", nil
	case "is not null":
		return getMyVal() != "", nil
	default:
		return false, ErrUnsupportedSQLOperation
	}
}
// supportedString reports whether strFunc (case-insensitively) names one
// of the string functions implemented for S3 Select.
func supportedString(strFunc string) bool {
	switch strings.ToUpper(strFunc) {
	case "TRIM", "SUBSTRING", "CHAR_LENGTH", "CHARACTER_LENGTH", "LOWER", "UPPER":
		return true
	}
	return false
}
// supportedFunc reports whether strFunc (case-insensitively) names any
// scalar function supported by this S3 Select implementation.
func supportedFunc(strFunc string) bool {
	switch strings.ToUpper(strFunc) {
	case "TRIM", "SUBSTRING", "CHAR_LENGTH", "CHARACTER_LENGTH", "LOWER", "UPPER", "COALESCE", "NULLIF":
		return true
	}
	return false
}

View File

@@ -1,339 +0,0 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package csv
import (
"encoding/csv"
"encoding/xml"
"io"
"strconv"
"strings"
"github.com/tidwall/sjson"
"github.com/minio/minio/pkg/ioutil"
"github.com/minio/minio/pkg/s3select/format"
)
// Options are passed to the underlying encoding/csv reader and control
// how a CSV object is parsed, queried, and written back out.
type Options struct {
	// HasHeader when true, will treat the first row as a header row.
	HasHeader bool
	// RecordDelimiter is the string that records are delimited by.
	RecordDelimiter string
	// FieldDelimiter is the string that fields are delimited by.
	FieldDelimiter string
	// Comments is the string whose first character is treated as the
	// comment character; lines starting with it are skipped.
	Comments string
	// Name of the table that is used for querying.
	Name string
	// ReadFrom is where the data will be read from.
	ReadFrom io.Reader
	// Compressed names the compression of the object; if set a gzip or
	// bzip reader is needed to extract the csv.
	Compressed string
	// Expression is the SQL expression meant to be evaluated.
	Expression string
	// OutputFieldDelimiter is what output CSV fields will be delimited by.
	OutputFieldDelimiter string
	// OutputRecordDelimiter is what output CSV records will be delimited by.
	OutputRecordDelimiter string
	// StreamSize is the size of the incoming object, in bytes.
	StreamSize int64
	// HeaderOpt is whether the file header info is "USE" or another value.
	HeaderOpt bool
	// Progress enabled, enable/disable progress messages.
	Progress bool
	// OutputType is the output format type; supported values are CSV and JSON.
	OutputType format.Type
}
// cinput represents a record producing input from a CSV formatted object.
// It implements format.Select on top of an encoding/csv reader.
type cinput struct {
	options *Options    // parse/query options supplied by the caller
	reader  *csv.Reader // CSV reader over the delimiter-normalized stream
	// firstRow buffers the first data row when the file has no header,
	// so readRecord can replay it as the first record.
	firstRow []string
	header   []string // column names: header row or synthesized "_<i>"
	// minOutputLength is the column count of the first row; shorter
	// records are padded up to this width by readRecord.
	minOutputLength int
	// stats holds the byte counters reported in Progress/Stats messages.
	stats struct {
		BytesScanned   int64
		BytesReturned  int64
		BytesProcessed int64
	}
}
// New sets up a new CSV Select reader. The first row is consumed here
// (header or buffered first record), so a failure to parse it surfaces
// from New rather than from the first Read(); afterwards the returned
// reader can be reliably consumed with Read() until it errors.
func New(opts *Options) (format.Select, error) {
	// Normalize custom record delimiters such as `\r\n`, `\r` or `ab`
	// into plain `\n` before handing the stream to encoding/csv.
	normalized := ioutil.NewDelimitedReader(opts.ReadFrom, []rune(opts.RecordDelimiter))

	c := &cinput{
		options: opts,
		reader:  csv.NewReader(normalized),
	}
	c.firstRow = nil
	c.stats.BytesScanned = opts.StreamSize
	c.stats.BytesProcessed = 0
	c.stats.BytesReturned = 0

	// Allow records with a variable number of fields.
	c.reader.FieldsPerRecord = -1
	if fd := c.options.FieldDelimiter; fd != "" {
		c.reader.Comma = rune(fd[0])
	}
	if cm := c.options.Comments; cm != "" {
		c.reader.Comment = rune(cm[0])
	}
	// QuoteCharacter - " (defaulted currently).
	c.reader.LazyQuotes = true

	if err := c.readHeader(); err != nil {
		return nil, err
	}
	return c, nil
}
// cleanHeader normalizes CSV column names in place so they are
// addressable from SQL: blank names (some CSVs mix named and unnamed
// header cells) become an index-based placeholder "_<i>", and spaces
// are replaced with underscores. The same slice is returned.
func cleanHeader(columns []string) []string {
	for i, name := range columns {
		if name == "" {
			name = "_" + strconv.Itoa(i)
		}
		columns[i] = strings.Replace(name, " ", "_", -1)
	}
	return columns
}
// readHeader consumes the first CSV row. With HasHeader set it becomes
// the (cleaned) header; otherwise synthetic "_<i>" column names are
// generated and the row is kept in firstRow so readRecord serves it as
// the first record. Any read failure is reported as a CSV parsing error.
func (reader *cinput) readHeader() error {
	row, readErr := reader.reader.Read()
	if readErr != nil {
		return format.ErrCSVParsingError
	}
	if reader.options.HasHeader {
		reader.header = cleanHeader(row)
		reader.firstRow = nil
	} else {
		reader.firstRow = row
		reader.header = make([]string, len(row))
		for i := range row {
			reader.header[i] = "_" + strconv.Itoa(i)
		}
	}
	reader.minOutputLength = len(reader.header)
	return nil
}
// Progress - returns true if progress reporting was requested in the options.
func (reader *cinput) Progress() bool {
	return reader.options.Progress
}
// UpdateBytesProcessed - adds size to the running BytesProcessed counter.
func (reader *cinput) UpdateBytesProcessed(size int64) {
	reader.stats.BytesProcessed += size
}
// Read returns the next record serialized as a JSON document ([]byte),
// mapping each CSV column value to its header name. It returns nil, nil
// once the underlying stream is exhausted.
func (reader *cinput) Read() ([]byte, error) {
	dec := reader.readRecord()
	if dec != nil {
		var data []byte
		var err error
		// Navigate column values in reverse order to preserve
		// the input order for AWS S3 compatibility, because
		// sjson adds json key/value pairs in first in last out
		// fashion. This should be fixed in sjson ideally. Following
		// work around is needed to circumvent this issue for now.
		for i := len(dec) - 1; i >= 0; i-- {
			data, err = sjson.SetBytes(data, reader.header[i], dec[i])
			if err != nil {
				return nil, err
			}
		}
		return data, nil
	}
	return nil, nil
}
// OutputFieldDelimiter - returns the requested output field delimiter.
func (reader *cinput) OutputFieldDelimiter() string {
	return reader.options.OutputFieldDelimiter
}
// OutputRecordDelimiter - returns the requested output record delimiter.
func (reader *cinput) OutputRecordDelimiter() string {
	return reader.options.OutputRecordDelimiter
}
// HasHeader - returns whether the input was declared to have a header row.
func (reader *cinput) HasHeader() bool {
	return reader.options.HasHeader
}
// Expression - returns the SQL Select expression to be evaluated against
// this input.
func (reader *cinput) Expression() string {
	return reader.options.Expression
}
// UpdateBytesReturned - adds size to the running BytesReturned counter.
func (reader *cinput) UpdateBytesReturned(size int64) {
	reader.stats.BytesReturned += size
}
// Header returns the header of the reader: either the first row when a
// header was set in the options, or synthesized "_<#>" names, where # is
// the column number starting with 0.
func (reader *cinput) Header() []string {
	return reader.header
}
// readRecord returns the next row of the stream and never fails: the
// buffered first row (no-header case) is served first; EOF or a closed
// pipe yields nil; an invalid record (csv.ParseError) yields a row of
// minOutputLength empty fields appended to whatever was read; and short
// rows are padded with empty fields so a record never has fewer fields
// than the first row had. In general, this is a very tolerant reader.
func (reader *cinput) readRecord() []string {
	if reader.firstRow != nil {
		row := reader.firstRow
		reader.firstRow = nil
		return row
	}

	row, fileErr := reader.reader.Read()
	if fileErr == io.EOF || fileErr == io.ErrClosedPipe {
		return nil
	}

	pad := reader.minOutputLength - len(row)
	if _, isParseErr := fileErr.(*csv.ParseError); isParseErr {
		// Treat the record as empty: pad a full row of blanks.
		pad = reader.minOutputLength
	}
	for ; pad > 0; pad-- {
		row = append(row, "")
	}
	return row
}
// CreateStatXML marshals the current byte counters into the XML payload
// used for the Stats message of the S3 Select response.
func (reader *cinput) CreateStatXML() (string, error) {
	// For uncompressed objects the whole stream counts as both
	// processed and scanned.
	if reader.options.Compressed == "NONE" {
		reader.stats.BytesProcessed = reader.options.StreamSize
		reader.stats.BytesScanned = reader.stats.BytesProcessed
	}
	stats := format.Stats{
		BytesScanned:   reader.stats.BytesScanned,
		BytesProcessed: reader.stats.BytesProcessed,
		BytesReturned:  reader.stats.BytesReturned,
	}
	data, err := xml.Marshal(&stats)
	if err != nil {
		return "", err
	}
	return xml.Header + string(data), nil
}
// CreateProgressXML marshals the current byte counters into the XML
// payload used for the Progress message of the S3 Select response.
func (reader *cinput) CreateProgressXML() (string, error) {
	// Account for the header row that was consumed but never returned.
	if reader.options.HasHeader {
		reader.stats.BytesProcessed += format.ProcessSize(reader.header)
	}
	// For uncompressed objects every processed byte was also scanned.
	if reader.options.Compressed == "NONE" {
		reader.stats.BytesScanned = reader.stats.BytesProcessed
	}
	progress := format.Progress{
		BytesScanned:   reader.stats.BytesScanned,
		BytesProcessed: reader.stats.BytesProcessed,
		BytesReturned:  reader.stats.BytesReturned,
	}
	data, err := xml.Marshal(&progress)
	if err != nil {
		return "", err
	}
	return xml.Header + string(data), nil
}
// Type returns the input data format handled by this reader (CSV).
func (reader *cinput) Type() format.Type {
	return format.CSV
}

// OutputType returns the data format requested for the results.
func (reader *cinput) OutputType() format.Type {
	return reader.options.OutputType
}
// ColNameErrs validates the column references in the SQL expression.
// Numeric references must be valid 1-based column indexes; name references
// are only permitted when the CSV has a header row and must match one of
// the header fields. Empty names are skipped.
func (reader *cinput) ColNameErrs(columnNames []string) error {
	for _, col := range columnNames {
		if col == "" {
			continue
		}
		if !format.IsInt(col) && !reader.options.HeaderOpt {
			// A named column cannot be resolved without a header row.
			return format.ErrInvalidColumnIndex
		}
		if format.IsInt(col) {
			// Column indexes are 1-based: reject zero, negatives
			// (the original check only rejected zero, letting
			// negative indexes through), and anything past the
			// last column.
			tempInt, _ := strconv.Atoi(col)
			if tempInt > len(reader.Header()) || tempInt <= 0 {
				return format.ErrInvalidColumnIndex
			}
		} else if reader.options.HeaderOpt && !format.StringInSlice(col, reader.Header()) {
			return format.ErrParseInvalidPathComponent
		}
	}
	return nil
}

View File

@@ -1,38 +0,0 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package format

import "errors"

// ErrTruncatedInput is returned when object decompression fails, e.g.
// because the object is not compressed in the format the request declared.
var ErrTruncatedInput = errors.New("Object decompression failed. Check that the object is properly compressed using the format specified in the request")

// ErrCSVParsingError is returned when the CSV input cannot be parsed.
var ErrCSVParsingError = errors.New("Encountered an Error parsing the CSV file. Check the file and try again")

// ErrInvalidColumnIndex is returned for an out-of-range or otherwise
// invalid column index in the SQL expression.
var ErrInvalidColumnIndex = errors.New("Column index in the SQL expression is invalid")

// ErrParseInvalidPathComponent is returned when the SQL expression
// contains an invalid path component.
var ErrParseInvalidPathComponent = errors.New("The SQL expression contains an invalid path component")

// ErrJSONParsingError is returned when the JSON input cannot be parsed.
var ErrJSONParsingError = errors.New("Encountered an error parsing the JSON file. Check the file and try again")

View File

@@ -1,50 +0,0 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package format
import "strconv"
// IsInt reports whether s can be parsed as a base-10 integer.
func IsInt(s string) bool {
	if _, err := strconv.Atoi(s); err != nil {
		return false
	}
	return true
}
// StringInSlice reports whether x occurs in list.
func StringInSlice(x string, list []string) bool {
	for i := range list {
		if list[i] == x {
			return true
		}
	}
	return false
}
// ProcessSize returns the number of bytes a CSV record contributes to
// BytesProcessed: the length of every field plus one byte per field
// separator and one for the record terminator.
func ProcessSize(myrecord []string) int64 {
	if len(myrecord) == 0 {
		return 0
	}
	// (len-1) separators + 1 terminator == len(myrecord) extra bytes.
	size := int64(len(myrecord))
	for _, field := range myrecord {
		size += int64(len(field))
	}
	return size
}

View File

@@ -1,205 +0,0 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package json
import (
"bufio"
"encoding/xml"
"io"
"github.com/minio/minio/pkg/s3select/format"
"github.com/tidwall/gjson"
)
// Options are the parameters for the JSON input reader.
type Options struct {
	// Name of the table that is used for querying.
	Name string

	// ReadFrom is where the data will be read from.
	ReadFrom io.Reader

	// Compression type of the object; when not "NONE" a gzip or bzip
	// reader is needed to extract the data.
	Compressed string

	// SQL expression meant to be evaluated.
	Expression string

	// Input record delimiter.
	RecordDelimiter string

	// Field delimiter used when the output type is CSV.
	OutputFieldDelimiter string

	// Delimiter written after each output record.
	OutputRecordDelimiter string

	// Size of the incoming object, in bytes.
	StreamSize int64

	// True if DocumentType is DOCUMENTS.
	DocumentType bool

	// Progress enables/disables periodic progress messages.
	Progress bool

	// Output format type; supported values are CSV and JSON.
	OutputType format.Type
}
// jinput produces records from a line-delimited JSON file or pipe.
type jinput struct {
	options *Options      // request parameters
	reader  *bufio.Reader // buffered source stream
	header  []string      // top-level keys of the most recently read record
	// minOutputLength is unused by the JSON reader's visible methods.
	minOutputLength int
	// stats accumulates the byte counters reported in Progress/Stats
	// messages.
	stats struct {
		BytesScanned   int64
		BytesReturned  int64
		BytesProcessed int64
	}
}
// New constructs a JSON-format Select reader over opts.ReadFrom. The
// stream is consumed lazily through Read(); BytesScanned starts at the
// full stream size while the other counters start at zero.
func New(opts *Options) (format.Select, error) {
	r := &jinput{
		options: opts,
		reader:  bufio.NewReader(opts.ReadFrom),
	}
	r.stats.BytesScanned = opts.StreamSize
	r.stats.BytesProcessed = 0
	r.stats.BytesReturned = 0
	return r, nil
}
// Progress reports whether progress messages were requested.
func (reader *jinput) Progress() bool {
	return reader.options.Progress
}

// UpdateBytesProcessed adds size to the processed-bytes counter.
func (reader *jinput) UpdateBytesProcessed(size int64) {
	reader.stats.BytesProcessed += size
}
// Read returns the next newline-delimited JSON record from the stream and
// records the keys of its top-level fields as the header. io.EOF and
// io.ErrClosedPipe are treated as a clean end of stream (nil error); any
// other read failure is reported as ErrJSONParsingError.
func (reader *jinput) Read() ([]byte, error) {
	data, isPrefix, err := reader.reader.ReadLine()
	if isPrefix {
		// bufio.Reader.ReadLine returns long lines in chunks and
		// reuses its buffer, so copy the first chunk and keep reading
		// until the record is complete. The original ignored isPrefix
		// and silently truncated records longer than the buffer.
		line := append([]byte{}, data...)
		for isPrefix && err == nil {
			data, isPrefix, err = reader.reader.ReadLine()
			line = append(line, data...)
		}
		data = line
	}
	if err != nil {
		if err == io.EOF || err == io.ErrClosedPipe {
			err = nil
		} else {
			err = format.ErrJSONParsingError
		}
	}
	if err == nil {
		var header []string
		gjson.ParseBytes(data).ForEach(func(key, value gjson.Result) bool {
			header = append(header, key.String())
			return true
		})
		reader.header = header
	}
	return data, err
}
// OutputFieldDelimiter returns the output field delimiter from the
// request; empty for JSON output, set when the output type is CSV.
func (reader *jinput) OutputFieldDelimiter() string {
	return reader.options.OutputFieldDelimiter
}

// OutputRecordDelimiter returns the delimiter written after each output
// record, as specified in the request.
func (reader *jinput) OutputRecordDelimiter() string {
	return reader.options.OutputRecordDelimiter
}

// HasHeader always reports true for JSON input: every record carries its
// own keys.
func (reader *jinput) HasHeader() bool {
	return true
}

// Expression returns the SQL Select expression from the request.
func (reader *jinput) Expression() string {
	return reader.options.Expression
}

// UpdateBytesReturned adds size to the returned-bytes counter.
func (reader *jinput) UpdateBytesReturned(size int64) {
	reader.stats.BytesReturned += size
}

// Header returns the top-level keys of the most recently read record
// (nil before the first successful Read).
func (reader *jinput) Header() []string {
	return reader.header
}
// CreateStatXML marshals the reader's byte counters into a Stats XML
// document, prefixed with the standard XML header.
func (reader *jinput) CreateStatXML() (string, error) {
	// For uncompressed objects everything streamed was also processed.
	if reader.options.Compressed == "NONE" {
		reader.stats.BytesProcessed = reader.options.StreamSize
		reader.stats.BytesScanned = reader.stats.BytesProcessed
	}
	stats := format.Stats{
		BytesScanned:   reader.stats.BytesScanned,
		BytesProcessed: reader.stats.BytesProcessed,
		BytesReturned:  reader.stats.BytesReturned,
	}
	out, err := xml.Marshal(&stats)
	if err != nil {
		return "", err
	}
	return xml.Header + string(out), nil
}
// CreateProgressXML marshals the reader's byte counters into a Progress
// XML document, prefixed with the standard XML header.
func (reader *jinput) CreateProgressXML() (string, error) {
	// For uncompressed objects scanned equals processed. (Rewrote the
	// original double negative !(Compressed != "NONE") for clarity and
	// consistency with the CSV reader's CreateProgressXML.)
	if reader.options.Compressed == "NONE" {
		reader.stats.BytesScanned = reader.stats.BytesProcessed
	}
	out, err := xml.Marshal(&format.Progress{
		BytesScanned:   reader.stats.BytesScanned,
		BytesProcessed: reader.stats.BytesProcessed,
		BytesReturned:  reader.stats.BytesReturned,
	})
	if err != nil {
		return "", err
	}
	return xml.Header + string(out), nil
}
// Type returns the input data format handled by this reader (JSON).
func (reader *jinput) Type() format.Type {
	return format.JSON
}

// OutputType returns the data format requested for the results.
func (reader *jinput) OutputType() format.Type {
	return reader.options.OutputType
}

// ColNameErrs is a no-op for JSON input: column names are resolved per
// record, so there is nothing to validate up front.
func (reader *jinput) ColNameErrs(columnNames []string) error {
	return nil
}

View File

@@ -1,65 +0,0 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package format

import "encoding/xml"

// Select is the interface a data-format reader implements to support
// SELECT Object Content, per
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
type Select interface {
	Type() Type                               // input data format
	OutputType() Type                         // requested output data format
	Read() ([]byte, error)                    // next record from the stream
	Header() []string                         // column names / record keys
	HasHeader() bool                          // whether the input has a header
	OutputFieldDelimiter() string             // field delimiter for CSV output
	OutputRecordDelimiter() string            // delimiter after each output record
	UpdateBytesProcessed(int64)               // add to the processed-bytes counter
	Expression() string                       // SQL expression from the request
	UpdateBytesReturned(int64)                // add to the returned-bytes counter
	CreateStatXML() (string, error)           // Stats message payload
	CreateProgressXML() (string, error)       // Progress message payload
	ColNameErrs(columnNames []string) error   // validate column references
	Progress() bool                           // whether progress messages were requested
}

// Progress is the XML payload of a Progress message.
type Progress struct {
	XMLName        xml.Name `xml:"Progress" json:"-"`
	BytesScanned   int64    `xml:"BytesScanned"`
	BytesProcessed int64    `xml:"BytesProcessed"`
	BytesReturned  int64    `xml:"BytesReturned"`
}

// Stats is the XML payload of a Stats message.
type Stats struct {
	XMLName        xml.Name `xml:"Stats" json:"-"`
	BytesScanned   int64    `xml:"BytesScanned"`
	BytesProcessed int64    `xml:"BytesProcessed"`
	BytesReturned  int64    `xml:"BytesReturned"`
}

// Type identifies a supported data format.
type Type string

// Supported data format types.
const (
	JSON Type = "json"
	CSV  Type = "csv"
)

182
pkg/s3select/genmessage.go Normal file
View File

@@ -0,0 +1,182 @@
// +build ignore
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3select
import (
"bytes"
"encoding/binary"
"fmt"
"hash/crc32"
)
// genRecordsHeader prints the event-stream header bytes of a Records
// message: each header is a 1-byte name length, the name, the value-type
// byte 7 (string), a big-endian uint16 value length, and the value.
func genRecordsHeader() {
	buf := new(bytes.Buffer)
	writeHeader := func(name, value string) {
		buf.WriteByte(byte(len(name)))
		buf.WriteString(name)
		buf.WriteByte(7) // header value type: string
		binary.Write(buf, binary.BigEndian, uint16(len(value)))
		buf.WriteString(value)
	}
	writeHeader(":message-type", "event")
	writeHeader(":content-type", "application/octet-stream")
	writeHeader(":event-type", "Records")
	fmt.Println(buf.Bytes())
}
// Continuation Message
// ====================
// Header specification
// --------------------
// Continuation messages contain two headers, as follows:
// https://docs.aws.amazon.com/AmazonS3/latest/API/images/s3select-frame-diagram-cont.png
//
// Payload specification
// ---------------------
// Continuation messages have no payload.
//
// genContinuationMessage prints the complete wire bytes of a Cont message:
// prelude (total length, header length), prelude CRC, headers, message CRC.
func genContinuationMessage() {
	// Build the two event-stream headers first so their length is known.
	buf := new(bytes.Buffer)
	buf.WriteByte(13)
	buf.WriteString(":message-type")
	buf.WriteByte(7)
	buf.Write([]byte{0, 5})
	buf.WriteString("event")
	buf.WriteByte(11)
	buf.WriteString(":event-type")
	buf.WriteByte(7)
	buf.Write([]byte{0, 4})
	buf.WriteString("Cont")
	header := buf.Bytes()
	headerLength := len(header)
	payloadLength := 0
	totalLength := totalByteLength(headerLength, payloadLength)
	// Assemble prelude + CRCs; note the ordering is load-bearing: the
	// prelude CRC is computed over the first 8 bytes before the header
	// is appended, and the final CRC covers everything written so far.
	buf = new(bytes.Buffer)
	binary.Write(buf, binary.BigEndian, uint32(totalLength))
	binary.Write(buf, binary.BigEndian, uint32(headerLength))
	prelude := buf.Bytes()
	binary.Write(buf, binary.BigEndian, crc32.ChecksumIEEE(prelude))
	buf.Write(header)
	message := buf.Bytes()
	binary.Write(buf, binary.BigEndian, crc32.ChecksumIEEE(message))
	fmt.Println(buf.Bytes())
}
// genProgressHeader prints the event-stream header bytes of a Progress
// message (message-type event, content-type text/xml, event-type Progress).
func genProgressHeader() {
	buf := new(bytes.Buffer)
	writeHeader := func(name, value string) {
		buf.WriteByte(byte(len(name)))
		buf.WriteString(name)
		buf.WriteByte(7) // header value type: string
		binary.Write(buf, binary.BigEndian, uint16(len(value)))
		buf.WriteString(value)
	}
	writeHeader(":message-type", "event")
	writeHeader(":content-type", "text/xml")
	writeHeader(":event-type", "Progress")
	fmt.Println(buf.Bytes())
}
// genStatsHeader prints the event-stream header bytes of a Stats message
// (message-type event, content-type text/xml, event-type Stats).
func genStatsHeader() {
	buf := new(bytes.Buffer)
	writeHeader := func(name, value string) {
		buf.WriteByte(byte(len(name)))
		buf.WriteString(name)
		buf.WriteByte(7) // header value type: string
		binary.Write(buf, binary.BigEndian, uint16(len(value)))
		buf.WriteString(value)
	}
	writeHeader(":message-type", "event")
	writeHeader(":content-type", "text/xml")
	writeHeader(":event-type", "Stats")
	fmt.Println(buf.Bytes())
}
// End Message
// ===========
// Header specification
// --------------------
// End messages contain two headers, as follows:
// https://docs.aws.amazon.com/AmazonS3/latest/API/images/s3select-frame-diagram-end.png
//
// Payload specification
// ---------------------
// End messages have no payload.
//
// genEndMessage prints the complete wire bytes of an End message:
// prelude (total length, header length), prelude CRC, headers, message CRC.
func genEndMessage() {
	// Build the two event-stream headers first so their length is known.
	buf := new(bytes.Buffer)
	buf.WriteByte(13)
	buf.WriteString(":message-type")
	buf.WriteByte(7)
	buf.Write([]byte{0, 5})
	buf.WriteString("event")
	buf.WriteByte(11)
	buf.WriteString(":event-type")
	buf.WriteByte(7)
	buf.Write([]byte{0, 3})
	buf.WriteString("End")
	header := buf.Bytes()
	headerLength := len(header)
	payloadLength := 0
	totalLength := totalByteLength(headerLength, payloadLength)
	// The prelude CRC is computed over the first 8 bytes before the
	// header is appended; the final CRC covers the whole message so far.
	buf = new(bytes.Buffer)
	binary.Write(buf, binary.BigEndian, uint32(totalLength))
	binary.Write(buf, binary.BigEndian, uint32(headerLength))
	prelude := buf.Bytes()
	binary.Write(buf, binary.BigEndian, crc32.ChecksumIEEE(prelude))
	buf.Write(header)
	message := buf.Bytes()
	binary.Write(buf, binary.BigEndian, crc32.ChecksumIEEE(message))
	fmt.Println(buf.Bytes())
}

View File

@@ -1,563 +0,0 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3select
import (
"fmt"
"math"
"strconv"
"strings"
"github.com/minio/minio/pkg/s3select/format"
"github.com/tidwall/gjson"
"github.com/xwb1989/sqlparser"
)
// MaxExpressionLength is the maximum allowed size of a request's SQL
// expression: 256 KiB.
const MaxExpressionLength = 256 * 1024
// matchesMyWhereClause evaluates the WHERE clause against one JSON-encoded
// record and reports whether the record satisfies it. alias is the table
// alias used in the expression. Unrecognized clause shapes match by
// default (returns true, nil).
func matchesMyWhereClause(record []byte, alias string, whereClause sqlparser.Expr) (bool, error) {
	var conversionColumn string
	var operator string
	var operand gjson.Result
	// A constant-false clause (e.g. WHERE false) matches nothing.
	if fmt.Sprintf("%v", whereClause) == "false" {
		return false, nil
	}
	switch expr := whereClause.(type) {
	case *sqlparser.IsExpr:
		// "col IS [NOT] NULL/..." is handled by a dedicated evaluator.
		return evaluateIsExpr(expr, record, alias)
	case *sqlparser.RangeCond:
		// Only BETWEEN / NOT BETWEEN range conditions are supported.
		operator = expr.Operator
		if operator != "between" && operator != "not between" {
			return false, ErrUnsupportedSQLOperation
		}
		result, err := evaluateBetween(expr, alias, record)
		if err != nil {
			return false, err
		}
		if operator == "not between" {
			return !result, nil
		}
		return result, nil
	case *sqlparser.ComparisonExpr:
		operator = expr.Operator
		// Right-hand side: either a function result or a literal.
		switch right := expr.Right.(type) {
		case *sqlparser.FuncExpr:
			operand = gjson.Parse(evaluateFuncExpr(right, "", record))
		case *sqlparser.SQLVal:
			operand = gjson.ParseBytes(right.Val)
		}
		// Left-hand side: either a function result (myVal) or a
		// column reference looked up in the record.
		var myVal string
		switch left := expr.Left.(type) {
		case *sqlparser.FuncExpr:
			myVal = evaluateFuncExpr(left, "", record)
			conversionColumn = ""
		case *sqlparser.ColName:
			conversionColumn = left.Name.CompliantName()
		}
		if myVal != "" {
			return evaluateOperator(gjson.Parse(myVal), operator, operand)
		}
		return evaluateOperator(gjson.GetBytes(record, conversionColumn), operator, operand)
	case *sqlparser.AndExpr:
		// NOTE(review): only ComparisonExpr operands are recursed
		// into; other operand shapes leave their side as false.
		var leftVal bool
		var rightVal bool
		switch left := expr.Left.(type) {
		case *sqlparser.ComparisonExpr:
			temp, err := matchesMyWhereClause(record, alias, left)
			if err != nil {
				return false, err
			}
			leftVal = temp
		}
		switch right := expr.Right.(type) {
		case *sqlparser.ComparisonExpr:
			temp, err := matchesMyWhereClause(record, alias, right)
			if err != nil {
				return false, err
			}
			rightVal = temp
		}
		return (rightVal && leftVal), nil
	case *sqlparser.OrExpr:
		// OR: errors from either side are deliberately ignored; a
		// failing side simply evaluates to false.
		var leftVal bool
		var rightVal bool
		switch left := expr.Left.(type) {
		case *sqlparser.ComparisonExpr:
			leftVal, _ = matchesMyWhereClause(record, alias, left)
		}
		switch right := expr.Right.(type) {
		case *sqlparser.ComparisonExpr:
			rightVal, _ = matchesMyWhereClause(record, alias, right)
		}
		return (rightVal || leftVal), nil
	}
	return true, nil
}
// applyStrFunc applies the named SQL string function to rawArg and returns
// the result as a string. Unknown function names return the argument
// unchanged.
func applyStrFunc(rawArg gjson.Result, funcName string) string {
	switch strings.ToUpper(funcName) {
	case "TRIM":
		// The parser cannot express TRIM's optional arguments, so
		// only space-trimming is supported.
		return strings.Trim(rawArg.String(), " ")
	case "SUBSTRING":
		// TODO: parser has an issue which does not support substring.
		return rawArg.String()
	case "CHAR_LENGTH", "CHARACTER_LENGTH":
		// SQL CHAR_LENGTH counts characters; Go's len() on a string
		// counts bytes and over-counts multi-byte UTF-8 input, so
		// count runes instead.
		return strconv.Itoa(len([]rune(rawArg.String())))
	case "LOWER":
		return strings.ToLower(rawArg.String())
	case "UPPER":
		return strings.ToUpper(rawArg.String())
	}
	return rawArg.String()
}
// evaluateBetween evaluates a BETWEEN clause: it resolves the FROM and TO
// bounds (literal or function result), resolves the left-hand side (column
// reference or function result), then delegates to evalBetweenGreater or
// evalBetweenLess depending on which bound is larger.
func evaluateBetween(betweenExpr *sqlparser.RangeCond, alias string, record []byte) (bool, error) {
	var colToVal gjson.Result
	var colFromVal gjson.Result
	var conversionColumn string
	// NOTE(review): funcName is declared but never assigned here; it is
	// always passed through empty to the eval helpers.
	var funcName string
	// Resolve the TO bound.
	switch colTo := betweenExpr.To.(type) {
	case sqlparser.Expr:
		switch colToMyVal := colTo.(type) {
		case *sqlparser.FuncExpr:
			colToVal = gjson.Parse(stringOps(colToMyVal, record, ""))
		case *sqlparser.SQLVal:
			colToVal = gjson.ParseBytes(colToMyVal.Val)
		}
	}
	// Resolve the FROM bound.
	switch colFrom := betweenExpr.From.(type) {
	case sqlparser.Expr:
		switch colFromMyVal := colFrom.(type) {
		case *sqlparser.FuncExpr:
			colFromVal = gjson.Parse(stringOps(colFromMyVal, record, ""))
		case *sqlparser.SQLVal:
			colFromVal = gjson.ParseBytes(colFromMyVal.Val)
		}
	}
	// Resolve the value being tested: a function result or a column.
	var myFuncVal string
	switch left := betweenExpr.Left.(type) {
	case *sqlparser.FuncExpr:
		myFuncVal = evaluateFuncExpr(left, "", record)
		conversionColumn = ""
	case *sqlparser.ColName:
		conversionColumn = cleanCol(left.Name.CompliantName(), alias)
	}
	// Pick the comparison direction based on which bound is larger.
	toGreater, err := evaluateOperator(colToVal, ">", colFromVal)
	if err != nil {
		return false, err
	}
	if toGreater {
		return evalBetweenGreater(conversionColumn, record, funcName, colFromVal, colToVal, myFuncVal)
	}
	return evalBetweenLess(conversionColumn, record, funcName, colFromVal, colToVal, myFuncVal)
}
// evalBetween checks "FROM op value AND value op TO" using the supplied
// comparison operator (">=" for an ascending range, "<=" for a descending
// one). The value under test is resolved in priority order: a numeric
// column index (looked up as "_<index>" in the record), a precomputed
// function result (myColVal), or a named column in the record.
func evalBetween(conversionColumn string, record []byte, funcName string, colFromVal gjson.Result, colToVal gjson.Result, myColVal string, operator string) (bool, error) {
	if format.IsInt(conversionColumn) {
		// Numeric column index: CSV rows are stored under "_<n>" keys.
		myVal, err := evaluateOperator(gjson.GetBytes(record, "_"+conversionColumn), operator, colFromVal)
		if err != nil {
			return false, err
		}
		var myOtherVal bool
		myOtherVal, err = evaluateOperator(colToVal, operator, gjson.GetBytes(record, "_"+conversionColumn))
		if err != nil {
			return false, err
		}
		return (myVal && myOtherVal), nil
	}
	if myColVal != "" {
		// The left-hand side was a function expression, already
		// evaluated to a string.
		myVal, err := evaluateOperator(gjson.Parse(myColVal), operator, colFromVal)
		if err != nil {
			return false, err
		}
		var myOtherVal bool
		myOtherVal, err = evaluateOperator(colToVal, operator, gjson.Parse(myColVal))
		if err != nil {
			return false, err
		}
		return (myVal && myOtherVal), nil
	}
	// Named column: look the value up directly in the record.
	myVal, err := evaluateOperator(gjson.GetBytes(record, conversionColumn), operator, colFromVal)
	if err != nil {
		return false, err
	}
	var myOtherVal bool
	myOtherVal, err = evaluateOperator(colToVal, operator, gjson.GetBytes(record, conversionColumn))
	if err != nil {
		return false, err
	}
	return (myVal && myOtherVal), nil
}
// evalBetweenGreater evaluates BETWEEN when TO > FROM, i.e. checks
// FROM <= value and value <= TO.
func evalBetweenGreater(conversionColumn string, record []byte, funcName string, colFromVal gjson.Result, colToVal gjson.Result, myColVal string) (bool, error) {
	return evalBetween(conversionColumn, record, funcName, colFromVal, colToVal, myColVal, ">=")
}

// evalBetweenLess evaluates BETWEEN when FROM > TO, i.e. checks
// FROM >= value and value >= TO.
func evalBetweenLess(conversionColumn string, record []byte, funcName string, colFromVal gjson.Result, colToVal gjson.Result, myColVal string) (bool, error) {
	return evalBetween(conversionColumn, record, funcName, colFromVal, colToVal, myColVal, "<=")
}
// evaluateOperator is the lowest level of the evaluation state machine: it
// applies a comparison operator to a record value and an operand and
// returns the boolean outcome. A missing record value never matches.
// Numeric comparisons use integer arithmetic only when both sides parse as
// integers; mixed int/float operands match only under "!=".
func evaluateOperator(myTblVal gjson.Result, operator string, operand gjson.Result) (bool, error) {
	if err := checkValidOperator(operator); err != nil {
		return false, err
	}
	// A value absent from the record satisfies no comparison.
	if !myTblVal.Exists() {
		return false, nil
	}
	switch {
	case operand.Type == gjson.String || operand.Type == gjson.Null:
		return stringEval(myTblVal.String(), operator, operand.String())
	case operand.Type == gjson.Number:
		opInt := format.IsInt(operand.Raw)
		// The record value may be a quoted number; strip quotes
		// before testing whether it is an integer.
		tblValInt := format.IsInt(strings.Trim(myTblVal.Raw, "\""))
		if opInt && tblValInt {
			return intEval(int64(myTblVal.Float()), operator, operand.Int())
		}
		if !opInt && !tblValInt {
			return floatEval(myTblVal.Float(), operator, operand.Float())
		}
		// One side integer, the other float: only "!=" holds.
		switch operator {
		case "!=":
			return true, nil
		}
		return false, nil
	case myTblVal.Type != operand.Type:
		// Mismatched non-numeric types never match.
		return false, nil
	default:
		return false, ErrUnsupportedSyntax
	}
}
// checkValidOperator rejects comparison operators the evaluator does not
// support.
func checkValidOperator(operator string) error {
	switch operator {
	case ">", "<", "=", "<=", ">=", "!=", "like":
		return nil
	}
	return ErrParseUnknownOperator
}
// stringEval applies a comparison operator to two strings; "like" performs
// SQL LIKE pattern matching via likeConvert.
func stringEval(myRecordVal string, operator string, myOperand string) (bool, error) {
	switch operator {
	case "=":
		return myRecordVal == myOperand, nil
	case "!=":
		return myRecordVal != myOperand, nil
	case "<":
		return myRecordVal < myOperand, nil
	case "<=":
		return myRecordVal <= myOperand, nil
	case ">":
		return myRecordVal > myOperand, nil
	case ">=":
		return myRecordVal >= myOperand, nil
	case "like":
		return likeConvert(myOperand, myRecordVal)
	}
	return false, ErrUnsupportedSyntax
}
// intEval applies a comparison operator to two int64 values.
func intEval(myRecordVal int64, operator string, myOperand int64) (bool, error) {
	switch operator {
	case "=":
		return myRecordVal == myOperand, nil
	case "!=":
		return myRecordVal != myOperand, nil
	case "<":
		return myRecordVal < myOperand, nil
	case "<=":
		return myRecordVal <= myOperand, nil
	case ">":
		return myRecordVal > myOperand, nil
	case ">=":
		return myRecordVal >= myOperand, nil
	}
	return false, ErrUnsupportedSyntax
}
// floatEval applies a comparison operator to two float64 values.
func floatEval(myRecordVal float64, operator string, myOperand float64) (bool, error) {
	switch operator {
	case "=":
		return myRecordVal == myOperand, nil
	case "!=":
		return myRecordVal != myOperand, nil
	case "<":
		return myRecordVal < myOperand, nil
	case "<=":
		return myRecordVal <= myOperand, nil
	case ">":
		return myRecordVal > myOperand, nil
	case ">=":
		return myRecordVal >= myOperand, nil
	}
	return false, ErrUnsupportedSyntax
}
// prefixMatch reports whether record matches a prefix-only LIKE pattern
// such as "ab%". The trailing '%' of pattern is skipped; '_' matches any
// single byte. A record shorter than the literal prefix cannot match (the
// original indexed past the end of record and panicked in that case).
func prefixMatch(pattern string, record string) bool {
	if len(record) < len(pattern)-1 {
		return false
	}
	for i := 0; i < len(pattern)-1; i++ {
		if pattern[i] != record[i] && pattern[i] != byte('_') {
			return false
		}
	}
	return true
}
// suffixMatch allows for matching a suffix only like query e.g %an
func suffixMatch(pattern string, record string) bool {
for i := len(pattern) - 1; i > 0; i-- {
if pattern[i] != record[len(record)-(len(pattern)-i)] && pattern[i] != byte('_') {
return false
}
}
return true
}
// likeConvert evaluates a case-sensitive SQL LIKE pattern against record.
// '%' (and '*') match any run of bytes, '_' matches a single byte; all
// other bytes match literally. Patterns with a single '%' at the start or
// end are dispatched to the fast suffix/prefix matchers.
func likeConvert(pattern string, record string) (bool, error) {
	// An empty pattern or record never matches.
	if pattern == "" || record == "" {
		return false, nil
	}
	// Fast path for suffix-only queries, e.g. %an.
	if len(pattern) >= 2 && pattern[0] == byte('%') && strings.Count(pattern, "%") == 1 {
		return suffixMatch(pattern, record), nil
	}
	// Fast path for prefix-only queries, e.g. an%.
	if len(pattern) >= 2 && pattern[len(pattern)-1] == byte('%') && strings.Count(pattern, "%") == 1 {
		return prefixMatch(pattern, record), nil
	}
	charCount := 0 // literal/underscore bytes consumed from the pattern
	currPos := 0   // current scan position in record
	// Walk the pattern, advancing through the record as bytes match.
	for i := 0; i < len(pattern); i++ {
		if pattern[i] == byte('_') {
			// '_' consumes exactly one record byte, whatever it is.
			charCount++
			// Peek one pattern byte ahead: if it is a literal it
			// must agree with the next record byte.
			if i != len(pattern)-1 {
				if pattern[i+1] != byte('%') && pattern[i+1] != byte('_') {
					if currPos != len(record)-1 && pattern[i+1] != record[currPos+1] {
						return false, nil
					}
				}
			}
			// More consumed bytes than the record holds: no match.
			if charCount > len(record) {
				return false, nil
			}
			// Pattern fully evaluated: match.
			if len(pattern) == i+1 {
				return true, nil
			}
			i++
			currPos++
		}
		if pattern[i] == byte('%') || pattern[i] == byte('*') {
			// A wildcard at the very end matches the remainder,
			// unless the record is already exhausted.
			if currPos == len(record) {
				return false, nil
			}
			if i+1 == len(pattern) {
				return true, nil
			}
		} else {
			charCount++
			matched := false
			// Scan forward in the record for the next byte that
			// matches this pattern byte.
			for currPos < len(record) {
				if record[currPos] == pattern[i] || pattern[i] == byte('_') {
					matched = true
					break
				}
				currPos++
			}
			currPos++
			// No matching byte remained in the record.
			if !matched {
				return false, nil
			}
		}
	}
	// The pattern demanded more bytes than the record holds.
	if charCount > len(record) {
		return false, nil
	}
	// Trailing unmatched record bytes mean no match.
	if currPos < len(record) {
		return false, nil
	}
	return true, nil
}
// cleanCol strips the table-alias prefix that the SQL parser attaches to a
// column name, returning the original column name. A bare "_name" form is
// first normalized to "alias_name" so both spellings clean up identically.
func cleanCol(myCol string, alias string) string {
	if myCol == "" {
		return myCol
	}
	if !strings.HasPrefix(myCol, alias) && myCol[0] == '_' {
		myCol = alias + myCol
	}
	if strings.Contains(myCol, ".") {
		// Replace all occurrences; the original passed len(myCol) as
		// the count, which is just a roundabout "replace all".
		myCol = strings.Replace(myCol, alias+"._", "", -1)
	}
	return strings.Replace(myCol, alias+"_", "", -1)
}
// whereClauseNameErrs returns an error when the WHERE clause references a
// column that does not exist in the input, recursing through AND/OR and
// validating function arguments via evaluateFuncErr.
func whereClauseNameErrs(whereClause interface{}, alias string, f format.Select) error {
	var conversionColumn string
	switch expr := whereClause.(type) {
	// Clause of the form "col_name IS ...".
	case *sqlparser.IsExpr:
		switch myCol := expr.Expr.(type) {
		case *sqlparser.FuncExpr:
			if err := evaluateFuncErr(myCol, f); err != nil {
				return err
			}
		case *sqlparser.ColName:
			conversionColumn = cleanCol(myCol.Name.CompliantName(), alias)
		}
	// BETWEEN clause: only the left-hand side is validated.
	case *sqlparser.RangeCond:
		switch left := expr.Left.(type) {
		case *sqlparser.FuncExpr:
			if err := evaluateFuncErr(left, f); err != nil {
				return err
			}
		case *sqlparser.ColName:
			conversionColumn = cleanCol(left.Name.CompliantName(), alias)
		}
	// Simple comparison: only the left-hand side is validated.
	case *sqlparser.ComparisonExpr:
		switch left := expr.Left.(type) {
		case *sqlparser.FuncExpr:
			if err := evaluateFuncErr(left, f); err != nil {
				return err
			}
		case *sqlparser.ColName:
			conversionColumn = cleanCol(left.Name.CompliantName(), alias)
		}
	case *sqlparser.AndExpr:
		// NOTE(review): when the left operand is a ComparisonExpr
		// this returns immediately, so the right operand is only
		// validated when the left is some other expression type.
		switch left := expr.Left.(type) {
		case *sqlparser.ComparisonExpr:
			return whereClauseNameErrs(left, alias, f)
		}
		switch right := expr.Right.(type) {
		case *sqlparser.ComparisonExpr:
			return whereClauseNameErrs(right, alias, f)
		}
	case *sqlparser.OrExpr:
		// NOTE(review): same left-side-first short-circuit as AndExpr.
		switch left := expr.Left.(type) {
		case *sqlparser.ComparisonExpr:
			return whereClauseNameErrs(left, alias, f)
		}
		switch right := expr.Right.(type) {
		case *sqlparser.ComparisonExpr:
			return whereClauseNameErrs(right, alias, f)
		}
	}
	if conversionColumn != "" {
		return f.ColNameErrs([]string{conversionColumn})
	}
	return nil
}
// aggFuncToStr renders aggregate results as a single string joined with
// the output field delimiter, printing whole numbers without a fractional
// part and other values with six decimal places.
func aggFuncToStr(aggVals []float64, f format.Select) string {
	vals := make([]string, len(aggVals))
	for i, v := range aggVals {
		if v == math.Trunc(v) {
			// Whole numbers are displayed as integers.
			vals[i] = strconv.FormatInt(int64(v), 10)
		} else {
			vals[i] = strconv.FormatFloat(v, 'f', 6, 64)
		}
	}
	return strings.Join(vals, f.OutputFieldDelimiter())
}
// checkForDuplicates normalizes column names in place (spaces become
// underscores) and records each name's index in columnsMap, failing with
// ErrAmbiguousFieldName when a normalized name appears more than once.
func checkForDuplicates(columns []string, columnsMap map[string]int) error {
	for i, column := range columns {
		// Replace every space; the original passed len(column) as the
		// replacement count, which is just a roundabout "replace all".
		columns[i] = strings.Replace(column, " ", "_", -1)
		if _, exist := columnsMap[columns[i]]; exist {
			return ErrAmbiguousFieldName
		}
		columnsMap[columns[i]] = i
	}
	return nil
}
// parseErrs validates everything in the parsed request that can reference
// a column: the select list, the WHERE clause, and the arguments of any
// functions (such as NULLIF) collected in myFuncs.
func parseErrs(columnNames []string, whereClause interface{}, alias string, myFuncs SelectFuncs, f format.Select) error {
	// Strip alias prefixes from the column names in place.
	processColumnNames(columnNames, alias, f)
	// "SELECT *" needs no column validation.
	if columnNames[0] != "*" {
		if err := f.ColNameErrs(columnNames); err != nil {
			return err
		}
	}
	// Validate column references inside the WHERE clause.
	if whereClause != nil {
		tempClause := whereClause
		if err := whereClauseNameErrs(tempClause, alias, f); err != nil {
			return err
		}
	}
	// Validate the arguments of each collected function expression.
	for i := 0; i < len(myFuncs.funcExpr); i++ {
		if myFuncs.funcExpr[i] == nil {
			continue
		}
		if err := evaluateFuncErr(myFuncs.funcExpr[i], f); err != nil {
			return err
		}
	}
	return nil
}

View File

@@ -1,224 +0,0 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3select
import (
"bytes"
"compress/bzip2"
"io"
"net/http"
"strings"
"time"
humanize "github.com/dustin/go-humanize"
"github.com/klauspost/pgzip"
"github.com/minio/minio/pkg/s3select/format"
"github.com/minio/minio/pkg/s3select/format/csv"
"github.com/minio/minio/pkg/s3select/format/json"
)
const (
	// progressTime is the time interval for which a progress message is sent.
	progressTime time.Duration = 60 * time.Second
	// continuationTime is the time interval for which a continuation message is
	// sent.
	continuationTime time.Duration = 5 * time.Second
)

// Row is a struct for keeping track of key aspects of a row.
type Row struct {
	record string // encoded output record, ready to stream to the client
	err    error  // non-nil when the parser failed while producing this row
}
// cleanExpr rewrites every double-quote character to a backquote so the
// select parser sees quoted identifiers in backquoted form.
func cleanExpr(expr string) string {
	return strings.Map(func(r rune) rune {
		if r == '"' {
			return '`'
		}
		return r
	}, expr)
}
// New - initialize new select format.
//
// The reader is first wrapped for decompression when the request asks for
// GZIP or BZIP2 input. Depending on which input serialization is present
// (CSV or JSON) the matching option struct is populated and the
// corresponding format implementation is constructed. Note the ordering
// of the output-serialization checks: for CSV input the JSON output
// settings are applied after the CSV output settings, so JSON output wins
// if both are present (and vice versa for JSON input).
func New(reader io.Reader, size int64, req ObjectSelectRequest) (s3s format.Select, err error) {
	switch req.InputSerialization.CompressionType {
	case SelectCompressionGZIP:
		// pgzip failing to read a gzip header means the input is corrupt
		// or truncated.
		if reader, err = pgzip.NewReader(reader); err != nil {
			return nil, format.ErrTruncatedInput
		}
	case SelectCompressionBZIP:
		reader = bzip2.NewReader(reader)
	}
	// Initializing options for CSV input.
	if req.InputSerialization.CSV != nil {
		// Default missing header info and record delimiter.
		if req.InputSerialization.CSV.FileHeaderInfo == "" {
			req.InputSerialization.CSV.FileHeaderInfo = CSVFileHeaderInfoNone
		}
		if req.InputSerialization.CSV.RecordDelimiter == "" {
			req.InputSerialization.CSV.RecordDelimiter = "\n"
		}
		options := &csv.Options{
			Name:            "S3Object", // Default table name for all objects
			HasHeader:       req.InputSerialization.CSV.FileHeaderInfo == CSVFileHeaderInfoUse,
			RecordDelimiter: req.InputSerialization.CSV.RecordDelimiter,
			FieldDelimiter:  req.InputSerialization.CSV.FieldDelimiter,
			Comments:        req.InputSerialization.CSV.Comments,
			ReadFrom:        reader,
			Compressed:      string(req.InputSerialization.CompressionType),
			Expression:      cleanExpr(req.Expression),
			StreamSize:      size,
			HeaderOpt:       req.InputSerialization.CSV.FileHeaderInfo == CSVFileHeaderInfoUse,
			Progress:        req.RequestProgress.Enabled,
		}
		if req.OutputSerialization.CSV != nil {
			// Default the output field delimiter to a comma.
			if req.OutputSerialization.CSV.FieldDelimiter == "" {
				req.OutputSerialization.CSV.FieldDelimiter = ","
			}
			options.OutputFieldDelimiter = req.OutputSerialization.CSV.FieldDelimiter
			options.OutputRecordDelimiter = req.OutputSerialization.CSV.RecordDelimiter
			options.OutputType = format.CSV
		}
		if req.OutputSerialization.JSON != nil {
			options.OutputRecordDelimiter = req.OutputSerialization.JSON.RecordDelimiter
			options.OutputType = format.JSON
		}
		// Initialize CSV input type.
		s3s, err = csv.New(options)
	} else if req.InputSerialization.JSON != nil {
		options := &json.Options{
			Name:         "S3Object", // Default table name for all objects
			ReadFrom:     reader,
			Compressed:   string(req.InputSerialization.CompressionType),
			Expression:   cleanExpr(req.Expression),
			StreamSize:   size,
			DocumentType: req.InputSerialization.JSON.Type == JSONTypeDocument,
			Progress:     req.RequestProgress.Enabled,
		}
		if req.OutputSerialization.JSON != nil {
			options.OutputRecordDelimiter = req.OutputSerialization.JSON.RecordDelimiter
			options.OutputType = format.JSON
		}
		if req.OutputSerialization.CSV != nil {
			options.OutputFieldDelimiter = req.OutputSerialization.CSV.FieldDelimiter
			options.OutputRecordDelimiter = req.OutputSerialization.CSV.RecordDelimiter
			options.OutputType = format.CSV
		}
		// Initialize JSON input type.
		s3s, err = json.New(options)
	}
	return s3s, err
}
// Execute is the function where all the blocking occurs. It writes to the
// HTTP response writer in a streaming fashion so that the client can
// actively use the results before the query has finished executing.
func Execute(writer io.Writer, f format.Select) error {
	rowCh := make(chan Row)
	// Reusable scratch buffer for encoding protocol messages; pre-grown to
	// 1 MiB and reset after every write.
	curBuf := bytes.NewBuffer(make([]byte, humanize.MiByte))
	curBuf.Reset()
	progressTicker := time.NewTicker(progressTime)
	continuationTimer := time.NewTimer(continuationTime)
	defer progressTicker.Stop()
	defer continuationTimer.Stop()
	go runSelectParser(f, rowCh)
	for {
		select {
		case row, ok := <-rowCh:
			if ok && row.err != nil {
				// Parser reported an error: stream an error message and stop.
				_, err := writeErrorMessage(row.err, curBuf).WriteTo(writer)
				flusher, okFlush := writer.(http.Flusher)
				if okFlush {
					flusher.Flush()
				}
				if err != nil {
					return err
				}
				curBuf.Reset()
				close(rowCh)
				return nil
			} else if ok {
				// Stream one record message to the client.
				_, err := writeRecordMessage(row.record, curBuf).WriteTo(writer)
				flusher, okFlush := writer.(http.Flusher)
				if okFlush {
					flusher.Flush()
				}
				if err != nil {
					return err
				}
				curBuf.Reset()
				f.UpdateBytesReturned(int64(len(row.record)))
				// A record was just sent; push the next continuation
				// message out by a full interval (drain the timer if it
				// already fired).
				if !continuationTimer.Stop() {
					<-continuationTimer.C
				}
				continuationTimer.Reset(continuationTime)
			} else if !ok {
				// Channel closed: the query finished. Send Stat then End.
				statPayload, err := f.CreateStatXML()
				if err != nil {
					return err
				}
				_, err = writeStatMessage(statPayload, curBuf).WriteTo(writer)
				flusher, ok := writer.(http.Flusher)
				if ok {
					flusher.Flush()
				}
				if err != nil {
					return err
				}
				curBuf.Reset()
				_, err = writeEndMessage(curBuf).WriteTo(writer)
				flusher, ok = writer.(http.Flusher)
				if ok {
					flusher.Flush()
				}
				if err != nil {
					return err
				}
				return nil
			}
		case <-progressTicker.C:
			// Send progress messages only if requested by client.
			if f.Progress() {
				progressPayload, err := f.CreateProgressXML()
				if err != nil {
					return err
				}
				_, err = writeProgressMessage(progressPayload, curBuf).WriteTo(writer)
				flusher, ok := writer.(http.Flusher)
				if ok {
					flusher.Flush()
				}
				if err != nil {
					return err
				}
				curBuf.Reset()
			}
		case <-continuationTimer.C:
			// Keep the TCP connection alive while no records are flowing.
			_, err := writeContinuationMessage(curBuf).WriteTo(writer)
			flusher, ok := writer.(http.Flusher)
			if ok {
				flusher.Flush()
			}
			if err != nil {
				return err
			}
			curBuf.Reset()
			continuationTimer.Reset(continuationTime)
		}
	}
}

95
pkg/s3select/json/args.go Normal file
View File

@@ -0,0 +1,95 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package json
import (
"encoding/xml"
"fmt"
"strings"
)
const (
	// document denotes whole-document JSON input (Type: DOCUMENT).
	document = "document"
	// lines denotes newline-delimited JSON input (Type: LINES).
	lines = "lines"

	// defaultRecordDelimiter separates output records when the request
	// does not specify one.
	defaultRecordDelimiter = "\n"
)
// ReaderArgs - represents elements inside <InputSerialization><JSON/> in request XML.
type ReaderArgs struct {
	ContentType string `xml:"Type"` // "document" or "lines" (lower-cased on unmarshal)
	unmarshaled bool   // set once UnmarshalXML succeeds; distinguishes absent args
}

// IsEmpty - returns whether reader args is empty or not.
func (args *ReaderArgs) IsEmpty() bool {
	return !args.unmarshaled
}
// UnmarshalXML - decodes XML data.
// The ContentType is lower-cased and must be either "document" or
// "lines"; anything else yields errInvalidJSONType.
func (args *ReaderArgs) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	// Decode into a derived type so this method is not invoked recursively.
	type subReaderArgs ReaderArgs
	var decoded subReaderArgs
	if err := d.DecodeElement(&decoded, &start); err != nil {
		return err
	}

	decoded.ContentType = strings.ToLower(decoded.ContentType)
	if decoded.ContentType != document && decoded.ContentType != lines {
		return errInvalidJSONType(fmt.Errorf("invalid ContentType '%v'", decoded.ContentType))
	}

	*args = ReaderArgs(decoded)
	args.unmarshaled = true
	return nil
}
// WriterArgs - represents elements inside <OutputSerialization><JSON/> in request XML.
type WriterArgs struct {
	RecordDelimiter string `xml:"RecordDelimiter"` // 1-2 characters; defaults to "\n"
	unmarshaled     bool   // set once UnmarshalXML succeeds; distinguishes absent args
}

// IsEmpty - returns whether writer args is empty or not.
func (args *WriterArgs) IsEmpty() bool {
	return !args.unmarshaled
}
// UnmarshalXML - decodes XML data.
// An absent RecordDelimiter defaults to "\n"; delimiters longer than two
// characters are rejected.
func (args *WriterArgs) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	// Decode into a derived type so this method is not invoked recursively.
	type subWriterArgs WriterArgs
	var decoded subWriterArgs
	if err := d.DecodeElement(&decoded, &start); err != nil {
		return err
	}

	if len(decoded.RecordDelimiter) == 0 {
		decoded.RecordDelimiter = defaultRecordDelimiter
	} else if len(decoded.RecordDelimiter) > 2 {
		return fmt.Errorf("invalid RecordDelimiter '%v'", decoded.RecordDelimiter)
	}

	*args = WriterArgs(decoded)
	args.unmarshaled = true
	return nil
}

View File

@@ -0,0 +1,62 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package json
// s3Error - an S3-style coded error for the json package; carries the
// wire-level code, message and HTTP status plus the underlying cause.
type s3Error struct {
	code       string // S3 error code, e.g. "JSONParsingError"
	message    string // human-readable message returned to the client
	statusCode int    // HTTP status code for the error response
	cause      error  // underlying error, kept for diagnostics
}
// Cause - returns the underlying error.
func (err *s3Error) Cause() error {
	return err.cause
}

// ErrorCode - returns the S3 error code.
func (err *s3Error) ErrorCode() string {
	return err.code
}

// ErrorMessage - returns the client-facing message.
func (err *s3Error) ErrorMessage() string {
	return err.message
}

// HTTPStatusCode - returns the HTTP status to respond with.
func (err *s3Error) HTTPStatusCode() int {
	return err.statusCode
}

// Error - implements the error interface; same text as ErrorMessage.
func (err *s3Error) Error() string {
	return err.message
}
// errInvalidJSONType - the request specified a JSON "Type" other than
// DOCUMENT or LINES; maps to HTTP 400.
func errInvalidJSONType(err error) *s3Error {
	return &s3Error{
		code:       "InvalidJsonType",
		message:    "The JsonType is invalid. Only DOCUMENT and LINES are supported.",
		statusCode: 400,
		cause:      err,
	}
}

// errJSONParsingError - the input object could not be parsed as JSON;
// maps to HTTP 400.
func errJSONParsingError(err error) *s3Error {
	return &s3Error{
		code:       "JSONParsingError",
		message:    "Encountered an error parsing the JSON file. Check the file and try again.",
		statusCode: 400,
		cause:      err,
	}
}

217
pkg/s3select/json/reader.go Normal file
View File

@@ -0,0 +1,217 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package json
import (
	"bytes"
	"errors"
	"io"
	"io/ioutil"
	"strconv"

	"github.com/minio/minio/pkg/s3select/sql"
	"github.com/tidwall/gjson"
	"github.com/tidwall/sjson"
)
// toSingleLineJSON flattens a (possibly pretty-printed) JSON value into
// single-line JSON by walking the gjson parse tree and re-setting every
// leaf value on `input` under its dotted path (objects) or numeric index
// path (arrays) via sjson.
//
// currentKey is the dotted path accumulated so far; it is "" at the root.
// The walk stops at the first sjson error, which is returned.
func toSingleLineJSON(input string, currentKey string, result gjson.Result) (output string, err error) {
	switch {
	case result.IsObject():
		result.ForEach(func(key, value gjson.Result) bool {
			jsonKey := key.String()
			if currentKey != "" {
				jsonKey = currentKey + "." + key.String()
			}
			output, err = toSingleLineJSON(input, jsonKey, value)
			input = output
			return err == nil // stop iterating on the first error
		})
	case result.IsArray():
		i := 0
		result.ForEach(func(key, value gjson.Result) bool {
			// A bare top-level array is not expected here; the root value
			// must be an object.
			if currentKey == "" {
				panic("currentKey is empty")
			}

			indexKey := currentKey + "." + strconv.Itoa(i)
			output, err = toSingleLineJSON(input, indexKey, value)
			input = output
			i++
			return err == nil
		})
	default:
		// Leaf value: write it at the accumulated path.
		output, err = sjson.Set(input, currentKey, result.Value())
	}

	return output, err
}
// objectReader splits a stream of concatenated JSON objects into
// per-object reads: Read() returns io.EOF at the end of each top-level
// {...} object, and Reset() re-arms the reader for the next object.
type objectReader struct {
	reader      io.Reader
	err         error  // sticky read error (normalized to io.EOF at end of stream)
	p           []byte // leftover bytes read past the current object's closing brace
	start       int    // next unread offset into p
	end         int    // number of valid bytes in p
	escaped     bool   // previous byte was an unescaped backslash
	quoteOpened bool   // scanner is currently inside a double-quoted string
	curlyCount  uint64 // depth of currently unclosed '{'
	endOfObject bool   // current object has been fully delivered
}
// objectEndIndex scans p[:length] and returns the index just past the '}'
// that closes the outermost JSON object, or -1 if the object does not end
// within this chunk. Escape/quote/brace state is kept on the receiver so
// an object may span multiple calls.
func (or *objectReader) objectEndIndex(p []byte, length int) int {
	for i := 0; i < length; i++ {
		if p[i] == '\\' {
			// Toggle so a doubled backslash ("\\") cancels itself out.
			or.escaped = !or.escaped
			continue
		}

		if p[i] == '"' && !or.escaped {
			// Unescaped quote: enter or leave a string literal.
			or.quoteOpened = !or.quoteOpened
		}
		or.escaped = false

		switch p[i] {
		case '{':
			// Braces inside string literals are ignored.
			if !or.quoteOpened {
				or.curlyCount++
			}
		case '}':
			if or.quoteOpened || or.curlyCount == 0 {
				break
			}

			if or.curlyCount--; or.curlyCount == 0 {
				// Outermost object closed; report the index past it.
				return i + 1
			}
		}
	}

	return -1
}
// Read implements io.Reader but delivers data only up to the end of the
// current top-level JSON object. Bytes read past the object boundary are
// stashed in or.p and served after the next Reset().
func (or *objectReader) Read(p []byte) (n int, err error) {
	if or.endOfObject {
		// Current object fully delivered; caller must Reset() first.
		return 0, io.EOF
	}

	if or.p != nil {
		// Serve previously stashed leftover bytes before touching the
		// underlying reader again.
		n = copy(p, or.p[or.start:or.end])
		or.start += n
		if or.start == or.end {
			// made full copy.
			or.p = nil
			or.start = 0
			or.end = 0
		}
	} else {
		if or.err != nil {
			return 0, or.err
		}

		n, err = or.reader.Read(p)
		or.err = err
		switch err {
		case nil:
		case io.EOF, io.ErrUnexpectedEOF, io.ErrClosedPipe:
			// Normalize all end-of-stream conditions to a sticky io.EOF.
			or.err = io.EOF
		default:
			return 0, err
		}
	}

	// Locate the end of the current object within this chunk, if any.
	index := or.objectEndIndex(p, n)
	if index == -1 || index == n {
		return n, nil
	}

	// Object ended mid-chunk: deliver only index bytes and stash the tail.
	or.endOfObject = true
	if or.p == nil {
		or.p = p
		or.start = index
		or.end = n
	} else {
		// NOTE(review): only `index` of the `n` copied bytes were
		// delivered, so the stash cursor arguably needs to rewind by
		// n-index rather than index; verify with input where several
		// objects fit inside one stashed chunk.
		or.start -= index
	}

	return index, nil
}
// Reset re-arms the reader so the next object in the stream can be read.
// It returns the sticky stream error when nothing is left to serve.
func (or *objectReader) Reset() error {
	or.endOfObject = false
	if or.p == nil {
		// No buffered leftover; surface any sticky read error (io.EOF
		// once the source is exhausted).
		return or.err
	}
	return nil
}
// Reader - JSON record reader for S3Select.
type Reader struct {
	args         *ReaderArgs
	objectReader *objectReader // splits the stream into one JSON object per read cycle
	readCloser   io.ReadCloser // underlying stream; closed by Close()
}
// Read - reads single record.
//
// It consumes the next top-level JSON object from the stream, validates
// it, flattens pretty formatted (multi-line) documents into single-line
// JSON, and returns it as a Record. io.EOF is returned at end of stream.
func (r *Reader) Read() (sql.Record, error) {
	if err := r.objectReader.Reset(); err != nil {
		return nil, err
	}

	data, err := ioutil.ReadAll(r.objectReader)
	if err != nil {
		return nil, errJSONParsingError(err)
	}

	data = bytes.TrimSpace(data)
	if len(data) == 0 {
		return nil, io.EOF
	}

	if !gjson.ValidBytes(data) {
		// BUG FIX: the previous code wrapped `err` here, which is always
		// nil on this path; report the actual validation failure instead.
		return nil, errJSONParsingError(errors.New("invalid JSON object"))
	}

	// A pretty formatted document spans multiple lines; rewrite it as
	// single-line JSON before building the record.
	if bytes.Count(data, []byte("\n")) > 0 {
		var s string
		if s, err = toSingleLineJSON("", "", gjson.ParseBytes(data)); err != nil {
			return nil, errJSONParsingError(err)
		}
		data = []byte(s)
	}

	return &Record{
		data: data,
	}, nil
}
// Close - closes the underlying reader; no further reads are possible.
func (r *Reader) Close() error {
	return r.readCloser.Close()
}
// NewReader - creates new JSON reader using readCloser.
func NewReader(readCloser io.ReadCloser, args *ReaderArgs) *Reader {
	r := Reader{
		args:       args,
		readCloser: readCloser,
	}
	// Wrap the stream so each record read stops at an object boundary.
	r.objectReader = &objectReader{reader: readCloser}
	return &r
}

107
pkg/s3select/json/record.go Normal file
View File

@@ -0,0 +1,107 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package json
import (
"bytes"
"encoding/csv"
"fmt"
"strings"
"github.com/minio/minio/pkg/s3select/sql"
"github.com/tidwall/gjson"
"github.com/tidwall/sjson"
)
// Record - is JSON record.
type Record struct {
	data []byte // the record as single-line JSON bytes
}
// Get - gets the value for a column name.
//
// Only scalar results are handled: booleans, numbers (returned as float)
// and strings. Anything else — including nested objects/arrays — falls
// through to the error return. NOTE(review): a missing key presumably
// also lands on the error path, since its result type matches none of the
// cases; confirm against gjson's zero Result.
func (r *Record) Get(name string) (*sql.Value, error) {
	result := gjson.GetBytes(r.data, name)
	switch result.Type {
	case gjson.False:
		return sql.NewBool(false), nil
	case gjson.Number:
		return sql.NewFloat(result.Float()), nil
	case gjson.String:
		return sql.NewString(result.String()), nil
	case gjson.True:
		return sql.NewBool(true), nil
	}

	return nil, fmt.Errorf("unsupported gjson value %v; %v", result, result.Type)
}
// Set - sets the value for a column name.
// Supports boolean, int, float and string sql values; any other type is
// rejected with an error.
func (r *Record) Set(name string, value *sql.Value) (err error) {
	// '*' has a special meaning to sjson paths; encode it first.
	name = strings.Replace(name, "*", "__ALL__", -1)

	var v interface{}
	t := value.Type()
	if t == sql.Bool {
		v = value.BoolValue()
	} else if t == sql.Int {
		v = value.IntValue()
	} else if t == sql.Float {
		v = value.FloatValue()
	} else if t == sql.String {
		v = value.StringValue()
	} else {
		return fmt.Errorf("unsupported sql value %v and type %v", value, t)
	}

	r.data, err = sjson.SetBytes(r.data, name, v)
	return err
}
// MarshalCSV - encodes to CSV data.
// Top-level values of the JSON record become one CSV row, joined by
// fieldDelimiter; the trailing newline added by csv.Writer is removed.
func (r *Record) MarshalCSV(fieldDelimiter rune) ([]byte, error) {
	var fields []string
	gjson.ParseBytes(r.data).ForEach(func(_, value gjson.Result) bool {
		fields = append(fields, value.String())
		return true
	})

	var buf bytes.Buffer
	w := csv.NewWriter(&buf)
	w.Comma = fieldDelimiter
	if err := w.Write(fields); err != nil {
		return nil, err
	}
	w.Flush()
	if err := w.Error(); err != nil {
		return nil, err
	}

	// Drop the record delimiter appended by csv.Writer.
	data := buf.Bytes()
	return data[:len(data)-1], nil
}
// MarshalJSON - encodes to JSON data.
// The record is stored as JSON already, so its bytes are returned as-is
// (callers must not mutate the returned slice).
func (r *Record) MarshalJSON() ([]byte, error) {
	return r.data, nil
}
// NewRecord - creates new empty JSON record.
func NewRecord() *Record {
	return &Record{data: []byte(`{}`)}
}

384
pkg/s3select/message.go Normal file
View File

@@ -0,0 +1,384 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3select
import (
"bytes"
"encoding/binary"
"fmt"
"hash/crc32"
"net/http"
"strconv"
"sync/atomic"
"time"
)
// A message is in the format specified in
// https://docs.aws.amazon.com/AmazonS3/latest/API/images/s3select-frame-diagram-frame-overview.png
// hence the calculation is made accordingly.
func totalByteLength(headerLength, payloadLength int) int {
	// 4-byte total length + 4-byte header length + 4-byte prelude CRC
	// + headers + payload + 4-byte message CRC.
	const frameOverhead = 4 + 4 + 4 + 4
	return frameOverhead + headerLength + payloadLength
}
// genMessage frames a message per the AWS event-stream wire format:
// total length, header length, prelude CRC, headers, payload, message CRC
// (lengths and CRCs are big-endian uint32).
func genMessage(header, payload []byte) []byte {
	total := 4 + 4 + 4 + len(header) + len(payload) + 4

	buf := new(bytes.Buffer)
	buf.Grow(total)
	binary.Write(buf, binary.BigEndian, uint32(total))
	binary.Write(buf, binary.BigEndian, uint32(len(header)))
	// CRC over the 8-byte prelude written so far.
	binary.Write(buf, binary.BigEndian, crc32.ChecksumIEEE(buf.Bytes()))
	buf.Write(header)
	buf.Write(payload)
	// CRC over everything written so far (prelude, its CRC, headers, payload).
	binary.Write(buf, binary.BigEndian, crc32.ChecksumIEEE(buf.Bytes()))
	return buf.Bytes()
}
// Refer genRecordsHeader().
// Static header block shared by all Records messages: message-type=event,
// content-type=application/octet-stream, event-type=Records.
var recordsHeader = []byte{
	13, ':', 'm', 'e', 's', 's', 'a', 'g', 'e', '-', 't', 'y', 'p', 'e', 7, 0, 5, 'e', 'v', 'e', 'n', 't',
	13, ':', 'c', 'o', 'n', 't', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 7, 0, 24, 'a', 'p', 'p', 'l', 'i', 'c', 'a', 't', 'i', 'o', 'n', '/', 'o', 'c', 't', 'e', 't', '-', 's', 't', 'r', 'e', 'a', 'm',
	11, ':', 'e', 'v', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 7, 0, 7, 'R', 'e', 'c', 'o', 'r', 'd', 's',
}

// newRecordsMessage - creates new Records Message which can contain a single record, partial records,
// or multiple records. Depending on the size of the result, a response can contain one or more of these messages.
//
// Header specification
// Records messages contain three headers, as follows:
// https://docs.aws.amazon.com/AmazonS3/latest/API/images/s3select-frame-diagram-record.png
//
// Payload specification
// Records message payloads can contain a single record, partial records, or multiple records.
func newRecordsMessage(payload []byte) []byte {
	return genMessage(recordsHeader, payload)
}
// continuationMessage - S3 periodically sends this message to keep the TCP connection open.
// These messages appear in responses at random. The client must detect the message type and process accordingly.
//
// Header specification:
// Continuation messages contain two headers, as follows:
// https://docs.aws.amazon.com/AmazonS3/latest/API/images/s3select-frame-diagram-cont.png
//
// Payload specification:
// Continuation messages have no payload.
//
// The whole frame (prelude, CRCs included) is precomputed since it never
// changes.
var continuationMessage = []byte{
	0, 0, 0, 57, // total byte-length.
	0, 0, 0, 41, // headers byte-length.
	139, 161, 157, 242, // prelude crc.
	13, ':', 'm', 'e', 's', 's', 'a', 'g', 'e', '-', 't', 'y', 'p', 'e', 7, 0, 5, 'e', 'v', 'e', 'n', 't', // headers.
	11, ':', 'e', 'v', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 7, 0, 4, 'C', 'o', 'n', 't', // headers.
	156, 134, 74, 13, // message crc.
}

// Refer genProgressHeader().
// Static header block shared by all Progress messages: message-type=event,
// content-type=text/xml, event-type=Progress.
var progressHeader = []byte{
	13, ':', 'm', 'e', 's', 's', 'a', 'g', 'e', '-', 't', 'y', 'p', 'e', 7, 0, 5, 'e', 'v', 'e', 'n', 't',
	13, ':', 'c', 'o', 'n', 't', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 7, 0, 8, 't', 'e', 'x', 't', '/', 'x', 'm', 'l',
	11, ':', 'e', 'v', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 7, 0, 8, 'P', 'r', 'o', 'g', 'r', 'e', 's', 's',
}
// newProgressMessage - creates new Progress Message. S3 periodically sends this message, if requested.
// It contains information about the progress of a query that has started but has not yet completed.
//
// Header specification:
// Progress messages contain three headers, as follows:
// https://docs.aws.amazon.com/AmazonS3/latest/API/images/s3select-frame-diagram-progress.png
//
// Payload specification:
// Progress message payload is an XML document containing information about the progress of a request.
// * BytesScanned => Number of bytes that have been processed before being uncompressed (if the file is compressed).
// * BytesProcessed => Number of bytes that have been processed after being uncompressed (if the file is compressed).
// * BytesReturned => Current number of bytes of records payload data returned by S3.
//
// For uncompressed files, BytesScanned and BytesProcessed are equal.
//
// Example:
//
// <?xml version="1.0" encoding="UTF-8"?>
// <Progress>
//     <BytesScanned>512</BytesScanned>
//     <BytesProcessed>1024</BytesProcessed>
//     <BytesReturned>1024</BytesReturned>
// </Progress>
//
func newProgressMessage(bytesScanned, bytesProcessed, bytesReturned int64) []byte {
	// BUG FIX: the payload previously closed with </Stats> instead of
	// </Progress>, producing malformed XML for Progress events.
	payload := []byte(`<?xml version="1.0" encoding="UTF-8"?><Progress><BytesScanned>` +
		strconv.FormatInt(bytesScanned, 10) + `</BytesScanned><BytesProcessed>` +
		strconv.FormatInt(bytesProcessed, 10) + `</BytesProcessed><BytesReturned>` +
		strconv.FormatInt(bytesReturned, 10) + `</BytesReturned></Progress>`)
	return genMessage(progressHeader, payload)
}
// Refer genStatsHeader().
// Static header block shared by all Stats messages: message-type=event,
// content-type=text/xml, event-type=Stats.
var statsHeader = []byte{
	13, ':', 'm', 'e', 's', 's', 'a', 'g', 'e', '-', 't', 'y', 'p', 'e', 7, 0, 5, 'e', 'v', 'e', 'n', 't',
	13, ':', 'c', 'o', 'n', 't', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 7, 0, 8, 't', 'e', 'x', 't', '/', 'x', 'm', 'l',
	11, ':', 'e', 'v', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 7, 0, 5, 'S', 't', 'a', 't', 's',
}
// newStatsMessage - creates new Stats Message. S3 sends this message at the end of the request.
// It contains statistics about the query.
//
// Header specification:
// Stats messages contain three headers, as follows:
// https://docs.aws.amazon.com/AmazonS3/latest/API/images/s3select-frame-diagram-stats.png
//
// Payload specification:
// Stats message payload is an XML document containing information about a request's stats when processing is complete.
// * BytesScanned => Number of bytes that have been processed before being uncompressed (if the file is compressed).
// * BytesProcessed => Number of bytes that have been processed after being uncompressed (if the file is compressed).
// * BytesReturned => Total number of bytes of records payload data returned by S3.
//
// For uncompressed files, BytesScanned and BytesProcessed are equal.
//
// Example:
//
// <?xml version="1.0" encoding="UTF-8"?>
// <Stats>
// <BytesScanned>512</BytesScanned>
// <BytesProcessed>1024</BytesProcessed>
// <BytesReturned>1024</BytesReturned>
// </Stats>
func newStatsMessage(bytesScanned, bytesProcessed, bytesReturned int64) []byte {
payload := []byte(`<?xml version="1.0" encoding="UTF-8"?><Stats><BytesScanned>` +
strconv.FormatInt(bytesScanned, 10) + `</BytesScanned><BytesProcessed>` +
strconv.FormatInt(bytesProcessed, 10) + `</BytesProcessed><BytesReturned>` +
strconv.FormatInt(bytesReturned, 10) + `</BytesReturned></Stats>`)
return genMessage(statsHeader, payload)
}
// endMessage - indicates that the request is complete, and no more messages will be sent.
// You should not assume that the request is complete until the client receives an End message.
//
// Header specification:
// End messages contain two headers, as follows:
// https://docs.aws.amazon.com/AmazonS3/latest/API/images/s3select-frame-diagram-end.png
//
// Payload specification:
// End messages have no payload.
//
// The whole frame (prelude, CRCs included) is precomputed since it never
// changes.
var endMessage = []byte{
	0, 0, 0, 56, // total byte-length.
	0, 0, 0, 40, // headers byte-length.
	193, 198, 132, 212, // prelude crc.
	13, ':', 'm', 'e', 's', 's', 'a', 'g', 'e', '-', 't', 'y', 'p', 'e', 7, 0, 5, 'e', 'v', 'e', 'n', 't', // headers.
	11, ':', 'e', 'v', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 7, 0, 3, 'E', 'n', 'd', // headers.
	207, 151, 211, 146, // message crc.
}
// newErrorMessage - creates new Request Level Error Message. S3 sends this message if the request failed for any reason.
// It contains the error code and error message for the failure. If S3 sends a RequestLevelError message,
// it doesn't send an End message.
//
// Header specification:
// Request-level error messages contain three headers, as follows:
// https://docs.aws.amazon.com/AmazonS3/latest/API/images/s3select-frame-diagram-error.png
//
// Payload specification:
// Request-level error messages have no payload.
func newErrorMessage(errorCode, errorMessage []byte) []byte {
	// Build the three error headers: message-type=error, then the
	// variable-length :error-message and :error-code string headers.
	header := new(bytes.Buffer)
	header.Write([]byte{13, ':', 'm', 'e', 's', 's', 'a', 'g', 'e', '-', 't', 'y', 'p', 'e', 7, 0, 5, 'e', 'r', 'r', 'o', 'r'})

	header.Write([]byte{14, ':', 'e', 'r', 'r', 'o', 'r', '-', 'm', 'e', 's', 's', 'a', 'g', 'e', 7})
	binary.Write(header, binary.BigEndian, uint16(len(errorMessage)))
	header.Write(errorMessage)

	header.Write([]byte{11, ':', 'e', 'r', 'r', 'o', 'r', '-', 'c', 'o', 'd', 'e', 7})
	binary.Write(header, binary.BigEndian, uint16(len(errorCode)))
	header.Write(errorCode)

	return genMessage(header.Bytes(), nil)
}
// NewErrorMessage - creates new Request Level Error Message specified in
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html.
// It is the exported, string-typed convenience wrapper over newErrorMessage.
func NewErrorMessage(errorCode, errorMessage string) []byte {
	return newErrorMessage([]byte(errorCode), []byte(errorMessage))
}
// messageWriter streams S3 Select event-stream messages to the HTTP
// client from a single writer goroutine (launched by start()).
type messageWriter struct {
	writer          http.ResponseWriter
	getProgressFunc func() (int64, int64) // optional; returns (bytesScanned, bytesProcessed)
	bytesReturned   int64                 // total record payload bytes sent; accessed atomically
	dataCh          chan []byte           // framed messages queued for the writer goroutine
	doneCh          chan struct{}         // closed by close() to stop the writer goroutine
	closeCh         chan struct{}         // closed by the writer goroutine when its loop exits
	stopped         uint32                // set to 1 (atomically) once the writer loop stopped
	closed          uint32                // close() guard; ensures shutdown runs exactly once
}
// write sends one framed message to the client and flushes immediately so
// the event stream is delivered without buffering delay. It returns false
// when the write fails (client gone). Note the unchecked type assertion:
// this panics if the ResponseWriter does not implement http.Flusher.
func (writer *messageWriter) write(data []byte) bool {
	if _, err := writer.writer.Write(data); err != nil {
		return false
	}

	writer.writer.(http.Flusher).Flush()
	return true
}
// start launches the goroutine that owns all writes to the response: it
// forwards queued messages from dataCh, emits a keep-alive continuation
// every second, and — when a progress callback is configured — a Progress
// message every minute.
//
// quitFlag records why the loop ended: 1 = a write failed or dataCh was
// closed; 2 = doneCh was closed by close(), in which case any messages
// still queued on dataCh are drained and written (without flushing).
func (writer *messageWriter) start() {
	keepAliveTicker := time.NewTicker(1 * time.Second)
	var progressTicker *time.Ticker
	if writer.getProgressFunc != nil {
		progressTicker = time.NewTicker(1 * time.Minute)
	}

	go func() {
		quitFlag := 0
		for quitFlag == 0 {
			if progressTicker == nil {
				// progressTicker is nil here, so its C field cannot be
				// referenced in a select case; hence the duplicated
				// select without the progress case.
				select {
				case data, ok := <-writer.dataCh:
					if !ok {
						quitFlag = 1
						break
					}
					if !writer.write(data) {
						quitFlag = 1
					}
				case <-writer.doneCh:
					quitFlag = 2
				case <-keepAliveTicker.C:
					if !writer.write(continuationMessage) {
						quitFlag = 1
					}
				}
			} else {
				select {
				case data, ok := <-writer.dataCh:
					if !ok {
						quitFlag = 1
						break
					}
					if !writer.write(data) {
						quitFlag = 1
					}
				case <-writer.doneCh:
					quitFlag = 2
				case <-keepAliveTicker.C:
					if !writer.write(continuationMessage) {
						quitFlag = 1
					}
				case <-progressTicker.C:
					bytesScanned, bytesProcessed := writer.getProgressFunc()
					bytesReturned := atomic.LoadInt64(&writer.bytesReturned)
					if !writer.write(newProgressMessage(bytesScanned, bytesProcessed, bytesReturned)) {
						quitFlag = 1
					}
				}
			}
		}

		atomic.StoreUint32(&writer.stopped, 1)
		// Signal close() that the main loop is done.
		close(writer.closeCh)

		keepAliveTicker.Stop()
		if progressTicker != nil {
			progressTicker.Stop()
		}

		if quitFlag == 2 {
			// Graceful shutdown: drain any messages still queued.
			// NOTE(review): this range only terminates once dataCh is
			// closed — verify that close() actually closes dataCh.
			for data := range writer.dataCh {
				if _, err := writer.writer.Write(data); err != nil {
					break
				}
			}
		}
	}()
}
// close shuts the writer down exactly once: it signals the writer
// goroutine via doneCh, waits for that goroutine to acknowledge (it
// closes closeCh when its main loop exits), then closes dataCh so the
// goroutine's drain loop can terminate.
//
// BUG FIX: the previous body was `for range writer.closeCh {
// close(writer.dataCh) }` — closeCh is only ever closed, never sent on,
// so the loop body never ran, dataCh was never closed, and the drain
// loop in start() leaked a goroutine blocked forever on dataCh.
//
// NOTE: closing dataCh assumes no send() is in flight concurrently; the
// Send* methods are called sequentially from a single goroutine.
func (writer *messageWriter) close() {
	if atomic.SwapUint32(&writer.closed, 1) == 0 {
		close(writer.doneCh)
		<-writer.closeCh
		close(writer.dataCh)
	}
}
// send queues one framed message for the writer goroutine. On any
// failure (writer already stopped, or shutdown in progress) it tears the
// writer down via close() and returns an error.
func (writer *messageWriter) send(data []byte) error {
	if atomic.LoadUint32(&writer.stopped) == 1 {
		writer.close()
		return fmt.Errorf("writer already closed")
	}

	select {
	case writer.dataCh <- data:
	case <-writer.doneCh:
		writer.close()
		return fmt.Errorf("closed writer")
	}
	return nil
}
// SendRecords frames payload as a Records message and queues it; on
// success the returned-bytes counter is bumped by the payload size.
func (writer *messageWriter) SendRecords(payload []byte) error {
	if err := writer.send(newRecordsMessage(payload)); err != nil {
		return err
	}
	atomic.AddInt64(&writer.bytesReturned, int64(len(payload)))
	return nil
}
// SendStats emits the final Stats message followed by the End message,
// then shuts the writer down.
func (writer *messageWriter) SendStats(bytesScanned, bytesProcessed int64) error {
	returned := atomic.LoadInt64(&writer.bytesReturned)
	if err := writer.send(newStatsMessage(bytesScanned, bytesProcessed, returned)); err != nil {
		return err
	}

	err := writer.send(endMessage)
	writer.close()
	return err
}
// SendError emits a request-level error message and, on success, shuts
// the writer down (no End message follows an error).
func (writer *messageWriter) SendError(errorCode, errorMessage string) error {
	if err := writer.send(newErrorMessage([]byte(errorCode), []byte(errorMessage))); err != nil {
		return err
	}
	writer.close()
	return nil
}
// newMessageWriter builds a messageWriter over w and immediately starts
// its writer goroutine. getProgressFunc may be nil to disable Progress
// messages.
func newMessageWriter(w http.ResponseWriter, getProgressFunc func() (bytesScanned, bytesProcessed int64)) *messageWriter {
	writer := messageWriter{
		writer:          w,
		getProgressFunc: getProgressFunc,
		dataCh:          make(chan []byte),
		doneCh:          make(chan struct{}),
		closeCh:         make(chan struct{}),
	}
	writer.start()
	return &writer
}

View File

@@ -1,460 +0,0 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// DO NOT EDIT THIS PACKAGE DIRECTLY: This follows the protocol defined by
// AmazonS3 found at
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
// Consult the Spec before making direct edits.
package s3select
import (
"bytes"
"encoding/binary"
"hash/crc32"
)
// Precomputed event-stream header blobs. Each is built exactly once by
// init() below and reused for every message of that event type.

// Record Headers
// -11 -event type - 7 - 7 "Records"
// -13 -content-type -7 -24 "application/octet-stream"
// -13 -message-type -7 5 "event"
// This is predefined from AMZ protocol found here:
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
var recordHeaders []byte

// End Headers
// -13 -message-type -7 -5 "event"
// -11 -:event-type -7 -3 "End"
// This is predefined from AMZ protocol found here:
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
var endHeaders []byte

// Continuation Headers
// -13 -message-type -7 -5 "event"
// -11 -:event-type -7 -4 "Cont"
// This is predefined from AMZ protocol found here:
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
var contHeaders []byte

// Stat Headers
// -11 -event type - 7 - 5 "Stat" -20
// -13 -content-type -7 -8 "text/xml" -25
// -13 -message-type -7 -5 "event" -22
// This is predefined from AMZ protocol found here:
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
var statHeaders []byte

// Progress Headers
// -11 -event type - 7 - 8 "Progress" -23
// -13 -content-type -7 -8 "text/xml" -25
// -13 -message-type -7 -5 "event" -22
// This is predefined from AMZ protocol found here:
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
var progressHeaders []byte

// The length of the nonvariable portion of the ErrHeaders
// The below are the specifications of the header for a "error" event
// -11 -error-code - 7 - DEFINED "DEFINED"
// -14 -error-message -7 -DEFINED "DEFINED"
// -13 -message-type -7 -5 "error"
// This is predefined from AMZ protocol found here:
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
var errHdrLen int
// init precomputes the static header blobs shared by every response
// message so they are built once rather than per message.
func init() {
	recordHeaders = writeRecordHeader()
	endHeaders = writeEndHeader()
	contHeaders = writeContHeader()
	statHeaders = writeStatHeader()
	progressHeaders = writeProgressHeader()
	// 55 is the byte length of the fixed (non-variable) portion of the
	// error-event headers; see the errHdrLen declaration above.
	errHdrLen = 55
}
// encodeHeaderStringValue encodes a header value as a big-endian 2-byte
// length prefix followed by the raw bytes of s.
func encodeHeaderStringValue(s string) []byte {
	encoded := make([]byte, 2, 2+len(s))
	binary.BigEndian.PutUint16(encoded, uint16(len(s)))
	return append(encoded, s...)
}
// encodeHeaderStringName encodes a header name as a single-byte length
// prefix followed by the raw bytes of s (names are always < 256 bytes).
func encodeHeaderStringName(s string) []byte {
	encoded := make([]byte, 0, 1+len(s))
	encoded = append(encoded, byte(len(s)))
	return append(encoded, s...)
}
// encodeNumber returns a lenBytes-long slice whose first byte is n and
// whose remaining bytes are zero. In this protocol it is called with
// lenBytes == 1 to emit the one-byte header value-type tag.
func encodeNumber(n byte, lenBytes int) []byte {
	encoded := make([]byte, lenBytes)
	encoded[0] = n
	return encoded
}
// writePayloadSize returns the 4-byte big-endian total-message-length
// field of the prelude. The constant 16 accounts for the fixed framing:
// 4 (total length) + 4 (header length) + 4 (prelude CRC) + 4 (message CRC).
func writePayloadSize(payloadSize int, headerLength int) []byte {
	out := make([]byte, 4)
	binary.BigEndian.PutUint32(out, uint32(payloadSize+headerLength+16))
	return out
}
// writeHeaderSize returns the 4-byte big-endian header-length field of
// the prelude.
func writeHeaderSize(headerLength int) []byte {
	out := make([]byte, 4)
	binary.BigEndian.PutUint32(out, uint32(headerLength))
	return out
}
// writeCRC returns the 4-byte big-endian IEEE CRC32 of buffer; used for
// both the prelude CRC and the trailing message CRC.
func writeCRC(buffer []byte) []byte {
	sum := make([]byte, 4)
	binary.BigEndian.PutUint32(sum, crc32.ChecksumIEEE(buffer))
	return sum
}
// writePayload returns the raw bytes of the payload string appended to a
// message. A plain string-to-[]byte conversion already yields a fresh,
// independent copy, so the previous intermediate make+copy was redundant.
func writePayload(myPayload string) []byte {
	return []byte(myPayload)
}
// writeRecordHeader builds the static headers for a "Records" event.
// This is predefined from AMZ protocol found here:
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
func writeRecordHeader() []byte {
	var buf bytes.Buffer
	// Each header is encoded as: name, type tag 7 (string), then value.
	for _, kv := range [][2]string{
		{":event-type", "Records"},
		{":content-type", "application/octet-stream"},
		{":message-type", "event"},
	} {
		buf.Write(encodeHeaderStringName(kv[0]))
		buf.Write(encodeNumber(7, 1))
		buf.Write(encodeHeaderStringValue(kv[1]))
	}
	return buf.Bytes()
}
// writeEndHeader builds the static headers for an "End" event.
// This is predefined from AMZ protocol found here:
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
func writeEndHeader() []byte {
	var buf bytes.Buffer
	// Each header is encoded as: name, type tag 7 (string), then value.
	for _, kv := range [][2]string{
		{":event-type", "End"},
		{":message-type", "event"},
	} {
		buf.Write(encodeHeaderStringName(kv[0]))
		buf.Write(encodeNumber(7, 1))
		buf.Write(encodeHeaderStringValue(kv[1]))
	}
	return buf.Bytes()
}
// writeContHeader builds the static headers for a "Cont" (continuation)
// event. This is predefined from AMZ protocol found here:
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
func writeContHeader() []byte {
	var buf bytes.Buffer
	// Each header is encoded as: name, type tag 7 (string), then value.
	for _, kv := range [][2]string{
		{":event-type", "Cont"},
		{":message-type", "event"},
	} {
		buf.Write(encodeHeaderStringName(kv[0]))
		buf.Write(encodeNumber(7, 1))
		buf.Write(encodeHeaderStringValue(kv[1]))
	}
	return buf.Bytes()
}
// writeStatHeader builds the static headers for a "Stats" event.
// This is predefined from AMZ protocol found here:
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
func writeStatHeader() []byte {
	var buf bytes.Buffer
	// Each header is encoded as: name, type tag 7 (string), then value.
	for _, kv := range [][2]string{
		{":event-type", "Stats"},
		{":content-type", "text/xml"},
		{":message-type", "event"},
	} {
		buf.Write(encodeHeaderStringName(kv[0]))
		buf.Write(encodeNumber(7, 1))
		buf.Write(encodeHeaderStringValue(kv[1]))
	}
	return buf.Bytes()
}
// writeProgressHeader builds the static headers for a "Progress" event.
// This is predefined from AMZ protocol found here:
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
func writeProgressHeader() []byte {
	var buf bytes.Buffer
	// Each header is encoded as: name, type tag 7 (string), then value.
	for _, kv := range [][2]string{
		{":event-type", "Progress"},
		{":content-type", "text/xml"},
		{":message-type", "event"},
	} {
		buf.Write(encodeHeaderStringName(kv[0]))
		buf.Write(encodeNumber(7, 1))
		buf.Write(encodeHeaderStringValue(kv[1]))
	}
	return buf.Bytes()
}
// writeRecordMessage frames a single record payload into the AWS
// event-stream binary format: prelude (total size, header size, prelude
// CRC), the precomputed "Records" headers, the payload, and a trailing
// CRC over the entire message.
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
func writeRecordMessage(payload string, currentMessage *bytes.Buffer) *bytes.Buffer {
	hdrLen := len(recordHeaders)
	currentMessage.Write(writePayloadSize(len(payload), hdrLen))
	currentMessage.Write(writeHeaderSize(hdrLen))
	// Prelude CRC over the buffer contents written so far.
	currentMessage.Write(writeCRC(currentMessage.Bytes()))
	currentMessage.Write(recordHeaders)
	// One record (row) per message.
	currentMessage.Write(writePayload(payload))
	// Message CRC over everything written above.
	currentMessage.Write(writeCRC(currentMessage.Bytes()))
	return currentMessage
}
// writeContinuationMessage frames a payload-less "Cont" event: prelude
// (total size, header size, prelude CRC), the precomputed continuation
// headers, and a trailing CRC over the whole message.
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
func writeContinuationMessage(currentMessage *bytes.Buffer) *bytes.Buffer {
	hdrLen := len(contHeaders)
	currentMessage.Write(writePayloadSize(0, hdrLen))
	currentMessage.Write(writeHeaderSize(hdrLen))
	// Prelude CRC over the buffer contents written so far.
	currentMessage.Write(writeCRC(currentMessage.Bytes()))
	currentMessage.Write(contHeaders)
	// Message CRC over everything written above.
	currentMessage.Write(writeCRC(currentMessage.Bytes()))
	return currentMessage
}
// writeEndMessage frames a payload-less "End" event: prelude (total
// size, header size, prelude CRC), the precomputed end headers, and a
// trailing CRC over the whole message.
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
func writeEndMessage(currentMessage *bytes.Buffer) *bytes.Buffer {
	hdrLen := len(endHeaders)
	currentMessage.Write(writePayloadSize(0, hdrLen))
	currentMessage.Write(writeHeaderSize(hdrLen))
	// Prelude CRC over the buffer contents written so far.
	currentMessage.Write(writeCRC(currentMessage.Bytes()))
	currentMessage.Write(endHeaders)
	// Message CRC over everything written above.
	currentMessage.Write(writeCRC(currentMessage.Bytes()))
	return currentMessage
}
// writeStatMessage frames a "Stats" event carrying an XML payload:
// prelude (total size, header size, prelude CRC), the precomputed stat
// headers, the payload, and a trailing CRC over the whole message.
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
func writeStatMessage(payload string, currentMessage *bytes.Buffer) *bytes.Buffer {
	hdrLen := len(statHeaders)
	currentMessage.Write(writePayloadSize(len(payload), hdrLen))
	currentMessage.Write(writeHeaderSize(hdrLen))
	// Prelude CRC over the buffer contents written so far.
	currentMessage.Write(writeCRC(currentMessage.Bytes()))
	currentMessage.Write(statHeaders)
	currentMessage.Write(writePayload(payload))
	// Message CRC over everything written above.
	currentMessage.Write(writeCRC(currentMessage.Bytes()))
	return currentMessage
}
// writeProgressMessage frames a "Progress" event carrying an XML
// payload: prelude (total size, header size, prelude CRC), the
// precomputed progress headers, the payload, and a trailing CRC.
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
func writeProgressMessage(payload string, currentMessage *bytes.Buffer) *bytes.Buffer {
	hdrLen := len(progressHeaders)
	currentMessage.Write(writePayloadSize(len(payload), hdrLen))
	currentMessage.Write(writeHeaderSize(hdrLen))
	// Prelude CRC over the buffer contents written so far.
	currentMessage.Write(writeCRC(currentMessage.Bytes()))
	currentMessage.Write(progressHeaders)
	currentMessage.Write(writePayload(payload))
	// Message CRC over everything written above.
	currentMessage.Write(writeCRC(currentMessage.Bytes()))
	return currentMessage
}
// writeErrorMessage frames an "error" event. Unlike the other events its
// headers are variable-length (the error code and message differ per
// error), so they are encoded inline rather than precomputed.
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
func writeErrorMessage(errorMessage error, currentMessage *bytes.Buffer) *bytes.Buffer {
	errorCode := errorCodeResponse[errorMessage]
	errorText := errorMessage.Error()
	// Total header length = fixed portion + both variable-length values.
	headerLen := errHdrLen + len(errorCode) + len(errorText)
	currentMessage.Write(writePayloadSize(0, headerLen))
	currentMessage.Write(writeHeaderSize(headerLen))
	// Prelude CRC over the buffer contents written so far.
	currentMessage.Write(writeCRC(currentMessage.Bytes()))
	// Each header is encoded as: name, type tag 7 (string), then value.
	for _, kv := range [][2]string{
		{":error-code", errorCode},
		{":error-message", errorText},
		{":message-type", "error"},
	} {
		currentMessage.Write(encodeHeaderStringName(kv[0]))
		currentMessage.Write(encodeNumber(7, 1))
		currentMessage.Write(encodeHeaderStringValue(kv[1]))
	}
	// Message CRC over everything written above.
	currentMessage.Write(writeCRC(currentMessage.Bytes()))
	return currentMessage
}

View File

@@ -0,0 +1,42 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package parquet
import "encoding/xml"
// ReaderArgs - represents elements inside <InputSerialization><Parquet/> in request XML.
type ReaderArgs struct {
	// unmarshaled records whether UnmarshalXML has successfully run;
	// IsEmpty reports its inverse.
	unmarshaled bool
}
// IsEmpty - returns whether reader args is empty, i.e. the <Parquet/>
// element was never unmarshaled from the request XML.
func (args *ReaderArgs) IsEmpty() bool {
	return !args.unmarshaled
}
// UnmarshalXML - decodes XML data and marks the args as present.
func (args *ReaderArgs) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	// Decode into an alias type so this method is not invoked recursively.
	type subReaderArgs ReaderArgs
	var parsed subReaderArgs
	if err := d.DecodeElement(&parsed, &start); err != nil {
		return err
	}
	args.unmarshaled = true
	return nil
}

View File

@@ -0,0 +1,53 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package parquet
// s3Error describes a failure in a form convertible to an S3 error
// response: an S3 error code, a human-readable message, the HTTP status
// to return, and the underlying cause (may be nil).
type s3Error struct {
	code       string
	message    string
	statusCode int
	cause      error
}
// Cause returns the underlying error, if any, that produced this s3Error.
func (err *s3Error) Cause() error {
	return err.cause
}

// ErrorCode returns the S3 error code string.
func (err *s3Error) ErrorCode() string {
	return err.code
}

// ErrorMessage returns the human-readable error description.
func (err *s3Error) ErrorMessage() string {
	return err.message
}

// HTTPStatusCode returns the HTTP status code to respond with.
func (err *s3Error) HTTPStatusCode() int {
	return err.statusCode
}

// Error implements the error interface; it returns the message text.
func (err *s3Error) Error() string {
	return err.message
}
// errParquetParsingError wraps err as an HTTP 400 "ParquetParsingError".
func errParquetParsingError(err error) *s3Error {
	e := s3Error{
		code:       "ParquetParsingError",
		message:    "Error parsing Parquet file. Please check the file and try again.",
		statusCode: 400,
		cause:      err,
	}
	return &e
}

View File

@@ -0,0 +1,93 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package parquet
import (
"io"
"github.com/minio/minio/pkg/s3select/json"
"github.com/minio/minio/pkg/s3select/sql"
parquetgo "github.com/minio/parquet-go"
parquetgen "github.com/minio/parquet-go/gen-go/parquet"
)
// Reader - Parquet record reader for S3Select.
type Reader struct {
	// args holds the request's <Parquet/> input serialization arguments.
	args *ReaderArgs
	// file is the open parquet-go file records are read from.
	file *parquetgo.File
}
// Read - reads single record.
//
// io.EOF is passed through untouched so callers can detect end of data;
// any other failure is wrapped as a parquet parsing error.
func (r *Reader) Read() (sql.Record, error) {
	parquetRecord, err := r.file.Read()
	if err != nil {
		if err == io.EOF {
			return nil, err
		}
		return nil, errParquetParsingError(err)
	}

	record := json.NewRecord()
	for name, field := range parquetRecord {
		// Map each parquet physical type onto the matching sql.Value.
		var value *sql.Value
		switch field.Type {
		case parquetgen.Type_BOOLEAN:
			value = sql.NewBool(field.Value.(bool))
		case parquetgen.Type_INT32:
			value = sql.NewInt(int64(field.Value.(int32)))
		case parquetgen.Type_INT64:
			value = sql.NewInt(field.Value.(int64))
		case parquetgen.Type_FLOAT:
			value = sql.NewFloat(float64(field.Value.(float32)))
		case parquetgen.Type_DOUBLE:
			value = sql.NewFloat(field.Value.(float64))
		case parquetgen.Type_INT96, parquetgen.Type_BYTE_ARRAY, parquetgen.Type_FIXED_LEN_BYTE_ARRAY:
			value = sql.NewString(string(field.Value.([]byte)))
		default:
			// Unknown physical type: surface as a parsing error.
			return nil, errParquetParsingError(nil)
		}

		if err = record.Set(name, value); err != nil {
			return nil, errParquetParsingError(err)
		}
	}

	return record, nil
}
// Close - closes underlaying readers.
func (r *Reader) Close() error {
	return r.file.Close()
}
// NewReader - creates new Parquet reader using readerFunc callback.
//
// io.EOF from parquetgo.Open is passed through untouched; any other
// failure is wrapped as a parquet parsing error.
func NewReader(getReaderFunc func(offset, length int64) (io.ReadCloser, error), args *ReaderArgs) (*Reader, error) {
	file, err := parquetgo.Open(getReaderFunc, nil)
	if err == nil {
		return &Reader{args: args, file: file}, nil
	}
	if err == io.EOF {
		return nil, err
	}
	return nil, errParquetParsingError(err)
}

90
pkg/s3select/progress.go Normal file
View File

@@ -0,0 +1,90 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3select
import (
"compress/bzip2"
"fmt"
"io"
"sync/atomic"
gzip "github.com/klauspost/pgzip"
)
// countUpReader wraps an io.Reader and atomically counts the total
// number of bytes read through it.
type countUpReader struct {
	reader io.Reader
	// bytesRead is accessed with sync/atomic; Read may race with BytesRead.
	bytesRead int64
}
// Read forwards to the wrapped reader and atomically adds the number of
// bytes returned to the running total.
func (r *countUpReader) Read(p []byte) (int, error) {
	count, readErr := r.reader.Read(p)
	atomic.AddInt64(&r.bytesRead, int64(count))
	return count, readErr
}
// BytesRead atomically reports the total bytes read so far.
func (r *countUpReader) BytesRead() int64 {
	return atomic.LoadInt64(&r.bytesRead)
}
// newCountUpReader wraps reader so the bytes flowing through it are counted.
func newCountUpReader(reader io.Reader) *countUpReader {
	return &countUpReader{reader: reader}
}
// progressReader layers two byte counters around an input stream: one on
// the raw (possibly compressed) source and one after decompression, so
// both "bytes scanned" and "bytes processed" can be reported.
type progressReader struct {
	// rc is the raw source stream; kept so Close can release it.
	rc io.ReadCloser
	// scannedReader counts raw (compressed) bytes read from rc.
	scannedReader *countUpReader
	// processedReader counts decompressed bytes handed to the consumer.
	processedReader *countUpReader
}
// Read reads from the processed (decompressed) stream; both counters are
// updated as a side effect of the nested countUpReaders.
func (pr *progressReader) Read(p []byte) (n int, err error) {
	return pr.processedReader.Read(p)
}

// Close closes the underlying source stream.
func (pr *progressReader) Close() error {
	return pr.rc.Close()
}

// Stats reports the raw bytes scanned from the source and the
// decompressed bytes processed so far.
func (pr *progressReader) Stats() (bytesScanned, bytesProcessed int64) {
	return pr.scannedReader.BytesRead(), pr.processedReader.BytesRead()
}
// newProgressReader wraps rc with byte counters on both sides of the
// decompression step selected by compType, so Stats can report bytes
// scanned (raw) and bytes processed (decompressed).
func newProgressReader(rc io.ReadCloser, compType CompressionType) (*progressReader, error) {
	scanned := newCountUpReader(rc)

	var decompressed io.Reader
	switch compType {
	case noneType:
		decompressed = scanned
	case gzipType:
		gz, err := gzip.NewReader(scanned)
		if err != nil {
			// A bad gzip header is reported as truncated input.
			return nil, errTruncatedInput(err)
		}
		decompressed = gz
	case bzip2Type:
		decompressed = bzip2.NewReader(scanned)
	default:
		return nil, errInvalidCompressionFormat(fmt.Errorf("unknown compression type '%v'", compType))
	}

	return &progressReader{
		rc:              rc,
		scannedReader:   scanned,
		processedReader: newCountUpReader(decompressed),
	}, nil
}

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,436 +17,377 @@
package s3select
import (
"math"
"sort"
"strconv"
"encoding/xml"
"fmt"
"io"
"net/http"
"strings"
"github.com/minio/minio/pkg/s3select/format"
"github.com/tidwall/gjson"
"github.com/xwb1989/sqlparser"
"github.com/minio/minio/pkg/s3select/csv"
"github.com/minio/minio/pkg/s3select/json"
"github.com/minio/minio/pkg/s3select/parquet"
"github.com/minio/minio/pkg/s3select/sql"
)
// SelectFuncs contains the relevant values from the parser for S3 Select
// Functions
type SelectFuncs struct {
funcExpr []*sqlparser.FuncExpr
index []int
// recordReader is the common contract implemented by the format-specific
// input readers: produce one sql.Record per call, and release resources
// via Close.
type recordReader interface {
	Read() (sql.Record, error)
	Close() error
}
// RunSqlParser allows us to easily bundle all the functions from above and run
// them in the appropriate order.
func runSelectParser(f format.Select, rowCh chan Row) {
reqCols, alias, limit, wc, aggFunctionNames, fns, err := ParseSelect(f)
// Internal names for the supported input/output serialization formats.
const (
	csvFormat     = "csv"
	jsonFormat    = "json"
	parquetFormat = "parquet"
)
// CompressionType - represents value inside <CompressionType/> in request XML.
type CompressionType string

// Supported input compression schemes; an absent/empty element defaults
// to noneType (see CompressionType.UnmarshalXML).
const (
	noneType  CompressionType = "none"
	gzipType  CompressionType = "gzip"
	bzip2Type CompressionType = "bzip2"
)
// UnmarshalXML - decodes XML data. The value is matched
// case-insensitively and an empty element defaults to no compression.
func (c *CompressionType) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	var raw string
	if err := d.DecodeElement(&raw, &start); err != nil {
		return errMalformedXML(err)
	}

	compType := noneType
	if raw != "" {
		compType = CompressionType(strings.ToLower(raw))
	}

	switch compType {
	case noneType, gzipType, bzip2Type:
		*c = compType
		return nil
	}
	return errInvalidCompressionFormat(fmt.Errorf("invalid compression format '%v'", raw))
}
// InputSerialization - represents elements inside <InputSerialization/> in request XML.
type InputSerialization struct {
	CompressionType CompressionType    `xml:"CompressionType"`
	CSVArgs         csv.ReaderArgs     `xml:"CSV"`
	JSONArgs        json.ReaderArgs    `xml:"JSON"`
	ParquetArgs     parquet.ReaderArgs `xml:"Parquet"`

	// unmarshaled is set once UnmarshalXML has validated the element.
	unmarshaled bool
	// format is one of csvFormat, jsonFormat or parquetFormat, chosen by
	// whichever child element was present in the request.
	format string
}
// IsEmpty - returns whether input serialization is empty, i.e. the
// element was never unmarshaled from the request XML.
func (input *InputSerialization) IsEmpty() bool {
	return !input.unmarshaled
}
// UnmarshalXML - decodes XML data.
//
// Exactly one of <CSV/>, <JSON/> or <Parquet/> must be present; Parquet
// additionally forbids a compression type.
func (input *InputSerialization) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	// Decode into an alias type so this method is not invoked recursively.
	type subInputSerialization InputSerialization
	var parsed subInputSerialization
	if err := d.DecodeElement(&parsed, &start); err != nil {
		return errMalformedXML(err)
	}

	found := 0
	if !parsed.CSVArgs.IsEmpty() {
		parsed.format = csvFormat
		found++
	}
	if !parsed.JSONArgs.IsEmpty() {
		parsed.format = jsonFormat
		found++
	}
	if !parsed.ParquetArgs.IsEmpty() {
		if parsed.CompressionType != noneType {
			return errInvalidRequestParameter(fmt.Errorf("CompressionType must be NONE for Parquet format"))
		}
		parsed.format = parquetFormat
		found++
	}
	if found != 1 {
		return errInvalidDataSource(nil)
	}

	*input = InputSerialization(parsed)
	input.unmarshaled = true
	return nil
}
// OutputSerialization - represents elements inside <OutputSerialization/> in request XML.
type OutputSerialization struct {
	CSVArgs  csv.WriterArgs  `xml:"CSV"`
	JSONArgs json.WriterArgs `xml:"JSON"`

	// unmarshaled is set once UnmarshalXML has validated the element.
	unmarshaled bool
	// format is csvFormat or jsonFormat, chosen by whichever child
	// element was present in the request.
	format string
}
// IsEmpty - returns whether output serialization is empty, i.e. the
// element was never unmarshaled from the request XML.
func (output *OutputSerialization) IsEmpty() bool {
	return !output.unmarshaled
}
// UnmarshalXML - decodes XML data.
//
// Exactly one of <CSV/> or <JSON/> must be present.
func (output *OutputSerialization) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	// Decode into an alias type so this method is not invoked recursively.
	type subOutputSerialization OutputSerialization
	var parsed subOutputSerialization
	if err := d.DecodeElement(&parsed, &start); err != nil {
		return errMalformedXML(err)
	}

	found := 0
	if !parsed.CSVArgs.IsEmpty() {
		parsed.format = csvFormat
		found++
	}
	if !parsed.JSONArgs.IsEmpty() {
		parsed.format = jsonFormat
		found++
	}
	if found != 1 {
		return errObjectSerializationConflict(fmt.Errorf("either CSV or JSON should be present in OutputSerialization"))
	}

	*output = OutputSerialization(parsed)
	output.unmarshaled = true
	return nil
}
// RequestProgress - represents elements inside <RequestProgress/> in request XML.
type RequestProgress struct {
	// Enabled is true when the client requested periodic Progress events.
	Enabled bool `xml:"Enabled"`
}
// S3Select - filters the contents on a simple structured query language (SQL) statement. It
// represents elements inside <SelectObjectContentRequest/> in request XML specified in detail at
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html.
type S3Select struct {
	XMLName        xml.Name            `xml:"SelectObjectContentRequest"`
	Expression     string              `xml:"Expression"`
	ExpressionType string              `xml:"ExpressionType"`
	Input          InputSerialization  `xml:"InputSerialization"`
	Output         OutputSerialization `xml:"OutputSerialization"`
	Progress       RequestProgress     `xml:"RequestProgress"`

	// statement is the parsed SQL SELECT, populated during UnmarshalXML.
	statement *sql.Select
	// progressReader tracks bytes scanned/processed for CSV and JSON
	// inputs; it stays nil for Parquet input (Open does not set it).
	progressReader *progressReader
	// recordReader is the format-specific input reader installed by Open.
	recordReader recordReader
}
// UnmarshalXML - decodes XML data.
func (s3Select *S3Select) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
// Make subtype to avoid recursive UnmarshalXML().
type subS3Select S3Select
parsedS3Select := subS3Select{}
if err := d.DecodeElement(&parsedS3Select, &start); err != nil {
if _, ok := err.(*s3Error); ok {
return err
}
return errMalformedXML(err)
}
parsedS3Select.ExpressionType = strings.ToLower(parsedS3Select.ExpressionType)
if parsedS3Select.ExpressionType != "sql" {
return errInvalidExpressionType(fmt.Errorf("invalid expression type '%v'", parsedS3Select.ExpressionType))
}
if parsedS3Select.Input.IsEmpty() {
return errMissingRequiredParameter(fmt.Errorf("InputSerialization must be provided"))
}
if parsedS3Select.Output.IsEmpty() {
return errMissingRequiredParameter(fmt.Errorf("OutputSerialization must be provided"))
}
statement, err := sql.NewSelect(parsedS3Select.Expression)
if err != nil {
rowCh <- Row{
err: err,
}
return
}
processSelectReq(reqCols, alias, wc, limit, aggFunctionNames, rowCh, fns, f)
}
// ParseSelect parses the SELECT expression, and effectively tokenizes it into
// its separate parts. It returns the requested column names,alias,limit of
// records, and the where clause.
func ParseSelect(f format.Select) ([]string, string, int64, sqlparser.Expr, []string, SelectFuncs, error) {
var sFuncs = SelectFuncs{}
var whereClause sqlparser.Expr
var alias string
var limit int64
stmt, err := sqlparser.Parse(f.Expression())
// TODO: Maybe can parse their errors a bit to return some more of the s3 errors
if err != nil {
return nil, "", 0, nil, nil, sFuncs, ErrLexerInvalidChar
return err
}
switch stmt := stmt.(type) {
case *sqlparser.Select:
// evaluates the where clause
fnNames := make([]string, len(stmt.SelectExprs))
columnNames := make([]string, len(stmt.SelectExprs))
parsedS3Select.statement = statement
if stmt.Where != nil {
whereClause = stmt.Where.Expr
}
for i, sexpr := range stmt.SelectExprs {
switch expr := sexpr.(type) {
case *sqlparser.StarExpr:
columnNames[0] = "*"
case *sqlparser.AliasedExpr:
switch smallerexpr := expr.Expr.(type) {
case *sqlparser.FuncExpr:
if smallerexpr.IsAggregate() {
fnNames[i] = smallerexpr.Name.CompliantName()
// Will return function name
// Case to deal with if we have functions and not an asterix
switch tempagg := smallerexpr.Exprs[0].(type) {
case *sqlparser.StarExpr:
columnNames[0] = "*"
if smallerexpr.Name.CompliantName() != "count" {
return nil, "", 0, nil, nil, sFuncs, ErrParseUnsupportedCallWithStar
}
case *sqlparser.AliasedExpr:
switch col := tempagg.Expr.(type) {
case *sqlparser.BinaryExpr:
return nil, "", 0, nil, nil, sFuncs, ErrParseNonUnaryAgregateFunctionCall
case *sqlparser.ColName:
columnNames[i] = col.Name.CompliantName()
}
}
// Case to deal with if COALESCE was used..
} else if supportedFunc(smallerexpr.Name.CompliantName()) {
if sFuncs.funcExpr == nil {
sFuncs.funcExpr = make([]*sqlparser.FuncExpr, len(stmt.SelectExprs))
sFuncs.index = make([]int, len(stmt.SelectExprs))
}
sFuncs.funcExpr[i] = smallerexpr
sFuncs.index[i] = i
} else {
return nil, "", 0, nil, nil, sFuncs, ErrUnsupportedSQLOperation
}
case *sqlparser.ColName:
columnNames[i] = smallerexpr.Name.CompliantName()
}
}
}
*s3Select = S3Select(parsedS3Select)
return nil
}
// This code retrieves the alias and makes sure it is set to the correct
// value, if not it sets it to the tablename
for _, fexpr := range stmt.From {
switch smallerexpr := fexpr.(type) {
case *sqlparser.JoinTableExpr:
return nil, "", 0, nil, nil, sFuncs, ErrParseMalformedJoin
case *sqlparser.AliasedTableExpr:
alias = smallerexpr.As.CompliantName()
if alias == "" {
alias = sqlparser.GetTableName(smallerexpr.Expr).CompliantName()
}
}
}
if stmt.Limit != nil {
switch expr := stmt.Limit.Rowcount.(type) {
case *sqlparser.SQLVal:
// The Value of how many rows we're going to limit by
parsedLimit, _ := strconv.Atoi(string(expr.Val[:]))
limit = int64(parsedLimit)
}
}
if stmt.GroupBy != nil {
return nil, "", 0, nil, nil, sFuncs, ErrParseUnsupportedLiteralsGroupBy
}
if stmt.OrderBy != nil {
return nil, "", 0, nil, nil, sFuncs, ErrParseUnsupportedToken
}
if err := parseErrs(columnNames, whereClause, alias, sFuncs, f); err != nil {
return nil, "", 0, nil, nil, sFuncs, err
}
return columnNames, alias, limit, whereClause, fnNames, sFuncs, nil
func (s3Select *S3Select) outputRecord() sql.Record {
switch s3Select.Output.format {
case csvFormat:
return csv.NewRecord()
case jsonFormat:
return json.NewRecord()
}
return nil, "", 0, nil, nil, sFuncs, nil
panic(fmt.Errorf("unknown output format '%v'", s3Select.Output.format))
}
type columnKv struct {
Key string
Value int
// getProgress reports the bytes scanned and processed so far, or
// (-1, -1) when no progress tracking is available (e.g. Parquet input,
// or before Open has been called).
func (s3Select *S3Select) getProgress() (bytesScanned, bytesProcessed int64) {
	if s3Select.progressReader != nil {
		return s3Select.progressReader.Stats()
	}
	return -1, -1
}
func columnsIndex(reqColNames []string, f format.Select) ([]columnKv, error) {
var (
columnsKv []columnKv
columnsMap = make(map[string]int)
columns = f.Header()
)
if f.HasHeader() {
err := checkForDuplicates(columns, columnsMap)
if format.IsInt(reqColNames[0]) {
err = ErrMissingHeaders
// Open - opens S3 object by using callback for SQL selection query.
// Currently CSV, JSON and Apache Parquet formats are supported.
func (s3Select *S3Select) Open(getReader func(offset, length int64) (io.ReadCloser, error)) error {
	switch s3Select.Input.format {
	case csvFormat:
		// CSV is consumed as one sequential stream (offset 0, full length),
		// wrapped with progress counters and optional decompression.
		rc, err := getReader(0, -1)
		if err != nil {
			return err
		}
		s3Select.progressReader, err = newProgressReader(rc, s3Select.Input.CompressionType)
		if err != nil {
			return err
		}
		s3Select.recordReader, err = csv.NewReader(s3Select.progressReader, &s3Select.Input.CSVArgs)
		if err != nil {
			return err
		}
		return nil
	case jsonFormat:
		// JSON is likewise read as one sequential (possibly compressed) stream.
		rc, err := getReader(0, -1)
		if err != nil {
			return err
		}
		s3Select.progressReader, err = newProgressReader(rc, s3Select.Input.CompressionType)
		if err != nil {
			return err
		}
		s3Select.recordReader = json.NewReader(s3Select.progressReader, &s3Select.Input.JSONArgs)
		return nil
	case parquetFormat:
		// Parquet needs random access, so the reader callback itself is
		// handed down; no progressReader is installed for this format.
		var err error
		s3Select.recordReader, err = parquet.NewReader(getReader, &s3Select.Input.ParquetArgs)
		return err
	}

	panic(fmt.Errorf("unknown input format '%v'", s3Select.Input.format))
}
func (s3Select *S3Select) marshal(record sql.Record) ([]byte, error) {
switch s3Select.Output.format {
case csvFormat:
data, err := record.MarshalCSV([]rune(s3Select.Output.CSVArgs.FieldDelimiter)[0])
if err != nil {
return nil, err
}
for k, v := range columnsMap {
columnsKv = append(columnsKv, columnKv{
Key: k,
Value: v,
})
}
} else {
for i := range columns {
columnsKv = append(columnsKv, columnKv{
Key: "_" + strconv.Itoa(i),
Value: i,
})
return append(data, []byte(s3Select.Output.CSVArgs.RecordDelimiter)...), nil
case jsonFormat:
data, err := record.MarshalJSON()
if err != nil {
return nil, err
}
return append(data, []byte(s3Select.Output.JSONArgs.RecordDelimiter)...), nil
}
sort.Slice(columnsKv, func(i, j int) bool {
return columnsKv[i].Value < columnsKv[j].Value
})
return columnsKv, nil
panic(fmt.Errorf("unknown output format '%v'", s3Select.Output.format))
}
// This is the main function, It goes row by row and for records which validate
// the where clause it currently prints the appropriate row given the requested
// columns.
func processSelectReq(reqColNames []string, alias string, wc sqlparser.Expr, lrecords int64, fnNames []string, rowCh chan Row, fn SelectFuncs, f format.Select) {
counter := -1
filtrCount := 0
functionFlag := false
// Values used to store our aggregation values.
aggVals := make([]float64, len(reqColNames))
if lrecords == 0 {
lrecords = math.MaxInt64
// Evaluate - filters and sends records read from opened reader as per select statement to http response writer.
func (s3Select *S3Select) Evaluate(w http.ResponseWriter) {
getProgressFunc := s3Select.getProgress
if !s3Select.Progress.Enabled {
getProgressFunc = nil
}
writer := newMessageWriter(w, getProgressFunc)
var results []string
var columnsKv []columnKv
if f.Type() == format.CSV {
var err error
columnsKv, err = columnsIndex(reqColNames, f)
if err != nil {
rowCh <- Row{
err: err,
}
return
var inputRecord sql.Record
var outputRecord sql.Record
var err error
var data []byte
sendRecord := func() bool {
if outputRecord == nil {
return true
}
results = make([]string, len(columnsKv))
if data, err = s3Select.marshal(outputRecord); err != nil {
return false
}
if err = writer.SendRecords(data); err != nil {
// FIXME: log this error.
err = nil
return false
}
return true
}
for {
record, err := f.Read()
if err != nil {
rowCh <- Row{
err: err,
if inputRecord, err = s3Select.recordReader.Read(); err != nil {
if err != io.EOF {
break
}
return
}
if record == nil {
if functionFlag {
rowCh <- Row{
record: aggFuncToStr(aggVals, f) + "\n",
if s3Select.statement.IsAggregated() {
outputRecord = s3Select.outputRecord()
if err = s3Select.statement.AggregateResult(outputRecord); err != nil {
break
}
if !sendRecord() {
break
}
}
close(rowCh)
return
}
// For JSON multi-line input type columns needs
// to be handled for each record.
if f.Type() == format.JSON {
columnsKv, err = columnsIndex(reqColNames, f)
if err != nil {
rowCh <- Row{
err: err,
}
return
if err = writer.SendStats(s3Select.getProgress()); err != nil {
// FIXME: log this error.
err = nil
}
results = make([]string, len(columnsKv))
break
}
f.UpdateBytesProcessed(int64(len(record)))
// Return in case the number of record reaches the LIMIT
// defined in select query
if int64(filtrCount) == lrecords {
close(rowCh)
return
outputRecord = s3Select.outputRecord()
if outputRecord, err = s3Select.statement.Eval(inputRecord, outputRecord); err != nil {
break
}
// The call to the where function clause, ensures that
// the rows we print match our where clause.
condition, err := matchesMyWhereClause(record, alias, wc)
if err != nil {
rowCh <- Row{
err: err,
if !s3Select.statement.IsAggregated() {
if !sendRecord() {
break
}
return
}
}
if condition {
// if its an asterix we just print everything in the row
if reqColNames[0] == "*" && fnNames[0] == "" {
switch f.OutputType() {
case format.CSV:
for i, kv := range columnsKv {
results[i] = gjson.GetBytes(record, kv.Key).String()
}
rowCh <- Row{
record: strings.Join(results, f.OutputFieldDelimiter()) + f.OutputRecordDelimiter(),
}
case format.JSON:
rowCh <- Row{
record: string(record) + f.OutputRecordDelimiter(),
}
}
} else if alias != "" {
// This is for dealing with the case of if we have to deal with a
// request for a column with an index e.g A_1.
if format.IsInt(reqColNames[0]) {
// This checks whether any aggregation function was called as now we
// no longer will go through printing each row, and only print at the end
if len(fnNames) > 0 && fnNames[0] != "" {
functionFlag = true
aggregationFns(counter, filtrCount, aggVals, reqColNames, fnNames, record)
} else {
// The code below finds the appropriate columns of the row given the
// indicies provided in the SQL request.
var rowStr string
rowStr, err = processColNameIndex(record, reqColNames, f)
if err != nil {
rowCh <- Row{
err: err,
}
return
}
rowCh <- Row{
record: rowStr + "\n",
}
}
} else {
// This code does aggregation if we were provided column names in the
// form of actual names rather an indices.
if len(fnNames) > 0 && fnNames[0] != "" {
functionFlag = true
aggregationFns(counter, filtrCount, aggVals, reqColNames, fnNames, record)
} else {
// This code prints the appropriate part of the row given the filter
// and select request, if the select request was based on column
// names rather than indices.
var rowStr string
rowStr, err = processColNameLiteral(record, reqColNames, fn, f)
if err != nil {
rowCh <- Row{
err: err,
}
return
}
rowCh <- Row{
record: rowStr + "\n",
}
}
}
}
filtrCount++
if err != nil {
if serr := writer.SendError("InternalError", err.Error()); serr != nil {
// FIXME: log errors.
}
counter++
}
}
// processColumnNames is a function which allows for cleaning of column names.
func processColumnNames(reqColNames []string, alias string, f format.Select) error {
switch f.Type() {
case format.CSV:
for i := range reqColNames {
// The code below basically cleans the column name of its alias and other
// syntax, so that we can extract its pure name.
reqColNames[i] = cleanCol(reqColNames[i], alias)
}
case format.JSON:
// JSON doesnt have columns so no cleaning required
}
return nil
// Close - closes opened S3 object.
// Delegates to the record reader created by Open, which wraps the
// underlying object stream.
// NOTE(review): recordReader is only set by a successful Open; calling
// Close before that would dereference a nil interface — confirm callers
// always pair Close with Open.
func (s3Select *S3Select) Close() error {
	return s3Select.recordReader.Close()
}
// processColNameIndex is the function which creates the row for an index based query.
func processColNameIndex(record []byte, reqColNames []string, f format.Select) (string, error) {
var row []string
for _, colName := range reqColNames {
// COALESCE AND NULLIF do not support index based access.
if reqColNames[0] == "0" {
return "", format.ErrInvalidColumnIndex
}
cindex, err := strconv.Atoi(colName)
if err != nil {
return "", ErrMissingHeaders
}
if cindex > len(f.Header()) {
return "", format.ErrInvalidColumnIndex
}
// NewS3Select - creates new S3Select by given request XML reader.
func NewS3Select(r io.Reader) (*S3Select, error) {
s3Select := &S3Select{}
if err := xml.NewDecoder(r).Decode(s3Select); err != nil {
return nil, err
}
// Subtract 1 because SELECT indexing is not 0 based, it
// starts at 1 generating the key like "_1".
row = append(row, gjson.GetBytes(record, string("_"+strconv.Itoa(cindex-1))).String())
}
rowStr := strings.Join(row, f.OutputFieldDelimiter())
if len(rowStr) > MaxCharsPerRecord {
return "", ErrOverMaxRecordSize
}
return rowStr, nil
}
// processColNameLiteral is the function which creates the row for an name based query.
// Each requested column is resolved against the JSON-encoded record; an
// empty column name paired with a registered function slot is evaluated
// as a function expression (COALESCE) instead.
func processColNameLiteral(record []byte, reqColNames []string, fn SelectFuncs, f format.Select) (string, error) {
	cells := make([]string, len(reqColNames))
	for i, name := range reqColNames {
		if name == "" && isValidFunc(fn.index, i) {
			// Empty name + valid function slot: COALESCE-style expression.
			cells[i] = evaluateFuncExpr(fn.funcExpr[i], "", record)
		} else {
			cells[i] = gjson.GetBytes(record, name).String()
		}
	}
	joined := strings.Join(cells, f.OutputFieldDelimiter())
	// Enforce the S3 Select per-record size limit.
	if len(joined) > MaxCharsPerRecord {
		return "", ErrOverMaxRecordSize
	}
	return joined, nil
}
// aggregationFns is a function which performs the actual aggregation
// methods on the given row, it uses an array defined in the main parsing
// function to keep track of values.
func aggregationFns(counter int, filtrCount int, aggVals []float64, storeReqCols []string, storeFns []string, record []byte) error {
for i, storeFn := range storeFns {
switch storeFn {
case "":
continue
case "count":
aggVals[i]++
default:
// Column names are provided as an index it'll use
// this if statement instead.
var convAggFloat float64
if format.IsInt(storeReqCols[i]) {
index, _ := strconv.Atoi(storeReqCols[i])
convAggFloat = gjson.GetBytes(record, "_"+strconv.Itoa(index)).Float()
} else {
// Named columns rather than indices.
convAggFloat = gjson.GetBytes(record, storeReqCols[i]).Float()
}
switch storeFn {
case "min":
if counter == -1 {
aggVals[i] = math.MaxFloat64
}
if convAggFloat < aggVals[i] {
aggVals[i] = convAggFloat
}
case "max":
// Calculate the max.
if counter == -1 {
aggVals[i] = math.SmallestNonzeroFloat64
}
if convAggFloat > aggVals[i] {
aggVals[i] = convAggFloat
}
case "sum":
// Calculate the sum.
aggVals[i] += convAggFloat
case "avg":
// Calculating the average.
if filtrCount == 0 {
aggVals[i] = convAggFloat
} else {
aggVals[i] = (convAggFloat + (aggVals[i] * float64(filtrCount))) / float64((filtrCount + 1))
}
default:
return ErrParseNonUnaryAgregateFunctionCall
}
}
}
return nil
return s3Select, nil
}

View File

@@ -0,0 +1,170 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3select
import (
"bytes"
"encoding/csv"
"io"
"io/ioutil"
"math/rand"
"net/http"
"strconv"
"testing"
"time"
humanize "github.com/dustin/go-humanize"
)
// randSrc seeds a dedicated PRNG so benchmark data differs between runs.
var randSrc = rand.New(rand.NewSource(time.Now().UnixNano()))

// charset is the alphabet random benchmark strings are drawn from.
const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

// newRandString returns a random alphanumeric string of the given length.
func newRandString(length int) string {
	out := make([]byte, 0, length)
	for i := 0; i < length; i++ {
		out = append(out, charset[randSrc.Intn(len(charset))])
	}
	return string(out)
}
// genSampleCSVData builds an in-memory CSV document with a fixed header
// row ("id,name,age,city") followed by count rows of random data.
func genSampleCSVData(count int) []byte {
	var out bytes.Buffer
	w := csv.NewWriter(&out)
	w.Write([]string{"id", "name", "age", "city"})
	for row := 0; row < count; row++ {
		record := []string{
			strconv.Itoa(row),
			newRandString(10),
			newRandString(5),
			newRandString(10),
		}
		w.Write(record)
	}
	// Flush buffered rows into the byte buffer before returning it.
	w.Flush()
	return out.Bytes()
}
// nullResponseWriter satisfies http.ResponseWriter (plus Flush) while
// discarding every write, so benchmarks do not measure response output.
type nullResponseWriter struct {
}

// Header returns nil.
// NOTE(review): callers that mutate the returned header map would panic;
// acceptable here because benchmarks never touch headers.
func (w *nullResponseWriter) Header() http.Header {
	return nil
}

// Write discards p and reports it as fully written.
func (w *nullResponseWriter) Write(p []byte) (int, error) {
	return len(p), nil
}

// WriteHeader ignores the status code.
func (w *nullResponseWriter) WriteHeader(statusCode int) {
}

// Flush is a no-op; present so the writer also satisfies http.Flusher.
func (w *nullResponseWriter) Flush() {
}
// benchmarkSelect runs one complete select pipeline (Open -> Evaluate ->
// Close) per benchmark iteration against an in-memory CSV document of
// count rows, using the given SQL query.
func benchmarkSelect(b *testing.B, count int, query string) {
	var requestXML = []byte(`
<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
<Expression>` + query + `</Expression>
<ExpressionType>SQL</ExpressionType>
<InputSerialization>
<CompressionType>NONE</CompressionType>
<CSV>
<FileHeaderInfo>USE</FileHeaderInfo>
</CSV>
</InputSerialization>
<OutputSerialization>
<CSV>
</CSV>
</OutputSerialization>
<RequestProgress>
<Enabled>FALSE</Enabled>
</RequestProgress>
</SelectObjectContentRequest>
`)
	s3Select, err := NewS3Select(bytes.NewReader(requestXML))
	if err != nil {
		b.Fatal(err)
	}
	// Build the CSV fixture once, outside the timed region.
	csvData := genSampleCSVData(count)
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		if err = s3Select.Open(func(offset, length int64) (io.ReadCloser, error) {
			return ioutil.NopCloser(bytes.NewReader(csvData)), nil
		}); err != nil {
			b.Fatal(err)
		}
		// Evaluate reports failures through the response writer, so there
		// is no error return to check; output goes to nullResponseWriter.
		s3Select.Evaluate(&nullResponseWriter{})
		s3Select.Close()
	}
}
// benchmarkSelectAll benchmarks a full projection ("select *") over count
// records.
func benchmarkSelectAll(b *testing.B, count int) {
	benchmarkSelect(b, count, "select * from S3Object")
}

// BenchmarkSelectAll_100K - benchmark * function with 100k records.
// Note: counts use humanize binary multiples, so 100K is 100*1024 records.
func BenchmarkSelectAll_100K(b *testing.B) {
	benchmarkSelectAll(b, 100*humanize.KiByte)
}

// BenchmarkSelectAll_1M - benchmark * function with 1m records.
func BenchmarkSelectAll_1M(b *testing.B) {
	benchmarkSelectAll(b, 1*humanize.MiByte)
}

// BenchmarkSelectAll_2M - benchmark * function with 2m records.
func BenchmarkSelectAll_2M(b *testing.B) {
	benchmarkSelectAll(b, 2*humanize.MiByte)
}

// BenchmarkSelectAll_10M - benchmark * function with 10m records.
func BenchmarkSelectAll_10M(b *testing.B) {
	benchmarkSelectAll(b, 10*humanize.MiByte)
}

// benchmarkAggregateCount benchmarks an aggregate ("count(*)") query over
// count records.
func benchmarkAggregateCount(b *testing.B, count int) {
	benchmarkSelect(b, count, "select count(*) from S3Object")
}

// BenchmarkAggregateCount_100K - benchmark count(*) function with 100k records.
func BenchmarkAggregateCount_100K(b *testing.B) {
	benchmarkAggregateCount(b, 100*humanize.KiByte)
}

// BenchmarkAggregateCount_1M - benchmark count(*) function with 1m records.
func BenchmarkAggregateCount_1M(b *testing.B) {
	benchmarkAggregateCount(b, 1*humanize.MiByte)
}

// BenchmarkAggregateCount_2M - benchmark count(*) function with 2m records.
func BenchmarkAggregateCount_2M(b *testing.B) {
	benchmarkAggregateCount(b, 2*humanize.MiByte)
}

// BenchmarkAggregateCount_10M - benchmark count(*) function with 10m records.
func BenchmarkAggregateCount_10M(b *testing.B) {
	benchmarkAggregateCount(b, 10*humanize.MiByte)
}

View File

@@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,594 +18,200 @@ package s3select
import (
"bytes"
"encoding/csv"
"fmt"
"math/rand"
"strconv"
"go/build"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"reflect"
"testing"
"time"
humanize "github.com/dustin/go-humanize"
"github.com/tidwall/gjson"
"github.com/minio/minio/pkg/s3select/format"
)
// This function returns the index of a string in a list
func stringIndex(a string, list []string) int {
for i, v := range list {
if v == a {
return i
}
}
return -1
type testResponseWriter struct {
statusCode int
response []byte
}
// TestHelperFunctions is a unit test which tests some
// small helper string functions.
func TestHelperFunctions(t *testing.T) {
tables := []struct {
myReq string
myList []string
myIndex int
expected bool
}{
{"test1", []string{"test1", "test2", "test3", "test4", "test5"}, 0, true},
{"random", []string{"test1", "test2", "test3", "test4", "test5"}, -1, false},
{"test3", []string{"test1", "test2", "test3", "test4", "test5"}, 2, true},
}
for _, table := range tables {
if format.StringInSlice(table.myReq, table.myList) != table.expected {
t.Error()
}
if stringIndex(table.myReq, table.myList) != table.myIndex {
t.Error()
}
}
func (w *testResponseWriter) Header() http.Header {
return nil
}
// TestStateMachine is a unit test which ensures that the lowest level of the
// interpreter is converting properly.
func TestStateMachine(t *testing.T) {
tables := []struct {
operand string
operator string
leftArg string
err error
expected bool
}{
{"", ">", "2012", nil, true},
{"2005", ">", "2012", nil, true},
{"2005", ">", "2012", nil, true},
{"2012.0000", ">", "2014.000", nil, true},
{"2012", "!=", "2014.000", nil, true},
{"NA", ">", "2014.000", nil, true},
{"2012", ">", "2014.000", nil, false},
{"2012.0000", ">", "2014", nil, false},
{"", "<", "2012", nil, false},
{"2012.0000", "<", "2014.000", nil, false},
{"2014", ">", "Random", nil, false},
{"test3", ">", "aandom", nil, false},
{"true", ">", "true", ErrUnsupportedSyntax, false},
}
for i, table := range tables {
val, err := evaluateOperator(gjson.Parse(table.leftArg), table.operator, gjson.Parse(table.operand))
if err != table.err {
t.Errorf("Test %d: expected %v, got %v", i+1, table.err, err)
}
if val != table.expected {
t.Errorf("Test %d: expected %t, got %t", i+1, table.expected, val)
}
}
func (w *testResponseWriter) Write(p []byte) (int, error) {
w.response = append(w.response, p...)
return len(p), nil
}
// TestOperators is a unit test which ensures that the appropriate values are
// being returned from the operators functions.
func TestOperators(t *testing.T) {
tables := []struct {
operator string
err error
}{
{">", nil},
{"%", ErrParseUnknownOperator},
}
for _, table := range tables {
err := checkValidOperator(table.operator)
if err != table.err {
t.Error()
}
}
func (w *testResponseWriter) WriteHeader(statusCode int) {
w.statusCode = statusCode
}
// Unit tests for the main function that performs aggreggation.
func TestAggregationFunc(t *testing.T) {
columnsMap := make(map[string]int)
columnsMap["Col1"] = 0
columnsMap["Col2"] = 1
tables := []struct {
counter int
filtrCount int
myAggVals []float64
columnsMap map[string]int
storeReqCols []string
storeFunctions []string
record []byte
err error
expectedVal float64
}{
{10, 5, []float64{10, 11, 12, 13, 14}, columnsMap, []string{"Col1"}, []string{"count"}, []byte("{\"Col1\":\"1\",\"Col2\":\"2\"}"), nil, 11},
{10, 5, []float64{10}, columnsMap, []string{"Col1"}, []string{"min"}, []byte("{\"Col1\":\"1\",\"Col2\":\"2\"}"), nil, 1},
{10, 5, []float64{10}, columnsMap, []string{"Col1"}, []string{"max"}, []byte("{\"Col1\":\"1\",\"Col2\":\"2\"}"), nil, 10},
{10, 5, []float64{10}, columnsMap, []string{"Col1"}, []string{"sum"}, []byte("{\"Col1\":\"1\",\"Col2\":\"2\"}"), nil, 11},
{1, 1, []float64{10}, columnsMap, []string{"Col1"}, []string{"avg"}, []byte("{\"Col1\":\"1\",\"Col2\":\"2\"}"), nil, 5.500},
{10, 5, []float64{0.0000}, columnsMap, []string{"Col1"}, []string{"random"}, []byte("{\"Col1\":\"1\",\"Col2\":\"2\"}"),
ErrParseNonUnaryAgregateFunctionCall, 0},
{0, 5, []float64{0}, columnsMap, []string{"0"}, []string{"count"}, []byte("{\"Col1\":\"1\",\"Col2\":\"2\"}"), nil, 1},
{10, 5, []float64{10}, columnsMap, []string{"1"}, []string{"min"}, []byte("{\"_1\":\"1\",\"_2\":\"2\"}"), nil, 1},
}
for _, table := range tables {
err := aggregationFns(table.counter, table.filtrCount, table.myAggVals, table.storeReqCols, table.storeFunctions, table.record)
if table.err != err {
t.Error()
}
if table.myAggVals[0] != table.expectedVal {
t.Error()
}
}
func (w *testResponseWriter) Flush() {
}
// TestStringComparator is a unit test which ensures that the appropriate
// values are being compared for strings.
func TestStringComparator(t *testing.T) {
tables := []struct {
operand string
operator string
myVal string
expected bool
err error
}{
{"random", ">", "myName", "random" > "myName", nil},
{"12", "!=", "myName", "12" != "myName", nil},
{"12", "=", "myName", "12" == "myName", nil},
{"12", "<=", "myName", "12" <= "myName", nil},
{"12", ">=", "myName", "12" >= "myName", nil},
{"12", "<", "myName", "12" < "myName", nil},
{"name", "like", "_x%", false, nil},
{"12", "randomoperator", "myName", false, ErrUnsupportedSyntax},
}
for _, table := range tables {
myVal, err := stringEval(table.operand, table.operator, table.myVal)
if err != table.err {
t.Error()
}
if myVal != table.expected {
t.Error()
}
}
}
func TestCSVINput(t *testing.T) {
var requestXML = []byte(`
<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
<Expression>SELECT one, two, three from S3Object</Expression>
<ExpressionType>SQL</ExpressionType>
<InputSerialization>
<CompressionType>NONE</CompressionType>
<CSV>
<FileHeaderInfo>USE</FileHeaderInfo>
</CSV>
</InputSerialization>
<OutputSerialization>
<CSV>
</CSV>
</OutputSerialization>
<RequestProgress>
<Enabled>FALSE</Enabled>
</RequestProgress>
</SelectObjectContentRequest>
`)
// TestFloatComparator is a unit test which ensures that the appropriate
// values are being compared for floats.
func TestFloatComparator(t *testing.T) {
tables := []struct {
operand float64
operator string
myVal float64
expected bool
err error
}{
{12.000, ">", 13.000, 12.000 > 13.000, nil},
{1000.000, "!=", 1000.000, 1000.000 != 1000.000, nil},
{1000.000, "<", 1000.000, 1000.000 < 1000.000, nil},
{1000.000, "<=", 1000.000, 1000.000 <= 1000.000, nil},
{1000.000, ">=", 1000.000, 1000.000 >= 1000.000, nil},
{1000.000, "=", 1000.000, 1000.000 == 1000.000, nil},
{17.000, "randomoperator", 0.0, false, ErrUnsupportedSyntax},
}
for _, table := range tables {
myVal, err := floatEval(table.operand, table.operator, table.myVal)
if err != table.err {
t.Error()
}
if myVal != table.expected {
t.Error()
}
}
}
var csvData = []byte(`one,two,three
10,true,"foo"
-3,false,"bar baz"
`)
// TestIntComparator is a unit test which ensures that the appropriate values
// are being compared for ints.
func TestIntComparator(t *testing.T) {
	tables := []struct {
		operand  int64
		operator string
		myVal    int64
		expected bool
		err      error
	}{
		{12, ">", 13, 12 > 13, nil},
		{1000, "!=", 1000, 1000 != 1000, nil},
		{1000, "<", 1000, 1000 < 1000, nil},
		{1000, "<=", 1000, 1000 <= 1000, nil},
		{1000, ">=", 1000, 1000 >= 1000, nil},
		// Fixed: the "=" row's expected value previously used ">=" (a
		// copy-paste slip); it happened to evaluate to the same boolean
		// but asserted the wrong relation.
		{1000, "=", 1000, 1000 == 1000, nil},
		{17, "randomoperator", 0, false, ErrUnsupportedSyntax},
	}
	for i, table := range tables {
		myVal, err := intEval(table.operand, table.operator, table.myVal)
		if err != table.err {
			t.Errorf("Test %d: expected error %v, got %v", i+1, table.err, err)
		}
		if myVal != table.expected {
			t.Errorf("Test %d: expected %t, got %t", i+1, table.expected, myVal)
		}
	}
}
// TestSizeFunction is a function which provides unit testing for the function
// which calculates size.
func TestSizeFunction(t *testing.T) {
	tables := []struct {
		myRecord []string
		expected int64
	}{
		// NOTE(review): 30 presumably accounts for the 25 payload bytes
		// plus per-field delimiters — confirm against format.ProcessSize.
		{[]string{"test1", "test2", "test3", "test4", "test5"}, 30},
	}
	for _, table := range tables {
		if format.ProcessSize(table.myRecord) != table.expected {
			t.Error()
		}
	}
}
func TestMatch(t *testing.T) {
testCases := []struct {
pattern string
text string
matched bool
}{
// Test case - 1.
// Test case so that the match occurs on the opening letter.
{
pattern: "a%",
text: "apple",
matched: true,
},
// Test case - 2.
// Test case so that the ending letter is true.
{
pattern: "%m",
text: "random",
matched: true,
},
// Test case - 3.
// Test case so that a character is at the appropriate position.
{
pattern: "_d%",
text: "adam",
matched: true,
},
// Test case - 4.
// Test case so that a character is at the appropriate position.
{
pattern: "_d%",
text: "apple",
matched: false,
},
// Test case - 5.
// Test case with checking that it is at least 3 in length
{
pattern: "a_%_%",
text: "ap",
matched: false,
},
{
pattern: "a_%_%",
text: "apple",
matched: true,
},
{
pattern: "%or%",
text: "orphan",
matched: true,
},
{
pattern: "%or%",
text: "dolphin",
matched: false,
},
{
pattern: "%or%",
text: "dorlphin",
matched: true,
},
{
pattern: "2__3",
text: "2003",
matched: true,
},
{
pattern: "_YYYY_",
text: "aYYYYa",
matched: true,
},
{
pattern: "C%",
text: "CA",
matched: true,
},
{
pattern: "C%",
text: "SC",
matched: false,
},
{
pattern: "%C",
text: "SC",
matched: true,
},
{
pattern: "%C",
text: "CA",
matched: false,
},
{
pattern: "%C",
text: "ACCC",
matched: true,
},
{
pattern: "C%",
text: "CCC",
matched: true,
},
{
pattern: "j%",
text: "mejri",
matched: false,
},
{
pattern: "a%o",
text: "ando",
matched: true,
},
{
pattern: "%j",
text: "mejri",
matched: false,
},
{
pattern: "%ja",
text: "mejrija",
matched: true,
},
{
pattern: "ja%",
text: "jamal",
matched: true,
},
{
pattern: "a%o",
text: "andp",
matched: false,
},
{
pattern: "_r%",
text: "arpa",
matched: true,
},
{
pattern: "_r%",
text: "apra",
matched: false,
},
{
pattern: "a_%_%",
text: "appple",
matched: true,
},
{
pattern: "l_b%",
text: "lebron",
matched: true,
},
{
pattern: "leb%",
text: "Dalembert",
matched: false,
},
{
pattern: "leb%",
text: "Landesberg",
matched: false,
},
{
pattern: "leb%",
text: "Mccalebb",
matched: false,
},
{
pattern: "%lebb",
text: "Mccalebb",
matched: true,
},
}
// Iterating over the test cases, call the function under test and asert the output.
for i, testCase := range testCases {
actualResult, err := likeConvert(testCase.pattern, testCase.text)
if err != nil {
t.Error()
}
if testCase.matched != actualResult {
fmt.Println("Expected Pattern", testCase.pattern, "Expected Text", testCase.text)
t.Errorf("Test %d: Expected the result to be `%v`, but instead found it to be `%v`", i+1, testCase.matched, actualResult)
}
}
}
// TestFuncProcessing is a unit test which ensures that the appropriate values are
// being returned from the Processing... functions.
func TestFuncProcessing(t *testing.T) {
tables := []struct {
myString string
coalList []string
myValString string
myValCoal string
myValNull string
stringFunc string
}{
{"lower", []string{"random", "hello", "random"}, "LOWER", "random", "", "UPPER"},
{"LOWER", []string{"missing", "hello", "random"}, "lower", "hello", "null", "LOWER"},
}
for _, table := range tables {
if table.coalList != nil {
myVal := processCoalNoIndex(table.coalList)
if myVal != table.myValCoal {
t.Error()
}
}
myVal := applyStrFunc(gjson.Result{
Type: gjson.String,
Str: table.myString,
}, table.stringFunc)
if myVal != table.myValString {
t.Error()
}
}
}
// charset is the alphabet used for random test strings.
const charset = "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

// seededRand is a dedicated PRNG seeded per process run.
var seededRand = rand.New(rand.NewSource(time.Now().UnixNano()))

// StringWithCharset returns a random string of the given length whose
// characters are drawn from the supplied charset.
func StringWithCharset(length int, charset string) string {
	out := make([]byte, length)
	for i := 0; i < length; i++ {
		out[i] = charset[seededRand.Intn(len(charset))]
	}
	return string(out)
}

// String returns a random alphanumeric string of the given length.
func String(length int) string {
	return StringWithCharset(length, charset)
}
func genCSV(b *bytes.Buffer, records int) error {
b.Reset()
w := csv.NewWriter(b)
w.Write([]string{"id", "name", "age", "city"})
for i := 0; i < records; i++ {
w.Write([]string{
strconv.Itoa(i),
String(10),
String(5),
String(10),
})
var expectedResult = []byte{
0, 0, 0, 113, 0, 0, 0, 85, 186, 145, 179, 109, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 49, 48, 44, 116, 114, 117, 101, 44, 102, 111, 111, 10, 225, 160, 249, 157, 0, 0, 0, 118, 0, 0, 0, 85, 8, 177, 111, 125, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 45, 51, 44, 102, 97, 108, 115, 101, 44, 98, 97, 114, 32, 98, 97, 122, 10, 120, 72, 77, 126, 0, 0, 0, 235, 0, 0, 0, 67, 213, 243, 57, 141, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 8, 116, 101, 120, 116, 47, 120, 109, 108, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 5, 83, 116, 97, 116, 115, 60, 63, 120, 109, 108, 32, 118, 101, 114, 115, 105, 111, 110, 61, 34, 49, 46, 48, 34, 32, 101, 110, 99, 111, 100, 105, 110, 103, 61, 34, 85, 84, 70, 45, 56, 34, 63, 62, 60, 83, 116, 97, 116, 115, 62, 60, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 52, 55, 60, 47, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 60, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 52, 55, 60, 47, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 60, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 50, 57, 60, 47, 66, 121, 116, 101, 115, 
82, 101, 116, 117, 114, 110, 101, 100, 62, 60, 47, 83, 116, 97, 116, 115, 62, 214, 225, 163, 199, 0, 0, 0, 56, 0, 0, 0, 40, 193, 198, 132, 212, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 3, 69, 110, 100, 207, 151, 211, 146,
}
// Write any buffered data to the underlying writer (standard output).
w.Flush()
return w.Error()
}
func benchmarkSQLAll(b *testing.B, records int) {
benchmarkSQL(b, records, "select * from S3Object")
}
func benchmarkSQLAggregate(b *testing.B, records int) {
benchmarkSQL(b, records, "select count(*) from S3Object")
}
func benchmarkSQL(b *testing.B, records int, query string) {
var (
buf bytes.Buffer
output bytes.Buffer
)
genCSV(&buf, records)
b.ResetTimer()
b.ReportAllocs()
sreq := ObjectSelectRequest{}
sreq.Expression = query
sreq.ExpressionType = QueryExpressionTypeSQL
sreq.InputSerialization.CSV = &struct {
FileHeaderInfo CSVFileHeaderInfo
RecordDelimiter string
FieldDelimiter string
QuoteCharacter string
QuoteEscapeCharacter string
Comments string
}{}
sreq.InputSerialization.CSV.FileHeaderInfo = CSVFileHeaderInfoUse
sreq.InputSerialization.CSV.RecordDelimiter = "\n"
sreq.InputSerialization.CSV.FieldDelimiter = ","
sreq.OutputSerialization.CSV = &struct {
QuoteFields CSVQuoteFields
RecordDelimiter string
FieldDelimiter string
QuoteCharacter string
QuoteEscapeCharacter string
}{}
sreq.OutputSerialization.CSV.RecordDelimiter = "\n"
sreq.OutputSerialization.CSV.FieldDelimiter = ","
s3s, err := New(&buf, int64(buf.Len()), sreq)
s3Select, err := NewS3Select(bytes.NewReader(requestXML))
if err != nil {
b.Fatal(err)
t.Fatal(err)
}
for i := 0; i < b.N; i++ {
output.Reset()
if err = Execute(&output, s3s); err != nil {
b.Fatal(err)
if err = s3Select.Open(func(offset, length int64) (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(csvData)), nil
}); err != nil {
t.Fatal(err)
}
w := &testResponseWriter{}
s3Select.Evaluate(w)
s3Select.Close()
if !reflect.DeepEqual(w.response, expectedResult) {
t.Fatalf("received response does not match with expected reply")
}
}
// TestJSONInput - runs a SELECT over a small line-delimited JSON document and
// compares the emitted S3 Select event-stream bytes against a recorded reply.
func TestJSONInput(t *testing.T) {
	// SELECT of three named columns from a JSON DOCUMENT input, CSV output,
	// progress events disabled.
	var requestXML = []byte(`
<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
    <Expression>SELECT one, two, three from S3Object</Expression>
    <ExpressionType>SQL</ExpressionType>
    <InputSerialization>
        <CompressionType>NONE</CompressionType>
        <JSON>
            <Type>DOCUMENT</Type>
        </JSON>
    </InputSerialization>
    <OutputSerialization>
        <CSV>
        </CSV>
    </OutputSerialization>
    <RequestProgress>
        <Enabled>FALSE</Enabled>
    </RequestProgress>
</SelectObjectContentRequest>
`)
	// Two newline-separated JSON records fed to the query.
	var jsonData = []byte(`{"one":10,"two":true,"three":"foo"}
{"one":-3,"two":true,"three":"bar baz"}
`)
	// Pre-recorded AWS event-stream frames (two Records events, a Stats
	// event and an End event, with their CRCs) expected for the request
	// and data above. Any change to the framing code must update this.
	var expectedResult = []byte{
		0, 0, 0, 113, 0, 0, 0, 85, 186, 145, 179, 109, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 49, 48, 44, 116, 114, 117, 101, 44, 102, 111, 111, 10, 225, 160, 249, 157, 0, 0, 0, 117, 0, 0, 0, 85, 79, 17, 21, 173, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 45, 51, 44, 116, 114, 117, 101, 44, 98, 97, 114, 32, 98, 97, 122, 10, 34, 12, 125, 218, 0, 0, 0, 235, 0, 0, 0, 67, 213, 243, 57, 141, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 8, 116, 101, 120, 116, 47, 120, 109, 108, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 5, 83, 116, 97, 116, 115, 60, 63, 120, 109, 108, 32, 118, 101, 114, 115, 105, 111, 110, 61, 34, 49, 46, 48, 34, 32, 101, 110, 99, 111, 100, 105, 110, 103, 61, 34, 85, 84, 70, 45, 56, 34, 63, 62, 60, 83, 116, 97, 116, 115, 62, 60, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 55, 54, 60, 47, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 60, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 55, 54, 60, 47, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 60, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 50, 56, 60, 47, 66, 121, 116, 101, 115, 82,
		101, 116, 117, 114, 110, 101, 100, 62, 60, 47, 83, 116, 97, 116, 115, 62, 124, 107, 174, 242, 0, 0, 0, 56, 0, 0, 0, 40, 193, 198, 132, 212, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 3, 69, 110, 100, 207, 151, 211, 146,
	}
	s3Select, err := NewS3Select(bytes.NewReader(requestXML))
	if err != nil {
		t.Fatal(err)
	}
	// The reader callback ignores offset/length: JSON input is always read
	// as a whole stream.
	if err = s3Select.Open(func(offset, length int64) (io.ReadCloser, error) {
		return ioutil.NopCloser(bytes.NewReader(jsonData)), nil
	}); err != nil {
		t.Fatal(err)
	}
	w := &testResponseWriter{}
	s3Select.Evaluate(w)
	s3Select.Close()
	if !reflect.DeepEqual(w.response, expectedResult) {
		t.Fatalf("received response does not match with expected reply")
	}
}
// TestParquetInput - runs a SELECT over the checked-in testdata.parquet file
// and compares the emitted S3 Select event-stream bytes against a recorded
// reply.
func TestParquetInput(t *testing.T) {
	var requestXML = []byte(`
<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
    <Expression>SELECT one, two, three from S3Object</Expression>
    <ExpressionType>SQL</ExpressionType>
    <InputSerialization>
        <CompressionType>NONE</CompressionType>
        <Parquet>
        </Parquet>
    </InputSerialization>
    <OutputSerialization>
        <CSV>
        </CSV>
    </OutputSerialization>
    <RequestProgress>
        <Enabled>FALSE</Enabled>
    </RequestProgress>
</SelectObjectContentRequest>
`)
	// getReader serves ranged reads from the parquet test file, mimicking
	// object storage: a negative offset addresses backwards from the end
	// of the file (parquet readers fetch the footer that way).
	getReader := func(offset int64, length int64) (io.ReadCloser, error) {
		testdataFile := path.Join(build.Default.GOPATH, "src/github.com/minio/minio/pkg/s3select/testdata.parquet")
		file, err := os.Open(testdataFile)
		if err != nil {
			return nil, err
		}
		fi, err := file.Stat()
		if err != nil {
			// Close the file on the error paths; the caller only
			// closes it on success.
			file.Close()
			return nil, err
		}
		if offset < 0 {
			offset = fi.Size() + offset
		}
		// io.SeekStart replaces the deprecated os.SEEK_SET constant.
		if _, err = file.Seek(offset, io.SeekStart); err != nil {
			file.Close()
			return nil, err
		}
		return file, nil
	}
	// Pre-recorded AWS event-stream frames (three Records events, a Stats
	// event and an End event, with their CRCs) expected for the request
	// and the test parquet file above.
	var expectedResult = []byte{
		0, 0, 0, 114, 0, 0, 0, 85, 253, 49, 201, 189, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 50, 46, 53, 44, 102, 111, 111, 44, 116, 114, 117, 101, 10, 209, 8, 249, 77, 0, 0, 0, 114, 0, 0, 0, 85, 253, 49, 201, 189, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 45, 49, 44, 98, 97, 114, 44, 102, 97, 108, 115, 101, 10, 45, 143, 126, 67, 0, 0, 0, 113, 0, 0, 0, 85, 186, 145, 179, 109, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 45, 49, 44, 98, 97, 122, 44, 116, 114, 117, 101, 10, 230, 139, 42, 176, 0, 0, 0, 235, 0, 0, 0, 67, 213, 243, 57, 141, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 8, 116, 101, 120, 116, 47, 120, 109, 108, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 5, 83, 116, 97, 116, 115, 60, 63, 120, 109, 108, 32, 118, 101, 114, 115, 105, 111, 110, 61, 34, 49, 46, 48, 34, 32, 101, 110, 99, 111, 100, 105, 110,
		103, 61, 34, 85, 84, 70, 45, 56, 34, 63, 62, 60, 83, 116, 97, 116, 115, 62, 60, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 45, 49, 60, 47, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 60, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 45, 49, 60, 47, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 60, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 51, 56, 60, 47, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 60, 47, 83, 116, 97, 116, 115, 62, 199, 176, 2, 83, 0, 0, 0, 56, 0, 0, 0, 40, 193, 198, 132, 212, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 3, 69, 110, 100, 207, 151, 211, 146,
	}
	s3Select, err := NewS3Select(bytes.NewReader(requestXML))
	if err != nil {
		t.Fatal(err)
	}
	if err = s3Select.Open(getReader); err != nil {
		t.Fatal(err)
	}
	w := &testResponseWriter{}
	s3Select.Evaluate(w)
	s3Select.Close()
	if !reflect.DeepEqual(w.response, expectedResult) {
		t.Fatalf("received response does not match with expected reply")
	}
}
// BenchmarkSQLAggregate_100K - benchmark count(*) function with 100k records.
// The argument is the payload size (100 KiB) handed to the shared helper;
// presumably the helper derives the record count from it — helper not
// visible in this chunk.
func BenchmarkSQLAggregate_100K(b *testing.B) {
	benchmarkSQLAggregate(b, humanize.KiByte*100)
}
// BenchmarkSQLAggregate_1M - benchmark count(*) function with 1m records.
// The argument is the payload size (1 MiB) handed to the shared helper.
func BenchmarkSQLAggregate_1M(b *testing.B) {
	benchmarkSQLAggregate(b, humanize.MiByte)
}
// BenchmarkSQLAggregate_2M - benchmark count(*) function with 2m records.
// The argument is the payload size (2 MiB) handed to the shared helper.
func BenchmarkSQLAggregate_2M(b *testing.B) {
	benchmarkSQLAggregate(b, 2*humanize.MiByte)
}
// BenchmarkSQLAggregate_10M - benchmark count(*) function with 10m records.
// The argument is the payload size (10 MiB) handed to the shared helper.
func BenchmarkSQLAggregate_10M(b *testing.B) {
	benchmarkSQLAggregate(b, 10*humanize.MiByte)
}
// BenchmarkSQLAll_100K - benchmark * function with 100k records.
// The argument is the payload size (100 KiB) handed to the shared helper.
func BenchmarkSQLAll_100K(b *testing.B) {
	benchmarkSQLAll(b, humanize.KiByte*100)
}
// BenchmarkSQLAll_1M - benchmark * function with 1m records.
// The argument is the payload size (1 MiB) handed to the shared helper.
func BenchmarkSQLAll_1M(b *testing.B) {
	benchmarkSQLAll(b, humanize.MiByte)
}
// BenchmarkSQLAll_2M - benchmark * function with 2m records.
// The argument is the payload size (2 MiB) handed to the shared helper.
func BenchmarkSQLAll_2M(b *testing.B) {
	benchmarkSQLAll(b, 2*humanize.MiByte)
}
// BenchmarkSQLAll_10M - benchmark * function with 10m records.
// The argument is the payload size (10 MiB) handed to the shared helper.
func BenchmarkSQLAll_10M(b *testing.B) {
	benchmarkSQLAll(b, 10*humanize.MiByte)
}

View File

@@ -0,0 +1,175 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sql
import "fmt"
// ArithOperator - arithmetic operator. The string value is the operator's
// SQL spelling, used when formatting expressions (see arithExpr.String).
type ArithOperator string

const (
	// Add operator '+'.
	Add ArithOperator = "+"
	// Subtract operator '-'.
	Subtract ArithOperator = "-"
	// Multiply operator '*'.
	Multiply ArithOperator = "*"
	// Divide operator '/'.
	Divide ArithOperator = "/"
	// Modulo operator '%'.
	Modulo ArithOperator = "%"
)
// arithExpr - arithmetic function.
type arithExpr struct {
	left     Expr          // left operand
	right    Expr          // right operand
	operator ArithOperator // one of Add, Subtract, Multiply, Divide, Modulo
	funcType Type          // arithmeticFunction, or aggregateFunction when either operand aggregates
}
// String - returns the parenthesized "(left op right)" form of this function.
func (f *arithExpr) String() string {
	inner := fmt.Sprintf("%v %v %v", f.left, f.operator, f.right)
	return "(" + inner + ")"
}
// compute - applies f.operator to two numeric operands. The result is Float
// if either operand is Float, otherwise Int (fractional results truncate).
// Division or modulo by zero is reported as an evaluation error.
func (f *arithExpr) compute(lv, rv *Value) (*Value, error) {
	leftValueType := lv.Type()
	rightValueType := rv.Type()
	if !leftValueType.isNumber() {
		err := fmt.Errorf("%v: left side expression evaluated to %v; not to number", f, leftValueType)
		return nil, errExternalEvalException(err)
	}
	if !rightValueType.isNumber() {
		err := fmt.Errorf("%v: right side expression evaluated to %v; not to number", f, rightValueType)
		return nil, errExternalEvalException(err)
	}

	leftValue := lv.FloatValue()
	rightValue := rv.FloatValue()

	var result float64
	switch f.operator {
	case Add:
		result = leftValue + rightValue
	case Subtract:
		result = leftValue - rightValue
	case Multiply:
		result = leftValue * rightValue
	case Divide:
		// Guard division by zero: float division would produce ±Inf
		// and the int64 conversion below is not defined for Inf.
		if rightValue == 0 {
			err := fmt.Errorf("%v: division by zero", f)
			return nil, errExternalEvalException(err)
		}
		result = leftValue / rightValue
	case Modulo:
		// Guard modulo by zero: `int64 % 0` panics at runtime. The
		// divisor is checked after truncation, the same truncation
		// the modulo itself uses.
		if int64(rightValue) == 0 {
			err := fmt.Errorf("%v: modulo by zero", f)
			return nil, errExternalEvalException(err)
		}
		result = float64(int64(leftValue) % int64(rightValue))
	}

	if leftValueType == Float || rightValueType == Float {
		return NewFloat(result), nil
	}
	return NewInt(int64(result)), nil
}
// Eval - evaluates this function for the given record and returns the result
// as Value. For an aggregate expression the operands are evaluated only to
// update their running aggregates and (nil, nil) is returned; the final
// result comes later from AggregateValue().
func (f *arithExpr) Eval(record Record) (*Value, error) {
	leftValue, err := f.left.Eval(record)
	if err != nil {
		return nil, err
	}

	rightValue, err := f.right.Eval(record)
	if err != nil {
		return nil, err
	}

	if f.funcType == aggregateFunction {
		return nil, nil
	}

	return f.compute(leftValue, rightValue)
}
// AggregateValue - returns the aggregated value, computed by applying the
// operator to the aggregated values of both operands. Calling it on a
// non-aggregate expression is an evaluation error.
func (f *arithExpr) AggregateValue() (*Value, error) {
	if f.funcType != aggregateFunction {
		// Error message typo fixed: "aggreate" -> "aggregate".
		err := fmt.Errorf("%v is not aggregate expression", f)
		return nil, errExternalEvalException(err)
	}

	lv, err := f.left.AggregateValue()
	if err != nil {
		return nil, err
	}

	rv, err := f.right.AggregateValue()
	if err != nil {
		return nil, err
	}

	return f.compute(lv, rv)
}
// Type - returns arithmeticFunction or aggregateFunction type,
// as decided at construction time by newArithExpr.
func (f *arithExpr) Type() Type {
	return f.funcType
}
// ReturnType - returns Float as return type. Float is the widest possible
// result even though compute may narrow an all-Int expression to Int.
func (f *arithExpr) ReturnType() Type {
	return Float
}
// newArithExpr - creates new arithmetic function. Both operands must return
// a number kind; if either operand is an aggregate, the whole expression
// becomes an aggregate and both operand types must be aggregate-compatible.
func newArithExpr(operator ArithOperator, left, right Expr) (*arithExpr, error) {
	if !left.ReturnType().isNumberKind() {
		return nil, errInvalidDataType(fmt.Errorf("operator %v: left side expression %v evaluate to %v, not number", operator, left, left.ReturnType()))
	}
	if !right.ReturnType().isNumberKind() {
		return nil, errInvalidDataType(fmt.Errorf("operator %v: right side expression %v evaluate to %v; not number", operator, right, right.ReturnType()))
	}

	funcType := arithmeticFunction
	if left.Type() == aggregateFunction || right.Type() == aggregateFunction {
		funcType = aggregateFunction

		// checkAggCompat - rejects operand types that cannot take part
		// in aggregate evaluation.
		checkAggCompat := func(side string, e Expr) error {
			switch e.Type() {
			case Int, Float, aggregateFunction:
				return nil
			}
			return errUnsupportedSQLOperation(fmt.Errorf("operator %v: %v side expression %v return type %v is incompatible for aggregate evaluation", operator, side, e, e.Type()))
		}
		if err := checkAggCompat("left", left); err != nil {
			return nil, err
		}
		if err := checkAggCompat("right", right); err != nil {
			return nil, err
		}
	}

	return &arithExpr{
		left:     left,
		right:    right,
		operator: operator,
		funcType: funcType,
	}, nil
}

View File

@@ -0,0 +1,636 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sql
import (
"fmt"
"regexp"
"strings"
)
// ComparisonOperator - comparison operator. Values are stored lowercase;
// String() renders them upper-cased for display.
type ComparisonOperator string

const (
	// Equal operator '='.
	Equal ComparisonOperator = "="
	// NotEqual operator '!=' or '<>'.
	NotEqual ComparisonOperator = "!="
	// LessThan operator '<'.
	LessThan ComparisonOperator = "<"
	// GreaterThan operator '>'.
	GreaterThan ComparisonOperator = ">"
	// LessThanEqual operator '<='.
	LessThanEqual ComparisonOperator = "<="
	// GreaterThanEqual operator '>='.
	GreaterThanEqual ComparisonOperator = ">="
	// Between operator 'BETWEEN'
	Between ComparisonOperator = "between"
	// In operator 'IN'
	In ComparisonOperator = "in"
	// Like operator 'LIKE'
	Like ComparisonOperator = "like"
	// NotBetween operator 'NOT BETWEEN'
	NotBetween ComparisonOperator = "not between"
	// NotIn operator 'NOT IN'
	NotIn ComparisonOperator = "not in"
	// NotLike operator 'NOT LIKE'
	NotLike ComparisonOperator = "not like"
	// IsNull operator 'IS NULL'
	IsNull ComparisonOperator = "is null"
	// IsNotNull operator 'IS NOT NULL'
	IsNotNull ComparisonOperator = "is not null"
)
// String - returns the upper-cased string representation of this operator.
func (operator ComparisonOperator) String() string {
	// Dropped a redundant extra pair of parentheses around the conversion.
	return strings.ToUpper(string(operator))
}
// equal - compares two values of matching kinds for equality. Int and Float
// compare against each other through their float representation. Incompatible
// kinds produce an error.
func equal(leftValue, rightValue *Value) (bool, error) {
	switch {
	case leftValue.Type() == Null && rightValue.Type() == Null:
		return true, nil
	case leftValue.Type() == Bool && rightValue.Type() == Bool:
		return leftValue.BoolValue() == rightValue.BoolValue(), nil
	case (leftValue.Type() == Int || leftValue.Type() == Float) &&
		(rightValue.Type() == Int || rightValue.Type() == Float):
		return leftValue.FloatValue() == rightValue.FloatValue(), nil
	case leftValue.Type() == String && rightValue.Type() == String:
		return leftValue.StringValue() == rightValue.StringValue(), nil
	case leftValue.Type() == Timestamp && rightValue.Type() == Timestamp:
		// Compare instants with time.Time.Equal rather than ==: the ==
		// operator also compares location and monotonic-clock fields,
		// so identical instants can compare unequal.
		// NOTE(review): assumes TimeValue() returns time.Time — confirm.
		return leftValue.TimeValue().Equal(rightValue.TimeValue()), nil
	}

	return false, fmt.Errorf("left value type %v and right value type %v are incompatible for equality check", leftValue.Type(), rightValue.Type())
}
// comparisonExpr - comparison function.
type comparisonExpr struct {
	left     Expr // left operand
	right    Expr // right operand (the "from" bound for BETWEEN)
	to       Expr // upper bound; set only for BETWEEN / NOT BETWEEN
	operator ComparisonOperator
	funcType Type // comparisonFunction, or aggregateFunction when an operand aggregates
}
// String - returns string representation of this function. Ternary BETWEEN
// forms render both bounds; all binary operators render "(left OP right)".
func (f *comparisonExpr) String() string {
	switch f.operator {
	case Between, NotBetween:
		return fmt.Sprintf("(%v %v %v AND %v)", f.left, f.operator, f.right, f.to)
	case Equal, NotEqual, LessThan, GreaterThan, LessThanEqual, GreaterThanEqual, In, Like, NotIn, NotLike:
		return fmt.Sprintf("(%v %v %v)", f.left, f.operator, f.right)
	default:
		return fmt.Sprintf("(%v %v %v %v)", f.left, f.right, f.to, f.operator)
	}
}
// equal - wraps the package-level equal() as a Bool Value, tagging any error
// with this expression.
func (f *comparisonExpr) equal(leftValue, rightValue *Value) (*Value, error) {
	ok, err := equal(leftValue, rightValue)
	if err != nil {
		return nil, errExternalEvalException(fmt.Errorf("%v: %v", f, err))
	}
	return NewBool(ok), nil
}
// notEqual - negation of equal(), as a Bool Value.
func (f *comparisonExpr) notEqual(leftValue, rightValue *Value) (*Value, error) {
	ok, err := equal(leftValue, rightValue)
	if err != nil {
		return nil, errExternalEvalException(fmt.Errorf("%v: %v", f, err))
	}
	return NewBool(!ok), nil
}
// lessThan - numeric '<' comparison; both operands must be numbers.
func (f *comparisonExpr) lessThan(leftValue, rightValue *Value) (*Value, error) {
	if !leftValue.Type().isNumber() {
		return nil, errExternalEvalException(fmt.Errorf("%v: left side expression evaluated to %v; not to number", f, leftValue.Type()))
	}
	if !rightValue.Type().isNumber() {
		return nil, errExternalEvalException(fmt.Errorf("%v: right side expression evaluated to %v; not to number", f, rightValue.Type()))
	}
	return NewBool(leftValue.FloatValue() < rightValue.FloatValue()), nil
}
// greaterThan - numeric '>' comparison; both operands must be numbers.
func (f *comparisonExpr) greaterThan(leftValue, rightValue *Value) (*Value, error) {
	if !leftValue.Type().isNumber() {
		return nil, errExternalEvalException(fmt.Errorf("%v: left side expression evaluated to %v; not to number", f, leftValue.Type()))
	}
	if !rightValue.Type().isNumber() {
		return nil, errExternalEvalException(fmt.Errorf("%v: right side expression evaluated to %v; not to number", f, rightValue.Type()))
	}
	return NewBool(leftValue.FloatValue() > rightValue.FloatValue()), nil
}
// lessThanEqual - numeric '<=' comparison; both operands must be numbers.
func (f *comparisonExpr) lessThanEqual(leftValue, rightValue *Value) (*Value, error) {
	if !leftValue.Type().isNumber() {
		return nil, errExternalEvalException(fmt.Errorf("%v: left side expression evaluated to %v; not to number", f, leftValue.Type()))
	}
	if !rightValue.Type().isNumber() {
		return nil, errExternalEvalException(fmt.Errorf("%v: right side expression evaluated to %v; not to number", f, rightValue.Type()))
	}
	return NewBool(leftValue.FloatValue() <= rightValue.FloatValue()), nil
}
// greaterThanEqual - numeric '>=' comparison; both operands must be numbers.
func (f *comparisonExpr) greaterThanEqual(leftValue, rightValue *Value) (*Value, error) {
	if !leftValue.Type().isNumber() {
		return nil, errExternalEvalException(fmt.Errorf("%v: left side expression evaluated to %v; not to number", f, leftValue.Type()))
	}
	if !rightValue.Type().isNumber() {
		return nil, errExternalEvalException(fmt.Errorf("%v: right side expression evaluated to %v; not to number", f, rightValue.Type()))
	}
	return NewBool(leftValue.FloatValue() >= rightValue.FloatValue()), nil
}
// computeBetween - reports whether leftValue lies in the inclusive range
// [fromValue, toValue]. All three operands must be numbers.
func (f *comparisonExpr) computeBetween(leftValue, fromValue, toValue *Value) (bool, error) {
	operands := []struct {
		side string
		v    *Value
	}{
		{"left", leftValue},
		{"from", fromValue},
		{"to", toValue},
	}
	for _, o := range operands {
		if !o.v.Type().isNumber() {
			return false, errExternalEvalException(fmt.Errorf("%v: %v side expression evaluated to %v; not to number", f, o.side, o.v.Type()))
		}
	}

	l := leftValue.FloatValue()
	return fromValue.FloatValue() <= l && l <= toValue.FloatValue(), nil
}
// between - BETWEEN result as a Bool Value.
func (f *comparisonExpr) between(leftValue, fromValue, toValue *Value) (*Value, error) {
	ok, err := f.computeBetween(leftValue, fromValue, toValue)
	if err != nil {
		return nil, err
	}
	return NewBool(ok), nil
}
// notBetween - NOT BETWEEN result as a Bool Value.
func (f *comparisonExpr) notBetween(leftValue, fromValue, toValue *Value) (*Value, error) {
	ok, err := f.computeBetween(leftValue, fromValue, toValue)
	if err != nil {
		return nil, err
	}
	return NewBool(!ok), nil
}
// computeIn - reports whether leftValue occurs in the array held by
// rightValue, using the package-level equal() per element.
func (f *comparisonExpr) computeIn(leftValue, rightValue *Value) (found bool, err error) {
	if rightValue.Type() != Array {
		return false, errExternalEvalException(fmt.Errorf("%v: right side expression evaluated to %v; not to Array", f, rightValue.Type()))
	}

	for _, v := range rightValue.ArrayValue() {
		found, err = equal(leftValue, v)
		if err != nil {
			return false, err
		}
		if found {
			return true, nil
		}
	}

	return false, nil
}
// in - IN result as a Bool Value, tagging any error with this expression.
func (f *comparisonExpr) in(leftValue, rightValue *Value) (*Value, error) {
	ok, err := f.computeIn(leftValue, rightValue)
	if err != nil {
		return nil, errExternalEvalException(fmt.Errorf("%v: %v", f, err))
	}
	return NewBool(ok), nil
}
// notIn - NOT IN result as a Bool Value, tagging any error with this expression.
func (f *comparisonExpr) notIn(leftValue, rightValue *Value) (*Value, error) {
	ok, err := f.computeIn(leftValue, rightValue)
	if err != nil {
		return nil, errExternalEvalException(fmt.Errorf("%v: %v", f, err))
	}
	return NewBool(!ok), nil
}
// computeLike - reports whether leftValue matches the SQL LIKE pattern held
// in rightValue. Both values must be strings.
func (f *comparisonExpr) computeLike(leftValue, rightValue *Value) (matched bool, err error) {
	if leftValue.Type() != String {
		err := fmt.Errorf("%v: left side expression evaluated to %v; not to string", f, leftValue.Type())
		return false, errExternalEvalException(err)
	}
	if rightValue.Type() != String {
		err := fmt.Errorf("%v: right side expression evaluated to %v; not to string", f, rightValue.Type())
		return false, errExternalEvalException(err)
	}

	// Translate the LIKE pattern to an anchored regular expression instead
	// of treating it as a raw, unanchored regexp: in SQL LIKE, '%' matches
	// any character sequence, '_' matches exactly one character, and every
	// other character matches itself literally.
	matched, err = regexp.MatchString(likePatternToRegexpString(rightValue.StringValue()), leftValue.StringValue())
	if err != nil {
		err = fmt.Errorf("%v: %v", f, err)
		return false, errExternalEvalException(err)
	}
	return matched, nil
}

// likePatternToRegexpString - converts a SQL LIKE pattern into an equivalent
// anchored regular-expression string. The (?s) flag lets '.' match newlines
// so '%' and '_' also span line breaks.
func likePatternToRegexpString(pattern string) string {
	var b strings.Builder
	b.WriteString("(?s)^")
	for _, r := range pattern {
		switch r {
		case '%':
			b.WriteString(".*")
		case '_':
			b.WriteString(".")
		default:
			b.WriteString(regexp.QuoteMeta(string(r)))
		}
	}
	b.WriteString("$")
	return b.String()
}
// like - LIKE result as a Bool Value.
func (f *comparisonExpr) like(leftValue, rightValue *Value) (*Value, error) {
	ok, err := f.computeLike(leftValue, rightValue)
	if err != nil {
		return nil, err
	}
	return NewBool(ok), nil
}
// notLike - NOT LIKE result as a Bool Value.
func (f *comparisonExpr) notLike(leftValue, rightValue *Value) (*Value, error) {
	ok, err := f.computeLike(leftValue, rightValue)
	if err != nil {
		return nil, err
	}
	return NewBool(!ok), nil
}
// compute - dispatches to the handler for f.operator. toValue is used only
// by BETWEEN / NOT BETWEEN; it is nil for every other operator.
func (f *comparisonExpr) compute(leftValue, rightValue, toValue *Value) (*Value, error) {
	switch f.operator {
	case Equal:
		return f.equal(leftValue, rightValue)
	case NotEqual:
		return f.notEqual(leftValue, rightValue)
	case LessThan:
		return f.lessThan(leftValue, rightValue)
	case LessThanEqual:
		return f.lessThanEqual(leftValue, rightValue)
	case GreaterThan:
		return f.greaterThan(leftValue, rightValue)
	case GreaterThanEqual:
		return f.greaterThanEqual(leftValue, rightValue)
	case Between:
		return f.between(leftValue, rightValue, toValue)
	case NotBetween:
		return f.notBetween(leftValue, rightValue, toValue)
	case In:
		return f.in(leftValue, rightValue)
	case NotIn:
		return f.notIn(leftValue, rightValue)
	case Like:
		return f.like(leftValue, rightValue)
	case NotLike:
		return f.notLike(leftValue, rightValue)
	}

	// Unreachable for any operator produced by newComparisonExpr.
	panic(fmt.Errorf("unexpected expression %v", f))
}
// Eval - evaluates this function for the given record and returns the result
// as Value. For an aggregate expression the operands are evaluated only to
// update their running aggregates and (nil, nil) is returned; the final
// result comes later from AggregateValue().
func (f *comparisonExpr) Eval(record Record) (*Value, error) {
	leftValue, err := f.left.Eval(record)
	if err != nil {
		return nil, err
	}

	rightValue, err := f.right.Eval(record)
	if err != nil {
		return nil, err
	}

	// to is only set for BETWEEN / NOT BETWEEN.
	var toValue *Value
	if f.to != nil {
		toValue, err = f.to.Eval(record)
		if err != nil {
			return nil, err
		}
	}

	if f.funcType == aggregateFunction {
		return nil, nil
	}

	return f.compute(leftValue, rightValue, toValue)
}
// AggregateValue - returns the aggregated value, computed by applying the
// operator to the aggregated values of the operands. Calling it on a
// non-aggregate expression is an evaluation error.
func (f *comparisonExpr) AggregateValue() (*Value, error) {
	if f.funcType != aggregateFunction {
		// Error message typo fixed: "aggreate" -> "aggregate".
		err := fmt.Errorf("%v is not aggregate expression", f)
		return nil, errExternalEvalException(err)
	}

	leftValue, err := f.left.AggregateValue()
	if err != nil {
		return nil, err
	}

	rightValue, err := f.right.AggregateValue()
	if err != nil {
		return nil, err
	}

	// to is only set for BETWEEN / NOT BETWEEN.
	var toValue *Value
	if f.to != nil {
		toValue, err = f.to.AggregateValue()
		if err != nil {
			return nil, err
		}
	}

	return f.compute(leftValue, rightValue, toValue)
}
// Type - returns comparisonFunction or aggregateFunction type,
// as decided at construction time by newComparisonExpr.
func (f *comparisonExpr) Type() Type {
	return f.funcType
}
// ReturnType - returns Bool as return type.
func (f *comparisonExpr) ReturnType() Type {
	return Bool
}
// newComparisonExpr - creates new comparison function.
//
// Each operator class validates its own argument count (panicking on a
// programmer error — the parser should never produce a wrong arity) and its
// operand return types, and decides whether the resulting expression takes
// part in aggregate evaluation.
//
// NOTE(review): the panic messages in the Between/IsNull branches say "too
// many values" even when too few were passed; the message is inaccurate but
// the check itself is correct.
func newComparisonExpr(operator ComparisonOperator, funcs ...Expr) (*comparisonExpr, error) {
	funcType := comparisonFunction
	switch operator {
	// '=' / '!=': any base-kind operands may be compared for equality.
	case Equal, NotEqual:
		if len(funcs) != 2 {
			panic(fmt.Sprintf("exactly two arguments are expected, but found %v", len(funcs)))
		}

		left := funcs[0]
		if !left.ReturnType().isBaseKind() {
			err := fmt.Errorf("operator %v: left side expression %v evaluate to %v is incompatible for equality check", operator, left, left.ReturnType())
			return nil, errInvalidDataType(err)
		}

		right := funcs[1]
		if !right.ReturnType().isBaseKind() {
			err := fmt.Errorf("operator %v: right side expression %v evaluate to %v is incompatible for equality check", operator, right, right.ReturnType())
			return nil, errInvalidDataType(err)
		}

		if left.Type() == aggregateFunction || right.Type() == aggregateFunction {
			funcType = aggregateFunction

			switch left.Type() {
			case column, Array, function, arithmeticFunction, comparisonFunction, logicalFunction, record:
				err := fmt.Errorf("operator %v: left side expression %v return type %v is incompatible for equality check", operator, left, left.Type())
				return nil, errUnsupportedSQLOperation(err)
			}

			switch right.Type() {
			case column, Array, function, arithmeticFunction, comparisonFunction, logicalFunction, record:
				err := fmt.Errorf("operator %v: right side expression %v return type %v is incompatible for equality check", operator, right, right.Type())
				return nil, errUnsupportedSQLOperation(err)
			}
		}

		return &comparisonExpr{
			left:     left,
			right:    right,
			operator: operator,
			funcType: funcType,
		}, nil

	// Ordering operators: both operands must be numbers.
	case LessThan, GreaterThan, LessThanEqual, GreaterThanEqual:
		if len(funcs) != 2 {
			panic(fmt.Sprintf("exactly two arguments are expected, but found %v", len(funcs)))
		}

		left := funcs[0]
		if !left.ReturnType().isNumberKind() {
			err := fmt.Errorf("operator %v: left side expression %v evaluate to %v, not number", operator, left, left.ReturnType())
			return nil, errInvalidDataType(err)
		}

		right := funcs[1]
		if !right.ReturnType().isNumberKind() {
			err := fmt.Errorf("operator %v: right side expression %v evaluate to %v; not number", operator, right, right.ReturnType())
			return nil, errInvalidDataType(err)
		}

		if left.Type() == aggregateFunction || right.Type() == aggregateFunction {
			funcType = aggregateFunction

			switch left.Type() {
			case Int, Float, aggregateFunction:
			default:
				err := fmt.Errorf("operator %v: left side expression %v return type %v is incompatible for aggregate evaluation", operator, left, left.Type())
				return nil, errUnsupportedSQLOperation(err)
			}

			switch right.Type() {
			case Int, Float, aggregateFunction:
			default:
				err := fmt.Errorf("operator %v: right side expression %v return type %v is incompatible for aggregate evaluation", operator, right, right.Type())
				return nil, errUnsupportedSQLOperation(err)
			}
		}

		return &comparisonExpr{
			left:     left,
			right:    right,
			operator: operator,
			funcType: funcType,
		}, nil

	// IN / NOT IN: base-kind needle, Array haystack.
	case In, NotIn:
		if len(funcs) != 2 {
			panic(fmt.Sprintf("exactly two arguments are expected, but found %v", len(funcs)))
		}

		left := funcs[0]
		if !left.ReturnType().isBaseKind() {
			err := fmt.Errorf("operator %v: left side expression %v evaluate to %v is incompatible for equality check", operator, left, left.ReturnType())
			return nil, errInvalidDataType(err)
		}

		right := funcs[1]
		if right.ReturnType() != Array {
			err := fmt.Errorf("operator %v: right side expression %v evaluate to %v is incompatible for equality check", operator, right, right.ReturnType())
			return nil, errInvalidDataType(err)
		}

		if left.Type() == aggregateFunction || right.Type() == aggregateFunction {
			funcType = aggregateFunction

			switch left.Type() {
			case column, Array, function, arithmeticFunction, comparisonFunction, logicalFunction, record:
				err := fmt.Errorf("operator %v: left side expression %v return type %v is incompatible for aggregate evaluation", operator, left, left.Type())
				return nil, errUnsupportedSQLOperation(err)
			}

			switch right.Type() {
			case Array, aggregateFunction:
			default:
				err := fmt.Errorf("operator %v: right side expression %v return type %v is incompatible for aggregate evaluation", operator, right, right.Type())
				return nil, errUnsupportedSQLOperation(err)
			}
		}

		return &comparisonExpr{
			left:     left,
			right:    right,
			operator: operator,
			funcType: funcType,
		}, nil

	// LIKE / NOT LIKE: both operands must be strings.
	case Like, NotLike:
		if len(funcs) != 2 {
			panic(fmt.Sprintf("exactly two arguments are expected, but found %v", len(funcs)))
		}

		left := funcs[0]
		if !left.ReturnType().isStringKind() {
			err := fmt.Errorf("operator %v: left side expression %v evaluate to %v, not string", operator, left, left.ReturnType())
			return nil, errLikeInvalidInputs(err)
		}

		right := funcs[1]
		if !right.ReturnType().isStringKind() {
			err := fmt.Errorf("operator %v: right side expression %v evaluate to %v, not string", operator, right, right.ReturnType())
			return nil, errLikeInvalidInputs(err)
		}

		if left.Type() == aggregateFunction || right.Type() == aggregateFunction {
			funcType = aggregateFunction

			switch left.Type() {
			case String, aggregateFunction:
			default:
				err := fmt.Errorf("operator %v: left side expression %v return type %v is incompatible for aggregate evaluation", operator, left, left.Type())
				return nil, errUnsupportedSQLOperation(err)
			}

			switch right.Type() {
			case String, aggregateFunction:
			default:
				err := fmt.Errorf("operator %v: right side expression %v return type %v is incompatible for aggregate evaluation", operator, right, right.Type())
				return nil, errUnsupportedSQLOperation(err)
			}
		}

		return &comparisonExpr{
			left:     left,
			right:    right,
			operator: operator,
			funcType: funcType,
		}, nil

	// BETWEEN / NOT BETWEEN: ternary — value plus two numeric bounds.
	case Between, NotBetween:
		if len(funcs) != 3 {
			panic(fmt.Sprintf("too many values in funcs %v", funcs))
		}

		left := funcs[0]
		if !left.ReturnType().isNumberKind() {
			err := fmt.Errorf("operator %v: left side expression %v evaluate to %v, not number", operator, left, left.ReturnType())
			return nil, errInvalidDataType(err)
		}

		from := funcs[1]
		if !from.ReturnType().isNumberKind() {
			err := fmt.Errorf("operator %v: from expression %v evaluate to %v, not number", operator, from, from.ReturnType())
			return nil, errInvalidDataType(err)
		}

		to := funcs[2]
		if !to.ReturnType().isNumberKind() {
			err := fmt.Errorf("operator %v: to expression %v evaluate to %v, not number", operator, to, to.ReturnType())
			return nil, errInvalidDataType(err)
		}

		if left.Type() == aggregateFunction || from.Type() == aggregateFunction || to.Type() == aggregateFunction {
			funcType = aggregateFunction

			switch left.Type() {
			case Int, Float, aggregateFunction:
			default:
				err := fmt.Errorf("operator %v: left side expression %v return type %v is incompatible for aggregate evaluation", operator, left, left.Type())
				return nil, errUnsupportedSQLOperation(err)
			}

			switch from.Type() {
			case Int, Float, aggregateFunction:
			default:
				err := fmt.Errorf("operator %v: from expression %v return type %v is incompatible for aggregate evaluation", operator, from, from.Type())
				return nil, errUnsupportedSQLOperation(err)
			}

			switch to.Type() {
			case Int, Float, aggregateFunction:
			default:
				err := fmt.Errorf("operator %v: to expression %v return type %v is incompatible for aggregate evaluation", operator, to, to.Type())
				return nil, errUnsupportedSQLOperation(err)
			}
		}

		return &comparisonExpr{
			left:     left,
			right:    from,
			to:       to,
			operator: operator,
			funcType: funcType,
		}, nil

	// IS [NOT] NULL is rewritten as (expr = NULL) / (expr != NULL)
	// against a NULL literal.
	case IsNull, IsNotNull:
		if len(funcs) != 1 {
			panic(fmt.Sprintf("too many values in funcs %v", funcs))
		}

		if funcs[0].Type() == aggregateFunction {
			funcType = aggregateFunction
		}

		if operator == IsNull {
			operator = Equal
		} else {
			operator = NotEqual
		}

		return &comparisonExpr{
			left:     funcs[0],
			right:    newValueExpr(NewNull()),
			operator: operator,
			funcType: funcType,
		}, nil
	}

	return nil, errParseUnknownOperator(fmt.Errorf("unknown operator %v", operator))
}

215
pkg/s3select/sql/errors.go Normal file
View File

@@ -0,0 +1,215 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sql
// s3Error - S3 Select API error carrying an error code, a human readable
// message, the HTTP status code to respond with and the underlying cause.
type s3Error struct {
	code       string
	message    string
	statusCode int
	cause      error
}

// Cause - returns the underlying error, if any.
func (e *s3Error) Cause() error {
	return e.cause
}

// ErrorCode - returns the S3 error code string.
func (e *s3Error) ErrorCode() string {
	return e.code
}

// ErrorMessage - returns the human readable error message.
func (e *s3Error) ErrorMessage() string {
	return e.message
}

// HTTPStatusCode - returns the HTTP status code for this error.
func (e *s3Error) HTTPStatusCode() int {
	return e.statusCode
}

// Error - implements the error interface; same text as ErrorMessage().
func (e *s3Error) Error() string {
	return e.message
}

// newS3Error - shared constructor used by all error helpers below.
func newS3Error(code, message string, statusCode int, cause error) *s3Error {
	return &s3Error{
		code:       code,
		message:    message,
		statusCode: statusCode,
		cause:      cause,
	}
}

// errUnsupportedSQLStructure - the SQL structure is not supported.
func errUnsupportedSQLStructure(err error) *s3Error {
	return newS3Error("UnsupportedSqlStructure", "Encountered an unsupported SQL structure. Check the SQL Reference.", 400, err)
}

// errParseUnsupportedSelect - unsupported use of SELECT.
func errParseUnsupportedSelect(err error) *s3Error {
	return newS3Error("ParseUnsupportedSelect", "The SQL expression contains an unsupported use of SELECT.", 400, err)
}

// errParseAsteriskIsNotAloneInSelectList - '*' mixed with other select expressions.
func errParseAsteriskIsNotAloneInSelectList(err error) *s3Error {
	return newS3Error("ParseAsteriskIsNotAloneInSelectList", "Other expressions are not allowed in the SELECT list when '*' is used without dot notation in the SQL expression.", 400, err)
}

// errParseInvalidContextForWildcardInSelectList - invalid '*' usage.
func errParseInvalidContextForWildcardInSelectList(err error) *s3Error {
	return newS3Error("ParseInvalidContextForWildcardInSelectList", "Invalid use of * in SELECT list in the SQL expression.", 400, err)
}

// errInvalidDataType - the expression contains an invalid data type.
func errInvalidDataType(err error) *s3Error {
	return newS3Error("InvalidDataType", "The SQL expression contains an invalid data type.", 400, err)
}

// errUnsupportedFunction - unknown or unsupported SQL function.
func errUnsupportedFunction(err error) *s3Error {
	return newS3Error("UnsupportedFunction", "Encountered an unsupported SQL function.", 400, err)
}

// errParseNonUnaryAgregateFunctionCall - aggregate called with more than one argument.
func errParseNonUnaryAgregateFunctionCall(err error) *s3Error {
	return newS3Error("ParseNonUnaryAgregateFunctionCall", "Only one argument is supported for aggregate functions in the SQL expression.", 400, err)
}

// errIncorrectSQLFunctionArgumentType - argument of the wrong type.
func errIncorrectSQLFunctionArgumentType(err error) *s3Error {
	return newS3Error("IncorrectSqlFunctionArgumentType", "Incorrect type of arguments in function call in the SQL expression.", 400, err)
}

// errEvaluatorInvalidArguments - wrong number of arguments.
func errEvaluatorInvalidArguments(err error) *s3Error {
	return newS3Error("EvaluatorInvalidArguments", "Incorrect number of arguments in the function call in the SQL expression.", 400, err)
}

// errUnsupportedSQLOperation - unsupported SQL operation.
func errUnsupportedSQLOperation(err error) *s3Error {
	return newS3Error("UnsupportedSqlOperation", "Encountered an unsupported SQL operation.", 400, err)
}

// errParseUnknownOperator - invalid operator in the expression.
func errParseUnknownOperator(err error) *s3Error {
	return newS3Error("ParseUnknownOperator", "The SQL expression contains an invalid operator.", 400, err)
}

// errLikeInvalidInputs - invalid LIKE clause argument.
func errLikeInvalidInputs(err error) *s3Error {
	return newS3Error("LikeInvalidInputs", "Invalid argument given to the LIKE clause in the SQL expression.", 400, err)
}

// errExternalEvalException - runtime evaluation failure.
func errExternalEvalException(err error) *s3Error {
	return newS3Error("ExternalEvalException", "The query cannot be evaluated. Check the file and try again.", 400, err)
}

// errValueParseFailure - timestamp value could not be parsed.
func errValueParseFailure(err error) *s3Error {
	return newS3Error("ValueParseFailure", "Time stamp parse failure in the SQL expression.", 400, err)
}

// errEvaluatorBindingDoesNotExist - unknown column name or path.
func errEvaluatorBindingDoesNotExist(err error) *s3Error {
	return newS3Error("EvaluatorBindingDoesNotExist", "A column name or a path provided does not exist in the SQL expression.", 400, err)
}

// errInternalError - unexpected server-side failure; the only 500-class error here.
func errInternalError(err error) *s3Error {
	return newS3Error("InternalError", "Encountered an internal error.", 500, err)
}

// errParseInvalidTypeParam - invalid parameter value.
func errParseInvalidTypeParam(err error) *s3Error {
	return newS3Error("ParseInvalidTypeParam", "The SQL expression contains an invalid parameter value.", 400, err)
}

// errParseUnsupportedSyntax - unsupported SQL syntax.
func errParseUnsupportedSyntax(err error) *s3Error {
	return newS3Error("ParseUnsupportedSyntax", "The SQL expression contains unsupported syntax.", 400, err)
}

// errInvalidKeyPath - invalid key path in the expression.
func errInvalidKeyPath(err error) *s3Error {
	return newS3Error("InvalidKeyPath", "Key path in the SQL expression is invalid.", 400, err)
}

160
pkg/s3select/sql/expr.go Normal file
View File

@@ -0,0 +1,160 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sql
import (
"fmt"
)
// Expr - a SQL expression type. Implementations include literal values,
// column references, function calls and logical/comparison expressions.
type Expr interface {
	// AggregateValue returns the final value of an aggregating expression.
	AggregateValue() (*Value, error)
	// Eval evaluates the expression against a single record.
	Eval(record Record) (*Value, error)
	// ReturnType is the primitive type the expression evaluates to.
	ReturnType() Type
	// Type is the expression's own category (value, column, function, ...).
	Type() Type
}
// aliasExpr - wraps an expression together with its AS alias.
type aliasExpr struct {
	alias string
	expr  Expr
}

// String - returns string representation of this expression.
func (ae *aliasExpr) String() string {
	return fmt.Sprintf("(%v AS %v)", ae.expr, ae.alias)
}

// Eval - delegates evaluation to the wrapped expression.
func (ae *aliasExpr) Eval(record Record) (*Value, error) {
	return ae.expr.Eval(record)
}

// AggregateValue - delegates to the wrapped expression.
func (ae *aliasExpr) AggregateValue() (*Value, error) {
	return ae.expr.AggregateValue()
}

// Type - returns the wrapped expression's type.
func (ae *aliasExpr) Type() Type {
	return ae.expr.Type()
}

// ReturnType - returns the wrapped expression's return type.
func (ae *aliasExpr) ReturnType() Type {
	return ae.expr.ReturnType()
}

// newAliasExpr - creates new alias expression.
func newAliasExpr(alias string, expr Expr) *aliasExpr {
	return &aliasExpr{alias: alias, expr: expr}
}
// starExpr - represents the asterisk (*) projection.
type starExpr struct{}

// String - returns string representation of this expression.
func (se *starExpr) String() string {
	return "*"
}

// Eval - wraps the whole record as a single value.
func (se *starExpr) Eval(record Record) (*Value, error) {
	return newRecordValue(record), nil
}

// AggregateValue - '*' has no aggregate result; returns nil.
func (se *starExpr) AggregateValue() (*Value, error) {
	return nil, nil
}

// Type - returns record type.
func (se *starExpr) Type() Type {
	return record
}

// ReturnType - returns record as return type.
func (se *starExpr) ReturnType() Type {
	return record
}

// newStarExpr - returns new asterisk (*) expression.
func newStarExpr() *starExpr {
	return &starExpr{}
}
// valueExpr - wraps a constant primitive value as an expression.
type valueExpr struct {
	value *Value
}

// String - returns string representation of the wrapped value.
func (ve *valueExpr) String() string {
	return ve.value.String()
}

// Eval - returns the wrapped value; the record is ignored.
func (ve *valueExpr) Eval(record Record) (*Value, error) {
	return ve.value, nil
}

// AggregateValue - returns the wrapped value.
func (ve *valueExpr) AggregateValue() (*Value, error) {
	return ve.value, nil
}

// Type - returns the wrapped value's type.
func (ve *valueExpr) Type() Type {
	return ve.value.Type()
}

// ReturnType - returns the wrapped value's type.
func (ve *valueExpr) ReturnType() Type {
	return ve.value.Type()
}

// newValueExpr - creates new constant value expression.
func newValueExpr(value *Value) *valueExpr {
	return &valueExpr{value: value}
}
// columnExpr - references a column of the current record by name.
type columnExpr struct {
	name string
}

// String - returns the column name.
func (ce *columnExpr) String() string {
	return ce.name
}

// Eval - looks the column up in the given record.
func (ce *columnExpr) Eval(record Record) (*Value, error) {
	value, err := record.Get(ce.name)
	if err == nil {
		return value, nil
	}
	return nil, errEvaluatorBindingDoesNotExist(err)
}

// AggregateValue - a bare column has no aggregate result; returns nil.
func (ce *columnExpr) AggregateValue() (*Value, error) {
	return nil, nil
}

// Type - returns column type.
func (ce *columnExpr) Type() Type {
	return column
}

// ReturnType - returns column as return type.
func (ce *columnExpr) ReturnType() Type {
	return column
}

// newColumnExpr - creates new column reference expression.
func newColumnExpr(columnName string) *columnExpr {
	return &columnExpr{name: columnName}
}

View File

@@ -0,0 +1,550 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sql
import (
	"fmt"
	"strings"
	"time"
	"unicode/utf8"
)
// FuncName - SQL function name, always the upper-case SQL spelling.
type FuncName string

const (
	// Avg - aggregate SQL function AVG().
	Avg FuncName = "AVG"

	// Count - aggregate SQL function COUNT().
	Count FuncName = "COUNT"

	// Max - aggregate SQL function MAX().
	Max FuncName = "MAX"

	// Min - aggregate SQL function MIN().
	Min FuncName = "MIN"

	// Sum - aggregate SQL function SUM().
	Sum FuncName = "SUM"

	// Coalesce - conditional SQL function COALESCE().
	Coalesce FuncName = "COALESCE"

	// NullIf - conditional SQL function NULLIF().
	NullIf FuncName = "NULLIF"

	// ToTimestamp - conversion SQL function TO_TIMESTAMP().
	ToTimestamp FuncName = "TO_TIMESTAMP"

	// UTCNow - date SQL function UTCNOW().
	UTCNow FuncName = "UTCNOW"

	// CharLength - string SQL function CHAR_LENGTH().
	CharLength FuncName = "CHAR_LENGTH"

	// CharacterLength - string SQL function CHARACTER_LENGTH() same as CHAR_LENGTH().
	CharacterLength FuncName = "CHARACTER_LENGTH"

	// Lower - string SQL function LOWER().
	Lower FuncName = "LOWER"

	// Substring - string SQL function SUBSTRING().
	Substring FuncName = "SUBSTRING"

	// Trim - string SQL function TRIM().
	Trim FuncName = "TRIM"

	// Upper - string SQL function UPPER().
	Upper FuncName = "UPPER"

	// Not yet supported:
	// DateAdd FuncName = "DATE_ADD"
	// DateDiff FuncName = "DATE_DIFF"
	// Extract FuncName = "EXTRACT"
	// ToString FuncName = "TO_STRING"
	// Cast FuncName = "CAST" // CAST('2007-04-05T14:30Z' AS TIMESTAMP)
)

// isAggregateFuncName - reports whether s names one of the aggregate
// functions (comparison is case-sensitive, matching the constants above).
func isAggregateFuncName(s string) bool {
	for _, name := range []FuncName{Avg, Count, Max, Min, Sum} {
		if FuncName(s) == name {
			return true
		}
	}
	return false
}
// callForNumber - evaluates f against record and requires a numeric result.
func callForNumber(f Expr, record Record) (*Value, error) {
	result, err := f.Eval(record)
	if err != nil {
		return nil, err
	}
	if result.Type().isNumber() {
		return result, nil
	}
	return nil, errExternalEvalException(fmt.Errorf("%v evaluated to %v; not to number", f, result.Type()))
}

// callForInt - evaluates f against record and requires an int result.
func callForInt(f Expr, record Record) (*Value, error) {
	result, err := f.Eval(record)
	if err != nil {
		return nil, err
	}
	if result.Type() == Int {
		return result, nil
	}
	return nil, errExternalEvalException(fmt.Errorf("%v evaluated to %v; not to int", f, result.Type()))
}

// callForString - evaluates f against record and requires a string result.
func callForString(f Expr, record Record) (*Value, error) {
	result, err := f.Eval(record)
	if err != nil {
		return nil, err
	}
	if result.Type() == String {
		return result, nil
	}
	return nil, errExternalEvalException(fmt.Errorf("%v evaluated to %v; not to string", f, result.Type()))
}
// funcExpr - SQL function call expression.
//
// For aggregate functions (AVG/COUNT/MAX/MIN/SUM) the running state is kept
// in the fields below and the final result is read via AggregateValue().
type funcExpr struct {
	args []Expr
	name FuncName

	sumValue   float64
	countValue int64 // records seen; also the "first value" flag for max()/min().
	maxValue   float64
	minValue   float64
}

// String - returns string representation of this function.
func (f *funcExpr) String() string {
	var argStrings []string
	for _, arg := range f.args {
		argStrings = append(argStrings, fmt.Sprintf("%v", arg))
	}
	return fmt.Sprintf("%v(%v)", f.name, strings.Join(argStrings, ","))
}

// sum - folds one record into the running SUM()/AVG() state.
// Returns (nil, nil); the result is produced by AggregateValue().
func (f *funcExpr) sum(record Record) (*Value, error) {
	value, err := callForNumber(f.args[0], record)
	if err != nil {
		return nil, err
	}
	f.sumValue += value.FloatValue()
	f.countValue++
	return nil, nil
}

// count - counts records whose argument evaluates to a non-NULL value.
func (f *funcExpr) count(record Record) (*Value, error) {
	value, err := f.args[0].Eval(record)
	if err != nil {
		return nil, err
	}

	if value.valueType != Null {
		f.countValue++
	}
	return nil, nil
}

// max - folds one record into the running MAX() state.
// The first observed value always seeds maxValue (countValue doubles as a
// first-value flag); previously the zero-initialized field made MAX() of
// all-negative inputs incorrectly report 0.
func (f *funcExpr) max(record Record) (*Value, error) {
	value, err := callForNumber(f.args[0], record)
	if err != nil {
		return nil, err
	}

	v := value.FloatValue()
	if f.countValue == 0 || v > f.maxValue {
		f.maxValue = v
	}
	f.countValue++
	return nil, nil
}

// min - folds one record into the running MIN() state.
// Mirrors max(): the first value seeds minValue so that MIN() of
// all-positive inputs no longer reports the zero value.
func (f *funcExpr) min(record Record) (*Value, error) {
	value, err := callForNumber(f.args[0], record)
	if err != nil {
		return nil, err
	}

	v := value.FloatValue()
	if f.countValue == 0 || v < f.minValue {
		f.minValue = v
	}
	f.countValue++
	return nil, nil
}
// charLength - implements CHAR_LENGTH()/CHARACTER_LENGTH().
// Counts characters (runes) rather than bytes, as SQL CHARACTER_LENGTH is
// defined over characters; the previous len() call over-counted multi-byte
// UTF-8 input.
func (f *funcExpr) charLength(record Record) (*Value, error) {
	value, err := callForString(f.args[0], record)
	if err != nil {
		return nil, err
	}

	return NewInt(int64(utf8.RuneCountInString(value.StringValue()))), nil
}

// trim - implements TRIM(); removes leading and trailing whitespace.
func (f *funcExpr) trim(record Record) (*Value, error) {
	value, err := callForString(f.args[0], record)
	if err != nil {
		return nil, err
	}

	return NewString(strings.TrimSpace(value.StringValue())), nil
}

// lower - implements LOWER(); lower-cases the string argument.
func (f *funcExpr) lower(record Record) (*Value, error) {
	value, err := callForString(f.args[0], record)
	if err != nil {
		return nil, err
	}

	return NewString(strings.ToLower(value.StringValue())), nil
}

// upper - implements UPPER(); upper-cases the string argument.
func (f *funcExpr) upper(record Record) (*Value, error) {
	value, err := callForString(f.args[0], record)
	if err != nil {
		return nil, err
	}

	return NewString(strings.ToUpper(value.StringValue())), nil
}
// substring - implements SUBSTRING(string, start [, length]).
// SQL SUBSTRING uses 1-based character positions and the optional third
// argument is the number of characters to take; the previous code treated
// start as a 0-based index and length as an absolute end offset, returning
// wrong slices for standard queries.
// NOTE(review): positions are byte offsets here; multi-byte UTF-8 input is
// not sliced on rune boundaries — confirm whether rune-based slicing is
// required.
func (f *funcExpr) substring(record Record) (*Value, error) {
	stringValue, err := callForString(f.args[0], record)
	if err != nil {
		return nil, err
	}

	offsetValue, err := callForInt(f.args[1], record)
	if err != nil {
		return nil, err
	}

	var lengthValue *Value
	if len(f.args) == 3 {
		lengthValue, err = callForInt(f.args[2], record)
		if err != nil {
			return nil, err
		}
	}

	value := stringValue.StringValue()

	// Convert the 1-based SQL start position to a 0-based slice offset,
	// falling back to the beginning for out-of-range positions (matching
	// the lenient clamping behavior of the original implementation).
	start := int(offsetValue.FloatValue()) - 1
	if start < 0 || start > len(value) {
		start = 0
	}

	end := len(value)
	if lengthValue != nil {
		// The third argument is a character count, not an end index.
		count := int(lengthValue.FloatValue())
		if count >= 0 && start+count <= len(value) {
			end = start + count
		}
	}

	return NewString(value[start:end]), nil
}
// coalesce - implements COALESCE(); returns the first non-NULL argument
// value, or the first argument's value when all are NULL.
func (f *funcExpr) coalesce(record Record) (*Value, error) {
	// Evaluate every argument first so evaluation errors are never masked
	// by an earlier non-NULL result.
	evaluated := make([]*Value, 0, len(f.args))
	for _, arg := range f.args {
		v, err := arg.Eval(record)
		if err != nil {
			return nil, err
		}
		evaluated = append(evaluated, v)
	}

	for _, v := range evaluated {
		if v.Type() != Null {
			return v, nil
		}
	}

	return evaluated[0], nil
}
// nullIf - implements NULLIF(expr1, expr2): NULL when both arguments are
// equal, otherwise the first argument's value.
func (f *funcExpr) nullIf(record Record) (*Value, error) {
	first, err := f.args[0].Eval(record)
	if err != nil {
		return nil, err
	}

	second, err := f.args[1].Eval(record)
	if err != nil {
		return nil, err
	}

	same, err := equal(first, second)
	if err != nil {
		return nil, err
	}
	if same {
		return NewNull(), nil
	}
	return first, nil
}
// toTimeStamp - implements TO_TIMESTAMP(); parses the string argument as an
// RFC3339 timestamp.
func (f *funcExpr) toTimeStamp(record Record) (*Value, error) {
	value, err := callForString(f.args[0], record)
	if err != nil {
		return nil, err
	}

	parsed, perr := time.Parse(time.RFC3339, value.StringValue())
	if perr != nil {
		return nil, errValueParseFailure(fmt.Errorf("%v: value '%v': %v", f, value, perr))
	}
	return NewTime(parsed), nil
}

// utcNow - implements UTCNOW(); returns the current time in UTC.
func (f *funcExpr) utcNow(record Record) (*Value, error) {
	return NewTime(time.Now().UTC()), nil
}
// Eval - dispatches evaluation of this function call for the given record.
// Aggregate functions fold the record into their running state and return
// (nil, nil); scalar functions return the computed value.
func (f *funcExpr) Eval(record Record) (*Value, error) {
	switch f.name {
	case Avg, Sum:
		return f.sum(record)
	case Count:
		return f.count(record)
	case Max:
		return f.max(record)
	case Min:
		return f.min(record)
	case Coalesce:
		return f.coalesce(record)
	case NullIf:
		return f.nullIf(record)
	case ToTimestamp:
		return f.toTimeStamp(record)
	case UTCNow:
		return f.utcNow(record)
	case Substring:
		return f.substring(record)
	case CharLength, CharacterLength:
		return f.charLength(record)
	case Trim:
		return f.trim(record)
	case Lower:
		return f.lower(record)
	case Upper:
		return f.upper(record)
	}

	// Reachable only when a FuncName case is missing above. The message no
	// longer claims the function is an aggregate for every unknown name.
	panic(fmt.Sprintf("unsupported function %v", f.name))
}
// AggregateValue - returns the final aggregated value.
// Valid only for AVG/COUNT/MAX/MIN/SUM expressions; any other function name
// yields an external-eval error.
func (f *funcExpr) AggregateValue() (*Value, error) {
	switch f.name {
	case Avg:
		// NOTE(review): with zero aggregated rows countValue is 0 and the
		// division yields NaN; SQL AVG over an empty set is NULL — confirm
		// the desired behavior with callers.
		return NewFloat(f.sumValue / float64(f.countValue)), nil
	case Count:
		return NewInt(f.countValue), nil
	case Max:
		return NewFloat(f.maxValue), nil
	case Min:
		return NewFloat(f.minValue), nil
	case Sum:
		return NewFloat(f.sumValue), nil
	}

	// Fixed typo in the error message ("aggreate" -> "aggregate").
	err := fmt.Errorf("%v is not aggregate function", f)
	return nil, errExternalEvalException(err)
}
// Type - returns aggregateFunction for AVG/COUNT/MAX/MIN/SUM, function otherwise.
func (f *funcExpr) Type() Type {
	switch f.name {
	case Avg, Count, Max, Min, Sum:
		return aggregateFunction
	}

	return function
}

// ReturnType - returns the primitive type this function evaluates to.
func (f *funcExpr) ReturnType() Type {
	switch f.name {
	case Avg, Max, Min, Sum:
		return Float
	case Count, CharLength, CharacterLength:
		// CHAR_LENGTH()/CHARACTER_LENGTH() evaluate to an integer count
		// (charLength() returns NewInt), so they belong with Count here —
		// the previous String classification contradicted the runtime type.
		return Int
	case Trim, Lower, Upper, Substring:
		return String
	case ToTimestamp, UTCNow:
		return Timestamp
	case Coalesce, NullIf:
		return column
	}

	return function
}
// newFuncExpr - creates new SQL function expression after validating the
// argument count and static (ReturnType-based) argument types for the given
// function name. Returns a descriptive s3Error on any violation.
func newFuncExpr(funcName FuncName, funcs ...Expr) (*funcExpr, error) {
	switch funcName {
	case Avg, Max, Min, Sum:
		// Unary numeric aggregate functions.
		if len(funcs) != 1 {
			err := fmt.Errorf("%v(): exactly one argument expected; got %v", funcName, len(funcs))
			return nil, errParseNonUnaryAgregateFunctionCall(err)
		}
		if !funcs[0].ReturnType().isNumberKind() {
			err := fmt.Errorf("%v(): argument %v evaluate to %v, not number", funcName, funcs[0], funcs[0].ReturnType())
			return nil, errIncorrectSQLFunctionArgumentType(err)
		}
		return &funcExpr{
			args: funcs,
			name: funcName,
		}, nil
	case Count:
		// COUNT() accepts any primitive value, column or whole-record argument.
		if len(funcs) != 1 {
			err := fmt.Errorf("%v(): exactly one argument expected; got %v", funcName, len(funcs))
			return nil, errParseNonUnaryAgregateFunctionCall(err)
		}
		switch funcs[0].ReturnType() {
		case Null, Bool, Int, Float, String, Timestamp, column, record:
		default:
			err := fmt.Errorf("%v(): argument %v evaluate to %v is incompatible", funcName, funcs[0], funcs[0].ReturnType())
			return nil, errIncorrectSQLFunctionArgumentType(err)
		}
		return &funcExpr{
			args: funcs,
			name: funcName,
		}, nil
	case CharLength, CharacterLength, Trim, Lower, Upper, ToTimestamp:
		// Unary functions over a string-kind argument.
		if len(funcs) != 1 {
			err := fmt.Errorf("%v(): exactly one argument expected; got %v", funcName, len(funcs))
			return nil, errEvaluatorInvalidArguments(err)
		}
		if !funcs[0].ReturnType().isStringKind() {
			err := fmt.Errorf("%v(): argument %v evaluate to %v, not string", funcName, funcs[0], funcs[0].ReturnType())
			return nil, errIncorrectSQLFunctionArgumentType(err)
		}
		return &funcExpr{
			args: funcs,
			name: funcName,
		}, nil
	case Coalesce:
		// COALESCE() takes one or more base-kind arguments.
		if len(funcs) < 1 {
			err := fmt.Errorf("%v(): one or more argument expected; got %v", funcName, len(funcs))
			return nil, errEvaluatorInvalidArguments(err)
		}
		for i := range funcs {
			if !funcs[i].ReturnType().isBaseKind() {
				err := fmt.Errorf("%v(): argument-%v %v evaluate to %v is incompatible", funcName, i+1, funcs[i], funcs[i].ReturnType())
				return nil, errIncorrectSQLFunctionArgumentType(err)
			}
		}
		return &funcExpr{
			args: funcs,
			name: funcName,
		}, nil
	case NullIf:
		// NULLIF() takes exactly two base-kind arguments.
		if len(funcs) != 2 {
			err := fmt.Errorf("%v(): exactly two arguments expected; got %v", funcName, len(funcs))
			return nil, errEvaluatorInvalidArguments(err)
		}
		if !funcs[0].ReturnType().isBaseKind() {
			err := fmt.Errorf("%v(): argument-1 %v evaluate to %v is incompatible", funcName, funcs[0], funcs[0].ReturnType())
			return nil, errIncorrectSQLFunctionArgumentType(err)
		}
		if !funcs[1].ReturnType().isBaseKind() {
			err := fmt.Errorf("%v(): argument-2 %v evaluate to %v is incompatible", funcName, funcs[1], funcs[1].ReturnType())
			return nil, errIncorrectSQLFunctionArgumentType(err)
		}
		return &funcExpr{
			args: funcs,
			name: funcName,
		}, nil
	case UTCNow:
		// UTCNOW() takes no arguments.
		if len(funcs) != 0 {
			err := fmt.Errorf("%v(): no argument expected; got %v", funcName, len(funcs))
			return nil, errEvaluatorInvalidArguments(err)
		}
		return &funcExpr{
			args: funcs,
			name: funcName,
		}, nil
	case Substring:
		// SUBSTRING() takes a string plus one or two int position arguments.
		if len(funcs) < 2 || len(funcs) > 3 {
			err := fmt.Errorf("%v(): exactly two or three arguments expected; got %v", funcName, len(funcs))
			return nil, errEvaluatorInvalidArguments(err)
		}
		if !funcs[0].ReturnType().isStringKind() {
			err := fmt.Errorf("%v(): argument-1 %v evaluate to %v, not string", funcName, funcs[0], funcs[0].ReturnType())
			return nil, errIncorrectSQLFunctionArgumentType(err)
		}
		if !funcs[1].ReturnType().isIntKind() {
			err := fmt.Errorf("%v(): argument-2 %v evaluate to %v, not int", funcName, funcs[1], funcs[1].ReturnType())
			return nil, errIncorrectSQLFunctionArgumentType(err)
		}
		if len(funcs) > 2 {
			if !funcs[2].ReturnType().isIntKind() {
				err := fmt.Errorf("%v(): argument-3 %v evaluate to %v, not int", funcName, funcs[2], funcs[2].ReturnType())
				return nil, errIncorrectSQLFunctionArgumentType(err)
			}
		}
		return &funcExpr{
			args: funcs,
			name: funcName,
		}, nil
	}

	return nil, errUnsupportedFunction(fmt.Errorf("unknown function name %v", funcName))
}

View File

@@ -0,0 +1,336 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sql
import "fmt"
// andExpr - logical AND function.
type andExpr struct {
	left     Expr
	right    Expr
	funcType Type
}

// String - returns string representation of this function.
func (f *andExpr) String() string {
	return fmt.Sprintf("(%v AND %v)", f.left, f.right)
}

// Eval - evaluates this AND expression for the given record.
// Evaluation short-circuits: the right side runs only when the left side is
// true. In aggregate mode both sides are evaluated solely for their side
// effects and no value is produced until AggregateValue() is called.
func (f *andExpr) Eval(record Record) (*Value, error) {
	leftValue, err := f.left.Eval(record)
	if err != nil {
		return nil, err
	}

	if f.funcType == aggregateFunction {
		_, err = f.right.Eval(record)
		return nil, err
	}

	if leftValue.Type() != Bool {
		err := fmt.Errorf("%v: left side expression evaluated to %v; not to bool", f, leftValue.Type())
		return nil, errExternalEvalException(err)
	}

	if !leftValue.BoolValue() {
		return leftValue, nil
	}

	rightValue, err := f.right.Eval(record)
	if err != nil {
		return nil, err
	}

	if rightValue.Type() != Bool {
		err := fmt.Errorf("%v: right side expression evaluated to %v; not to bool", f, rightValue.Type())
		return nil, errExternalEvalException(err)
	}

	return rightValue, nil
}

// AggregateValue - returns the aggregated value, combining both sides with
// short-circuit AND semantics. Fixed typo in the error message
// ("aggreate" -> "aggregate").
func (f *andExpr) AggregateValue() (*Value, error) {
	if f.funcType != aggregateFunction {
		err := fmt.Errorf("%v is not aggregate expression", f)
		return nil, errExternalEvalException(err)
	}

	leftValue, err := f.left.AggregateValue()
	if err != nil {
		return nil, err
	}
	if leftValue.Type() != Bool {
		err := fmt.Errorf("%v: left side expression evaluated to %v; not to bool", f, leftValue.Type())
		return nil, errExternalEvalException(err)
	}

	if !leftValue.BoolValue() {
		return leftValue, nil
	}

	rightValue, err := f.right.AggregateValue()
	if err != nil {
		return nil, err
	}
	if rightValue.Type() != Bool {
		err := fmt.Errorf("%v: right side expression evaluated to %v; not to bool", f, rightValue.Type())
		return nil, errExternalEvalException(err)
	}

	return rightValue, nil
}

// Type - returns logicalFunction or aggregateFunction type.
func (f *andExpr) Type() Type {
	return f.funcType
}

// ReturnType - returns Bool as return type.
func (f *andExpr) ReturnType() Type {
	return Bool
}

// newAndExpr - creates new AND logical function after validating that both
// sides are bool-kind and that aggregate operands are not mixed with bare
// columns (such a mix cannot be evaluated consistently per record).
func newAndExpr(left, right Expr) (*andExpr, error) {
	if !left.ReturnType().isBoolKind() {
		err := fmt.Errorf("operator AND: left side expression %v evaluate to %v, not bool", left, left.ReturnType())
		return nil, errInvalidDataType(err)
	}

	if !right.ReturnType().isBoolKind() {
		err := fmt.Errorf("operator AND: right side expression %v evaluate to %v; not bool", right, right.ReturnType())
		return nil, errInvalidDataType(err)
	}

	funcType := logicalFunction
	if left.Type() == aggregateFunction || right.Type() == aggregateFunction {
		funcType = aggregateFunction
		if left.Type() == column {
			err := fmt.Errorf("operator AND: left side expression %v return type %v is incompatible for aggregate evaluation", left, left.Type())
			return nil, errUnsupportedSQLOperation(err)
		}
		if right.Type() == column {
			err := fmt.Errorf("operator AND: right side expression %v return type %v is incompatible for aggregate evaluation", right, right.Type())
			return nil, errUnsupportedSQLOperation(err)
		}
	}

	return &andExpr{
		left:     left,
		right:    right,
		funcType: funcType,
	}, nil
}
// orExpr - logical OR function.
type orExpr struct {
	left     Expr
	right    Expr
	funcType Type
}

// String - returns string representation of this function.
func (f *orExpr) String() string {
	return fmt.Sprintf("(%v OR %v)", f.left, f.right)
}

// Eval - evaluates this OR expression for the given record.
// Evaluation short-circuits: the right side runs only when the left side is
// false. In aggregate mode both sides are evaluated solely for their side
// effects and no value is produced until AggregateValue() is called.
func (f *orExpr) Eval(record Record) (*Value, error) {
	leftValue, err := f.left.Eval(record)
	if err != nil {
		return nil, err
	}

	if f.funcType == aggregateFunction {
		_, err = f.right.Eval(record)
		return nil, err
	}

	if leftValue.Type() != Bool {
		err := fmt.Errorf("%v: left side expression evaluated to %v; not to bool", f, leftValue.Type())
		return nil, errExternalEvalException(err)
	}

	if leftValue.BoolValue() {
		return leftValue, nil
	}

	rightValue, err := f.right.Eval(record)
	if err != nil {
		return nil, err
	}

	if rightValue.Type() != Bool {
		err := fmt.Errorf("%v: right side expression evaluated to %v; not to bool", f, rightValue.Type())
		return nil, errExternalEvalException(err)
	}

	return rightValue, nil
}

// AggregateValue - returns the aggregated value, combining both sides with
// short-circuit OR semantics. Fixed typo in the error message
// ("aggreate" -> "aggregate").
func (f *orExpr) AggregateValue() (*Value, error) {
	if f.funcType != aggregateFunction {
		err := fmt.Errorf("%v is not aggregate expression", f)
		return nil, errExternalEvalException(err)
	}

	leftValue, err := f.left.AggregateValue()
	if err != nil {
		return nil, err
	}
	if leftValue.Type() != Bool {
		err := fmt.Errorf("%v: left side expression evaluated to %v; not to bool", f, leftValue.Type())
		return nil, errExternalEvalException(err)
	}

	if leftValue.BoolValue() {
		return leftValue, nil
	}

	rightValue, err := f.right.AggregateValue()
	if err != nil {
		return nil, err
	}
	if rightValue.Type() != Bool {
		err := fmt.Errorf("%v: right side expression evaluated to %v; not to bool", f, rightValue.Type())
		return nil, errExternalEvalException(err)
	}

	return rightValue, nil
}

// Type - returns logicalFunction or aggregateFunction type.
func (f *orExpr) Type() Type {
	return f.funcType
}

// ReturnType - returns Bool as return type.
func (f *orExpr) ReturnType() Type {
	return Bool
}

// newOrExpr - creates new OR logical function after validating that both
// sides are bool-kind and that aggregate operands are not mixed with bare
// columns.
func newOrExpr(left, right Expr) (*orExpr, error) {
	if !left.ReturnType().isBoolKind() {
		err := fmt.Errorf("operator OR: left side expression %v evaluate to %v, not bool", left, left.ReturnType())
		return nil, errInvalidDataType(err)
	}

	if !right.ReturnType().isBoolKind() {
		err := fmt.Errorf("operator OR: right side expression %v evaluate to %v; not bool", right, right.ReturnType())
		return nil, errInvalidDataType(err)
	}

	funcType := logicalFunction
	if left.Type() == aggregateFunction || right.Type() == aggregateFunction {
		funcType = aggregateFunction
		if left.Type() == column {
			err := fmt.Errorf("operator OR: left side expression %v return type %v is incompatible for aggregate evaluation", left, left.Type())
			return nil, errUnsupportedSQLOperation(err)
		}
		if right.Type() == column {
			err := fmt.Errorf("operator OR: right side expression %v return type %v is incompatible for aggregate evaluation", right, right.Type())
			return nil, errUnsupportedSQLOperation(err)
		}
	}

	return &orExpr{
		left:     left,
		right:    right,
		funcType: funcType,
	}, nil
}
// notExpr - logical NOT function.
type notExpr struct {
	right    Expr
	funcType Type
}

// String - returns string representation of this function.
// Includes the NOT keyword: the previous "(%v)" format dropped it, so error
// messages misrepresented the expression as its un-negated operand.
func (f *notExpr) String() string {
	return fmt.Sprintf("(NOT %v)", f.right)
}

// Eval - evaluates this NOT expression for the given record.
// In aggregate mode the operand is evaluated only for its side effects and
// no value is produced until AggregateValue() is called.
func (f *notExpr) Eval(record Record) (*Value, error) {
	rightValue, err := f.right.Eval(record)
	if err != nil {
		return nil, err
	}

	if f.funcType == aggregateFunction {
		return nil, nil
	}

	if rightValue.Type() != Bool {
		err := fmt.Errorf("%v: right side expression evaluated to %v; not to bool", f, rightValue.Type())
		return nil, errExternalEvalException(err)
	}

	return NewBool(!rightValue.BoolValue()), nil
}

// AggregateValue - returns the negated aggregated value of the operand.
// Fixed typo in the error message ("aggreate" -> "aggregate").
func (f *notExpr) AggregateValue() (*Value, error) {
	if f.funcType != aggregateFunction {
		err := fmt.Errorf("%v is not aggregate expression", f)
		return nil, errExternalEvalException(err)
	}

	rightValue, err := f.right.AggregateValue()
	if err != nil {
		return nil, err
	}
	if rightValue.Type() != Bool {
		err := fmt.Errorf("%v: right side expression evaluated to %v; not to bool", f, rightValue.Type())
		return nil, errExternalEvalException(err)
	}

	return NewBool(!rightValue.BoolValue()), nil
}

// Type - returns logicalFunction or aggregateFunction type.
func (f *notExpr) Type() Type {
	return f.funcType
}

// ReturnType - returns Bool as return type.
func (f *notExpr) ReturnType() Type {
	return Bool
}

// newNotExpr - creates new NOT logical function after validating that the
// operand is bool-kind.
func newNotExpr(right Expr) (*notExpr, error) {
	if !right.ReturnType().isBoolKind() {
		err := fmt.Errorf("operator NOT: right side expression %v evaluate to %v; not bool", right, right.ReturnType())
		return nil, errInvalidDataType(err)
	}

	funcType := logicalFunction
	if right.Type() == aggregateFunction {
		funcType = aggregateFunction
	}

	return &notExpr{
		right:    right,
		funcType: funcType,
	}, nil
}

View File

@@ -0,0 +1,25 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sql
// Record - is a type containing columns and their values.
type Record interface {
	// Get returns the value of the named column.
	Get(name string) (*Value, error)
	// Set stores value under the named column.
	Set(name string, value *Value) error
	// MarshalCSV serializes the record as one CSV row using fieldDelimiter.
	MarshalCSV(fieldDelimiter rune) ([]byte, error)
	// MarshalJSON serializes the record as a JSON object.
	MarshalJSON() ([]byte, error)
}

529
pkg/s3select/sql/sql.go Normal file
View File

@@ -0,0 +1,529 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sql
import (
"fmt"
"strings"
"github.com/xwb1989/sqlparser"
)
// getColumnName - builds the fully qualified column name from a parsed
// column reference, joining any qualifier parts with dots.
func getColumnName(colName *sqlparser.ColName) string {
	qualified := colName.Qualifier.Name.String()
	if outer := colName.Qualifier.Qualifier.String(); outer != "" {
		qualified = outer + "." + qualified
	}

	if qualified == "" {
		return colName.Name.String()
	}
	return qualified + "." + colName.Name.String()
}
// newLiteralExpr - converts a parsed literal (NULL, bool, SQL value, column
// name or tuple) into an Expr. Returns (nil, nil) when parserExpr is not a
// literal so the caller can try other conversions.
// Uses a bound type switch instead of the previous unbound switch followed
// by repeated type re-assertions.
func newLiteralExpr(parserExpr sqlparser.Expr, tableAlias string) (Expr, error) {
	switch parserExpr := parserExpr.(type) {
	case *sqlparser.NullVal:
		return newValueExpr(NewNull()), nil
	case sqlparser.BoolVal:
		return newValueExpr(NewBool(bool(parserExpr))), nil
	case *sqlparser.SQLVal:
		value, err := NewValue(parserExpr)
		if err != nil {
			return nil, err
		}
		return newValueExpr(value), nil
	case *sqlparser.ColName:
		columnName := getColumnName(parserExpr)
		if tableAlias != "" {
			// A qualified column must be qualified by the table alias;
			// strip the alias so records are addressed by bare column name.
			if !strings.HasPrefix(columnName, tableAlias+".") {
				err := fmt.Errorf("column name %v does not start with table alias %v", columnName, tableAlias)
				return nil, errInvalidKeyPath(err)
			}
			columnName = strings.TrimPrefix(columnName, tableAlias+".")
		}
		return newColumnExpr(columnName), nil
	case sqlparser.ValTuple:
		var valueType Type
		var values []*Value
		for i, valExpr := range parserExpr {
			sqlVal, ok := valExpr.(*sqlparser.SQLVal)
			if !ok {
				return nil, errParseInvalidTypeParam(fmt.Errorf("value %v in Tuple should be primitive value", i+1))
			}
			val, err := NewValue(sqlVal)
			if err != nil {
				return nil, err
			}
			// All tuple members must share a single primitive type.
			if i == 0 {
				valueType = val.Type()
			} else if valueType != val.Type() {
				return nil, errParseInvalidTypeParam(fmt.Errorf("mixed value type is not allowed in Tuple"))
			}
			values = append(values, val)
		}
		return newValueExpr(NewArray(values)), nil
	}

	return nil, nil
}
// isExprToComparisonExpr - converts an IS/IS NOT expression into a
// comparison expression, constant-folding it when the operand is a literal.
func isExprToComparisonExpr(parserExpr *sqlparser.IsExpr, tableAlias string, isSelectExpr bool) (Expr, error) {
	operand, err := newExpr(parserExpr.Expr, tableAlias, isSelectExpr)
	if err != nil {
		return nil, err
	}
	isExpr, err := newComparisonExpr(ComparisonOperator(parserExpr.Operator), operand)
	if err != nil {
		return nil, err
	}
	// Fold to a constant value when the operand is already a base value.
	if operand.Type().isBase() {
		folded, evalErr := isExpr.Eval(nil)
		if evalErr != nil {
			return nil, evalErr
		}
		return newValueExpr(folded), nil
	}
	return isExpr, nil
}
// rangeCondToComparisonFunc - converts a BETWEEN/NOT BETWEEN range condition
// into a three-operand comparison expression, constant-folding when all
// three operands are literal values.
func rangeCondToComparisonFunc(parserExpr *sqlparser.RangeCond, tableAlias string, isSelectExpr bool) (Expr, error) {
	operand, err := newExpr(parserExpr.Left, tableAlias, isSelectExpr)
	if err != nil {
		return nil, err
	}
	low, err := newExpr(parserExpr.From, tableAlias, isSelectExpr)
	if err != nil {
		return nil, err
	}
	high, err := newExpr(parserExpr.To, tableAlias, isSelectExpr)
	if err != nil {
		return nil, err
	}
	betweenExpr, err := newComparisonExpr(ComparisonOperator(parserExpr.Operator), operand, low, high)
	if err != nil {
		return nil, err
	}
	// Fold only when operand and both bounds are base values.
	if operand.Type().isBase() && low.Type().isBase() && high.Type().isBase() {
		folded, evalErr := betweenExpr.Eval(nil)
		if evalErr != nil {
			return nil, evalErr
		}
		return newValueExpr(folded), nil
	}
	return betweenExpr, nil
}
// toComparisonExpr - converts a binary comparison (=, <, LIKE, IN, ...) into
// a comparison expression, constant-folding it when both sides are literals.
func toComparisonExpr(parserExpr *sqlparser.ComparisonExpr, tableAlias string, isSelectExpr bool) (Expr, error) {
	lhs, err := newExpr(parserExpr.Left, tableAlias, isSelectExpr)
	if err != nil {
		return nil, err
	}
	rhs, err := newExpr(parserExpr.Right, tableAlias, isSelectExpr)
	if err != nil {
		return nil, err
	}
	cmpExpr, err := newComparisonExpr(ComparisonOperator(parserExpr.Operator), lhs, rhs)
	if err != nil {
		return nil, err
	}
	// Fold to a constant result when both operands are base values.
	if lhs.Type().isBase() && rhs.Type().isBase() {
		folded, evalErr := cmpExpr.Eval(nil)
		if evalErr != nil {
			return nil, evalErr
		}
		return newValueExpr(folded), nil
	}
	return cmpExpr, nil
}
// toArithExpr - converts a binary arithmetic expression (+, -, *, /, %) into
// an arithmetic expression, constant-folding it when both sides are literals.
func toArithExpr(parserExpr *sqlparser.BinaryExpr, tableAlias string, isSelectExpr bool) (Expr, error) {
	lhs, err := newExpr(parserExpr.Left, tableAlias, isSelectExpr)
	if err != nil {
		return nil, err
	}
	rhs, err := newExpr(parserExpr.Right, tableAlias, isSelectExpr)
	if err != nil {
		return nil, err
	}
	arith, err := newArithExpr(ArithOperator(parserExpr.Operator), lhs, rhs)
	if err != nil {
		return nil, err
	}
	// Fold to a constant result when both operands are base values.
	if lhs.Type().isBase() && rhs.Type().isBase() {
		folded, evalErr := arith.Eval(nil)
		if evalErr != nil {
			return nil, evalErr
		}
		return newValueExpr(folded), nil
	}
	return arith, nil
}
// toFuncExpr - converts a parsed function call into a function expression.
// Aggregate functions are rejected outside the select list, and an
// aggregated expression may not appear as a function argument.
func toFuncExpr(parserExpr *sqlparser.FuncExpr, tableAlias string, isSelectExpr bool) (Expr, error) {
	name := strings.ToUpper(parserExpr.Name.String())
	if !isSelectExpr && isAggregateFuncName(name) {
		return nil, errUnsupportedSQLOperation(fmt.Errorf("%v() must be used in select expression", name))
	}
	args, containsAggregate, err := newSelectExprs(parserExpr.Exprs, tableAlias)
	if err != nil {
		return nil, err
	}
	if containsAggregate {
		return nil, errIncorrectSQLFunctionArgumentType(fmt.Errorf("%v(): aggregated expression must not be used as argument", name))
	}
	return newFuncExpr(FuncName(name), args...)
}
// toAndExpr - converts a logical AND into an And expression,
// constant-folding it when both sides are boolean literals.
func toAndExpr(parserExpr *sqlparser.AndExpr, tableAlias string, isSelectExpr bool) (Expr, error) {
	lhs, err := newExpr(parserExpr.Left, tableAlias, isSelectExpr)
	if err != nil {
		return nil, err
	}
	rhs, err := newExpr(parserExpr.Right, tableAlias, isSelectExpr)
	if err != nil {
		return nil, err
	}
	andExpr, err := newAndExpr(lhs, rhs)
	if err != nil {
		return nil, err
	}
	// Fold only when both operands are already Bool values.
	if lhs.Type() == Bool && rhs.Type() == Bool {
		folded, evalErr := andExpr.Eval(nil)
		if evalErr != nil {
			return nil, evalErr
		}
		return newValueExpr(folded), nil
	}
	return andExpr, nil
}
// toOrExpr - converts a logical OR into an Or expression,
// constant-folding it when both sides are boolean literals.
func toOrExpr(parserExpr *sqlparser.OrExpr, tableAlias string, isSelectExpr bool) (Expr, error) {
	lhs, err := newExpr(parserExpr.Left, tableAlias, isSelectExpr)
	if err != nil {
		return nil, err
	}
	rhs, err := newExpr(parserExpr.Right, tableAlias, isSelectExpr)
	if err != nil {
		return nil, err
	}
	orExpr, err := newOrExpr(lhs, rhs)
	if err != nil {
		return nil, err
	}
	// Fold only when both operands are already Bool values.
	if lhs.Type() == Bool && rhs.Type() == Bool {
		folded, evalErr := orExpr.Eval(nil)
		if evalErr != nil {
			return nil, evalErr
		}
		return newValueExpr(folded), nil
	}
	return orExpr, nil
}
// toNotExpr - converts a logical NOT into a Not expression,
// constant-folding it when the operand is a boolean literal.
func toNotExpr(parserExpr *sqlparser.NotExpr, tableAlias string, isSelectExpr bool) (Expr, error) {
	operand, err := newExpr(parserExpr.Expr, tableAlias, isSelectExpr)
	if err != nil {
		return nil, err
	}
	notExpr, err := newNotExpr(operand)
	if err != nil {
		return nil, err
	}
	// Fold only when the operand is already a Bool value.
	if operand.Type() == Bool {
		folded, evalErr := notExpr.Eval(nil)
		if evalErr != nil {
			return nil, evalErr
		}
		return newValueExpr(folded), nil
	}
	return notExpr, nil
}
// newExpr - converts any supported parser expression into an Expr.
// Literals are tried first; otherwise the node is dispatched by type.
// Unknown node types yield a ParseUnsupportedSyntax error.
func newExpr(parserExpr sqlparser.Expr, tableAlias string, isSelectExpr bool) (Expr, error) {
	literal, err := newLiteralExpr(parserExpr, tableAlias)
	if err != nil {
		return nil, err
	}
	if literal != nil {
		return literal, nil
	}
	switch e := parserExpr.(type) {
	case *sqlparser.ParenExpr:
		// Parentheses carry no semantics of their own; unwrap.
		return newExpr(e.Expr, tableAlias, isSelectExpr)
	case *sqlparser.IsExpr:
		return isExprToComparisonExpr(e, tableAlias, isSelectExpr)
	case *sqlparser.RangeCond:
		return rangeCondToComparisonFunc(e, tableAlias, isSelectExpr)
	case *sqlparser.ComparisonExpr:
		return toComparisonExpr(e, tableAlias, isSelectExpr)
	case *sqlparser.BinaryExpr:
		return toArithExpr(e, tableAlias, isSelectExpr)
	case *sqlparser.FuncExpr:
		return toFuncExpr(e, tableAlias, isSelectExpr)
	case *sqlparser.AndExpr:
		return toAndExpr(e, tableAlias, isSelectExpr)
	case *sqlparser.OrExpr:
		return toOrExpr(e, tableAlias, isSelectExpr)
	case *sqlparser.NotExpr:
		return toNotExpr(e, tableAlias, isSelectExpr)
	}
	return nil, errParseUnsupportedSyntax(fmt.Errorf("unknown expression type %T; %v", parserExpr, parserExpr))
}
// newSelectExprs - converts the parsed SELECT list into expressions.
// Returns the expressions, whether any of them is aggregated, and an error.
// Enforced rules: '*' must stand alone (and appear at most once), and
// aggregated expressions must not be mixed with non-aggregated ones.
func newSelectExprs(parserSelectExprs []sqlparser.SelectExpr, tableAlias string) ([]Expr, bool, error) {
	var funcs []Expr
	starExprFound := false
	aggregatedExprFound := false
	for _, selectExpr := range parserSelectExprs {
		switch selectExpr.(type) {
		case *sqlparser.AliasedExpr:
			// A '*' seen earlier forbids any further expression.
			if starExprFound {
				return nil, false, errParseAsteriskIsNotAloneInSelectList(nil)
			}
			aliasedExpr := selectExpr.(*sqlparser.AliasedExpr)
			f, err := newExpr(aliasedExpr.Expr, tableAlias, true)
			if err != nil {
				return nil, false, err
			}
			if f.Type() == aggregateFunction {
				if !aggregatedExprFound {
					aggregatedExprFound = true
					// First aggregate found: any previously collected
					// non-aggregate expression makes the list invalid.
					if len(funcs) > 0 {
						return nil, false, errParseUnsupportedSyntax(fmt.Errorf("expression must not mixed with aggregated expression"))
					}
				}
			} else if aggregatedExprFound {
				// Non-aggregate after an aggregate is equally invalid.
				return nil, false, errParseUnsupportedSyntax(fmt.Errorf("expression must not mixed with aggregated expression"))
			}
			// Wrap with the user-supplied alias (AS name), if any.
			alias := aliasedExpr.As.String()
			if alias != "" {
				f = newAliasExpr(alias, f)
			}
			funcs = append(funcs, f)
		case *sqlparser.StarExpr:
			if starExprFound {
				err := fmt.Errorf("only single star expression allowed")
				return nil, false, errParseInvalidContextForWildcardInSelectList(err)
			}
			starExprFound = true
			funcs = append(funcs, newStarExpr())
		default:
			return nil, false, errParseUnsupportedSyntax(fmt.Errorf("unknown select expression %v", selectExpr))
		}
	}
	return funcs, aggregatedExprFound, nil
}
// Select - SQL Select statement.
type Select struct {
	tableName           string // object name from the FROM clause.
	tableAlias          string // optional alias for the table; "" when absent.
	selectExprs         []Expr // parsed projection expressions.
	aggregatedExprFound bool   // true when the select list is aggregated.
	whereExpr           Expr   // optional WHERE predicate; nil when absent.
}
// TableAlias - returns table alias name ("" when no alias was given).
func (statement *Select) TableAlias() string {
	return statement.tableAlias
}

// IsSelectAll - returns whether '*' is used in select expression or not.
// By construction a star expression is always the sole select expression.
func (statement *Select) IsSelectAll() bool {
	if len(statement.selectExprs) == 1 {
		_, ok := statement.selectExprs[0].(*starExpr)
		return ok
	}
	return false
}

// IsAggregated - returns whether aggregated functions are used in select expression or not.
func (statement *Select) IsAggregated() bool {
	return statement.aggregatedExprFound
}
// AggregateResult - stores the final aggregate values into output.
// It is a no-op for non-aggregated statements. Each result is stored under
// its alias when one was given, otherwise under a positional "_N" name.
func (statement *Select) AggregateResult(output Record) error {
	if !statement.aggregatedExprFound {
		return nil
	}
	for i, expr := range statement.selectExprs {
		value, err := expr.AggregateValue()
		if err != nil {
			return err
		}
		if value == nil {
			return errInternalError(fmt.Errorf("%v returns <nil> for AggregateValue()", expr))
		}
		var name string
		if aliased, ok := expr.(*aliasExpr); ok {
			name = aliased.alias
		} else {
			name = fmt.Sprintf("_%v", i+1)
		}
		if err = output.Set(name, value); err != nil {
			return errInternalError(fmt.Errorf("error occurred to store value %v for %v; %v", value, name, err))
		}
	}
	return nil
}
// Eval - evaluates this Select expressions for given record.
// Returns (nil, nil) when the WHERE predicate filters the record out.
// For aggregated statements the per-record evaluation only feeds the
// aggregation state; results are fetched later via AggregateResult.
func (statement *Select) Eval(input, output Record) (Record, error) {
	if statement.whereExpr != nil {
		value, err := statement.whereExpr.Eval(input)
		if err != nil {
			return nil, err
		}
		// The predicate must reduce to a Bool value.
		if value == nil || value.valueType != Bool {
			err = fmt.Errorf("WHERE expression %v returns invalid bool value %v", statement.whereExpr, value)
			return nil, errInternalError(err)
		}
		// Record does not match: signal "skip" with a nil record.
		if !value.BoolValue() {
			return nil, nil
		}
	}
	// Call selectExprs
	for i, expr := range statement.selectExprs {
		value, err := expr.Eval(input)
		if err != nil {
			return nil, err
		}
		// Aggregated select: Eval above accumulates state; nothing is
		// stored per record.
		if statement.aggregatedExprFound {
			continue
		}
		// Default positional output name; overridden by alias/column below.
		name := fmt.Sprintf("_%v", i+1)
		switch expr.(type) {
		case *starExpr:
			// '*' projects the whole input record as-is.
			return value.recordValue(), nil
		case *aliasExpr:
			name = expr.(*aliasExpr).alias
		case *columnExpr:
			name = expr.(*columnExpr).name
		}
		if err = output.Set(name, value); err != nil {
			return nil, errInternalError(fmt.Errorf("error occurred to store value %v for %v; %v", value, name, err))
		}
	}
	return output, nil
}
// NewSelect - creates new Select by parsing sql.
// Only plain single-table SELECT statements are supported; JOINs,
// parenthesized table expressions and subqueries in FROM are rejected
// with a parse error instead of panicking on a failed type assertion.
func NewSelect(sql string) (*Select, error) {
	stmt, err := sqlparser.Parse(sql)
	if err != nil {
		return nil, errUnsupportedSQLStructure(err)
	}

	selectStmt, ok := stmt.(*sqlparser.Select)
	if !ok {
		return nil, errParseUnsupportedSelect(fmt.Errorf("unsupported SQL statement %v", sql))
	}

	var tableName, tableAlias string
	for _, fromExpr := range selectStmt.From {
		// Previously these two assertions were unchecked and panicked for
		// JOIN/subquery FROM clauses; fail with a proper error instead.
		tableExpr, ok := fromExpr.(*sqlparser.AliasedTableExpr)
		if !ok {
			return nil, errParseUnsupportedSyntax(fmt.Errorf("unsupported FROM clause in SQL statement %v", sql))
		}
		simpleTableExpr, ok := tableExpr.Expr.(sqlparser.TableName)
		if !ok {
			return nil, errParseUnsupportedSyntax(fmt.Errorf("unsupported table expression in SQL statement %v", sql))
		}
		tableName = simpleTableExpr.Name.String()
		tableAlias = tableExpr.As.String()
	}

	selectExprs, aggregatedExprFound, err := newSelectExprs(selectStmt.SelectExprs, tableAlias)
	if err != nil {
		return nil, err
	}

	var whereExpr Expr
	if selectStmt.Where != nil {
		whereExpr, err = newExpr(selectStmt.Where.Expr, tableAlias, false)
		if err != nil {
			return nil, err
		}
	}

	return &Select{
		tableName:           tableName,
		tableAlias:          tableAlias,
		selectExprs:         selectExprs,
		aggregatedExprFound: aggregatedExprFound,
		whereExpr:           whereExpr,
	}, nil
}

118
pkg/s3select/sql/type.go Normal file
View File

@@ -0,0 +1,118 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sql
// Type - value type.
type Type string

const (
	// Null - represents NULL value type.
	Null Type = "null"

	// Bool - represents boolean value type.
	Bool Type = "bool"

	// Int - represents integer value type.
	Int Type = "int"

	// Float - represents floating point value type.
	Float Type = "float"

	// String - represents string value type.
	String Type = "string"

	// Timestamp - represents time value type.
	Timestamp Type = "timestamp"

	// Array - represents array of values where each value type is one of above.
	Array Type = "array"

	// Internal (unexported) types used while building expression trees.
	column             Type = "column"
	record             Type = "record"
	function           Type = "function"
	aggregateFunction  Type = "aggregatefunction"
	arithmeticFunction Type = "arithmeticfunction"
	comparisonFunction Type = "comparisonfunction"
	logicalFunction    Type = "logicalfunction"

	// Integer Type = "integer" // Same as Int
	// Decimal Type = "decimal" // Same as Float
	// Numeric Type = "numeric" // Same as Float
)

// isBase - reports whether t is a concrete primitive value type.
func (t Type) isBase() bool {
	return t == Null || t == Bool || t == Int || t == Float || t == String || t == Timestamp
}

// isBaseKind - like isBase, but also accepts column, whose concrete type is
// only known once a record is evaluated.
func (t Type) isBaseKind() bool {
	return t.isBase() || t == column
}

// isNumber - reports whether t is a numeric type.
func (t Type) isNumber() bool {
	return t == Int || t == Float
}

// isNumberKind - numeric type or a column that may evaluate to one.
func (t Type) isNumberKind() bool {
	return t.isNumber() || t == column
}

// isIntKind - integer type or a column that may evaluate to one.
func (t Type) isIntKind() bool {
	return t == Int || t == column
}

// isBoolKind - boolean type or a column that may evaluate to one.
func (t Type) isBoolKind() bool {
	return t == Bool || t == column
}

// isStringKind - string type or a column that may evaluate to one.
func (t Type) isStringKind() bool {
	return t == String || t == column
}

223
pkg/s3select/sql/value.go Normal file
View File

@@ -0,0 +1,223 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sql
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"github.com/xwb1989/sqlparser"
)
// Value - represents any primitive value of bool, int, float, string and time.
type Value struct {
	value     interface{} // underlying Go value; nil for Null.
	valueType Type        // tag describing what value holds.
}
// String - represents value as string. Strings are single-quoted, arrays
// are rendered as a comma-separated parenthesized list, NULL prints as
// "NULL" and any other nil underlying value as "<nil>".
func (value *Value) String() string {
	if value.value == nil {
		if value.valueType == Null {
			return "NULL"
		}
		return "<nil>"
	}

	switch value.valueType {
	case String:
		return fmt.Sprintf("'%v'", value.value)
	case Array:
		elems := value.value.([]*Value)
		parts := make([]string, 0, len(elems))
		for _, v := range elems {
			parts = append(parts, fmt.Sprintf("%v", v))
		}
		return "(" + strings.Join(parts, ",") + ")"
	default:
		return fmt.Sprintf("%v", value.value)
	}
}
// CSVString - encodes to CSV string. Unlike String(), no quoting is
// applied; a nil underlying value renders as "<nil>" via %v.
func (value *Value) CSVString() string {
	return fmt.Sprintf("%v", value.value)
}

// MarshalJSON - encodes to JSON data. The underlying Go value is
// marshalled directly (Null becomes JSON null).
func (value *Value) MarshalJSON() ([]byte, error) {
	return json.Marshal(value.value)
}
// BoolValue - returns underlying bool value. It panics if value is not Bool type.
func (value *Value) BoolValue() bool {
	if value.valueType != Bool {
		panic(fmt.Sprintf("requested bool value but found %T type", value.value))
	}
	return value.value.(bool)
}

// IntValue - returns underlying int value. It panics if value is not Int type.
func (value *Value) IntValue() int64 {
	if value.valueType != Int {
		panic(fmt.Sprintf("requested int value but found %T type", value.value))
	}
	return value.value.(int64)
}

// FloatValue - returns underlying int/float value as float64. It panics if value is not Int/Float type.
func (value *Value) FloatValue() float64 {
	if value.valueType == Float {
		return value.value.(float64)
	}
	if value.valueType == Int {
		// Integers widen losslessly-enough to float64 for SQL arithmetic.
		return float64(value.value.(int64))
	}
	panic(fmt.Sprintf("requested float value but found %T type", value.value))
}

// StringValue - returns underlying string value. It panics if value is not String type.
func (value *Value) StringValue() string {
	if value.valueType != String {
		panic(fmt.Sprintf("requested string value but found %T type", value.value))
	}
	return value.value.(string)
}

// TimeValue - returns underlying time value. It panics if value is not Timestamp type.
func (value *Value) TimeValue() time.Time {
	if value.valueType != Timestamp {
		panic(fmt.Sprintf("requested time value but found %T type", value.value))
	}
	return value.value.(time.Time)
}

// ArrayValue - returns underlying value array. It panics if value is not Array type.
func (value *Value) ArrayValue() []*Value {
	if value.valueType != Array {
		panic(fmt.Sprintf("requested array value but found %T type", value.value))
	}
	return value.value.([]*Value)
}

// recordValue - returns underlying record. It panics if value is not record type.
func (value *Value) recordValue() Record {
	if value.valueType != record {
		panic(fmt.Sprintf("requested record value but found %T type", value.value))
	}
	return value.value.(Record)
}
// Type - returns value type tag.
func (value *Value) Type() Type {
	return value.valueType
}

// Value - returns underneath value interface (nil for Null values).
func (value *Value) Value() interface{} {
	return value.value
}
// NewNull - creates new null value.
func NewNull() *Value {
	return &Value{value: nil, valueType: Null}
}

// NewBool - creates new Bool value of b.
func NewBool(b bool) *Value {
	return &Value{value: b, valueType: Bool}
}

// NewInt - creates new Int value of i.
func NewInt(i int64) *Value {
	return &Value{value: i, valueType: Int}
}

// NewFloat - creates new Float value of f.
func NewFloat(f float64) *Value {
	return &Value{value: f, valueType: Float}
}

// NewString - creates new String value of s.
func NewString(s string) *Value {
	return &Value{value: s, valueType: String}
}

// NewTime - creates new Time value of t.
func NewTime(t time.Time) *Value {
	return &Value{value: t, valueType: Timestamp}
}

// NewArray - creates new Array value of values.
func NewArray(values []*Value) *Value {
	return &Value{value: values, valueType: Array}
}

// newRecordValue - wraps a Record in a Value (internal use only).
func newRecordValue(r Record) *Value {
	return &Value{value: r, valueType: record}
}
// NewValue - creates new Value from SQLVal v.
// String, integer, float, hex and bit literals are supported; ValArg
// placeholders are not.
func NewValue(v *sqlparser.SQLVal) (*Value, error) {
	switch v.Type {
	case sqlparser.StrVal:
		return NewString(string(v.Val)), nil
	case sqlparser.IntVal:
		i64, err := strconv.ParseInt(string(v.Val), 10, 64)
		if err != nil {
			return nil, err
		}
		return NewInt(i64), nil
	case sqlparser.FloatVal:
		f64, err := strconv.ParseFloat(string(v.Val), 64)
		if err != nil {
			return nil, err
		}
		return NewFloat(f64), nil
	case sqlparser.HexNum: // represented as 0xDD
		// strconv.ParseInt with base 16 rejects a "0x"/"0X" prefix, so the
		// prefix carried in the literal must be stripped first (parsing the
		// raw token always failed before).
		hexDigits := strings.TrimPrefix(strings.TrimPrefix(string(v.Val), "0x"), "0X")
		i64, err := strconv.ParseInt(hexDigits, 16, 64)
		if err != nil {
			return nil, err
		}
		return NewInt(i64), nil
	case sqlparser.HexVal: // represented as X'0DD'
		i64, err := strconv.ParseInt(string(v.Val), 16, 64)
		if err != nil {
			return nil, err
		}
		return NewInt(i64), nil
	case sqlparser.BitVal: // represented as B'00'
		i64, err := strconv.ParseInt(string(v.Val), 2, 64)
		if err != nil {
			return nil, err
		}
		return NewInt(i64), nil
	case sqlparser.ValArg:
		// FIXME: the format is unknown and not sure how to handle it.
	}

	return nil, fmt.Errorf("unknown SQL value %v; %v ", v, v.Type)
}

Binary file not shown.

View File

@@ -0,0 +1,642 @@
// +build ignore
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3select
///////////////////////////////////////////////////////////////////////
//
// Validation errors.
//
///////////////////////////////////////////////////////////////////////
// Each constructor below wraps the underlying cause into an s3Error carrying
// the AWS S3 Select error code string, its canonical client-facing message,
// and HTTP status 400 (Bad Request).

func errExpressionTooLong(err error) *s3Error {
	return &s3Error{
		code:       "ExpressionTooLong",
		message:    "The SQL expression is too long: The maximum byte-length for the SQL expression is 256 KB.",
		statusCode: 400,
		cause:      err,
	}
}

func errColumnTooLong(err error) *s3Error {
	return &s3Error{
		code:       "ColumnTooLong",
		message:    "The length of a column in the result is greater than maxCharsPerColumn of 1 MB.",
		statusCode: 400,
		cause:      err,
	}
}

func errOverMaxColumn(err error) *s3Error {
	return &s3Error{
		code:       "OverMaxColumn",
		message:    "The number of columns in the result is greater than the maximum allowable number of columns.",
		statusCode: 400,
		cause:      err,
	}
}

func errOverMaxRecordSize(err error) *s3Error {
	return &s3Error{
		code:       "OverMaxRecordSize",
		message:    "The length of a record in the input or result is greater than maxCharsPerRecord of 1 MB.",
		statusCode: 400,
		cause:      err,
	}
}

func errInvalidColumnIndex(err error) *s3Error {
	return &s3Error{
		code:       "InvalidColumnIndex",
		message:    "Column index in the SQL expression is invalid.",
		statusCode: 400,
		cause:      err,
	}
}

func errInvalidTextEncoding(err error) *s3Error {
	return &s3Error{
		code:       "InvalidTextEncoding",
		message:    "Invalid encoding type. Only UTF-8 encoding is supported.",
		statusCode: 400,
		cause:      err,
	}
}

func errInvalidTableAlias(err error) *s3Error {
	return &s3Error{
		code:       "InvalidTableAlias",
		message:    "The SQL expression contains an invalid table alias.",
		statusCode: 400,
		cause:      err,
	}
}

func errUnsupportedSyntax(err error) *s3Error {
	return &s3Error{
		code:       "UnsupportedSyntax",
		message:    "Encountered invalid syntax.",
		statusCode: 400,
		cause:      err,
	}
}

func errAmbiguousFieldName(err error) *s3Error {
	return &s3Error{
		code:       "AmbiguousFieldName",
		message:    "Field name matches to multiple fields in the file. Check the SQL expression and the file, and try again.",
		statusCode: 400,
		cause:      err,
	}
}

func errIntegerOverflow(err error) *s3Error {
	return &s3Error{
		code:       "IntegerOverflow",
		message:    "Integer overflow or underflow in the SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errIllegalSQLFunctionArgument(err error) *s3Error {
	return &s3Error{
		code:       "IllegalSqlFunctionArgument",
		message:    "Illegal argument was used in the SQL function.",
		statusCode: 400,
		cause:      err,
	}
}

func errMultipleDataSourcesUnsupported(err error) *s3Error {
	return &s3Error{
		code:       "MultipleDataSourcesUnsupported",
		message:    "Multiple data sources are not supported.",
		statusCode: 400,
		cause:      err,
	}
}

func errMissingHeaders(err error) *s3Error {
	return &s3Error{
		code:       "MissingHeaders",
		message:    "Some headers in the query are missing from the file. Check the file and try again.",
		statusCode: 400,
		cause:      err,
	}
}

func errUnrecognizedFormatException(err error) *s3Error {
	return &s3Error{
		code:       "UnrecognizedFormatException",
		message:    "Encountered an invalid record type.",
		statusCode: 400,
		cause:      err,
	}
}
//////////////////////////////////////////////////////////////////////////////////////
//
// SQL parsing errors.
//
//////////////////////////////////////////////////////////////////////////////////////
// Lexer error constructors. Messages follow the AWS S3 Select error catalog.
// Fixed here: the messages for LexerInvalidOperator and LexerInvalidLiteral
// were swapped, and LexerInvalidIONLiteral wrongly said "operator".

func errLexerInvalidChar(err error) *s3Error {
	return &s3Error{
		code:       "LexerInvalidChar",
		message:    "The SQL expression contains an invalid character.",
		statusCode: 400,
		cause:      err,
	}
}

func errLexerInvalidOperator(err error) *s3Error {
	return &s3Error{
		code:       "LexerInvalidOperator",
		message:    "The SQL expression contains an invalid operator.",
		statusCode: 400,
		cause:      err,
	}
}

func errLexerInvalidLiteral(err error) *s3Error {
	return &s3Error{
		code:       "LexerInvalidLiteral",
		message:    "The SQL expression contains an invalid literal.",
		statusCode: 400,
		cause:      err,
	}
}

func errLexerInvalidIONLiteral(err error) *s3Error {
	return &s3Error{
		code:       "LexerInvalidIONLiteral",
		message:    "The SQL expression contains an invalid ION literal.",
		statusCode: 400,
		cause:      err,
	}
}
// Parse error constructors: each wraps cause err into an s3Error with the
// AWS S3 Select parse error code, its canonical message and HTTP 400.

func errParseExpectedDatePart(err error) *s3Error {
	return &s3Error{
		code:       "ParseExpectedDatePart",
		message:    "Did not find the expected date part in the SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseExpectedKeyword(err error) *s3Error {
	return &s3Error{
		code:       "ParseExpectedKeyword",
		message:    "Did not find the expected keyword in the SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseExpectedTokenType(err error) *s3Error {
	return &s3Error{
		code:       "ParseExpectedTokenType",
		message:    "Did not find the expected token in the SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseExpected2TokenTypes(err error) *s3Error {
	return &s3Error{
		code:       "ParseExpected2TokenTypes",
		message:    "Did not find the expected token in the SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseExpectedNumber(err error) *s3Error {
	return &s3Error{
		code:       "ParseExpectedNumber",
		message:    "Did not find the expected number in the SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseExpectedRightParenBuiltinFunctionCall(err error) *s3Error {
	return &s3Error{
		code:       "ParseExpectedRightParenBuiltinFunctionCall",
		message:    "Did not find the expected right parenthesis character in the SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseExpectedTypeName(err error) *s3Error {
	return &s3Error{
		code:       "ParseExpectedTypeName",
		message:    "Did not find the expected type name in the SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseExpectedWhenClause(err error) *s3Error {
	return &s3Error{
		code:       "ParseExpectedWhenClause",
		message:    "Did not find the expected WHEN clause in the SQL expression. CASE is not supported.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseUnsupportedToken(err error) *s3Error {
	return &s3Error{
		code:       "ParseUnsupportedToken",
		message:    "The SQL expression contains an unsupported token.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseUnsupportedLiteralsGroupBy(err error) *s3Error {
	return &s3Error{
		code:       "ParseUnsupportedLiteralsGroupBy",
		message:    "The SQL expression contains an unsupported use of GROUP BY.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseExpectedMember(err error) *s3Error {
	return &s3Error{
		code:       "ParseExpectedMember",
		message:    "The SQL expression contains an unsupported use of MEMBER.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseUnsupportedCase(err error) *s3Error {
	return &s3Error{
		code:       "ParseUnsupportedCase",
		message:    "The SQL expression contains an unsupported use of CASE.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseUnsupportedCaseClause(err error) *s3Error {
	return &s3Error{
		code:       "ParseUnsupportedCaseClause",
		message:    "The SQL expression contains an unsupported use of CASE.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseUnsupportedAlias(err error) *s3Error {
	return &s3Error{
		code:       "ParseUnsupportedAlias",
		message:    "The SQL expression contains an unsupported use of ALIAS.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseInvalidPathComponent(err error) *s3Error {
	return &s3Error{
		code:       "ParseInvalidPathComponent",
		message:    "The SQL expression contains an invalid path component.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseMissingIdentAfterAt(err error) *s3Error {
	return &s3Error{
		code:       "ParseMissingIdentAfterAt",
		message:    "Did not find the expected identifier after the @ symbol in the SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseUnexpectedOperator(err error) *s3Error {
	return &s3Error{
		code:       "ParseUnexpectedOperator",
		message:    "The SQL expression contains an unexpected operator.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseUnexpectedTerm(err error) *s3Error {
	return &s3Error{
		code:       "ParseUnexpectedTerm",
		message:    "The SQL expression contains an unexpected term.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseUnexpectedToken(err error) *s3Error {
	return &s3Error{
		code:       "ParseUnexpectedToken",
		message:    "The SQL expression contains an unexpected token.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseUnExpectedKeyword(err error) *s3Error {
	return &s3Error{
		code:       "ParseUnExpectedKeyword",
		message:    "The SQL expression contains an unexpected keyword.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseExpectedExpression(err error) *s3Error {
	return &s3Error{
		code:       "ParseExpectedExpression",
		message:    "Did not find the expected SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseExpectedLeftParenAfterCast(err error) *s3Error {
	return &s3Error{
		code:       "ParseExpectedLeftParenAfterCast",
		message:    "Did not find the expected left parenthesis after CAST in the SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseExpectedLeftParenValueConstructor(err error) *s3Error {
	return &s3Error{
		code:       "ParseExpectedLeftParenValueConstructor",
		message:    "Did not find expected the left parenthesis in the SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseExpectedLeftParenBuiltinFunctionCall(err error) *s3Error {
	return &s3Error{
		code:       "ParseExpectedLeftParenBuiltinFunctionCall",
		message:    "Did not find the expected left parenthesis in the SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseExpectedArgumentDelimiter(err error) *s3Error {
	return &s3Error{
		code:       "ParseExpectedArgumentDelimiter",
		message:    "Did not find the expected argument delimiter in the SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseCastArity(err error) *s3Error {
	return &s3Error{
		code:       "ParseCastArity",
		message:    "The SQL expression CAST has incorrect arity.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseEmptySelect(err error) *s3Error {
	return &s3Error{
		code:       "ParseEmptySelect",
		message:    "The SQL expression contains an empty SELECT.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseSelectMissingFrom(err error) *s3Error {
	return &s3Error{
		code:       "ParseSelectMissingFrom",
		message:    "The SQL expression contains a missing FROM after SELECT list.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseExpectedIdentForGroupName(err error) *s3Error {
	return &s3Error{
		code:       "ParseExpectedIdentForGroupName",
		message:    "GROUP is not supported in the SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseExpectedIdentForAlias(err error) *s3Error {
	return &s3Error{
		code:       "ParseExpectedIdentForAlias",
		message:    "Did not find the expected identifier for the alias in the SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseUnsupportedCallWithStar(err error) *s3Error {
	return &s3Error{
		code:       "ParseUnsupportedCallWithStar",
		message:    "Only COUNT with (*) as a parameter is supported in the SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseMalformedJoin(err error) *s3Error {
	return &s3Error{
		code:       "ParseMalformedJoin",
		message:    "JOIN is not supported in the SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseExpectedIdentForAt(err error) *s3Error {
	return &s3Error{
		code:       "ParseExpectedIdentForAt",
		message:    "Did not find the expected identifier for AT name in the SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}

func errParseCannotMixSqbAndWildcardInSelectList(err error) *s3Error {
	return &s3Error{
		code:       "ParseCannotMixSqbAndWildcardInSelectList",
		message:    "Cannot mix [] and * in the same expression in a SELECT list in SQL expression.",
		statusCode: 400,
		cause:      err,
	}
}
//////////////////////////////////////////////////////////////////////////////////////
//
// CAST() related errors.
//
//////////////////////////////////////////////////////////////////////////////////////
func errCastFailed(err error) *s3Error {
return &s3Error{
code: "CastFailed",
message: "Attempt to convert from one data type to another using CAST failed in the SQL expression.",
statusCode: 400,
cause: err,
}
}
func errInvalidCast(err error) *s3Error {
return &s3Error{
code: "InvalidCast",
message: "Attempt to convert from one data type to another using CAST failed in the SQL expression.",
statusCode: 400,
cause: err,
}
}
func errEvaluatorInvalidTimestampFormatPattern(err error) *s3Error {
return &s3Error{
code: "EvaluatorInvalidTimestampFormatPattern",
message: "Invalid time stamp format string in the SQL expression.",
statusCode: 400,
cause: err,
}
}
// errEvaluatorInvalidTimestampFormatPatternAdditionalFieldsRequired returns
// an s3Error (HTTP 400) for a time stamp format pattern that requires
// additional fields, wrapping err as the underlying cause.
//
// NOTE(review): the wire code is "EvaluatorInvalidTimestampFormatPattern",
// shorter than this function's name — presumably matching AWS's published
// error code for this condition; confirm before changing.
func errEvaluatorInvalidTimestampFormatPatternAdditionalFieldsRequired(err error) *s3Error {
	return &s3Error{
		cause:      err,
		statusCode: 400,
		code:       "EvaluatorInvalidTimestampFormatPattern",
		message:    "Time stamp format pattern requires additional fields in the SQL expression.",
	}
}
// errEvaluatorInvalidTimestampFormatPatternSymbolForParsing returns the
// EvaluatorInvalidTimestampFormatPatternSymbolForParsing s3Error (HTTP 400),
// wrapping err as the underlying cause.
func errEvaluatorInvalidTimestampFormatPatternSymbolForParsing(err error) *s3Error {
	s3Err := s3Error{
		cause:      err,
		statusCode: 400,
		code:       "EvaluatorInvalidTimestampFormatPatternSymbolForParsing",
		message:    "Time stamp format pattern contains a valid format symbol that cannot be applied to time stamp parsing in the SQL expression.",
	}
	return &s3Err
}
// errEvaluatorTimestampFormatPatternDuplicateFields returns the
// EvaluatorTimestampFormatPatternDuplicateFields s3Error (HTTP 400), wrapping
// err as the underlying cause.
func errEvaluatorTimestampFormatPatternDuplicateFields(err error) *s3Error {
	return &s3Error{
		cause:      err,
		statusCode: 400,
		code:       "EvaluatorTimestampFormatPatternDuplicateFields",
		message:    "Time stamp format pattern contains multiple format specifiers representing the time stamp field in the SQL expression.",
	}
}
// errEvaluatorTimestampFormatPatternHourClockAmPmMismatch returns the
// EvaluatorTimestampFormatPatternHourClockAmPmMismatch s3Error (HTTP 400),
// wrapping err as the underlying cause.
func errEvaluatorTimestampFormatPatternHourClockAmPmMismatch(err error) *s3Error {
	s3Err := s3Error{
		cause:      err,
		statusCode: 400,
		code:       "EvaluatorTimestampFormatPatternHourClockAmPmMismatch",
		message:    "Time stamp format pattern contains a 12-hour hour of day format symbol but doesn't also contain an AM/PM field, or it contains a 24-hour hour of day format specifier and contains an AM/PM field in the SQL expression.",
	}
	return &s3Err
}
// errEvaluatorUnterminatedTimestampFormatPatternToken returns the
// EvaluatorUnterminatedTimestampFormatPatternToken s3Error (HTTP 400),
// wrapping err as the underlying cause.
func errEvaluatorUnterminatedTimestampFormatPatternToken(err error) *s3Error {
	return &s3Error{
		cause:      err,
		statusCode: 400,
		code:       "EvaluatorUnterminatedTimestampFormatPatternToken",
		message:    "Time stamp format pattern contains unterminated token in the SQL expression.",
	}
}
// errEvaluatorInvalidTimestampFormatPatternToken returns the
// EvaluatorInvalidTimestampFormatPatternToken s3Error (HTTP 400), wrapping
// err as the underlying cause.
func errEvaluatorInvalidTimestampFormatPatternToken(err error) *s3Error {
	s3Err := s3Error{
		cause:      err,
		statusCode: 400,
		code:       "EvaluatorInvalidTimestampFormatPatternToken",
		message:    "Time stamp format pattern contains an invalid token in the SQL expression.",
	}
	return &s3Err
}
// errEvaluatorInvalidTimestampFormatPatternSymbol returns the
// EvaluatorInvalidTimestampFormatPatternSymbol s3Error (HTTP 400), wrapping
// err as the underlying cause.
func errEvaluatorInvalidTimestampFormatPatternSymbol(err error) *s3Error {
	return &s3Error{
		cause:      err,
		statusCode: 400,
		code:       "EvaluatorInvalidTimestampFormatPatternSymbol",
		message:    "Time stamp format pattern contains an invalid symbol in the SQL expression.",
	}
}
////////////////////////////////////////////////////////////////////////
//
// Generic S3 HTTP handler errors.
//
////////////////////////////////////////////////////////////////////////
// errBusy returns the Busy s3Error (HTTP 503, service unavailable), wrapping
// err as the underlying cause.
func errBusy(err error) *s3Error {
	return &s3Error{
		cause:      err,
		statusCode: 503,
		code:       "Busy",
		message:    "The service is unavailable. Please retry.",
	}
}
// errUnauthorizedAccess returns the UnauthorizedAccess s3Error (HTTP 401),
// wrapping err as the underlying cause.
func errUnauthorizedAccess(err error) *s3Error {
	s3Err := s3Error{
		cause:      err,
		statusCode: 401,
		code:       "UnauthorizedAccess",
		message:    "You are not authorized to perform this operation",
	}
	return &s3Err
}
// errEmptyRequestBody returns the EmptyRequestBody s3Error (HTTP 400),
// wrapping err as the underlying cause.
func errEmptyRequestBody(err error) *s3Error {
	return &s3Error{
		cause:      err,
		statusCode: 400,
		code:       "EmptyRequestBody",
		message:    "Request body cannot be empty.",
	}
}
// errUnsupportedRangeHeader returns the UnsupportedRangeHeader s3Error
// (HTTP 400), wrapping err as the underlying cause.
func errUnsupportedRangeHeader(err error) *s3Error {
	s3Err := s3Error{
		cause:      err,
		statusCode: 400,
		code:       "UnsupportedRangeHeader",
		message:    "Range header is not supported for this operation.",
	}
	return &s3Err
}
// errUnsupportedStorageClass returns the UnsupportedStorageClass s3Error
// (HTTP 400), wrapping err as the underlying cause.
func errUnsupportedStorageClass(err error) *s3Error {
	return &s3Error{
		cause:      err,
		statusCode: 400,
		code:       "UnsupportedStorageClass",
		message:    "Encountered an invalid storage class. Only STANDARD, STANDARD_IA, and ONEZONE_IA storage classes are supported.",
	}
}