Remove GPL go-lzo dependency for parquet-go (#7220)

Also remove any other unused dependencies
Harshavardhana 2019-02-11 01:27:24 -08:00 committed by Nitish Tiwari
parent 082f777281
commit a8cd70f3e5
27 changed files with 20 additions and 6386 deletions

@@ -5,7 +5,7 @@ You can use the Select API to query objects with following features:
- CSV, JSON and Parquet - Objects must be in CSV, JSON, or Parquet format.
- UTF-8 is the only encoding type the Select API supports.
- GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. The Select API supports columnar compression for Parquet using GZIP, Snappy, LZO, LZ4. Whole object compression is not supported for Parquet objects.
- GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. The Select API supports columnar compression for Parquet using GZIP, Snappy, LZ4. Whole object compression is not supported for Parquet objects.
- Server-side encryption - The Select API supports querying objects that are protected with server-side encryption.
Type inference and automatic conversion of values is performed based on the context when the value is un-typed (such as when reading CSV data). If present, the CAST function overrides automatic conversion.
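
For readers of this doc hunk, a minimal sketch of issuing a Select query against a GZIP-compressed CSV object with minio-go. The option and constant names (`SelectObjectOptions`, `SelectCompressionGZIP`, `CSVFileHeaderInfoUse`, and so on) are recalled from minio-go v6 and should be treated as assumptions, as are the endpoint, credentials, bucket, and object names.

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	minio "github.com/minio/minio-go"
)

func main() {
	// Endpoint, credentials, bucket and object below are placeholders.
	client, err := minio.New("play.min.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	opts := minio.SelectObjectOptions{
		Expression:     "SELECT * FROM S3Object LIMIT 10",
		ExpressionType: minio.QueryExpressionTypeSQL,
		InputSerialization: minio.SelectObjectInputSerialization{
			// Whole-object compression applies to CSV and JSON objects only.
			CompressionType: minio.SelectCompressionGZIP,
			CSV:             &minio.CSVInputOptions{FileHeaderInfo: minio.CSVFileHeaderInfoUse},
		},
		OutputSerialization: minio.SelectObjectOutputSerialization{
			CSV: &minio.CSVOutputOptions{},
		},
	}

	// Field and constant names above follow minio-go v6 from memory and may differ.
	res, err := client.SelectObjectContent(context.Background(), "mybucket", "data.csv.gz", opts)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Close()
	if _, err := io.Copy(os.Stdout, res); err != nil {
		log.Fatal(err)
	}
}
```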

@@ -25,7 +25,6 @@ import (
"github.com/golang/snappy"
"github.com/minio/parquet-go/gen-go/parquet"
"github.com/pierrec/lz4"
lzo "github.com/rasky/go-lzo"
)
type compressionCodec parquet.CompressionCodec
@@ -46,9 +45,6 @@ func (c compressionCodec) uncompress(buf []byte) ([]byte, error) {
defer reader.Close()
return ioutil.ReadAll(reader)
case parquet.CompressionCodec_LZO:
return lzo.Decompress1X(bytes.NewReader(buf), len(buf), 0)
case parquet.CompressionCodec_LZ4:
return ioutil.ReadAll(lz4.NewReader(bytes.NewReader(buf)))
}
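
To make the effect of this hunk concrete, here is a self-contained sketch of a codec switch with the LZO branch gone, keeping only the GZIP, Snappy, and LZ4 paths shown above. The real parquet-go code switches on parquet.CompressionCodec constants; plain strings are used here only to keep the example runnable on its own.

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"

	"github.com/golang/snappy"
	"github.com/pierrec/lz4"
)

// uncompress mirrors the three codec branches that remain once LZO is dropped.
func uncompress(codec string, buf []byte) ([]byte, error) {
	switch codec {
	case "SNAPPY":
		return snappy.Decode(nil, buf)
	case "GZIP":
		reader, err := gzip.NewReader(bytes.NewReader(buf))
		if err != nil {
			return nil, err
		}
		defer reader.Close()
		return ioutil.ReadAll(reader)
	case "LZ4":
		return ioutil.ReadAll(lz4.NewReader(bytes.NewReader(buf)))
	}
	return nil, fmt.Errorf("unsupported compression codec %v", codec)
}

func main() {
	// Quick round trip through the Snappy branch.
	compressed := snappy.Encode(nil, []byte("hello parquet"))
	out, err := uncompress("SNAPPY", compressed)
	fmt.Println(string(out), err)
}
```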

8 vendor/github.com/minio/parquet-go/go.mod generated vendored Normal file
@@ -0,0 +1,8 @@
module github.com/minio/parquet-go
require (
git.apache.org/thrift.git v0.12.0
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db
github.com/minio/minio-go v6.0.14+incompatible
github.com/pierrec/lz4 v2.0.5+incompatible
)

8 vendor/github.com/minio/parquet-go/go.sum generated vendored Normal file
@@ -0,0 +1,8 @@
git.apache.org/thrift.git v0.12.0 h1:CMxsZlAmxKs+VAZMlDDL0wXciMblJcutQbEe3A9CYUM=
git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/minio/minio-go v6.0.14+incompatible h1:fnV+GD28LeqdN6vT2XdGKW8Qe/IfjJDswNVuni6km9o=
github.com/minio/minio-go v6.0.14+incompatible/go.mod h1:7guKYtitv8dktvNUGrhzmNlA5wrAABTQXCoesZdFQO8=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=

@@ -1,339 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

@@ -1,36 +0,0 @@
# go-lzo
[![Build status](https://travis-ci.org/rasky/go-lzo.svg)](https://travis-ci.org/rasky/go-lzo)
[![Coverage Status](https://coveralls.io/repos/rasky/go-lzo/badge.svg?branch=master&service=github)](https://coveralls.io/github/rasky/go-lzo?branch=master)
Native LZO1X implementation in Golang
This code has been written using the original LZO1X source code as a reference,
to study and understand the algorithms. Both the LZO1X-1 and LZO1X-999
algorithms are implemented. These are the most popular of the whole LZO suite
of algorithms.
Being a straightforward port of the original source code, it shares the same
license (GPLv2) as I can't possibly claim any copyright on it.
I plan to eventually reimplement LZO1X-1 from scratch. At that point, I will be
also changing license.
# Benchmarks
These are the benchmarks obtained running the testsuite over the Canterbury
corpus for the available compressor levels:
Compressor | Level | Original | Compressed | Factor | Time | Speed
-----------|-------|----------|------------|--------|------|------
LZO1X-1 | - | 18521760 | 8957481 | 51.6% | 0.16s | 109MiB/s
LZO1X-999 | 1 | 18521760 | 8217347 | 55.6% | 1.38s | 13MiB/s
LZO1X-999 | 2 | 18521760 | 7724879 | 58.3% | 1.50s | 12MiB/s
LZO1X-999 | 3 | 18521760 | 7384377 | 60.1% | 1.68s | 10MiB/s
LZO1X-999 | 4 | 18521760 | 7266674 | 60.8% | 1.69s | 10MiB/s
LZO1X-999 | 5 | 18521760 | 6979879 | 62.3% | 2.75s | 6.4MiB/s
LZO1X-999 | 6 | 18521760 | 6938593 | 62.5% | 4.53s | 3.9MiB/s
LZO1X-999 | 7 | 18521760 | 6905362 | 62.7% | 6.94s | 2.5MiB/s
LZO1X-999 | 8 | 18521760 | 6713477 | 63.8% | 20.96s | 863KiB/s
LZO1X-999 | 9 | 18521760 | 6712069 | 63.8% | 22.82s | 792KiB/s
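
Since the vendored package is being dropped, a short round-trip sketch of the API it exposed may help reviewers; it uses only the Compress1X and Decompress1X signatures visible in the deleted files below, with illustrative input data.

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	lzo "github.com/rasky/go-lzo"
)

func main() {
	original := []byte("the quick brown fox jumps over the lazy dog")

	// LZO1X-1 compression; Compress1X999 trades speed for a better ratio.
	compressed := lzo.Compress1X(original)

	// Decompress1X takes the compressed length as a read limit and an
	// optional output-size hint used for preallocation.
	decompressed, err := lzo.Decompress1X(bytes.NewReader(compressed), len(compressed), len(original))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(original, decompressed)) // true
}
```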

@@ -1,178 +0,0 @@
package lzo
func appendMulti(out []byte, t int) []byte {
for t > 255 {
out = append(out, 0)
t -= 255
}
out = append(out, byte(t))
return out
}
func compress(in []byte) (out []byte, sz int) {
var m_off int
in_len := len(in)
ip_len := in_len - m2_MAX_LEN - 5
dict := make([]int32, 1<<d_BITS)
ii := 0
ip := 4
for {
key := int(in[ip+3])
key = (key << 6) ^ int(in[ip+2])
key = (key << 5) ^ int(in[ip+1])
key = (key << 5) ^ int(in[ip+0])
dindex := ((0x21 * key) >> 5) & d_MASK
m_pos := int(dict[dindex]) - 1
if m_pos < 0 {
goto literal
}
if ip == m_pos || (ip-m_pos) > m4_MAX_OFFSET {
goto literal
}
m_off = ip - m_pos
if m_off <= m2_MAX_OFFSET || in[m_pos+3] == in[ip+3] {
goto try_match
}
dindex = (dindex & (d_MASK & 0x7ff)) ^ (d_HIGH | 0x1f)
m_pos = int(dict[dindex]) - 1
if m_pos < 0 {
goto literal
}
if ip == m_pos || (ip-m_pos) > m4_MAX_OFFSET {
goto literal
}
m_off = ip - m_pos
if m_off <= m2_MAX_OFFSET || in[m_pos+3] == in[ip+3] {
goto try_match
}
goto literal
try_match:
if in[m_pos] == in[ip] && in[m_pos+1] == in[ip+1] && in[m_pos+2] == in[ip+2] {
goto match
}
literal:
dict[dindex] = int32(ip + 1)
ip += 1 + (ip-ii)>>5
if ip >= ip_len {
break
}
continue
match:
dict[dindex] = int32(ip + 1)
if ip != ii {
t := ip - ii
if t <= 3 {
out[len(out)-2] |= byte(t)
} else if t <= 18 {
out = append(out, byte(t-3))
} else {
out = append(out, 0)
out = appendMulti(out, t-18)
}
out = append(out, in[ii:ii+t]...)
ii += t
}
var i int
ip += 3
for i = 3; i < 9; i++ {
ip++
if in[m_pos+i] != in[ip-1] {
break
}
}
if i < 9 {
ip--
m_len := ip - ii
if m_off <= m2_MAX_OFFSET {
m_off -= 1
out = append(out,
byte((((m_len - 1) << 5) | ((m_off & 7) << 2))),
byte((m_off >> 3)))
} else if m_off <= m3_MAX_OFFSET {
m_off -= 1
out = append(out,
byte(m3_MARKER|(m_len-2)),
byte((m_off&63)<<2),
byte(m_off>>6))
} else {
m_off -= 0x4000
out = append(out,
byte(m4_MARKER|((m_off&0x4000)>>11)|(m_len-2)),
byte((m_off&63)<<2),
byte(m_off>>6))
}
} else {
m := m_pos + m2_MAX_LEN + 1
for ip < in_len && in[m] == in[ip] {
m++
ip++
}
m_len := ip - ii
if m_off <= m3_MAX_OFFSET {
m_off -= 1
if m_len <= 33 {
out = append(out, byte(m3_MARKER|(m_len-2)))
} else {
m_len -= 33
out = append(out, byte(m3_MARKER|0))
out = appendMulti(out, m_len)
}
} else {
m_off -= 0x4000
if m_len <= m4_MAX_LEN {
out = append(out, byte(m4_MARKER|((m_off&0x4000)>>11)|(m_len-2)))
} else {
m_len -= m4_MAX_LEN
out = append(out, byte(m4_MARKER|((m_off&0x4000)>>11)))
out = appendMulti(out, m_len)
}
}
out = append(out, byte((m_off&63)<<2), byte(m_off>>6))
}
ii = ip
if ip >= ip_len {
break
}
}
sz = in_len - ii
return
}
// Compress an input buffer with LZO1X
func Compress1X(in []byte) (out []byte) {
var t int
in_len := len(in)
if in_len <= m2_MAX_LEN+5 {
t = in_len
} else {
out, t = compress(in)
}
if t > 0 {
ii := in_len - t
if len(out) == 0 && t <= 238 {
out = append(out, byte(17+t))
} else if t <= 3 {
out[len(out)-2] |= byte(t)
} else if t <= 18 {
out = append(out, byte(t-3))
} else {
out = append(out, 0)
out = appendMulti(out, t-18)
}
out = append(out, in[ii:ii+t]...)
}
out = append(out, m4_MARKER|1, 0, 0)
return
}

@@ -1,416 +0,0 @@
package lzo
type compressor struct {
in []byte
ip int
bp int
// stats
matchBytes int
litBytes int
lazy int
r1lit int
r2lit int
m1am uint
m2m uint
m1bm uint
m3m uint
m4m uint
lit1r uint
lit2r uint
lit3r uint
r1mlen int
lastmlen int
lastmoff int
textsize uint
mlen int
moff int
look uint
}
func (ctx *compressor) codeMatch(out []byte, mlen int, moff int) []byte {
xlen := mlen
xoff := moff
ctx.matchBytes += mlen
switch {
case mlen == 2:
if moff > m1_MAX_OFFSET {
panic("codeMatch: mlen 2: moff error")
}
if ctx.r1lit < 1 || ctx.r1lit >= 4 {
panic("codeMatch: mlen 2: r1lit error")
}
moff -= 1
out = append(out,
m1_MARKER|byte((moff&3)<<2),
byte(moff>>2))
ctx.m1am++
case mlen <= m2_MAX_LEN && moff <= m2_MAX_OFFSET:
if mlen < 3 {
panic("codeMatch: m2: mlen error")
}
moff -= 1
out = append(out,
byte((mlen-1)<<5|(moff&7)<<2),
byte(moff>>3))
if out[len(out)-2] < m2_MARKER {
panic("codeMatch: m2: invalid marker")
}
ctx.m2m++
case mlen == m2_MIN_LEN && moff <= mX_MAX_OFFSET && ctx.r1lit >= 4:
if mlen != 3 {
panic("codeMatch: m2min: invalid mlen")
}
if moff <= m2_MAX_OFFSET {
panic("codeMatch: m2min: invalid moff")
}
moff -= 1 + m2_MAX_OFFSET
out = append(out,
byte(m1_MARKER|((moff&3)<<2)),
byte(moff>>2))
ctx.m1bm++
case moff <= m3_MAX_OFFSET:
if mlen < 3 {
panic("codeMatch: m3max: invalid mlen")
}
moff -= 1
if mlen <= m3_MAX_LEN {
out = append(out, byte(m3_MARKER|(mlen-2)))
} else {
mlen -= m3_MAX_LEN
out = append(out, byte(m3_MARKER|0))
out = appendMulti(out, mlen)
}
out = append(out, byte(moff<<2), byte(moff>>6))
ctx.m3m++
default:
if mlen < 3 {
panic("codeMatch: default: invalid mlen")
}
if moff <= 0x4000 || moff >= 0xc000 {
panic("codeMatch: default: invalid moff")
}
moff -= 0x4000
k := (moff & 0x4000) >> 11
if mlen <= m4_MAX_LEN {
out = append(out, byte(m4_MARKER|k|(mlen-2)))
} else {
mlen -= m4_MAX_LEN
out = append(out, byte(m4_MARKER|k|0))
out = appendMulti(out, mlen)
}
out = append(out, byte(moff<<2), byte(moff>>6))
ctx.m4m++
}
ctx.lastmlen = xlen
ctx.lastmoff = xoff
return out
}
func (ctx *compressor) storeRun(out []byte, ii int, t int) []byte {
ctx.litBytes += t
if len(out) == 0 && t <= 238 {
out = append(out, byte(17+t))
} else if t <= 3 {
out[len(out)-2] |= byte(t)
ctx.lit1r++
} else if t <= 18 {
out = append(out, byte(t-3))
ctx.lit2r++
} else {
out = append(out, 0)
out = appendMulti(out, t-18)
ctx.lit3r++
}
out = append(out, ctx.in[ii:ii+t]...)
return out
}
func (ctx *compressor) codeRun(out []byte, ii int, lit int, mlen int) []byte {
if lit > 0 {
if mlen < 2 {
panic("codeRun: invalid mlen")
}
out = ctx.storeRun(out, ii, lit)
ctx.r1mlen = mlen
ctx.r1lit = lit
} else {
if mlen < 3 {
panic("codeRun: invalid mlen")
}
ctx.r1mlen = 0
ctx.r1lit = 0
}
return out
}
func (ctx *compressor) lenOfCodedMatch(mlen int, moff int, lit int) int {
switch {
case mlen < 2:
return 0
case mlen == 2:
if moff <= m1_MAX_OFFSET && lit > 0 && lit < 4 {
return 2
}
return 0
case mlen <= m2_MAX_LEN && moff <= m2_MAX_OFFSET:
return 2
case mlen == m2_MIN_LEN && moff <= mX_MAX_OFFSET && lit >= 4:
return 2
case moff <= m3_MAX_OFFSET:
if mlen <= m3_MAX_LEN {
return 3
}
n := 4
mlen -= m3_MAX_LEN
for mlen > 255 {
mlen -= 255
n++
}
return n
case moff <= m4_MAX_OFFSET:
if mlen <= m4_MAX_LEN {
return 3
}
n := 4
mlen -= m4_MAX_LEN
for mlen > 255 {
mlen -= 255
n++
}
return n
default:
return 0
}
}
func (ctx *compressor) minGain(ahead int,
lit1, lit2 int, l1, l2, l3 int) int {
if ahead <= 0 {
panic("minGain: invalid ahead")
}
mingain := int(ahead)
if lit1 <= 3 {
if lit2 > 3 {
mingain += 2
}
} else if lit1 <= 18 {
if lit2 > 18 {
mingain += 1
}
}
mingain += int((l2 - l1) * 2)
if l3 != 0 {
mingain -= int((ahead - l3) * 2)
}
if mingain < 0 {
mingain = 0
}
return mingain
}
type parms struct {
TryLazy int
GoodLen uint
MaxLazy uint
NiceLen uint
MaxChain uint
Flags uint32
}
func compress999(in []byte, p parms) []byte {
ctx := compressor{}
swd := swd{}
if p.TryLazy < 0 {
p.TryLazy = 1
}
if p.GoodLen == 0 {
p.GoodLen = 32
}
if p.MaxLazy == 0 {
p.MaxLazy = 32
}
if p.MaxChain == 0 {
p.MaxChain = cSWD_MAX_CHAIN
}
ctx.in = in
out := make([]byte, 0, len(in)/2)
ii := 0
lit := 0
ctx.initMatch(&swd, p.Flags)
if p.MaxChain > 0 {
swd.MaxChain = p.MaxChain
}
if p.NiceLen > 0 {
swd.NiceLength = p.NiceLen
}
ctx.findMatch(&swd, 0, 0)
for ctx.look > 0 {
mlen := ctx.mlen
moff := ctx.moff
if ctx.bp != ctx.ip-int(ctx.look) {
panic("assert: compress: invalid bp")
}
if ctx.bp < 0 {
panic("assert: compress: negative bp")
}
if lit == 0 {
ii = ctx.bp
}
if ii+lit != ctx.bp {
panic("assert: compress: invalid ii")
}
if swd.BChar != int(ctx.in[ctx.bp]) {
panic("assert: compress: invalid bchar")
}
if mlen < 2 ||
(mlen == 2 && (moff > m1_MAX_OFFSET || lit == 0 || lit >= 4)) ||
(mlen == 2 && len(out) == 0) ||
(len(out) == 0 && lit == 0) {
// literal
mlen = 0
} else if mlen == m2_MIN_LEN {
if moff > mX_MAX_OFFSET && lit >= 4 {
mlen = 0
}
}
if mlen == 0 {
// literal
lit++
swd.MaxChain = p.MaxChain
ctx.findMatch(&swd, 1, 0)
continue
}
// a match
if swd.UseBestOff {
mlen, moff = ctx.betterMatch(&swd, mlen, moff)
}
ctx.assertMatch(&swd, mlen, moff)
// check if we want to try a lazy match
ahead := 0
l1 := 0
maxahead := 0
if p.TryLazy != 0 && mlen < int(p.MaxLazy) {
l1 = ctx.lenOfCodedMatch(mlen, moff, lit)
if l1 == 0 {
panic("assert: compress: invalid len of coded match")
}
maxahead = p.TryLazy
if maxahead > l1-1 {
maxahead = l1 - 1
}
}
matchdone := false
for ahead < maxahead && int(ctx.look) > mlen {
if mlen >= int(p.GoodLen) {
swd.MaxChain = p.MaxChain >> 2
} else {
swd.MaxChain = p.MaxChain
}
ctx.findMatch(&swd, 1, 0)
ahead++
if ctx.look <= 0 {
panic("assert: compress: invalid look")
}
if ii+lit+ahead != ctx.bp {
panic("assert: compress: invalid bp")
}
if ctx.mlen < mlen {
continue
}
if ctx.mlen == mlen && ctx.moff >= moff {
continue
}
if swd.UseBestOff {
ctx.mlen, ctx.moff = ctx.betterMatch(&swd, ctx.mlen, ctx.moff)
}
l2 := ctx.lenOfCodedMatch(ctx.mlen, ctx.moff, lit+ahead)
if l2 == 0 {
continue
}
l3 := 0
if len(out) > 0 {
l3 = ctx.lenOfCodedMatch(ahead, moff, lit)
}
mingain := ctx.minGain(ahead, lit, lit+ahead, l1, l2, l3)
if ctx.mlen >= mlen+mingain {
ctx.lazy++
ctx.assertMatch(&swd, ctx.mlen, ctx.moff)
if l3 > 0 {
out = ctx.codeRun(out, ii, lit, ahead)
lit = 0
out = ctx.codeMatch(out, ahead, moff)
} else {
lit += ahead
if ii+lit != ctx.bp {
panic("assert: compress: invalid bp after l3")
}
}
matchdone = true
break
}
}
if !matchdone {
if ii+lit+ahead != ctx.bp {
panic("assert: compress: invalid bp out of for loop")
}
out = ctx.codeRun(out, ii, lit, mlen)
lit = 0
out = ctx.codeMatch(out, mlen, moff)
swd.MaxChain = p.MaxChain
ctx.findMatch(&swd, uint(mlen), uint(1+ahead))
}
}
if lit > 0 {
out = ctx.storeRun(out, ii, lit)
}
out = append(out, m4_MARKER|1, 0, 0)
if ctx.litBytes+ctx.matchBytes != len(ctx.in) {
panic("assert: compress999: not processed full input")
}
return out
}
var fixedLevels = [...]parms{
{0, 0, 0, 8, 4, 0},
{0, 0, 0, 16, 8, 0},
{0, 0, 0, 32, 16, 0},
{1, 4, 4, 16, 16, 0},
{1, 8, 16, 32, 32, 0},
{1, 8, 16, 128, 128, 0},
{2, 8, 32, 128, 256, 0},
{2, 32, 128, cSWD_F, 2048, 1},
{2, cSWD_F, cSWD_F, cSWD_F, 4096, 1},
}
func Compress1X999Level(in []byte, level int) []byte {
return compress999(in, fixedLevels[level-1])
}
func Compress1X999(in []byte) []byte {
return Compress1X999Level(in, 9)
}

@@ -1,289 +0,0 @@
package lzo
import (
"errors"
"io"
"runtime"
)
var (
InputUnderrun = errors.New("input underrun")
LookBehindUnderrun = errors.New("lookbehind underrun")
)
type reader struct {
r io.Reader
len int
buf [4096]byte
cur []byte
Err error
}
func newReader(r io.Reader, inlen int) *reader {
if inlen == 0 {
inlen = -1
}
in := &reader{r: r, len: inlen}
in.Rebuffer()
return in
}
// Read more data from the underlying reader and put it into the buffer.
// Also makes sure there is always at least 32 bytes in the buffer, so that
// in the main loop we can avoid checking for the end of buffer.
func (in *reader) Rebuffer() {
const RBUF_WND = 32
var rbuf [RBUF_WND]byte
if len(in.cur) > RBUF_WND || in.len == 0 {
return
}
rb := rbuf[:len(in.cur)]
copy(rb, in.cur)
in.cur = in.buf[:]
copy(in.cur, rb)
cur := in.cur[len(rb):]
if in.len >= 0 && len(cur) > in.len {
cur = cur[:in.len]
}
n, err := in.r.Read(cur)
if err != nil {
// If EOF is returned, treat it as error only if there are no further
// bytes in the window. Otherwise, let's postpone because those bytes
// could contain the terminator.
if err != io.EOF || len(rb) == 0 {
in.Err = err
in.cur = nil
}
}
in.cur = in.cur[:len(rb)+n]
if in.len >= 0 {
in.len -= n
}
}
func (in *reader) ReadAppend(out *[]byte, n int) {
for n > 0 {
m := len(in.cur)
if m > n {
m = n
}
*out = append(*out, in.cur[:m]...)
in.cur = in.cur[m:]
n -= m
if len(in.cur) == 0 {
in.Rebuffer()
if len(in.cur) == 0 {
in.Err = io.EOF
return
}
}
}
return
}
func (in *reader) ReadU8() (ch byte) {
ch = in.cur[0]
in.cur = in.cur[1:]
return
}
func (in *reader) ReadU16() int {
b0 := in.cur[0]
b1 := in.cur[1]
in.cur = in.cur[2:]
return int(b0) + int(b1)<<8
}
func (in *reader) ReadMulti(base int) (b int) {
for {
for i := 0; i < len(in.cur); i++ {
v := in.cur[i]
if v == 0 {
b += 255
} else {
b += int(v) + base
in.cur = in.cur[i+1:]
return
}
}
in.cur = in.cur[0:0]
in.Rebuffer()
if len(in.cur) == 0 {
in.Err = io.EOF
return
}
}
}
func copyMatch(out *[]byte, m_pos int, n int) {
if m_pos+n > len(*out) {
// fmt.Println("copy match WITH OVERLAP!")
for i := 0; i < n; i++ {
*out = append(*out, (*out)[m_pos])
m_pos++
}
} else {
// fmt.Println("copy match:", len(*out), m_pos, m_pos+n)
*out = append(*out, (*out)[m_pos:m_pos+n]...)
}
}
// Decompress an input compressed with LZO1X.
//
// LZO1X has a stream terminator marker, so the decompression will always stop
// when this marker is found.
//
// If inLen is not zero, it is expected to match the length of the compressed
// input stream, and it is used to limit reads from the underlying reader; if
// inLen is smaller than the real stream, the decompression will abort with an
// error; if inLen is larger than the real stream, or if it is zero, the
// decompression will succeed but more bytes than necessary might be read
// from the underlying reader. If the reader returns EOF before the termination
// marker is found, the decompression aborts and EOF is returned.
//
// outLen is optional; if it's not zero, it is used as a hint to preallocate the
// output buffer to increase performance of the decompression.
func Decompress1X(r io.Reader, inLen int, outLen int) (out []byte, err error) {
var t, m_pos int
var last2 byte
defer func() {
// To gain performance, we don't do any bounds checking while reading
// the input, so if the decompressor reads past the end of the input
// stream, a runtime error is raised. This saves about 7% of performance
// as the reading functions are very hot in the decompressor.
if r := recover(); r != nil {
if re, ok := r.(runtime.Error); ok {
if re.Error() == "runtime error: index out of range" {
err = io.EOF
return
}
}
panic(r)
}
}()
out = make([]byte, 0, outLen)
in := newReader(r, inLen)
ip := in.ReadU8()
if ip > 17 {
t = int(ip) - 17
if t < 4 {
goto match_next
}
in.ReadAppend(&out, t)
// fmt.Println("begin:", string(out))
goto first_literal_run
}
begin_loop:
t = int(ip)
if t >= 16 {
goto match
}
if t == 0 {
t = in.ReadMulti(15)
}
in.ReadAppend(&out, t+3)
// fmt.Println("readappend", t+3, string(out[len(out)-t-3:]))
first_literal_run:
ip = in.ReadU8()
last2 = ip
t = int(ip)
if t >= 16 {
goto match
}
m_pos = len(out) - (1 + m2_MAX_OFFSET)
m_pos -= t >> 2
ip = in.ReadU8()
m_pos -= int(ip) << 2
// fmt.Println("m_pos flr", m_pos, len(out), "\n", string(out))
if m_pos < 0 {
err = LookBehindUnderrun
return
}
copyMatch(&out, m_pos, 3)
goto match_done
match:
in.Rebuffer()
if in.Err != nil {
err = in.Err
return
}
t = int(ip)
last2 = ip
if t >= 64 {
m_pos = len(out) - 1
m_pos -= (t >> 2) & 7
ip = in.ReadU8()
m_pos -= int(ip) << 3
// fmt.Println("m_pos t64", m_pos, t, int(ip))
t = (t >> 5) - 1
goto copy_match
} else if t >= 32 {
t &= 31
if t == 0 {
t = in.ReadMulti(31)
}
m_pos = len(out) - 1
v16 := in.ReadU16()
m_pos -= v16 >> 2
last2 = byte(v16 & 0xFF)
// fmt.Println("m_pos t32", m_pos)
} else if t >= 16 {
m_pos = len(out)
m_pos -= (t & 8) << 11
t &= 7
if t == 0 {
t = in.ReadMulti(7)
}
v16 := in.ReadU16()
m_pos -= v16 >> 2
if m_pos == len(out) {
// fmt.Println("END", t, v16, m_pos)
return
}
m_pos -= 0x4000
last2 = byte(v16 & 0xFF)
// fmt.Println("m_pos t16", m_pos)
} else {
m_pos = len(out) - 1
m_pos -= t >> 2
ip = in.ReadU8()
m_pos -= int(ip) << 2
if m_pos < 0 {
err = LookBehindUnderrun
return
}
// fmt.Println("m_pos tX", m_pos)
copyMatch(&out, m_pos, 2)
goto match_done
}
copy_match:
if m_pos < 0 {
err = LookBehindUnderrun
return
}
copyMatch(&out, m_pos, t+2)
match_done:
t = int(last2 & 3)
if t == 0 {
goto match_end
}
match_next:
// fmt.Println("read append finale:", t)
in.ReadAppend(&out, t)
ip = in.ReadU8()
goto match
match_end:
ip = in.ReadU8()
goto begin_loop
}
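
The doc comment above spells out the inLen and outLen semantics; here is a minimal sketch of decompressing from a file whose compressed size is known from Stat. The file name is purely hypothetical.

```go
package main

import (
	"fmt"
	"log"
	"os"

	lzo "github.com/rasky/go-lzo"
)

func main() {
	// "payload.lzo1x" is a hypothetical raw LZO1X stream.
	f, err := os.Open("payload.lzo1x")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	st, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}

	// inLen limits reads to the compressed stream size; outLen is only a
	// preallocation hint, so 0 is safe when the original size is unknown.
	out, err := lzo.Decompress1X(f, int(st.Size()), 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("decompressed %d bytes\n", len(out))
}
```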

@@ -1,29 +0,0 @@
package lzo
const (
m1_MAX_OFFSET = 0x0400
m2_MAX_OFFSET = 0x0800
m3_MAX_OFFSET = 0x4000
m4_MAX_OFFSET = 0xbfff
mX_MAX_OFFSET = m1_MAX_OFFSET + m2_MAX_OFFSET
m1_MIN_LEN = 2
m1_MAX_LEN = 2
m2_MIN_LEN = 3
m2_MAX_LEN = 8
m3_MIN_LEN = 3
m3_MAX_LEN = 33
m4_MIN_LEN = 3
m4_MAX_LEN = 9
m1_MARKER = 0
m2_MARKER = 64
m3_MARKER = 32
m4_MARKER = 16
)
const (
d_BITS = 14
d_MASK = (1 << d_BITS) - 1
d_HIGH = (d_MASK >> 1) + 1
)

@@ -1,10 +0,0 @@
// +build gofuzz
package lzo
import "bytes"
func Fuzz(data []byte) int {
Decompress1X(bytes.NewBuffer(data), 0, 0)
return 0
}

@@ -1,101 +0,0 @@
package lzo
func (ctx *compressor) initMatch(s *swd, flags uint32) {
s.ctx = ctx
s.init()
if flags&1 != 0 {
s.UseBestOff = true
}
}
func (ctx *compressor) findMatch(s *swd, thislen uint, skip uint) {
if skip > 0 {
if thislen < skip {
panic("assert: findMatch: invalid thislen")
}
s.accept(thislen - skip)
ctx.textsize += thislen - skip + 1
} else {
if thislen > 1 {
panic("assert: findMatch: invalid thislen")
}
ctx.textsize += thislen - skip
}
s.MLen = cSWD_THRESHOLD
s.MOff = 0
for i := 0; i < len(s.bestPos); i++ {
s.bestPos[i] = 0
}
s.findbest()
ctx.mlen = int(s.MLen)
ctx.moff = int(s.MOff)
s.getbyte()
if s.BChar < 0 {
ctx.look = 0
ctx.mlen = 0
} else {
ctx.look = s.Look + 1
}
ctx.bp = ctx.ip - int(ctx.look)
}
func (ctx *compressor) betterMatch(s *swd, imlen, imoff int) (mlen int, moff int) {
mlen, moff = imlen, imoff
if mlen <= m2_MIN_LEN {
return
}
if moff <= m2_MAX_OFFSET {
return
}
if moff > m2_MAX_OFFSET && mlen >= m2_MIN_LEN+1 && mlen <= m2_MAX_LEN+1 &&
s.BestOff[mlen-1] > 0 && s.BestOff[mlen-1] <= m2_MAX_OFFSET {
mlen -= 1
moff = int(s.BestOff[mlen])
return
}
if moff > m3_MAX_OFFSET && mlen >= m4_MAX_LEN+1 && mlen <= m2_MAX_LEN+2 &&
s.BestOff[mlen-2] > 0 && s.BestOff[mlen-2] <= m2_MAX_OFFSET {
mlen -= 2
moff = int(s.BestOff[mlen])
return
}
if moff > m3_MAX_OFFSET && mlen >= m4_MAX_LEN+1 && mlen <= m3_MAX_LEN+1 &&
s.BestOff[mlen-1] > 0 && s.BestOff[mlen-1] <= m3_MAX_OFFSET {
mlen -= 1
moff = int(s.BestOff[mlen])
return
}
return
}
func assertMemcmp(b1, b2 []byte, l int) {
b1 = b1[:l]
b2 = b2[:l]
for i := 0; i < len(b1); i++ {
if b1[i] != b2[i] {
panic("assertMemcmp: dosn't match")
}
}
}
func (ctx *compressor) assertMatch(s *swd, mlen, moff int) {
if mlen < 2 {
panic("assertMatch: invalid mlen")
}
if moff <= ctx.bp {
if ctx.bp-moff+mlen >= ctx.ip {
panic("assertMatch: invalid bp")
}
assertMemcmp(ctx.in[ctx.bp:], ctx.in[ctx.bp-moff:], mlen)
} else {
panic("dict should not exit")
}
}

349 vendor/github.com/rasky/go-lzo/swd.go generated vendored
@@ -1,349 +0,0 @@
package lzo
const (
cSWD_N = m4_MAX_OFFSET // ring buffer size
cSWD_THRESHOLD = 1 // lower limit for match length
cSWD_F = 2048 // upper limit for match length
cSWD_BEST_OFF = m3_MAX_LEN + 1 // max(m2,m3,m4)+1
cSWD_HSIZE = 16384
cSWD_MAX_CHAIN = 2048
)
type swd struct {
// Public builtin
SwdN uint
SwdF uint
SwdThreshold uint
// Public configuration
MaxChain uint
NiceLength uint
UseBestOff bool
LazyInsert uint
// Output
MLen uint
MOff uint
Look uint
BChar int
BestOff [cSWD_BEST_OFF]uint
// Semi-public
ctx *compressor
mpos uint
bestPos [cSWD_BEST_OFF]uint
// Private
ip uint // input pointer (lookahead)
bp uint // buffer pointer
rp uint // remove pointer
bsize uint
bwrap []byte
nodecount uint
firstrp uint
b [cSWD_N + cSWD_F + cSWD_F]byte
head3 [cSWD_HSIZE]uint16
succ3 [cSWD_N + cSWD_F]uint16
best3 [cSWD_N + cSWD_F]uint16
llen3 [cSWD_HSIZE]uint16
head2 [65536]uint16
}
func head2(data []byte) uint {
return uint(data[1])<<8 | uint(data[0])
}
func head3(data []byte) uint {
key := uint(data[0])
key = (key << 5) ^ uint(data[1])
key = (key << 5) ^ uint(data[2])
key = (key * 0x9f5f) >> 5
return key & (cSWD_HSIZE - 1)
}
func (s *swd) gethead3(key uint) uint16 {
if s.llen3[key] == 0 {
return 0xFFFF
}
return s.head3[key]
}
func (s *swd) removeNode(node uint) {
if s.nodecount == 0 {
key := head3(s.b[node:])
if s.llen3[key] == 0 {
panic("assert: swd.removeNode: invalid llen3")
}
s.llen3[key]--
key = head2(s.b[node:])
if s.head2[key] == 0xFFFF {
panic("assert: swd.removeNode: invalid head2")
}
if uint(s.head2[key]) == node {
s.head2[key] = 0xFFFF
}
return
}
s.nodecount--
}
func (s *swd) init() {
s.SwdN = cSWD_N
s.SwdF = cSWD_F
s.SwdThreshold = cSWD_THRESHOLD
s.MaxChain = cSWD_MAX_CHAIN
s.NiceLength = s.SwdF
s.bsize = s.SwdN + s.SwdF
s.bwrap = s.b[s.bsize:]
s.nodecount = s.SwdN
for i := 0; i < len(s.head2); i++ {
s.head2[i] = 0xFFFF
}
s.ip = 0
s.bp = s.ip
s.firstrp = s.ip
if s.ip+s.SwdF > s.bsize {
panic("assert: swd.init: invalid ip")
}
s.Look = uint(len(s.ctx.in)) - s.ip
if s.Look > 0 {
if s.Look > s.SwdF {
s.Look = s.SwdF
}
copy(s.b[s.ip:], s.ctx.in[:s.Look])
s.ctx.ip += int(s.Look)
s.ip += s.Look
}
if s.ip == s.bsize {
s.ip = 0
}
s.rp = s.firstrp
if s.rp >= s.nodecount {
s.rp -= s.nodecount
} else {
s.rp += s.bsize - s.nodecount
}
if s.Look < 3 {
s.b[s.bp+s.Look] = 0
s.b[s.bp+s.Look+1] = 0
s.b[s.bp+s.Look+2] = 0
}
}
func (s *swd) getbyte() {
c := -1
if s.ctx.ip < len(s.ctx.in) {
c = int(s.ctx.in[s.ctx.ip])
s.ctx.ip++
s.b[s.ip] = byte(c)
if s.ip < s.SwdF {
s.bwrap[s.ip] = byte(c)
}
} else {
if s.Look > 0 {
s.Look--
}
s.b[s.ip] = 0
if s.ip < s.SwdF {
s.bwrap[s.ip] = 0
}
}
s.ip++
if s.ip == s.bsize {
s.ip = 0
}
s.bp++
if s.bp == s.bsize {
s.bp = 0
}
s.rp++
if s.rp == s.bsize {
s.rp = 0
}
}
func (s *swd) accept(n uint) {
if n > s.Look {
panic("swd: accept: invalid n")
}
for i := uint(0); i < n; i++ {
s.removeNode(s.rp)
key := head3(s.b[s.bp:])
s.succ3[s.bp] = s.gethead3(key)
s.head3[key] = uint16(s.bp)
s.best3[s.bp] = uint16(s.SwdF + 1)
s.llen3[key]++
if uint(s.llen3[key]) > s.SwdN {
panic("swd: accept: invalid llen3")
}
key = head2(s.b[s.bp:])
s.head2[key] = uint16(s.bp)
s.getbyte()
}
}
func (s *swd) search(node uint, cnt uint) {
if s.MLen <= 0 {
panic("assert: search: invalid mlen")
}
mlen := s.MLen
bp := s.bp
bx := s.bp + s.Look
scanend1 := s.b[s.bp+mlen-1]
for ; cnt > 0; cnt-- {
p1 := bp
p2 := node
px := bx
if mlen >= s.Look {
panic("assert: search: invalid mlen in loop")
}
if s.b[p2+mlen-1] == scanend1 &&
s.b[p2+mlen] == s.b[p1+mlen] &&
s.b[p2] == s.b[p1] &&
s.b[p2+1] == s.b[p1+1] {
if s.b[bp] != s.b[node] || s.b[bp+1] != s.b[node+1] || s.b[bp+2] != s.b[node+2] {
panic("assert: seach: invalid initial match")
}
p1 = p1 + 2
p2 = p2 + 2
for p1 < px {
p1++
p2++
if s.b[p1] != s.b[p2] {
break
}
}
i := p1 - bp
for j := uint(0); j < i; j++ {
if s.b[s.bp+j] != s.b[node+j] {
panic("assert: search: invalid final match")
}
}
if i < cSWD_BEST_OFF {
if s.bestPos[i] == 0 {
s.bestPos[i] = node + 1
}
}
if i > mlen {
mlen = i
s.MLen = mlen
s.mpos = node
if mlen == s.Look {
return
}
if mlen >= s.NiceLength {
return
}
if mlen > uint(s.best3[node]) {
return
}
scanend1 = s.b[s.bp+mlen-1]
}
}
node = uint(s.succ3[node])
}
}
func (s *swd) search2() bool {
if s.Look < 2 {
panic("assert: search2: invalid look")
}
if s.MLen <= 0 {
panic("assert: search2: invalid mlen")
}
key := s.head2[head2(s.b[s.bp:])]
if key == 0xFFFF {
return false
}
if s.b[s.bp] != s.b[key] || s.b[s.bp+1] != s.b[key+1] {
panic("assert: search2: invalid key found")
}
if s.bestPos[2] == 0 {
s.bestPos[2] = uint(key + 1)
}
if s.MLen < 2 {
s.MLen = 2
s.mpos = uint(key)
}
return true
}
func (s *swd) findbest() {
if s.MLen == 0 {
panic("swd: findbest: invalid mlen")
}
key := head3(s.b[s.bp:])
node := s.gethead3(key)
s.succ3[s.bp] = node
cnt := uint(s.llen3[key])
s.llen3[key]++
if cnt > s.SwdN+s.SwdF {
panic("swd: findbest: invalid llen3")
}
if cnt > s.MaxChain && s.MaxChain > 0 {
cnt = s.MaxChain
}
s.head3[key] = uint16(s.bp)
s.BChar = int(s.b[s.bp])
len := s.MLen
if s.MLen >= s.Look {
if s.Look == 0 {
s.BChar = -1
}
s.MOff = 0
s.best3[s.bp] = uint16(s.SwdF + 1)
} else {
if s.search2() && s.Look >= 3 {
s.search(uint(node), cnt)
}
if s.MLen > len {
s.MOff = s.pos2off(s.mpos)
}
if s.UseBestOff {
for i := 2; i < cSWD_BEST_OFF; i++ {
if s.bestPos[i] > 0 {
s.BestOff[i] = s.pos2off(s.bestPos[i] - 1)
} else {
s.BestOff[i] = 0
}
}
}
}
s.removeNode(s.rp)
key = head2(s.b[s.bp:])
s.head2[key] = uint16(s.bp)
}
func (s *swd) pos2off(pos uint) uint {
if s.bp > pos {
return s.bp - pos
}
return s.bsize - (pos - s.bp)
}

@@ -1,19 +0,0 @@
Copyright (c) 2015 Olivier Poitrey <rs@dailymotion.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

@@ -1,134 +0,0 @@
# XHandler
[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/xhandler) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/xhandler/master/LICENSE) [![Build Status](https://travis-ci.org/rs/xhandler.svg?branch=master)](https://travis-ci.org/rs/xhandler) [![Coverage](http://gocover.io/_badge/github.com/rs/xhandler)](http://gocover.io/github.com/rs/xhandler)
XHandler is a bridge between [net/context](https://godoc.org/golang.org/x/net/context) and `http.Handler`.
It lets you enforce `net/context` in your handlers without sacrificing compatibility with existing `http.Handlers` nor imposing a specific router.
Thanks to `net/context` deadline management, `xhandler` is able to enforce a per request deadline and will cancel the context when the client closes the connection unexpectedly.
You may create your own `net/context` aware handler pretty much the same way as you would do with http.Handler.
Read more about xhandler on [Dailymotion engineering blog](http://engineering.dailymotion.com/our-way-to-go/).
## Installing
go get -u github.com/rs/xhandler
## Usage
```go
package main
import (
"log"
"net/http"
"time"
"github.com/rs/cors"
"github.com/rs/xhandler"
"golang.org/x/net/context"
)
type myMiddleware struct {
next xhandler.HandlerC
}
func (h myMiddleware) ServeHTTPC(ctx context.Context, w http.ResponseWriter, r *http.Request) {
ctx = context.WithValue(ctx, "test", "World")
h.next.ServeHTTPC(ctx, w, r)
}
func main() {
c := xhandler.Chain{}
// Add close notifier handler so context is cancelled when the client closes
// the connection
c.UseC(xhandler.CloseHandler)
// Add timeout handler
c.UseC(xhandler.TimeoutHandler(2 * time.Second))
// Middleware putting something in the context
c.UseC(func(next xhandler.HandlerC) xhandler.HandlerC {
return myMiddleware{next: next}
})
// Mix it with a non-context-aware middleware handler
c.Use(cors.Default().Handler)
// Final handler (using handlerFuncC), reading from the context
xh := xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
value := ctx.Value("test").(string)
w.Write([]byte("Hello " + value))
})
// Bridge context aware handlers with http.Handler using xhandler.Handle()
http.Handle("/test", c.Handler(xh))
if err := http.ListenAndServe(":8080", nil); err != nil {
log.Fatal(err)
}
}
```
### Using xmux
Xhandler comes with an optional context aware [muxer](https://github.com/rs/xmux) forked from [httprouter](https://github.com/julienschmidt/httprouter):
```go
package main
import (
"fmt"
"log"
"net/http"
"time"
"github.com/rs/xhandler"
"github.com/rs/xmux"
"golang.org/x/net/context"
)
func main() {
c := xhandler.Chain{}
// Append a context-aware middleware handler
c.UseC(xhandler.CloseHandler)
// Another context-aware middleware handler
c.UseC(xhandler.TimeoutHandler(2 * time.Second))
mux := xmux.New()
// Use c.Handler to terminate the chain with your final handler
mux.GET("/welcome/:name", xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, req *http.Request) {
fmt.Fprintf(w, "Welcome %s!", xmux.Params(ctx).Get("name"))
}))
if err := http.ListenAndServe(":8080", c.Handler(mux)); err != nil {
log.Fatal(err)
}
}
```
See [xmux](https://github.com/rs/xmux) for more examples.
## Context Aware Middleware
Here is a list of `net/context` aware middleware handlers implementing `xhandler.HandlerC` interface.
Feel free to put up a PR linking your middleware if you have built one:
| Middleware | Author | Description |
| ---------- | ------ | ----------- |
| [xmux](https://github.com/rs/xmux) | [Olivier Poitrey](https://github.com/rs) | HTTP request muxer |
| [xlog](https://github.com/rs/xlog) | [Olivier Poitrey](https://github.com/rs) | HTTP handler logger |
| [xstats](https://github.com/rs/xstats) | [Olivier Poitrey](https://github.com/rs) | A generic client for service instrumentation |
| [xaccess](https://github.com/rs/xaccess) | [Olivier Poitrey](https://github.com/rs) | HTTP handler access logger with [xlog](https://github.com/rs/xlog) and [xstats](https://github.com/rs/xstats) |
| [cors](https://github.com/rs/cors) | [Olivier Poitrey](https://github.com/rs) | [Cross Origin Resource Sharing](http://www.w3.org/TR/cors/) (CORS) support |
## Licenses
All source code is licensed under the [MIT License](https://raw.github.com/rs/xhandler/master/LICENSE).

@@ -1,121 +0,0 @@
package xhandler
import (
"net/http"
"golang.org/x/net/context"
)
// Chain is a helper for chaining middleware handlers together for easier
// management.
type Chain []func(next HandlerC) HandlerC
// Add appends a variable number of additional middleware handlers
// to the middleware chain. Middleware handlers can either be
// context-aware or non-context aware handlers with the appropriate
// function signatures.
func (c *Chain) Add(f ...interface{}) {
for _, h := range f {
switch v := h.(type) {
case func(http.Handler) http.Handler:
c.Use(v)
case func(HandlerC) HandlerC:
c.UseC(v)
default:
panic("Adding invalid handler to the middleware chain")
}
}
}
// With creates a new middleware chain from an existing chain,
// extending it with additional middleware. Middleware handlers
// can either be context-aware or non-context aware handlers
// with the appropriate function signatures.
func (c *Chain) With(f ...interface{}) *Chain {
n := make(Chain, len(*c))
copy(n, *c)
n.Add(f...)
return &n
}
// UseC appends a context-aware handler to the middleware chain.
func (c *Chain) UseC(f func(next HandlerC) HandlerC) {
*c = append(*c, f)
}
// Use appends a standard http.Handler to the middleware chain without
// losing track of the context when inserted between two context aware handlers.
//
// Caveat: the f function will be called on each request so you are better off putting
// any initialization sequence outside of this function.
func (c *Chain) Use(f func(next http.Handler) http.Handler) {
xf := func(next HandlerC) HandlerC {
return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
n := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTPC(ctx, w, r)
})
f(n).ServeHTTP(w, r)
})
}
*c = append(*c, xf)
}
// Handler wraps the provided final handler with all the middleware appended to
// the chain and returns a new standard http.Handler instance.
// The context.Background() context is injected automatically.
func (c Chain) Handler(xh HandlerC) http.Handler {
ctx := context.Background()
return c.HandlerCtx(ctx, xh)
}
// HandlerFC is a helper to provide a function (HandlerFuncC) to Handler().
//
// HandlerFC is equivalent to:
// c.Handler(xhandler.HandlerFuncC(xhc))
func (c Chain) HandlerFC(xhf HandlerFuncC) http.Handler {
ctx := context.Background()
return c.HandlerCtx(ctx, HandlerFuncC(xhf))
}
// HandlerH is a helper to provide a standard http handler (http.HandlerFunc)
// to Handler(). Your final handler won't have access to the context though.
func (c Chain) HandlerH(h http.Handler) http.Handler {
ctx := context.Background()
return c.HandlerCtx(ctx, HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
h.ServeHTTP(w, r)
}))
}
// HandlerF is a helper to provide a standard http handler function
// (http.HandlerFunc) to Handler(). Your final handler won't have access
// to the context though.
func (c Chain) HandlerF(hf http.HandlerFunc) http.Handler {
ctx := context.Background()
return c.HandlerCtx(ctx, HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
hf(w, r)
}))
}
// HandlerCtx wraps the provided final handler with all the middleware appended to
// the chain and returns a new standard http.Handler instance.
func (c Chain) HandlerCtx(ctx context.Context, xh HandlerC) http.Handler {
return New(ctx, c.HandlerC(xh))
}
// HandlerC wraps the provided final handler with all the middleware appended to
// the chain and returns a HandlerC instance.
func (c Chain) HandlerC(xh HandlerC) HandlerC {
for i := len(c) - 1; i >= 0; i-- {
xh = c[i](xh)
}
return xh
}
// HandlerCF wraps the provided final handler func with all the middleware appended to
// the chain and returns a HandlerC instance.
//
// HandlerCF is equivalent to:
// c.HandlerC(xhandler.HandlerFuncC(xhc))
func (c Chain) HandlerCF(xhc HandlerFuncC) HandlerC {
return c.HandlerC(HandlerFuncC(xhc))
}

@@ -1,59 +0,0 @@
package xhandler
import (
"net/http"
"time"
"golang.org/x/net/context"
)
// CloseHandler returns a Handler, cancelling the context when the client
// connection closes unexpectedly.
func CloseHandler(next HandlerC) HandlerC {
return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
// Cancel the context if the client closes the connection
if wcn, ok := w.(http.CloseNotifier); ok {
var cancel context.CancelFunc
ctx, cancel = context.WithCancel(ctx)
defer cancel()
notify := wcn.CloseNotify()
go func() {
select {
case <-notify:
cancel()
case <-ctx.Done():
}
}()
}
next.ServeHTTPC(ctx, w, r)
})
}
// TimeoutHandler returns a Handler which adds a timeout to the context.
//
// Child handlers have the responsability of obeying the context deadline and to return
// an appropriate error (or not) response in case of timeout.
func TimeoutHandler(timeout time.Duration) func(next HandlerC) HandlerC {
return func(next HandlerC) HandlerC {
return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
// Keep the cancel func so the timer is released once the handler returns.
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, timeout)
defer cancel()
next.ServeHTTPC(ctx, w, r)
})
}
}
// If is a special middleware constructor that inserts the condNext handler only if a condition
// applies at runtime.
func If(cond func(ctx context.Context, w http.ResponseWriter, r *http.Request) bool, condNext func(next HandlerC) HandlerC) func(next HandlerC) HandlerC {
return func(next HandlerC) HandlerC {
return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
if cond(ctx, w, r) {
condNext(next).ServeHTTPC(ctx, w, r)
} else {
next.ServeHTTPC(ctx, w, r)
}
})
}
}
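
TimeoutHandler and CloseHandler above are the pieces that give xhandler its per-request deadline and client-disconnect cancellation. A minimal sketch of applying TimeoutHandler directly, assuming the same github.com/rs/xhandler and golang.org/x/net/context import paths; the durations and listen address are illustrative:

package main

import (
	"net/http"
	"time"

	"github.com/rs/xhandler"
	"golang.org/x/net/context"
)

func main() {
	final := xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
		select {
		case <-time.After(2 * time.Second): // simulated slow work
			w.Write([]byte("done"))
		case <-ctx.Done(): // deadline set by TimeoutHandler was reached
			http.Error(w, ctx.Err().Error(), http.StatusGatewayTimeout)
		}
	})

	// Wrap the final handler with a 1 second deadline, then bridge to net/http.
	withTimeout := xhandler.TimeoutHandler(1 * time.Second)(final)
	http.ListenAndServe(":8080", xhandler.New(context.Background(), withTimeout))
}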

View File

@ -1,42 +0,0 @@
// Package xhandler provides a bridge between http.Handler and net/context.
//
// xhandler enforces net/context in your handlers without sacrificing
// compatibility with existing http.Handlers nor imposing a specific router.
//
// Thanks to net/context deadline management, xhandler is able to enforce
// a per-request deadline and will cancel the context when the client closes
// the connection unexpectedly.
//
// You may create net/context aware middlewares pretty much the same way as
// you would with http.Handler.
package xhandler // import "github.com/rs/xhandler"
import (
"net/http"
"golang.org/x/net/context"
)
// HandlerC is a net/context aware http.Handler
type HandlerC interface {
ServeHTTPC(context.Context, http.ResponseWriter, *http.Request)
}
// HandlerFuncC type is an adapter to allow the use of ordinary functions
// as an xhandler.HandlerC. If f is a function with the appropriate signature,
// xhandler.HandlerFuncC(f) is an xhandler.HandlerC object that calls f.
type HandlerFuncC func(context.Context, http.ResponseWriter, *http.Request)
// ServeHTTPC calls f(ctx, w, r).
func (f HandlerFuncC) ServeHTTPC(ctx context.Context, w http.ResponseWriter, r *http.Request) {
f(ctx, w, r)
}
// New creates a conventional http.Handler injecting the provided root
// context to sub handlers. This handler is used as a bridge between conventional
// http.Handler and context aware handlers.
func New(ctx context.Context, h HandlerC) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
h.ServeHTTPC(ctx, w, r)
})
}

View File

@ -1,65 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bytes2
// Buffer implements a subset of the write portion of
// bytes.Buffer, but more efficiently. This is meant to
// be used in very high QPS operations, especially for
// WriteByte, and without abstracting it as a Writer.
// Function signatures contain errors for compatibility,
// but they do not return errors.
type Buffer struct {
bytes []byte
}
// NewBuffer is equivalent to bytes.NewBuffer.
func NewBuffer(b []byte) *Buffer {
return &Buffer{bytes: b}
}
// Write is equivalent to bytes.Buffer.Write.
func (buf *Buffer) Write(b []byte) (int, error) {
buf.bytes = append(buf.bytes, b...)
return len(b), nil
}
// WriteString is equivalent to bytes.Buffer.WriteString.
func (buf *Buffer) WriteString(s string) (int, error) {
buf.bytes = append(buf.bytes, s...)
return len(s), nil
}
// WriteByte is equivalent to bytes.Buffer.WriteByte.
func (buf *Buffer) WriteByte(b byte) error {
buf.bytes = append(buf.bytes, b)
return nil
}
// Bytes is equivalent to bytes.Buffer.Bytes.
func (buf *Buffer) Bytes() []byte {
return buf.bytes
}
// String is equivalent to bytes.Buffer.String.
func (buf *Buffer) String() string {
return string(buf.bytes)
}
// Len is equivalent to bytes.Buffer.Len.
func (buf *Buffer) Len() int {
return len(buf.bytes)
}
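
bytes2.Buffer above is a slim, error-free replacement for the write side of bytes.Buffer. A minimal sketch of driving it, assuming the vendored import path github.com/xwb1989/sqlparser/dependency/bytes2 shown in this diff:

package main

import (
	"fmt"

	"github.com/xwb1989/sqlparser/dependency/bytes2"
)

func main() {
	buf := bytes2.NewBuffer(nil)
	buf.WriteString("id = ")
	buf.WriteByte('\'')
	buf.Write([]byte("42"))
	buf.WriteByte('\'')
	// The error returns exist only for interface compatibility; they are always nil.
	fmt.Println(buf.String(), buf.Len()) // id = '42' 9
}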

View File

@ -1,79 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package hack gives you some efficient functionality at the cost of
// breaking some Go rules.
package hack
import (
"reflect"
"unsafe"
)
// StringArena lets you consolidate allocations for a group of strings
// that have similar life length
type StringArena struct {
buf []byte
str string
}
// NewStringArena creates an arena of the specified size.
func NewStringArena(size int) *StringArena {
sa := &StringArena{buf: make([]byte, 0, size)}
pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&sa.buf))
pstring := (*reflect.StringHeader)(unsafe.Pointer(&sa.str))
pstring.Data = pbytes.Data
pstring.Len = pbytes.Cap
return sa
}
// NewString copies a byte slice into the arena and returns it as a string.
// If the arena is full, it returns a traditional go string.
func (sa *StringArena) NewString(b []byte) string {
if len(b) == 0 {
return ""
}
if len(sa.buf)+len(b) > cap(sa.buf) {
return string(b)
}
start := len(sa.buf)
sa.buf = append(sa.buf, b...)
return sa.str[start : start+len(b)]
}
// SpaceLeft returns the amount of space left in the arena.
func (sa *StringArena) SpaceLeft() int {
return cap(sa.buf) - len(sa.buf)
}
// String force casts a []byte to a string.
// USE AT YOUR OWN RISK
func String(b []byte) (s string) {
if len(b) == 0 {
return ""
}
pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
pstring.Data = pbytes.Data
pstring.Len = pbytes.Len
return
}
// StringPointer returns &s[0], which is not allowed in go
func StringPointer(s string) unsafe.Pointer {
pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
return unsafe.Pointer(pstring.Data)
}
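
The hack package above trades memory safety for fewer allocations: StringArena packs many short-lived strings into one backing slice, and String aliases a []byte without copying, so the caller must never mutate the slice afterwards. A minimal sketch, assuming the vendored import path shown in this diff:

package main

import (
	"fmt"

	"github.com/xwb1989/sqlparser/dependency/hack"
)

func main() {
	arena := hack.NewStringArena(64)
	s1 := arena.NewString([]byte("hello")) // copied into the arena
	s2 := arena.NewString([]byte("world")) // copied into the arena
	fmt.Println(s1, s2, arena.SpaceLeft()) // hello world 54

	b := []byte("zero-copy")
	s := hack.String(b) // aliases b; do not modify b after this point
	fmt.Println(s)
}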

File diff suppressed because it is too large

View File

@ -1,266 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqltypes
import (
"errors"
"fmt"
"reflect"
"strconv"
"github.com/xwb1989/sqlparser/dependency/querypb"
)
// NullBindVariable is a bindvar with NULL value.
var NullBindVariable = &querypb.BindVariable{Type: querypb.Type_NULL_TYPE}
// ValueToProto converts Value to a *querypb.Value.
func ValueToProto(v Value) *querypb.Value {
return &querypb.Value{Type: v.typ, Value: v.val}
}
// ProtoToValue converts a *querypb.Value to a Value.
func ProtoToValue(v *querypb.Value) Value {
return MakeTrusted(v.Type, v.Value)
}
// BuildBindVariables builds a map[string]*querypb.BindVariable from a map[string]interface{}.
func BuildBindVariables(in map[string]interface{}) (map[string]*querypb.BindVariable, error) {
if len(in) == 0 {
return nil, nil
}
out := make(map[string]*querypb.BindVariable, len(in))
for k, v := range in {
bv, err := BuildBindVariable(v)
if err != nil {
return nil, fmt.Errorf("%s: %v", k, err)
}
out[k] = bv
}
return out, nil
}
// Int32BindVariable converts an int32 to a bind var.
func Int32BindVariable(v int32) *querypb.BindVariable {
return ValueBindVariable(NewInt32(v))
}
// Int64BindVariable converts an int64 to a bind var.
func Int64BindVariable(v int64) *querypb.BindVariable {
return ValueBindVariable(NewInt64(v))
}
// Uint64BindVariable converts a uint64 to a bind var.
func Uint64BindVariable(v uint64) *querypb.BindVariable {
return ValueBindVariable(NewUint64(v))
}
// Float64BindVariable converts a float64 to a bind var.
func Float64BindVariable(v float64) *querypb.BindVariable {
return ValueBindVariable(NewFloat64(v))
}
// StringBindVariable converts a string to a bind var.
func StringBindVariable(v string) *querypb.BindVariable {
return ValueBindVariable(NewVarChar(v))
}
// BytesBindVariable converts a []byte to a bind var.
func BytesBindVariable(v []byte) *querypb.BindVariable {
return &querypb.BindVariable{Type: VarBinary, Value: v}
}
// ValueBindVariable converts a Value to a bind var.
func ValueBindVariable(v Value) *querypb.BindVariable {
return &querypb.BindVariable{Type: v.typ, Value: v.val}
}
// BuildBindVariable builds a *querypb.BindVariable from a valid input type.
func BuildBindVariable(v interface{}) (*querypb.BindVariable, error) {
switch v := v.(type) {
case string:
return StringBindVariable(v), nil
case []byte:
return BytesBindVariable(v), nil
case int:
return &querypb.BindVariable{
Type: querypb.Type_INT64,
Value: strconv.AppendInt(nil, int64(v), 10),
}, nil
case int64:
return Int64BindVariable(v), nil
case uint64:
return Uint64BindVariable(v), nil
case float64:
return Float64BindVariable(v), nil
case nil:
return NullBindVariable, nil
case Value:
return ValueBindVariable(v), nil
case *querypb.BindVariable:
return v, nil
case []interface{}:
bv := &querypb.BindVariable{
Type: querypb.Type_TUPLE,
Values: make([]*querypb.Value, len(v)),
}
values := make([]querypb.Value, len(v))
for i, lv := range v {
lbv, err := BuildBindVariable(lv)
if err != nil {
return nil, err
}
values[i].Type = lbv.Type
values[i].Value = lbv.Value
bv.Values[i] = &values[i]
}
return bv, nil
case []string:
bv := &querypb.BindVariable{
Type: querypb.Type_TUPLE,
Values: make([]*querypb.Value, len(v)),
}
values := make([]querypb.Value, len(v))
for i, lv := range v {
values[i].Type = querypb.Type_VARCHAR
values[i].Value = []byte(lv)
bv.Values[i] = &values[i]
}
return bv, nil
case [][]byte:
bv := &querypb.BindVariable{
Type: querypb.Type_TUPLE,
Values: make([]*querypb.Value, len(v)),
}
values := make([]querypb.Value, len(v))
for i, lv := range v {
values[i].Type = querypb.Type_VARBINARY
values[i].Value = lv
bv.Values[i] = &values[i]
}
return bv, nil
case []int:
bv := &querypb.BindVariable{
Type: querypb.Type_TUPLE,
Values: make([]*querypb.Value, len(v)),
}
values := make([]querypb.Value, len(v))
for i, lv := range v {
values[i].Type = querypb.Type_INT64
values[i].Value = strconv.AppendInt(nil, int64(lv), 10)
bv.Values[i] = &values[i]
}
return bv, nil
case []int64:
bv := &querypb.BindVariable{
Type: querypb.Type_TUPLE,
Values: make([]*querypb.Value, len(v)),
}
values := make([]querypb.Value, len(v))
for i, lv := range v {
values[i].Type = querypb.Type_INT64
values[i].Value = strconv.AppendInt(nil, lv, 10)
bv.Values[i] = &values[i]
}
return bv, nil
case []uint64:
bv := &querypb.BindVariable{
Type: querypb.Type_TUPLE,
Values: make([]*querypb.Value, len(v)),
}
values := make([]querypb.Value, len(v))
for i, lv := range v {
values[i].Type = querypb.Type_UINT64
values[i].Value = strconv.AppendUint(nil, lv, 10)
bv.Values[i] = &values[i]
}
return bv, nil
case []float64:
bv := &querypb.BindVariable{
Type: querypb.Type_TUPLE,
Values: make([]*querypb.Value, len(v)),
}
values := make([]querypb.Value, len(v))
for i, lv := range v {
values[i].Type = querypb.Type_FLOAT64
values[i].Value = strconv.AppendFloat(nil, lv, 'g', -1, 64)
bv.Values[i] = &values[i]
}
return bv, nil
}
return nil, fmt.Errorf("type %T not supported as bind var: %v", v, v)
}
// ValidateBindVariables validates a map[string]*querypb.BindVariable.
func ValidateBindVariables(bv map[string]*querypb.BindVariable) error {
for k, v := range bv {
if err := ValidateBindVariable(v); err != nil {
return fmt.Errorf("%s: %v", k, err)
}
}
return nil
}
// ValidateBindVariable returns an error if the bind variable has inconsistent
// fields.
func ValidateBindVariable(bv *querypb.BindVariable) error {
if bv == nil {
return errors.New("bind variable is nil")
}
if bv.Type == querypb.Type_TUPLE {
if len(bv.Values) == 0 {
return errors.New("empty tuple is not allowed")
}
for _, val := range bv.Values {
if val.Type == querypb.Type_TUPLE {
return errors.New("tuple not allowed inside another tuple")
}
if err := ValidateBindVariable(&querypb.BindVariable{Type: val.Type, Value: val.Value}); err != nil {
return err
}
}
return nil
}
// If NewValue succeeds, the value is valid.
_, err := NewValue(bv.Type, bv.Value)
return err
}
// BindVariableToValue converts a bind var into a Value.
func BindVariableToValue(bv *querypb.BindVariable) (Value, error) {
if bv.Type == querypb.Type_TUPLE {
return NULL, errors.New("cannot convert a TUPLE bind var into a value")
}
return MakeTrusted(bv.Type, bv.Value), nil
}
// BindVariablesEqual compares two maps of bind variables.
func BindVariablesEqual(x, y map[string]*querypb.BindVariable) bool {
return reflect.DeepEqual(&querypb.BoundQuery{BindVariables: x}, &querypb.BoundQuery{BindVariables: y})
}
// CopyBindVariables returns a shallow-copy of the given bindVariables map.
func CopyBindVariables(bindVariables map[string]*querypb.BindVariable) map[string]*querypb.BindVariable {
result := make(map[string]*querypb.BindVariable, len(bindVariables))
for key, value := range bindVariables {
result[key] = value
}
return result
}
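
The bind-variable helpers above convert Go values into *querypb.BindVariable maps and validate them before query execution. A minimal sketch of the round trip, assuming the vendored import path github.com/xwb1989/sqlparser/dependency/sqltypes used throughout this diff; the keys and values are illustrative:

package main

import (
	"fmt"

	"github.com/xwb1989/sqlparser/dependency/sqltypes"
)

func main() {
	bindVars, err := sqltypes.BuildBindVariables(map[string]interface{}{
		"id":    int64(42),
		"name":  "alice",
		"codes": []int64{1, 2, 3}, // slices become TUPLE bind vars
	})
	if err != nil {
		panic(err)
	}
	if err := sqltypes.ValidateBindVariables(bindVars); err != nil {
		panic(err)
	}
	fmt.Println(len(bindVars)) // 3
}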

View File

@ -1,259 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqltypes
import (
"encoding/json"
"errors"
"fmt"
"github.com/xwb1989/sqlparser/dependency/querypb"
)
// PlanValue represents a value or a list of values for
// a column that will later be resolved using bind vars and used
// to perform plan actions like generating the final query or
// deciding on a route.
//
// Plan values are typically used as a slice ([]PlanValue)
// where each entry is for one column. For situations where
// the required output is a list of rows (like in the case
// of multi-value inserts), the representation is pivoted.
// For example, a statement like this:
// INSERT INTO t VALUES (1, 2), (3, 4)
// will be represented as follows:
// []PlanValue{
// Values: {1, 3},
// Values: {2, 4},
// }
//
// For WHERE clause items that contain a combination of
// equality expressions and IN clauses like this:
// WHERE pk1 = 1 AND pk2 IN (2, 3, 4)
// The plan values will be represented as follows:
// []PlanValue{
// Value: 1,
// Values: {2, 3, 4},
// }
// When converted into rows, columns with single values
// are replicated as the same for all rows:
// [][]Value{
// {1, 2},
// {1, 3},
// {1, 4},
// }
type PlanValue struct {
Key string
Value Value
ListKey string
Values []PlanValue
}
// IsNull returns true if the PlanValue is NULL.
func (pv PlanValue) IsNull() bool {
return pv.Key == "" && pv.Value.IsNull() && pv.ListKey == "" && pv.Values == nil
}
// IsList returns true if the PlanValue is a list.
func (pv PlanValue) IsList() bool {
return pv.ListKey != "" || pv.Values != nil
}
// ResolveValue resolves a PlanValue as a single value based on the supplied bindvars.
func (pv PlanValue) ResolveValue(bindVars map[string]*querypb.BindVariable) (Value, error) {
switch {
case pv.Key != "":
bv, err := pv.lookupValue(bindVars)
if err != nil {
return NULL, err
}
return MakeTrusted(bv.Type, bv.Value), nil
case !pv.Value.IsNull():
return pv.Value, nil
case pv.ListKey != "" || pv.Values != nil:
// This code is unreachable because the parser does not allow
// multi-value constructs where a single value is expected.
return NULL, errors.New("a list was supplied where a single value was expected")
}
return NULL, nil
}
func (pv PlanValue) lookupValue(bindVars map[string]*querypb.BindVariable) (*querypb.BindVariable, error) {
bv, ok := bindVars[pv.Key]
if !ok {
return nil, fmt.Errorf("missing bind var %s", pv.Key)
}
if bv.Type == querypb.Type_TUPLE {
return nil, fmt.Errorf("TUPLE was supplied for single value bind var %s", pv.ListKey)
}
return bv, nil
}
// ResolveList resolves a PlanValue as a list of values based on the supplied bindvars.
func (pv PlanValue) ResolveList(bindVars map[string]*querypb.BindVariable) ([]Value, error) {
switch {
case pv.ListKey != "":
bv, err := pv.lookupList(bindVars)
if err != nil {
return nil, err
}
values := make([]Value, 0, len(bv.Values))
for _, val := range bv.Values {
values = append(values, MakeTrusted(val.Type, val.Value))
}
return values, nil
case pv.Values != nil:
values := make([]Value, 0, len(pv.Values))
for _, val := range pv.Values {
v, err := val.ResolveValue(bindVars)
if err != nil {
return nil, err
}
values = append(values, v)
}
return values, nil
}
// This code is unreachable because the parser does not allow
// single value constructs where multiple values are expected.
return nil, errors.New("a single value was supplied where a list was expected")
}
func (pv PlanValue) lookupList(bindVars map[string]*querypb.BindVariable) (*querypb.BindVariable, error) {
bv, ok := bindVars[pv.ListKey]
if !ok {
return nil, fmt.Errorf("missing bind var %s", pv.ListKey)
}
if bv.Type != querypb.Type_TUPLE {
return nil, fmt.Errorf("single value was supplied for TUPLE bind var %s", pv.ListKey)
}
return bv, nil
}
// MarshalJSON should be used only for testing.
func (pv PlanValue) MarshalJSON() ([]byte, error) {
switch {
case pv.Key != "":
return json.Marshal(":" + pv.Key)
case !pv.Value.IsNull():
if pv.Value.IsIntegral() {
return pv.Value.ToBytes(), nil
}
return json.Marshal(pv.Value.ToString())
case pv.ListKey != "":
return json.Marshal("::" + pv.ListKey)
case pv.Values != nil:
return json.Marshal(pv.Values)
}
return []byte("null"), nil
}
func rowCount(pvs []PlanValue, bindVars map[string]*querypb.BindVariable) (int, error) {
count := -1
setCount := func(l int) error {
switch count {
case -1:
count = l
return nil
case l:
return nil
default:
return errors.New("mismatch in number of column values")
}
}
for _, pv := range pvs {
switch {
case pv.Key != "" || !pv.Value.IsNull():
continue
case pv.Values != nil:
if err := setCount(len(pv.Values)); err != nil {
return 0, err
}
case pv.ListKey != "":
bv, err := pv.lookupList(bindVars)
if err != nil {
return 0, err
}
if err := setCount(len(bv.Values)); err != nil {
return 0, err
}
}
}
if count == -1 {
// If there were no lists inside, it was a single row.
// Note that count can never be 0 because there is enough
// protection at the top level: list bind vars must have
// at least one value (enforced by vtgate), and AST lists
// must have at least one value (enforced by the parser).
// Also lists created internally after vtgate validation
// ensure at least one value.
// TODO(sougou): verify and change API to enforce this.
return 1, nil
}
return count, nil
}
// ResolveRows resolves a []PlanValue as rows based on the supplied bindvars.
func ResolveRows(pvs []PlanValue, bindVars map[string]*querypb.BindVariable) ([][]Value, error) {
count, err := rowCount(pvs, bindVars)
if err != nil {
return nil, err
}
// Allocate the rows.
rows := make([][]Value, count)
for i := range rows {
rows[i] = make([]Value, len(pvs))
}
// Using j because we're resolving by columns.
for j, pv := range pvs {
switch {
case pv.Key != "":
bv, err := pv.lookupValue(bindVars)
if err != nil {
return nil, err
}
for i := range rows {
rows[i][j] = MakeTrusted(bv.Type, bv.Value)
}
case !pv.Value.IsNull():
for i := range rows {
rows[i][j] = pv.Value
}
case pv.ListKey != "":
bv, err := pv.lookupList(bindVars)
if err != nil {
// This code is unreachable because rowCount already checks this.
return nil, err
}
for i := range rows {
rows[i][j] = MakeTrusted(bv.Values[i].Type, bv.Values[i].Value)
}
case pv.Values != nil:
for i := range rows {
rows[i][j], err = pv.Values[i].ResolveValue(bindVars)
if err != nil {
return nil, err
}
}
// default case is a NULL value, which the row values are already initialized to.
}
}
return rows, nil
}
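
ResolveRows pivots a per-column []PlanValue plus bind variables into rows, replicating single values across every row, as described in the PlanValue comment above. A minimal sketch mirroring the "pk1 = 1 AND pk2 IN (2, 3, 4)" example; TestBindVariable is the testing helper defined in the next file of this diff:

package main

import (
	"fmt"

	"github.com/xwb1989/sqlparser/dependency/querypb"
	"github.com/xwb1989/sqlparser/dependency/sqltypes"
)

func main() {
	// WHERE pk1 = 1 AND pk2 IN ::list, with ::list bound to (2, 3, 4).
	pvs := []sqltypes.PlanValue{
		{Value: sqltypes.NewInt64(1)},
		{ListKey: "list"},
	}
	bindVars := map[string]*querypb.BindVariable{
		"list": sqltypes.TestBindVariable([]int64{2, 3, 4}),
	}
	rows, err := sqltypes.ResolveRows(pvs, bindVars)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(rows)) // 3 rows: (1,2), (1,3), (1,4)
}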

View File

@ -1,154 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqltypes
import (
querypb "github.com/xwb1989/sqlparser/dependency/querypb"
)
// Functions in this file should only be used for testing.
// This is an experiment to see if test code bloat can be
// reduced and readability improved.
/*
// MakeTestFields builds a []*querypb.Field for testing.
// fields := sqltypes.MakeTestFields(
// "a|b",
// "int64|varchar",
// )
// The field types are as defined in querypb and are case
// insensitive. Column delimiters must be used only to separate
// strings and not at the beginning or the end.
func MakeTestFields(names, types string) []*querypb.Field {
n := split(names)
t := split(types)
var fields []*querypb.Field
for i := range n {
fields = append(fields, &querypb.Field{
Name: n[i],
Type: querypb.Type(querypb.Type_value[strings.ToUpper(t[i])]),
})
}
return fields
}
// MakeTestResult builds a *sqltypes.Result object for testing.
// result := sqltypes.MakeTestResult(
// fields,
// " 1|a",
// "10|abcd",
// )
// The field type values are set as the types for the rows built.
// Spaces are trimmed from row values. "null" is treated as NULL.
func MakeTestResult(fields []*querypb.Field, rows ...string) *Result {
result := &Result{
Fields: fields,
}
if len(rows) > 0 {
result.Rows = make([][]Value, len(rows))
}
for i, row := range rows {
result.Rows[i] = make([]Value, len(fields))
for j, col := range split(row) {
if col == "null" {
continue
}
result.Rows[i][j] = MakeTrusted(fields[j].Type, []byte(col))
}
}
result.RowsAffected = uint64(len(result.Rows))
return result
}
// MakeTestStreamingResults builds a list of results for streaming.
// results := sqltypes.MakeStreamingResults(
// fields,
// "1|a",
// "2|b",
// "---",
// "c|c",
// )
// The first result contains only the fields. Subsequent results
// are built using the field types. Every input that starts with a "-"
// is treated as streaming delimiter for one result. A final
// delimiter must not be supplied.
func MakeTestStreamingResults(fields []*querypb.Field, rows ...string) []*Result {
var results []*Result
results = append(results, &Result{Fields: fields})
start := 0
cur := 0
// Add a final streaming delimiter to simplify the loop below.
rows = append(rows, "-")
for cur < len(rows) {
if rows[cur][0] != '-' {
cur++
continue
}
result := MakeTestResult(fields, rows[start:cur]...)
result.Fields = nil
result.RowsAffected = 0
results = append(results, result)
start = cur + 1
cur = start
}
return results
}
*/
// TestBindVariable makes a *querypb.BindVariable from
// an interface{}. It panics on invalid input.
// This function should only be used for testing.
func TestBindVariable(v interface{}) *querypb.BindVariable {
if v == nil {
return NullBindVariable
}
bv, err := BuildBindVariable(v)
if err != nil {
panic(err)
}
return bv
}
// TestValue builds a Value from typ and val.
// This function should only be used for testing.
func TestValue(typ querypb.Type, val string) Value {
return MakeTrusted(typ, []byte(val))
}
/*
// PrintResults prints []*Results into a string.
// This function should only be used for testing.
func PrintResults(results []*Result) string {
b := new(bytes.Buffer)
for i, r := range results {
if i == 0 {
fmt.Fprintf(b, "%v", r)
continue
}
fmt.Fprintf(b, ", %v", r)
}
return b.String()
}
func split(str string) []string {
splits := strings.Split(str, "|")
for i, v := range splits {
splits[i] = strings.TrimSpace(v)
}
return splits
}
*/

View File

@ -1,288 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqltypes
import (
"fmt"
"github.com/xwb1989/sqlparser/dependency/querypb"
)
// This file provides wrappers and support
// functions for querypb.Type.
// These bit flags can be used to query on the
// common properties of types.
const (
flagIsIntegral = int(querypb.Flag_ISINTEGRAL)
flagIsUnsigned = int(querypb.Flag_ISUNSIGNED)
flagIsFloat = int(querypb.Flag_ISFLOAT)
flagIsQuoted = int(querypb.Flag_ISQUOTED)
flagIsText = int(querypb.Flag_ISTEXT)
flagIsBinary = int(querypb.Flag_ISBINARY)
)
// IsIntegral returns true if querypb.Type is an integral
// (signed/unsigned) that can be represented using
// up to 64 binary bits.
// If you have a Value object, use its member function.
func IsIntegral(t querypb.Type) bool {
return int(t)&flagIsIntegral == flagIsIntegral
}
// IsSigned returns true if querypb.Type is a signed integral.
// If you have a Value object, use its member function.
func IsSigned(t querypb.Type) bool {
return int(t)&(flagIsIntegral|flagIsUnsigned) == flagIsIntegral
}
// IsUnsigned returns true if querypb.Type is an unsigned integral.
// Caution: this is not the same as !IsSigned.
// If you have a Value object, use its member function.
func IsUnsigned(t querypb.Type) bool {
return int(t)&(flagIsIntegral|flagIsUnsigned) == flagIsIntegral|flagIsUnsigned
}
// IsFloat returns true if querypb.Type is a floating point.
// If you have a Value object, use its member function.
func IsFloat(t querypb.Type) bool {
return int(t)&flagIsFloat == flagIsFloat
}
// IsQuoted returns true if querypb.Type is a quoted text or binary.
// If you have a Value object, use its member function.
func IsQuoted(t querypb.Type) bool {
return int(t)&flagIsQuoted == flagIsQuoted
}
// IsText returns true if querypb.Type is a text.
// If you have a Value object, use its member function.
func IsText(t querypb.Type) bool {
return int(t)&flagIsText == flagIsText
}
// IsBinary returns true if querypb.Type is a binary.
// If you have a Value object, use its member function.
func IsBinary(t querypb.Type) bool {
return int(t)&flagIsBinary == flagIsBinary
}
// isNumber returns true if the type is any type of number.
func isNumber(t querypb.Type) bool {
return IsIntegral(t) || IsFloat(t) || t == Decimal
}
// Vitess data types. These are idiomatically
// named synonyms for the querypb.Type values.
// Although these constants are interchangeable,
// they should be treated as different from querypb.Type.
// Use the synonyms only to refer to the type in Value.
// For proto variables, use the querypb.Type constants
// instead.
// The following conditions are non-overlapping
// and cover all types: IsSigned(), IsUnsigned(),
// IsFloat(), IsQuoted(), Null, Decimal, Expression.
// Also, IsIntegral() == (IsSigned()||IsUnsigned()).
// TestCategory needs to be updated accordingly if
// you add a new type.
// If IsBinary or IsText is true, then IsQuoted is
// also true. But there are IsQuoted types that are
// neither binary nor text.
// querypb.Type_TUPLE is not included in this list
// because it's not a valid Value type.
// TODO(sougou): provide a categorization function
// that returns enums, which will allow for cleaner
// switch statements for those who want to cover types
// by their category.
const (
Null = querypb.Type_NULL_TYPE
Int8 = querypb.Type_INT8
Uint8 = querypb.Type_UINT8
Int16 = querypb.Type_INT16
Uint16 = querypb.Type_UINT16
Int24 = querypb.Type_INT24
Uint24 = querypb.Type_UINT24
Int32 = querypb.Type_INT32
Uint32 = querypb.Type_UINT32
Int64 = querypb.Type_INT64
Uint64 = querypb.Type_UINT64
Float32 = querypb.Type_FLOAT32
Float64 = querypb.Type_FLOAT64
Timestamp = querypb.Type_TIMESTAMP
Date = querypb.Type_DATE
Time = querypb.Type_TIME
Datetime = querypb.Type_DATETIME
Year = querypb.Type_YEAR
Decimal = querypb.Type_DECIMAL
Text = querypb.Type_TEXT
Blob = querypb.Type_BLOB
VarChar = querypb.Type_VARCHAR
VarBinary = querypb.Type_VARBINARY
Char = querypb.Type_CHAR
Binary = querypb.Type_BINARY
Bit = querypb.Type_BIT
Enum = querypb.Type_ENUM
Set = querypb.Type_SET
Geometry = querypb.Type_GEOMETRY
TypeJSON = querypb.Type_JSON
Expression = querypb.Type_EXPRESSION
)
// bit-shift the mysql flags by two bytes so we
// can merge them with the mysql or vitess types.
const (
mysqlUnsigned = 32
mysqlBinary = 128
mysqlEnum = 256
mysqlSet = 2048
)
// If you add to this map, make sure you add a test case
// in tabletserver/endtoend.
var mysqlToType = map[int64]querypb.Type{
1: Int8,
2: Int16,
3: Int32,
4: Float32,
5: Float64,
6: Null,
7: Timestamp,
8: Int64,
9: Int24,
10: Date,
11: Time,
12: Datetime,
13: Year,
16: Bit,
245: TypeJSON,
246: Decimal,
249: Text,
250: Text,
251: Text,
252: Text,
253: VarChar,
254: Char,
255: Geometry,
}
// modifyType modifies the vitess type based on the
// mysql flag. The function checks specific flags based
// on the type. This allows us to ignore stray flags
// that MySQL occasionally sets.
func modifyType(typ querypb.Type, flags int64) querypb.Type {
switch typ {
case Int8:
if flags&mysqlUnsigned != 0 {
return Uint8
}
return Int8
case Int16:
if flags&mysqlUnsigned != 0 {
return Uint16
}
return Int16
case Int32:
if flags&mysqlUnsigned != 0 {
return Uint32
}
return Int32
case Int64:
if flags&mysqlUnsigned != 0 {
return Uint64
}
return Int64
case Int24:
if flags&mysqlUnsigned != 0 {
return Uint24
}
return Int24
case Text:
if flags&mysqlBinary != 0 {
return Blob
}
return Text
case VarChar:
if flags&mysqlBinary != 0 {
return VarBinary
}
return VarChar
case Char:
if flags&mysqlBinary != 0 {
return Binary
}
if flags&mysqlEnum != 0 {
return Enum
}
if flags&mysqlSet != 0 {
return Set
}
return Char
}
return typ
}
// MySQLToType computes the vitess type from mysql type and flags.
func MySQLToType(mysqlType, flags int64) (typ querypb.Type, err error) {
result, ok := mysqlToType[mysqlType]
if !ok {
return 0, fmt.Errorf("unsupported type: %d", mysqlType)
}
return modifyType(result, flags), nil
}
// typeToMySQL is the reverse of mysqlToType.
var typeToMySQL = map[querypb.Type]struct {
typ int64
flags int64
}{
Int8: {typ: 1},
Uint8: {typ: 1, flags: mysqlUnsigned},
Int16: {typ: 2},
Uint16: {typ: 2, flags: mysqlUnsigned},
Int32: {typ: 3},
Uint32: {typ: 3, flags: mysqlUnsigned},
Float32: {typ: 4},
Float64: {typ: 5},
Null: {typ: 6, flags: mysqlBinary},
Timestamp: {typ: 7},
Int64: {typ: 8},
Uint64: {typ: 8, flags: mysqlUnsigned},
Int24: {typ: 9},
Uint24: {typ: 9, flags: mysqlUnsigned},
Date: {typ: 10, flags: mysqlBinary},
Time: {typ: 11, flags: mysqlBinary},
Datetime: {typ: 12, flags: mysqlBinary},
Year: {typ: 13, flags: mysqlUnsigned},
Bit: {typ: 16, flags: mysqlUnsigned},
TypeJSON: {typ: 245},
Decimal: {typ: 246},
Text: {typ: 252},
Blob: {typ: 252, flags: mysqlBinary},
VarChar: {typ: 253},
VarBinary: {typ: 253, flags: mysqlBinary},
Char: {typ: 254},
Binary: {typ: 254, flags: mysqlBinary},
Enum: {typ: 254, flags: mysqlEnum},
Set: {typ: 254, flags: mysqlSet},
Geometry: {typ: 255},
}
// TypeToMySQL returns the equivalent mysql type and flag for a vitess type.
func TypeToMySQL(typ querypb.Type) (mysqlType, flags int64) {
val := typeToMySQL[typ]
return val.typ, val.flags
}
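
MySQLToType and TypeToMySQL translate between MySQL wire-level type/flag pairs and the vitess type constants above. A minimal round-trip sketch, assuming the vendored sqltypes import path; the literals 3 and 32 come from the mysqlToType table and the unsigned flag constant shown above:

package main

import (
	"fmt"

	"github.com/xwb1989/sqlparser/dependency/sqltypes"
)

func main() {
	// MySQL type 3 with the unsigned flag set maps to Uint32.
	typ, err := sqltypes.MySQLToType(3, 32)
	if err != nil {
		panic(err)
	}
	fmt.Println(typ == sqltypes.Uint32) // true

	// And back again: TypeToMySQL returns the wire type and flags.
	mysqlType, flags := sqltypes.TypeToMySQL(sqltypes.Uint32)
	fmt.Println(mysqlType, flags) // 3 32
}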

View File

@ -1,376 +0,0 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package sqltypes implements interfaces and types that represent SQL values.
package sqltypes
import (
"encoding/base64"
"encoding/json"
"fmt"
"strconv"
"github.com/xwb1989/sqlparser/dependency/bytes2"
"github.com/xwb1989/sqlparser/dependency/hack"
"github.com/xwb1989/sqlparser/dependency/querypb"
)
var (
// NULL represents the NULL value.
NULL = Value{}
// DontEscape tells you if a character should not be escaped.
DontEscape = byte(255)
nullstr = []byte("null")
)
// BinWriter interface is used for encoding values.
// Types like bytes.Buffer conform to this interface.
// We expect the writer objects to be in-memory buffers.
// So, we don't expect the write operations to fail.
type BinWriter interface {
Write([]byte) (int, error)
}
// Value can store any SQL value. If the value represents
// an integral type, the bytes are always stored as a canonical
// representation that matches how MySQL returns such values.
type Value struct {
typ querypb.Type
val []byte
}
// NewValue builds a Value using typ and val. If the value and typ
// don't match, it returns an error.
func NewValue(typ querypb.Type, val []byte) (v Value, err error) {
switch {
case IsSigned(typ):
if _, err := strconv.ParseInt(string(val), 0, 64); err != nil {
return NULL, err
}
return MakeTrusted(typ, val), nil
case IsUnsigned(typ):
if _, err := strconv.ParseUint(string(val), 0, 64); err != nil {
return NULL, err
}
return MakeTrusted(typ, val), nil
case IsFloat(typ) || typ == Decimal:
if _, err := strconv.ParseFloat(string(val), 64); err != nil {
return NULL, err
}
return MakeTrusted(typ, val), nil
case IsQuoted(typ) || typ == Null:
return MakeTrusted(typ, val), nil
}
// All other types are unsafe or invalid.
return NULL, fmt.Errorf("invalid type specified for MakeValue: %v", typ)
}
// MakeTrusted makes a new Value based on the type.
// This function should only be used if you know the value
// and type conform to the rules. Every place this function is
// called, a comment is needed that explains why it's justified.
// Exceptions: The current package and mysql package do not need
// comments. Other packages can also use the function to create
// VarBinary or VarChar values.
func MakeTrusted(typ querypb.Type, val []byte) Value {
if typ == Null {
return NULL
}
return Value{typ: typ, val: val}
}
// NewInt64 builds an Int64 Value.
func NewInt64(v int64) Value {
return MakeTrusted(Int64, strconv.AppendInt(nil, v, 10))
}
// NewInt32 builds an Int32 Value.
func NewInt32(v int32) Value {
return MakeTrusted(Int32, strconv.AppendInt(nil, int64(v), 10))
}
// NewUint64 builds a Uint64 Value.
func NewUint64(v uint64) Value {
return MakeTrusted(Uint64, strconv.AppendUint(nil, v, 10))
}
// NewFloat64 builds a Float64 Value.
func NewFloat64(v float64) Value {
return MakeTrusted(Float64, strconv.AppendFloat(nil, v, 'g', -1, 64))
}
// NewVarChar builds a VarChar Value.
func NewVarChar(v string) Value {
return MakeTrusted(VarChar, []byte(v))
}
// NewVarBinary builds a VarBinary Value.
// The input is a string because it's the most common use case.
func NewVarBinary(v string) Value {
return MakeTrusted(VarBinary, []byte(v))
}
// NewIntegral builds an integral type from a string representation.
// The type will be Int64 or Uint64. Int64 will be preferred where possible.
func NewIntegral(val string) (n Value, err error) {
signed, err := strconv.ParseInt(val, 0, 64)
if err == nil {
return MakeTrusted(Int64, strconv.AppendInt(nil, signed, 10)), nil
}
unsigned, err := strconv.ParseUint(val, 0, 64)
if err != nil {
return Value{}, err
}
return MakeTrusted(Uint64, strconv.AppendUint(nil, unsigned, 10)), nil
}
// InterfaceToValue builds a value from a go type.
// Supported types are nil, int64, uint64, float64,
// string and []byte.
// This function is deprecated. Use the type-specific
// functions instead.
func InterfaceToValue(goval interface{}) (Value, error) {
switch goval := goval.(type) {
case nil:
return NULL, nil
case []byte:
return MakeTrusted(VarBinary, goval), nil
case int64:
return NewInt64(goval), nil
case uint64:
return NewUint64(goval), nil
case float64:
return NewFloat64(goval), nil
case string:
return NewVarChar(goval), nil
default:
return NULL, fmt.Errorf("unexpected type %T: %v", goval, goval)
}
}
// Type returns the type of Value.
func (v Value) Type() querypb.Type {
return v.typ
}
// Raw returns the internal representation of the value. For newer types,
// this may not match MySQL's representation.
func (v Value) Raw() []byte {
return v.val
}
// ToBytes returns the value as MySQL would return it as []byte.
// In contrast, Raw returns the internal representation of the Value, which may not
// match MySQL's representation for newer types.
// If the value is not convertible like in the case of Expression, it returns nil.
func (v Value) ToBytes() []byte {
if v.typ == Expression {
return nil
}
return v.val
}
// Len returns the length.
func (v Value) Len() int {
return len(v.val)
}
// ToString returns the value as MySQL would return it as string.
// If the value is not convertible, as in the case of Expression, it returns an empty string.
func (v Value) ToString() string {
if v.typ == Expression {
return ""
}
return hack.String(v.val)
}
// String returns a printable version of the value.
func (v Value) String() string {
if v.typ == Null {
return "NULL"
}
if v.IsQuoted() {
return fmt.Sprintf("%v(%q)", v.typ, v.val)
}
return fmt.Sprintf("%v(%s)", v.typ, v.val)
}
// EncodeSQL encodes the value into an SQL statement. Can be binary.
func (v Value) EncodeSQL(b BinWriter) {
switch {
case v.typ == Null:
b.Write(nullstr)
case v.IsQuoted():
encodeBytesSQL(v.val, b)
default:
b.Write(v.val)
}
}
// EncodeASCII encodes the value using 7-bit clean ascii bytes.
func (v Value) EncodeASCII(b BinWriter) {
switch {
case v.typ == Null:
b.Write(nullstr)
case v.IsQuoted():
encodeBytesASCII(v.val, b)
default:
b.Write(v.val)
}
}
// IsNull returns true if Value is null.
func (v Value) IsNull() bool {
return v.typ == Null
}
// IsIntegral returns true if Value is an integral.
func (v Value) IsIntegral() bool {
return IsIntegral(v.typ)
}
// IsSigned returns true if Value is a signed integral.
func (v Value) IsSigned() bool {
return IsSigned(v.typ)
}
// IsUnsigned returns true if Value is an unsigned integral.
func (v Value) IsUnsigned() bool {
return IsUnsigned(v.typ)
}
// IsFloat returns true if Value is a float.
func (v Value) IsFloat() bool {
return IsFloat(v.typ)
}
// IsQuoted returns true if Value must be SQL-quoted.
func (v Value) IsQuoted() bool {
return IsQuoted(v.typ)
}
// IsText returns true if Value is a collatable text.
func (v Value) IsText() bool {
return IsText(v.typ)
}
// IsBinary returns true if Value is binary.
func (v Value) IsBinary() bool {
return IsBinary(v.typ)
}
// MarshalJSON should only be used for testing.
// It's not a complete implementation.
func (v Value) MarshalJSON() ([]byte, error) {
switch {
case v.IsQuoted():
return json.Marshal(v.ToString())
case v.typ == Null:
return nullstr, nil
}
return v.val, nil
}
// UnmarshalJSON should only be used for testing.
// It's not a complete implementation.
func (v *Value) UnmarshalJSON(b []byte) error {
if len(b) == 0 {
return fmt.Errorf("error unmarshaling empty bytes")
}
var val interface{}
var err error
switch b[0] {
case '-':
var ival int64
err = json.Unmarshal(b, &ival)
val = ival
case '"':
var bval []byte
err = json.Unmarshal(b, &bval)
val = bval
case 'n': // null
err = json.Unmarshal(b, &val)
default:
var uval uint64
err = json.Unmarshal(b, &uval)
val = uval
}
if err != nil {
return err
}
*v, err = InterfaceToValue(val)
return err
}
func encodeBytesSQL(val []byte, b BinWriter) {
buf := &bytes2.Buffer{}
buf.WriteByte('\'')
for _, ch := range val {
if encodedChar := SQLEncodeMap[ch]; encodedChar == DontEscape {
buf.WriteByte(ch)
} else {
buf.WriteByte('\\')
buf.WriteByte(encodedChar)
}
}
buf.WriteByte('\'')
b.Write(buf.Bytes())
}
func encodeBytesASCII(val []byte, b BinWriter) {
buf := &bytes2.Buffer{}
buf.WriteByte('\'')
encoder := base64.NewEncoder(base64.StdEncoding, buf)
encoder.Write(val)
encoder.Close()
buf.WriteByte('\'')
b.Write(buf.Bytes())
}
// SQLEncodeMap specifies how to escape binary data with '\'.
// Complies with http://dev.mysql.com/doc/refman/5.1/en/string-syntax.html
var SQLEncodeMap [256]byte
// SQLDecodeMap is the reverse of SQLEncodeMap
var SQLDecodeMap [256]byte
var encodeRef = map[byte]byte{
'\x00': '0',
'\'': '\'',
'"': '"',
'\b': 'b',
'\n': 'n',
'\r': 'r',
'\t': 't',
26: 'Z', // ctl-Z
'\\': '\\',
}
func init() {
for i := range SQLEncodeMap {
SQLEncodeMap[i] = DontEscape
SQLDecodeMap[i] = DontEscape
}
for i := range SQLEncodeMap {
if to, ok := encodeRef[byte(i)]; ok {
SQLEncodeMap[byte(i)] = to
SQLDecodeMap[to] = byte(i)
}
}
}
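
The Value code above is the core type: construction validates that the bytes match the claimed type, and EncodeSQL escapes quoted types using the SQLEncodeMap defined above. A minimal sketch, assuming the vendored import path used throughout this diff:

package main

import (
	"bytes"
	"fmt"

	"github.com/xwb1989/sqlparser/dependency/sqltypes"
)

func main() {
	v := sqltypes.NewVarChar("it's")
	buf := &bytes.Buffer{}
	v.EncodeSQL(buf) // quoted and backslash-escaped: 'it\'s'
	fmt.Println(buf.String())

	// NewIntegral prefers Int64 and falls back to Uint64 for values that overflow it.
	n, err := sqltypes.NewIntegral("18446744073709551615")
	if err != nil {
		panic(err)
	}
	fmt.Println(n.Type() == sqltypes.Uint64, n.ToString()) // true 18446744073709551615
}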

vendor/vendor.json vendored
View File

@ -662,10 +662,10 @@
"revisionTime": "2019-01-20T10:05:29Z"
},
{
"checksumSHA1": "ik77jlf0oMQTlSndP85DlIVOnOY=",
"checksumSHA1": "FO6q7sC2QTZMGDWv+/cGWJfjTCk=",
"path": "github.com/minio/parquet-go",
"revision": "7a17a919eeed02c393f3117a9ed1ac6df0da9aa5",
"revisionTime": "2019-01-18T04:40:39Z"
"revision": "d5e4e922da820530a1851afc22499c826c08f1e8",
"revisionTime": "2019-02-10T14:56:30Z"
},
{
"checksumSHA1": "N4WRPw4p3AN958RH/O53kUsJacQ=",
@ -829,12 +829,6 @@
"revision": "8b1c2da0d56deffdbb9e48d4414b4e674bd8083e",
"revisionTime": "2018-04-08T09:29:02Z"
},
{
"checksumSHA1": "lOtA2sG01oAO4Z/VSGqvX45CjfA=",
"path": "github.com/rasky/go-lzo",
"revision": "affec0788321cffe2c68821be1e07e87127b17f0",
"revisionTime": "2015-10-23T00:10:55Z"
},
{
"checksumSHA1": "D8AVDI39CJ+jvw0HOotYU2gz54c=",
"path": "github.com/rjeczalik/notify",
@ -847,11 +841,6 @@
"revision": "76f58f330d76a55c5badc74f6212e8a15e742c77",
"revisionTime": "2019-01-16T17:59:10Z"
},
{
"path": "github.com/rs/xhandler",
"revision": "ed27b6fd65218132ee50cd95f38474a3d8a2cd12",
"revisionTime": "2016-06-18T12:32:21-07:00"
},
{
"checksumSHA1": "6JP37UqrI0H80Gpk0Y2P+KXgn5M=",
"origin": "github.com/hashicorp/vault/vendor/github.com/ryanuber/go-glob",
@ -907,30 +896,6 @@
"revision": "ceec8f93295a060cdb565ec25e4ccf17941dbd55",
"revisionTime": "2016-11-14T21:01:44Z"
},
{
"checksumSHA1": "L/Q8Ylbo+wnj5whDFfMxxwyxmdo=",
"path": "github.com/xwb1989/sqlparser/dependency/bytes2",
"revision": "120387863bf27d04bc07db8015110a6e96d0146c",
"revisionTime": "2018-06-06T15:21:19Z"
},
{
"checksumSHA1": "f9K0yQdwD0Z2yc3bmDw2uqXt4hU=",
"path": "github.com/xwb1989/sqlparser/dependency/hack",
"revision": "120387863bf27d04bc07db8015110a6e96d0146c",
"revisionTime": "2018-06-06T15:21:19Z"
},
{
"checksumSHA1": "xpu1JU/VZ7gGNbU5Ol9Nm1oS4tY=",
"path": "github.com/xwb1989/sqlparser/dependency/querypb",
"revision": "120387863bf27d04bc07db8015110a6e96d0146c",
"revisionTime": "2018-06-06T15:21:19Z"
},
{
"checksumSHA1": "KbNIySCQgMG81TRMJp1IDRfSgv8=",
"path": "github.com/xwb1989/sqlparser/dependency/sqltypes",
"revision": "120387863bf27d04bc07db8015110a6e96d0146c",
"revisionTime": "2018-06-06T15:21:19Z"
},
{
"checksumSHA1": "aggkkQOGetHku/ZafO6GKmI2eBk=",
"path": "go.opencensus.io",