Replace godep with dep

This commit is contained in:
Manuel de Brito Fontes 2017-10-06 17:26:14 -03:00
parent 1e7489927c
commit bf5616c65b
14883 changed files with 3937406 additions and 361781 deletions

24
vendor/github.com/mailru/easyjson/jlexer/bytestostr.go generated vendored Normal file
View file

@ -0,0 +1,24 @@
// This file will only be included to the build if neither
// easyjson_nounsafe nor appengine build tag is set. See README notes
// for more details.
//+build !easyjson_nounsafe
//+build !appengine
package jlexer
import (
"reflect"
"unsafe"
)
// bytesToStr creates a string pointing at the slice to avoid copying.
//
// Warning: the string returned by the function should be used with care, as the whole input data
// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data
// may be garbage-collected even when the string exists.
func bytesToStr(data []byte) string {
	// Reinterpret the slice header as a string header sharing the same
	// backing array; no bytes are copied.
	h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
	shdr := reflect.StringHeader{Data: h.Data, Len: h.Len}
	return *(*string)(unsafe.Pointer(&shdr))
}

View file

@ -0,0 +1,13 @@
// This file is included to the build if any of the buildtags below
// are defined. Refer to README notes for more details.
//+build easyjson_nounsafe appengine
package jlexer
// bytesToStr converts data to a string the ordinary way, copying the bytes.
//
// Note that this method is roughly 1.5x slower than using the 'unsafe' method.
func bytesToStr(data []byte) string {
	s := string(data)
	return s
}

View file

@ -5,12 +5,14 @@
package jlexer
import (
"encoding/base64"
"errors"
"fmt"
"io"
"reflect"
"strconv"
"unicode"
"unicode/utf16"
"unicode/utf8"
"unsafe"
)
// tokenKind determines type of a token.
@ -45,11 +47,13 @@ type Lexer struct {
firstElement bool // Whether current element is the first in array or an object.
wantSep byte // A comma or a colon character, which need to occur before a token.
err error // Error encountered during lexing, if any.
UseMultipleErrors bool // If we want to use multiple errors.
fatalError error // Fatal error occurred during lexing. It is usually a syntax error.
multipleErrors []*LexerError // Semantic errors occurred during lexing. Marshalling will be continued after finding this errors.
}
// fetchToken scans the input for the next token.
func (r *Lexer) fetchToken() {
// FetchToken scans the input for the next token.
func (r *Lexer) FetchToken() {
r.token.kind = tokenUndef
r.start = r.pos
@ -147,7 +151,7 @@ func (r *Lexer) fetchToken() {
return
}
}
r.err = io.EOF
r.fatalError = io.EOF
return
}
@ -199,17 +203,6 @@ func (r *Lexer) fetchFalse() {
}
}
// bytesToStr creates a string pointing at the slice to avoid copying.
//
// Warning: the string returned by the function should be used with care, as the whole input data
// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data
// may be garbage-collected even when the string exists.
func bytesToStr(data []byte) string {
h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
shdr := reflect.StringHeader{h.Data, h.Len}
return *(*string)(unsafe.Pointer(&shdr))
}
// fetchNumber scans a number literal token.
func (r *Lexer) fetchNumber() {
hasE := false
@ -265,6 +258,33 @@ func findStringLen(data []byte) (hasEscapes bool, length int) {
return false, len(data)
}
// getu4 decodes a \uXXXX escape from the beginning of s and returns the
// four-digit hex value, or -1 if s does not start with a well-formed escape.
func getu4(s []byte) rune {
	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
		return -1
	}
	var val rune
	for _, c := range s[2:6] {
		var v byte
		switch {
		case c >= '0' && c <= '9':
			v = c - '0'
		case c >= 'a' && c <= 'f':
			v = c - 'a' + 10
		case c >= 'A' && c <= 'F':
			v = c - 'A' + 10
		default:
			// Non-hex digit: the escape is malformed.
			return -1
		}
		val = val<<4 | rune(v)
	}
	return val
}
// processEscape processes a single escape sequence and returns number of bytes processed.
func (r *Lexer) processEscape(data []byte) (int, error) {
if len(data) < 2 {
@ -292,39 +312,28 @@ func (r *Lexer) processEscape(data []byte) (int, error) {
r.token.byteValue = append(r.token.byteValue, '\t')
return 2, nil
case 'u':
default:
return 0, fmt.Errorf("syntax error")
}
var val rune
for i := 2; i < len(data) && i < 6; i++ {
var v byte
c = data[i]
switch c {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
v = c - '0'
case 'a', 'b', 'c', 'd', 'e', 'f':
v = c - 'a' + 10
case 'A', 'B', 'C', 'D', 'E', 'F':
v = c - 'A' + 10
default:
return 0, fmt.Errorf("syntax error")
rr := getu4(data)
if rr < 0 {
return 0, errors.New("syntax error")
}
val <<= 4
val |= rune(v)
read := 6
if utf16.IsSurrogate(rr) {
rr1 := getu4(data[read:])
if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
read += 6
rr = dec
} else {
rr = unicode.ReplacementChar
}
}
var d [4]byte
s := utf8.EncodeRune(d[:], rr)
r.token.byteValue = append(r.token.byteValue, d[:s]...)
return read, nil
}
l := utf8.RuneLen(val)
if l == -1 {
return 0, fmt.Errorf("invalid unicode escape")
}
var d [4]byte
utf8.EncodeRune(d[:], val)
r.token.byteValue = append(r.token.byteValue, d[:l]...)
return 6, nil
return 0, errors.New("syntax error")
}
// fetchString scans a string literal token.
@ -368,11 +377,11 @@ func (r *Lexer) fetchString() {
// scanToken scans the next token if no token is currently available in the lexer.
func (r *Lexer) scanToken() {
if r.token.kind != tokenUndef || r.err != nil {
if r.token.kind != tokenUndef || r.fatalError != nil {
return
}
r.fetchToken()
r.FetchToken()
}
// consume resets the current token to allow scanning the next one.
@ -383,20 +392,20 @@ func (r *Lexer) consume() {
// Ok returns true if no error (including io.EOF) was encountered during scanning.
func (r *Lexer) Ok() bool {
return r.err == nil
return r.fatalError == nil
}
const maxErrorContextLen = 13
func (r *Lexer) errParse(what string) {
if r.err == nil {
if r.fatalError == nil {
var str string
if len(r.Data)-r.pos <= maxErrorContextLen {
str = string(r.Data)
} else {
str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..."
}
r.err = &LexerError{
r.fatalError = &LexerError{
Reason: what,
Offset: r.pos,
Data: str,
@ -409,36 +418,64 @@ func (r *Lexer) errSyntax() {
}
func (r *Lexer) errInvalidToken(expected string) {
if r.err == nil {
var str string
if len(r.token.byteValue) <= maxErrorContextLen {
str = string(r.token.byteValue)
} else {
str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..."
}
r.err = &LexerError{
Reason: fmt.Sprintf("expected %s", expected),
Offset: r.pos,
Data: str,
}
if r.fatalError != nil {
return
}
if r.UseMultipleErrors {
r.pos = r.start
r.consume()
r.SkipRecursive()
switch expected {
case "[":
r.token.delimValue = ']'
r.token.kind = tokenDelim
case "{":
r.token.delimValue = '}'
r.token.kind = tokenDelim
}
r.addNonfatalError(&LexerError{
Reason: fmt.Sprintf("expected %s", expected),
Offset: r.start,
Data: string(r.Data[r.start:r.pos]),
})
return
}
var str string
if len(r.token.byteValue) <= maxErrorContextLen {
str = string(r.token.byteValue)
} else {
str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..."
}
r.fatalError = &LexerError{
Reason: fmt.Sprintf("expected %s", expected),
Offset: r.pos,
Data: str,
}
}
// GetPos returns the lexer's current byte offset into the input data.
func (r *Lexer) GetPos() int {
	return r.pos
}
// Delim consumes a token and verifies that it is the given delimiter.
func (r *Lexer) Delim(c byte) {
if r.token.kind == tokenUndef && r.Ok() {
r.fetchToken()
r.FetchToken()
}
if !r.Ok() || r.token.delimValue != c {
r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled.
r.errInvalidToken(string([]byte{c}))
} else {
r.consume()
}
r.consume()
}
// IsDelim returns true if there was no scanning error and next token is the given delimiter.
func (r *Lexer) IsDelim(c byte) bool {
if r.token.kind == tokenUndef && r.Ok() {
r.fetchToken()
r.FetchToken()
}
return !r.Ok() || r.token.delimValue == c
}
@ -446,7 +483,7 @@ func (r *Lexer) IsDelim(c byte) bool {
// Null verifies that the next token is null and consumes it.
func (r *Lexer) Null() {
if r.token.kind == tokenUndef && r.Ok() {
r.fetchToken()
r.FetchToken()
}
if !r.Ok() || r.token.kind != tokenNull {
r.errInvalidToken("null")
@ -457,7 +494,7 @@ func (r *Lexer) Null() {
// IsNull returns true if the next token is a null keyword.
func (r *Lexer) IsNull() bool {
if r.token.kind == tokenUndef && r.Ok() {
r.fetchToken()
r.FetchToken()
}
return r.Ok() && r.token.kind == tokenNull
}
@ -465,7 +502,7 @@ func (r *Lexer) IsNull() bool {
// Skip skips a single token.
func (r *Lexer) Skip() {
if r.token.kind == tokenUndef && r.Ok() {
r.fetchToken()
r.FetchToken()
}
r.consume()
}
@ -476,7 +513,6 @@ func (r *Lexer) Skip() {
// Note: no syntax validation is performed on the skipped data.
func (r *Lexer) SkipRecursive() {
r.scanToken()
var start, end byte
if r.token.delimValue == '{' {
@ -505,7 +541,7 @@ func (r *Lexer) SkipRecursive() {
return
}
case c == '\\' && inQuotes:
wasEscape = true
wasEscape = !wasEscape
continue
case c == '"' && inQuotes:
inQuotes = wasEscape
@ -515,7 +551,11 @@ func (r *Lexer) SkipRecursive() {
wasEscape = false
}
r.pos = len(r.Data)
r.err = io.EOF
r.fatalError = &LexerError{
Reason: "EOF reached while skipping array/object or token",
Offset: r.pos,
Data: string(r.Data[r.pos:]),
}
}
// Raw fetches the next item recursively as a data slice
@ -527,48 +567,107 @@ func (r *Lexer) Raw() []byte {
return r.Data[r.start:r.pos]
}
// IsStart returns whether the lexer is positioned at the start
// of an input string.
func (r *Lexer) IsStart() bool {
	// No bytes have been consumed yet.
	return r.pos == 0
}
// Consumed reads all remaining bytes from the input, publishing an error if
// there is anything but whitespace remaining.
func (r *Lexer) Consumed() {
	if r.pos > len(r.Data) || !r.Ok() {
		return
	}
	for r.pos < len(r.Data) {
		switch c := r.Data[r.pos]; c {
		case ' ', '\t', '\r', '\n':
			// Plain whitespace is allowed after the top-level value.
			r.pos++
			r.start++
		default:
			r.AddError(&LexerError{
				Reason: "invalid character '" + string(c) + "' after top-level value",
				Offset: r.pos,
				Data:   string(r.Data[r.pos:]),
			})
			return
		}
	}
}
// unsafeString scans a string token and returns both a zero-copy string view
// of its contents (via bytesToStr) and the underlying byte slice. The returned
// string may alias the lexer's buffers, so it must not outlive the input data.
func (r *Lexer) unsafeString() (string, []byte) {
	if r.token.kind == tokenUndef && r.Ok() {
		r.FetchToken()
	}
	if !r.Ok() || r.token.kind != tokenString {
		r.errInvalidToken("string")
		return "", nil
	}
	// Capture the bytes before consume() resets the token.
	bytes := r.token.byteValue
	ret := bytesToStr(r.token.byteValue)
	r.consume()
	return ret, bytes
}
// UnsafeString returns the string value if the token is a string literal.
//
// Warning: returned string may point to the input buffer, so the string should not outlive
// the input buffer. Intended pattern of usage is as an argument to a switch statement.
func (r *Lexer) UnsafeString() string {
if r.token.kind == tokenUndef && r.Ok() {
r.fetchToken()
}
if !r.Ok() || r.token.kind != tokenString {
r.errInvalidToken("string")
return ""
}
ret, _ := r.unsafeString()
return ret
}
ret := bytesToStr(r.token.byteValue)
r.consume()
// UnsafeBytes returns the byte slice if the token is a string literal.
//
// The returned slice may point into the lexer's buffers (see unsafeString),
// so it should not be retained past the lifetime of the input data.
func (r *Lexer) UnsafeBytes() []byte {
	_, ret := r.unsafeString()
	return ret
}
// String reads a string literal.
func (r *Lexer) String() string {
if r.token.kind == tokenUndef && r.Ok() {
r.fetchToken()
r.FetchToken()
}
if !r.Ok() || r.token.kind != tokenString {
r.errInvalidToken("string")
return ""
}
ret := string(r.token.byteValue)
r.consume()
return ret
}
// Bytes reads a string literal and base64 decodes it into a byte slice.
// On a malformed token or invalid base64 input it records an error and
// returns nil.
func (r *Lexer) Bytes() []byte {
	if r.token.kind == tokenUndef && r.Ok() {
		r.FetchToken()
	}
	if !r.Ok() || r.token.kind != tokenString {
		r.errInvalidToken("string")
		return nil
	}
	// Decode into a buffer sized for the worst case; Decode reports how
	// many bytes were actually written. (Renamed from `len`, which
	// shadowed the builtin.)
	ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue)))
	n, err := base64.StdEncoding.Decode(ret, r.token.byteValue)
	if err != nil {
		// Invalid base64 is fatal: the token cannot be interpreted.
		r.fatalError = &LexerError{
			Reason: err.Error(),
		}
		return nil
	}
	r.consume()
	return ret[:n]
}
// Bool reads a true or false boolean keyword.
func (r *Lexer) Bool() bool {
if r.token.kind == tokenUndef && r.Ok() {
r.fetchToken()
r.FetchToken()
}
if !r.Ok() || r.token.kind != tokenBool {
r.errInvalidToken("bool")
return false
}
ret := r.token.boolValue
r.consume()
@ -577,12 +676,11 @@ func (r *Lexer) Bool() bool {
func (r *Lexer) number() string {
if r.token.kind == tokenUndef && r.Ok() {
r.fetchToken()
r.FetchToken()
}
if !r.Ok() || r.token.kind != tokenNumber {
r.errInvalidToken("number")
return ""
}
ret := bytesToStr(r.token.byteValue)
r.consume()
@ -597,9 +695,11 @@ func (r *Lexer) Uint8() uint8 {
n, err := strconv.ParseUint(s, 10, 8)
if err != nil {
r.err = &LexerError{
r.addNonfatalError(&LexerError{
Offset: r.start,
Reason: err.Error(),
}
Data: s,
})
}
return uint8(n)
}
@ -612,9 +712,11 @@ func (r *Lexer) Uint16() uint16 {
n, err := strconv.ParseUint(s, 10, 16)
if err != nil {
r.err = &LexerError{
r.addNonfatalError(&LexerError{
Offset: r.start,
Reason: err.Error(),
}
Data: s,
})
}
return uint16(n)
}
@ -627,9 +729,11 @@ func (r *Lexer) Uint32() uint32 {
n, err := strconv.ParseUint(s, 10, 32)
if err != nil {
r.err = &LexerError{
r.addNonfatalError(&LexerError{
Offset: r.start,
Reason: err.Error(),
}
Data: s,
})
}
return uint32(n)
}
@ -642,9 +746,11 @@ func (r *Lexer) Uint64() uint64 {
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
r.err = &LexerError{
r.addNonfatalError(&LexerError{
Offset: r.start,
Reason: err.Error(),
}
Data: s,
})
}
return n
}
@ -661,9 +767,11 @@ func (r *Lexer) Int8() int8 {
n, err := strconv.ParseInt(s, 10, 8)
if err != nil {
r.err = &LexerError{
r.addNonfatalError(&LexerError{
Offset: r.start,
Reason: err.Error(),
}
Data: s,
})
}
return int8(n)
}
@ -676,9 +784,11 @@ func (r *Lexer) Int16() int16 {
n, err := strconv.ParseInt(s, 10, 16)
if err != nil {
r.err = &LexerError{
r.addNonfatalError(&LexerError{
Offset: r.start,
Reason: err.Error(),
}
Data: s,
})
}
return int16(n)
}
@ -691,9 +801,11 @@ func (r *Lexer) Int32() int32 {
n, err := strconv.ParseInt(s, 10, 32)
if err != nil {
r.err = &LexerError{
r.addNonfatalError(&LexerError{
Offset: r.start,
Reason: err.Error(),
}
Data: s,
})
}
return int32(n)
}
@ -706,9 +818,11 @@ func (r *Lexer) Int64() int64 {
n, err := strconv.ParseInt(s, 10, 64)
if err != nil {
r.err = &LexerError{
r.addNonfatalError(&LexerError{
Offset: r.start,
Reason: err.Error(),
}
Data: s,
})
}
return n
}
@ -718,61 +832,69 @@ func (r *Lexer) Int() int {
}
func (r *Lexer) Uint8Str() uint8 {
s := r.UnsafeString()
s, b := r.unsafeString()
if !r.Ok() {
return 0
}
n, err := strconv.ParseUint(s, 10, 8)
if err != nil {
r.err = &LexerError{
r.addNonfatalError(&LexerError{
Offset: r.start,
Reason: err.Error(),
}
Data: string(b),
})
}
return uint8(n)
}
func (r *Lexer) Uint16Str() uint16 {
s := r.UnsafeString()
s, b := r.unsafeString()
if !r.Ok() {
return 0
}
n, err := strconv.ParseUint(s, 10, 16)
if err != nil {
r.err = &LexerError{
r.addNonfatalError(&LexerError{
Offset: r.start,
Reason: err.Error(),
}
Data: string(b),
})
}
return uint16(n)
}
func (r *Lexer) Uint32Str() uint32 {
s := r.UnsafeString()
s, b := r.unsafeString()
if !r.Ok() {
return 0
}
n, err := strconv.ParseUint(s, 10, 32)
if err != nil {
r.err = &LexerError{
r.addNonfatalError(&LexerError{
Offset: r.start,
Reason: err.Error(),
}
Data: string(b),
})
}
return uint32(n)
}
func (r *Lexer) Uint64Str() uint64 {
s := r.UnsafeString()
s, b := r.unsafeString()
if !r.Ok() {
return 0
}
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
r.err = &LexerError{
r.addNonfatalError(&LexerError{
Offset: r.start,
Reason: err.Error(),
}
Data: string(b),
})
}
return n
}
@ -782,61 +904,69 @@ func (r *Lexer) UintStr() uint {
}
func (r *Lexer) Int8Str() int8 {
s := r.UnsafeString()
s, b := r.unsafeString()
if !r.Ok() {
return 0
}
n, err := strconv.ParseInt(s, 10, 8)
if err != nil {
r.err = &LexerError{
r.addNonfatalError(&LexerError{
Offset: r.start,
Reason: err.Error(),
}
Data: string(b),
})
}
return int8(n)
}
func (r *Lexer) Int16Str() int16 {
s := r.UnsafeString()
s, b := r.unsafeString()
if !r.Ok() {
return 0
}
n, err := strconv.ParseInt(s, 10, 16)
if err != nil {
r.err = &LexerError{
r.addNonfatalError(&LexerError{
Offset: r.start,
Reason: err.Error(),
}
Data: string(b),
})
}
return int16(n)
}
func (r *Lexer) Int32Str() int32 {
s := r.UnsafeString()
s, b := r.unsafeString()
if !r.Ok() {
return 0
}
n, err := strconv.ParseInt(s, 10, 32)
if err != nil {
r.err = &LexerError{
r.addNonfatalError(&LexerError{
Offset: r.start,
Reason: err.Error(),
}
Data: string(b),
})
}
return int32(n)
}
func (r *Lexer) Int64Str() int64 {
s := r.UnsafeString()
s, b := r.unsafeString()
if !r.Ok() {
return 0
}
n, err := strconv.ParseInt(s, 10, 64)
if err != nil {
r.err = &LexerError{
r.addNonfatalError(&LexerError{
Offset: r.start,
Reason: err.Error(),
}
Data: string(b),
})
}
return n
}
@ -853,9 +983,11 @@ func (r *Lexer) Float32() float32 {
n, err := strconv.ParseFloat(s, 32)
if err != nil {
r.err = &LexerError{
r.addNonfatalError(&LexerError{
Offset: r.start,
Reason: err.Error(),
}
Data: s,
})
}
return float32(n)
}
@ -868,27 +1000,53 @@ func (r *Lexer) Float64() float64 {
n, err := strconv.ParseFloat(s, 64)
if err != nil {
r.err = &LexerError{
r.addNonfatalError(&LexerError{
Offset: r.start,
Reason: err.Error(),
}
Data: s,
})
}
return n
}
func (r *Lexer) Error() error {
return r.err
return r.fatalError
}
func (r *Lexer) AddError(e error) {
if r.err == nil {
r.err = e
if r.fatalError == nil {
r.fatalError = e
}
}
// AddNonFatalError records e as a semantic error covering the bytes of the
// current token; lexing may continue afterwards (see addNonfatalError).
func (r *Lexer) AddNonFatalError(e error) {
	r.addNonfatalError(&LexerError{
		Offset: r.start,
		Data:   string(r.Data[r.start:r.pos]),
		Reason: e.Error(),
	})
}
// addNonfatalError records err. With UseMultipleErrors set, the error is
// appended to the accumulated list and lexing continues; otherwise it becomes
// the fatal error.
func (r *Lexer) addNonfatalError(err *LexerError) {
	if r.UseMultipleErrors {
		// We don't want to add errors with the same offset.
		if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset {
			return
		}
		r.multipleErrors = append(r.multipleErrors, err)
		return
	}
	r.fatalError = err
}
// GetNonFatalErrors returns the semantic errors accumulated while lexing with
// UseMultipleErrors enabled.
func (r *Lexer) GetNonFatalErrors() []*LexerError {
	return r.multipleErrors
}
// Interface fetches an interface{} analogous to the 'encoding/json' package.
func (r *Lexer) Interface() interface{} {
if r.token.kind == tokenUndef && r.Ok() {
r.fetchToken()
r.FetchToken()
}
if !r.Ok() {

251
vendor/github.com/mailru/easyjson/jlexer/lexer_test.go generated vendored Normal file
View file

@ -0,0 +1,251 @@
package jlexer
import (
"bytes"
"reflect"
"testing"
)
// TestString exercises Lexer.String over a table of JSON string literals,
// covering simple strings, escape sequences, \u escapes (including surrogate
// pairs), and error cases. The expected value for the lone-surrogate case had
// been corrupted by a text-encoding round trip; it is restored to "\ufffd"
// (U+FFFD), matching the adjacent comment.
func TestString(t *testing.T) {
	for i, test := range []struct {
		toParse   string
		want      string
		wantError bool
	}{
		{toParse: `"simple string"`, want: "simple string"},
		{toParse: " \r\r\n\t " + `"test"`, want: "test"},
		{toParse: `"\n\t\"\/\\\f\r"`, want: "\n\t\"/\\\f\r"},
		{toParse: `"\u0020"`, want: " "},
		{toParse: `"\u0020-\t"`, want: " -\t"},
		{toParse: `"\ufffd\uFFFD"`, want: "\ufffd\ufffd"},
		{toParse: `"\ud83d\ude00"`, want: "😀"},
		{toParse: `"\ud83d\ude08"`, want: "😈"},
		{toParse: `"\ud8"`, wantError: true},

		{toParse: `"test"junk`, want: "test"},

		{toParse: `5`, wantError: true},        // not a string
		{toParse: `"\x"`, wantError: true},     // invalid escape
		{toParse: `"\ud800"`, want: "\ufffd"},  // invalid utf-8 char; return replacement char
	} {
		l := Lexer{Data: []byte(test.toParse)}

		got := l.String()
		if got != test.want {
			t.Errorf("[%d, %q] String() = %v; want %v", i, test.toParse, got, test.want)
		}
		err := l.Error()
		if err != nil && !test.wantError {
			t.Errorf("[%d, %q] String() error: %v", i, test.toParse, err)
		} else if err == nil && test.wantError {
			t.Errorf("[%d, %q] String() ok; want error", i, test.toParse)
		}
	}
}
// TestBytes exercises Lexer.Bytes, which reads a JSON string literal and
// base64-decodes its contents, over both valid and invalid inputs.
func TestBytes(t *testing.T) {
	for i, test := range []struct {
		toParse   string
		want      string
		wantError bool
	}{
		{toParse: `"c2ltcGxlIHN0cmluZw=="`, want: "simple string"},
		{toParse: " \r\r\n\t " + `"dGVzdA=="`, want: "test"},

		{toParse: `5`, wantError: true},                     // not a JSON string
		{toParse: `"foobar"`, wantError: true},              // not base64 encoded
		{toParse: `"c2ltcGxlIHN0cmluZw="`, wantError: true}, // invalid base64 padding
	} {
		l := Lexer{Data: []byte(test.toParse)}

		got := l.Bytes()
		if bytes.Compare(got, []byte(test.want)) != 0 {
			t.Errorf("[%d, %q] Bytes() = %v; want: %v", i, test.toParse, got, []byte(test.want))
		}
		err := l.Error()
		if err != nil && !test.wantError {
			t.Errorf("[%d, %q] Bytes() error: %v", i, test.toParse, err)
		} else if err == nil && test.wantError {
			t.Errorf("[%d, %q] Bytes() ok; want error", i, test.toParse)
		}
	}
}
// TestNumber exercises the unexported number scanner over integers, floats,
// exponent notation, and malformed numeric inputs.
func TestNumber(t *testing.T) {
	for i, test := range []struct {
		toParse   string
		want      string
		wantError bool
	}{
		{toParse: "123", want: "123"},
		{toParse: "-123", want: "-123"},
		{toParse: "\r\n12.35", want: "12.35"},
		{toParse: "12.35e+1", want: "12.35e+1"},
		{toParse: "12.35e-15", want: "12.35e-15"},
		{toParse: "12.35E-15", want: "12.35E-15"},
		{toParse: "12.35E15", want: "12.35E15"},

		{toParse: `"a"`, wantError: true},
		{toParse: "123junk", wantError: true},
		{toParse: "1.2.3", wantError: true},
		{toParse: "1e2e3", wantError: true},
		{toParse: "1e2.3", wantError: true},
	} {
		l := Lexer{Data: []byte(test.toParse)}

		got := l.number()
		if got != test.want {
			t.Errorf("[%d, %q] number() = %v; want %v", i, test.toParse, got, test.want)
		}
		err := l.Error()
		if err != nil && !test.wantError {
			t.Errorf("[%d, %q] number() error: %v", i, test.toParse, err)
		} else if err == nil && test.wantError {
			t.Errorf("[%d, %q] number() ok; want error", i, test.toParse)
		}
	}
}
// TestBool exercises Lexer.Bool over the boolean keywords and a set of inputs
// that must be rejected (wrong case, trailing junk, non-boolean tokens).
func TestBool(t *testing.T) {
	cases := []struct {
		toParse   string
		want      bool
		wantError bool
	}{
		{toParse: "true", want: true},
		{toParse: "false", want: false},

		{toParse: "1", wantError: true},
		{toParse: "truejunk", wantError: true},
		{toParse: `false"junk"`, wantError: true},
		{toParse: "True", wantError: true},
		{toParse: "False", wantError: true},
	}
	for i, tc := range cases {
		l := Lexer{Data: []byte(tc.toParse)}

		if got := l.Bool(); got != tc.want {
			t.Errorf("[%d, %q] Bool() = %v; want %v", i, tc.toParse, got, tc.want)
		}
		switch err := l.Error(); {
		case err != nil && !tc.wantError:
			t.Errorf("[%d, %q] Bool() error: %v", i, tc.toParse, err)
		case err == nil && tc.wantError:
			t.Errorf("[%d, %q] Bool() ok; want error", i, tc.toParse)
		}
	}
}
// TestSkipRecursive exercises Lexer.SkipRecursive: after skipping one value
// (scalar, array, or object — including brackets/braces hidden inside string
// literals and trailing escaped backslashes), the named remainder must be left
// unconsumed.
func TestSkipRecursive(t *testing.T) {
	for i, test := range []struct {
		toParse   string
		left      string
		wantError bool
	}{
		{toParse: "5, 4", left: ", 4"},
		{toParse: "[5, 6], 4", left: ", 4"},
		{toParse: "[5, [7,8]]: 4", left: ": 4"},

		{toParse: `{"a":1}, 4`, left: ", 4"},
		{toParse: `{"a":1, "b":{"c": 5}, "e":[12,15]}, 4`, left: ", 4"},

		// array start/end chars in a string
		{toParse: `[5, "]"], 4`, left: ", 4"},
		{toParse: `[5, "\"]"], 4`, left: ", 4"},
		{toParse: `[5, "["], 4`, left: ", 4"},
		{toParse: `[5, "\"["], 4`, left: ", 4"},

		// object start/end chars in a string
		{toParse: `{"a}":1}, 4`, left: ", 4"},
		{toParse: `{"a\"}":1}, 4`, left: ", 4"},
		{toParse: `{"a{":1}, 4`, left: ", 4"},
		{toParse: `{"a\"{":1}, 4`, left: ", 4"},

		// object with double slashes at the end of string
		{toParse: `{"a":"hey\\"}, 4`, left: ", 4"},
	} {
		l := Lexer{Data: []byte(test.toParse)}

		l.SkipRecursive()

		got := string(l.Data[l.pos:])
		if got != test.left {
			t.Errorf("[%d, %q] SkipRecursive() left = %v; want %v", i, test.toParse, got, test.left)
		}
		err := l.Error()
		if err != nil && !test.wantError {
			t.Errorf("[%d, %q] SkipRecursive() error: %v", i, test.toParse, err)
		} else if err == nil && test.wantError {
			t.Errorf("[%d, %q] SkipRecursive() ok; want error", i, test.toParse)
		}
	}
}
// TestInterface exercises Lexer.Interface, which decodes an arbitrary JSON
// value into the encoding/json-style dynamic types (nil, bool, string,
// float64, map[string]interface{}, []interface{}), including malformed inputs
// that must produce an error.
func TestInterface(t *testing.T) {
	for i, test := range []struct {
		toParse   string
		want      interface{}
		wantError bool
	}{
		{toParse: "null", want: nil},
		{toParse: "true", want: true},
		{toParse: `"a"`, want: "a"},
		{toParse: "5", want: float64(5)},

		{toParse: `{}`, want: map[string]interface{}{}},
		{toParse: `[]`, want: []interface{}(nil)},

		{toParse: `{"a": "b"}`, want: map[string]interface{}{"a": "b"}},
		{toParse: `[5]`, want: []interface{}{float64(5)}},

		{toParse: `{"a":5 , "b" : "string"}`, want: map[string]interface{}{"a": float64(5), "b": "string"}},
		{toParse: `["a", 5 , null, true]`, want: []interface{}{"a", float64(5), nil, true}},

		{toParse: `{"a" "b"}`, wantError: true},
		{toParse: `{"a": "b",}`, wantError: true},
		{toParse: `{"a":"b","c" "b"}`, wantError: true},
		{toParse: `{"a": "b","c":"d",}`, wantError: true},
		{toParse: `{,}`, wantError: true},

		{toParse: `[1, 2,]`, wantError: true},
		{toParse: `[1 2]`, wantError: true},
		{toParse: `[,]`, wantError: true},
	} {
		l := Lexer{Data: []byte(test.toParse)}

		got := l.Interface()
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("[%d, %q] Interface() = %v; want %v", i, test.toParse, got, test.want)
		}
		err := l.Error()
		if err != nil && !test.wantError {
			t.Errorf("[%d, %q] Interface() error: %v", i, test.toParse, err)
		} else if err == nil && test.wantError {
			t.Errorf("[%d, %q] Interface() ok; want error", i, test.toParse)
		}
	}
}
// TestConsumed exercises Lexer.Consumed: trailing whitespace after the
// top-level value is accepted, while any other trailing character is an error.
func TestConsumed(t *testing.T) {
	for i, test := range []struct {
		toParse   string
		wantError bool
	}{
		{toParse: "", wantError: false},
		{toParse: "   ", wantError: false},
		{toParse: "\r\n", wantError: false},
		{toParse: "\t\t", wantError: false},

		{toParse: "{", wantError: true},
	} {
		l := Lexer{Data: []byte(test.toParse)}
		l.Consumed()

		err := l.Error()
		if err != nil && !test.wantError {
			t.Errorf("[%d, %q] Consumed() error: %v", i, test.toParse, err)
		} else if err == nil && test.wantError {
			t.Errorf("[%d, %q] Consumed() ok; want error", i, test.toParse)
		}
	}
}