Fix escaping
This commit is contained in:
parent c62cd5cc55
commit 627ac3fc45
@@ -33,8 +33,13 @@ const (
var (
// jsonHandle and jsonHandlePretty are the codec handles to JSON encode
// structs. The pretty handle will add indents for easier human consumption.
jsonHandle = &codec.JsonHandle{}
jsonHandlePretty = &codec.JsonHandle{Indent: 4}
jsonHandle = &codec.JsonHandle{
HTMLCharsAsIs: true,
}
jsonHandlePretty = &codec.JsonHandle{
HTMLCharsAsIs: true,
Indent: 4,
}
)

// HTTPServer is used to wrap an Agent and expose it over an HTTP interface
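A minimal sketch (not part of the commit) of what the new HTMLCharsAsIs flag changes, assuming the vendored ugorji/go codec at this revision: with the flag set, '<', '>' and '&' are written as-is instead of being escaped to \u003c, \u003e and \u0026.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var escaped, asIs []byte

	// default handle: HTML-special characters are escaped
	codec.NewEncoderBytes(&escaped, &codec.JsonHandle{}).MustEncode("a<b&c>d")
	// handle as configured above: the characters pass through untouched
	codec.NewEncoderBytes(&asIs, &codec.JsonHandle{HTMLCharsAsIs: true}).MustEncode("a<b&c>d")

	fmt.Println(string(escaped)) // "a\u003cb\u0026c\u003ed"
	fmt.Println(string(asIs))    // "a<b&c>d"
}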
@@ -2,10 +2,18 @@ package command

import (
"bytes"
"encoding/json"
"fmt"
"io"
"text/template"

"github.com/ugorji/go/codec"
)

var (
jsonHandlePretty = &codec.JsonHandle{
HTMLCharsAsIs: true,
Indent: 4,
}
)

//DataFormatter is a transformer of the data.

@@ -33,12 +41,16 @@ type JSONFormat struct {

// TransformData returns JSON format string data.
func (p *JSONFormat) TransformData(data interface{}) (string, error) {
out, err := json.MarshalIndent(&data, "", " ")
var buf bytes.Buffer
enc := codec.NewEncoder(&buf, jsonHandlePretty)
err := enc.Encode(data)
if err != nil {
return "", err
} else {
buf.Write([]byte("\n"))
}

return string(out), nil
return buf.String(), nil
}

type TemplateFormat struct {
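A rough side-by-side (hypothetical data, assuming the vendored codec at this revision) of why TransformData was switched from encoding/json to the codec encoder: encoding/json always escapes &, < and > in strings, while the handle above keeps them intact.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	data := map[string]string{"cmd": "a && b > c"}

	// old path: encoding/json escapes & < > to \u0026 \u003c \u003e
	old, _ := json.MarshalIndent(data, "", "    ")
	fmt.Println(string(old))

	// new path: codec handle with HTMLCharsAsIs keeps the characters as written
	h := &codec.JsonHandle{HTMLCharsAsIs: true, Indent: 4}
	var buf bytes.Buffer
	if err := codec.NewEncoder(&buf, h).Encode(data); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
}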
@@ -1,7 +1,6 @@
package command

import (
"encoding/json"
"fmt"
"strings"

@@ -158,12 +157,17 @@ func (c *InspectCommand) Run(args []string) int {

// Print the contents of the job
req := api.RegisterJobRequest{Job: job}
buf, err := json.MarshalIndent(req, "", " ")
f, err := DataFormat("json", "")
if err != nil {
c.Ui.Error(fmt.Sprintf("Error converting job: %s", err))
c.Ui.Error(fmt.Sprintf("Error getting formatter: %s", err))
return 1
}

c.Ui.Output(string(buf))
out, err := f.TransformData(req)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error formatting the data: %s", err))
return 1
}
c.Ui.Output(out)
return 0
}
@@ -68,7 +68,7 @@ Rich Feature Set includes:
- Encode/Decode from/to chan types (for iterative streaming support)
- Drop-in replacement for encoding/json. `json:` key in struct tag supported.
- Provides a RPC Server and Client Codec for net/rpc communication protocol.
- Handle unique idiosynchracies of codecs e.g.
- Handle unique idiosyncrasies of codecs e.g.
- For messagepack, configure how ambiguities in handling raw bytes are resolved
- For messagepack, provide rpc server/client codec to support
msgpack-rpc protocol defined at:

@@ -181,7 +181,7 @@ package codec
// - Decoding using a chan is good, but incurs concurrency costs.
// This is because there's no fast way to use a channel without it
// having to switch goroutines constantly.
// Callback pattern is still the best. Maybe cnsider supporting something like:
// Callback pattern is still the best. Maybe consider supporting something like:
// type X struct {
// Name string
// Ys []Y

@@ -68,7 +68,7 @@ Rich Feature Set includes:
- Encode/Decode from/to chan types (for iterative streaming support)
- Drop-in replacement for encoding/json. `json:` key in struct tag supported.
- Provides a RPC Server and Client Codec for net/rpc communication protocol.
- Handle unique idiosynchracies of codecs e.g.
- Handle unique idiosyncrasies of codecs e.g.
- For messagepack, configure how ambiguities in handling raw bytes are resolved
- For messagepack, provide rpc server/client codec to support
msgpack-rpc protocol defined at:
@@ -348,6 +348,13 @@ func (d *bincDecDriver) readNextBd() {
d.bdRead = true
}

func (d *bincDecDriver) uncacheRead() {
if d.bdRead {
d.r.unreadn1()
d.bdRead = false
}
}

func (d *bincDecDriver) ContainerType() (vt valueType) {
if d.vd == bincVdSpecial && d.vs == bincSpNil {
return valueTypeNil

@@ -632,12 +639,12 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool)
if d.br {
bs2 = d.r.readx(slen)
} else if len(bs) == 0 {
bs2 = decByteSlice(d.r, slen, d.b[:])
bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, d.b[:])
} else {
bs2 = decByteSlice(d.r, slen, bs)
bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs)
}
} else {
bs2 = decByteSlice(d.r, slen, bs)
bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs)
}
if withString {
s = string(bs2)

@@ -689,7 +696,7 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool)
// since using symbols, do not store any part of
// the parameter bs in the map, as it might be a shared buffer.
// bs2 = decByteSlice(d.r, slen, bs)
bs2 = decByteSlice(d.r, slen, nil)
bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, nil)
if withString {
s = string(bs2)
}

@@ -705,7 +712,7 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool)
}

func (d *bincDecDriver) DecodeString() (s string) {
// DecodeBytes does not accomodate symbols, whose impl stores string version in map.
// DecodeBytes does not accommodate symbols, whose impl stores string version in map.
// Use decStringAndBytes directly.
// return string(d.DecodeBytes(d.b[:], true, true))
_, s = d.decStringAndBytes(d.b[:], true, true)

@@ -740,7 +747,7 @@ func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
}

func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
@@ -188,6 +188,13 @@ func (d *cborDecDriver) readNextBd() {
d.bdRead = true
}

func (d *cborDecDriver) uncacheRead() {
if d.bdRead {
d.r.unreadn1()
d.bdRead = false
}
}

func (d *cborDecDriver) ContainerType() (vt valueType) {
if d.bd == cborBdNil {
return valueTypeNil

@@ -414,7 +421,7 @@ func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
}

func (d *cborDecDriver) DecodeString() (s string) {
@@ -91,10 +91,12 @@ type decDriver interface {
uncacheRead()
}

type decNoSeparator struct{}
type decNoSeparator struct {
}

func (_ decNoSeparator) ReadEnd() {}
func (_ decNoSeparator) uncacheRead() {}
func (_ decNoSeparator) ReadEnd() {}

// func (_ decNoSeparator) uncacheRead() {}

type DecodeOptions struct {
// MapType specifies type to use during schema-less decoding of a map in the stream.

@@ -105,10 +107,10 @@ type DecodeOptions struct {
// If nil, we use []interface{}
SliceType reflect.Type

// MaxInitLen defines the initial length that we "make" a collection (slice, chan or map) with.
// MaxInitLen defines the maxinum initial length that we "make" a collection (string, slice, map, chan).
// If 0 or negative, we default to a sensible value based on the size of an element in the collection.
//
// For example, when decoding, a stream may say that it has MAX_UINT elements.
// For example, when decoding, a stream may say that it has 2^64 elements.
// We should not auto-matically provision a slice of that length, to prevent Out-Of-Memory crash.
// Instead, we provision up to MaxInitLen, fill that up, and start appending after that.
MaxInitLen int

@@ -161,6 +163,15 @@ type DecodeOptions struct {
// Note: Handles will be smart when using the intern functionality.
// So everything will not be interned.
InternString bool

// PreferArrayOverSlice controls whether to decode to an array or a slice.
//
// This only impacts decoding into a nil interface{}.
// Consequently, it has no effect on codecgen.
//
// *Note*: This only applies if using go1.5 and above,
// as it requires reflect.ArrayOf support which was absent before go1.5.
PreferArrayOverSlice bool
}

// ------------------------------------

@@ -433,6 +444,10 @@ func (f *decFnInfo) rawExt(rv reflect.Value) {
f.d.d.DecodeExt(rv.Addr().Interface(), 0, nil)
}

func (f *decFnInfo) raw(rv reflect.Value) {
rv.SetBytes(f.d.raw())
}

func (f *decFnInfo) ext(rv reflect.Value) {
f.d.d.DecodeExt(rv.Addr().Interface(), f.xfTag, f.xfFn)
}

@@ -605,8 +620,11 @@ func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) {
n.ss = append(n.ss, nil)
var v2 interface{} = &n.ss[l]
d.decode(v2)
rvn = reflect.ValueOf(v2).Elem()
n.ss = n.ss[:l]
rvn = reflect.ValueOf(v2).Elem()
if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice {
rvn = reflectArrayOf(rvn)
}
} else {
rvn = reflect.New(d.h.SliceType).Elem()
d.decodeValue(rvn, nil)

@@ -1169,7 +1187,7 @@ type decRtidFn struct {
// primitives are being decoded.
//
// maps and arrays are not handled by this mechanism.
// However, RawExt is, and we accomodate for extensions that decode
// However, RawExt is, and we accommodate for extensions that decode
// RawExt from DecodeNaked, but need to decode the value subsequently.
// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat.
//

@@ -1507,6 +1525,8 @@ func (d *Decoder) decode(iv interface{}) {
*v = 0
case *[]uint8:
*v = nil
case *Raw:
*v = nil
case reflect.Value:
if v.Kind() != reflect.Ptr || v.IsNil() {
d.errNotValidPtrValue(v)

@@ -1576,6 +1596,9 @@ func (d *Decoder) decode(iv interface{}) {
case *[]uint8:
*v = d.d.DecodeBytes(*v, false, false)

case *Raw:
*v = d.raw()

case *interface{}:
d.decodeValueNotNil(reflect.ValueOf(iv).Elem(), nil)

@@ -1697,6 +1720,8 @@ func (d *Decoder) getDecFn(rt reflect.Type, checkFastpath, checkCodecSelfer bool
fn.f = (*decFnInfo).selferUnmarshal
} else if rtid == rawExtTypId {
fn.f = (*decFnInfo).rawExt
} else if rtid == rawTypId {
fn.f = (*decFnInfo).raw
} else if d.d.IsBuiltinType(rtid) {
fn.f = (*decFnInfo).builtin
} else if xfFn := d.h.getExt(rtid); xfFn != nil {

@@ -1873,6 +1898,15 @@ func (d *Decoder) nextValueBytes() []byte {
return d.r.stopTrack()
}

func (d *Decoder) raw() []byte {
// ensure that this is not a view into the bytes
// i.e. make new copy always.
bs := d.nextValueBytes()
bs2 := make([]byte, len(bs))
copy(bs2, bs)
return bs2
}

// --------------------------------------------------

// decSliceHelper assists when decoding into a slice, from a map or an array in the stream.

@@ -1927,18 +1961,31 @@ func (x decSliceHelper) ElemContainerState(index int) {
}
}

func decByteSlice(r decReader, clen int, bs []byte) (bsOut []byte) {
func decByteSlice(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) {
if clen == 0 {
return zeroByteSlice
}
if len(bs) == clen {
bsOut = bs
r.readb(bsOut)
} else if cap(bs) >= clen {
bsOut = bs[:clen]
r.readb(bsOut)
} else {
bsOut = make([]byte, clen)
// bsOut = make([]byte, clen)
len2, _ := decInferLen(clen, maxInitLen, 1)
bsOut = make([]byte, len2)
r.readb(bsOut)
for len2 < clen {
len3, _ := decInferLen(clen-len2, maxInitLen, 1)
// fmt.Printf(">>>>> TESTING: in loop: clen: %v, maxInitLen: %v, len2: %v, len3: %v\n", clen, maxInitLen, len2, len3)
bs3 := bsOut
bsOut = make([]byte, len2+len3)
copy(bsOut, bs3)
r.readb(bsOut[len2:])
len2 += len3
}
}
r.readb(bsOut)
return
}
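A small usage sketch of the MaxInitLen option documented above (handle choice and sizes are illustrative): it caps only the initial allocation for a collection whose declared length comes from the stream; decoding still yields the full value by growing in bounded steps.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	h := &codec.MsgpackHandle{}
	h.MaxInitLen = 1024 // provision at most 1024 elements up front, then keep appending

	var b []byte
	codec.NewEncoderBytes(&b, h).MustEncode(make([]byte, 10000))

	var out []byte
	codec.NewDecoderBytes(b, h).MustDecode(&out)
	fmt.Println(len(out)) // 10000 - the cap bounds the first make, not the final result
}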
@@ -0,0 +1,16 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

// +build go1.5

package codec

import "reflect"

const reflectArrayOfSupported = true

func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) {
rvn2 = reflect.New(reflect.ArrayOf(rvn.Len(), intfTyp)).Elem()
reflect.Copy(rvn2, rvn)
return
}

@@ -0,0 +1,14 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

// +build !go1.5

package codec

import "reflect"

const reflectArrayOfSupported = false

func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) {
panic("reflect.ArrayOf unsupported")
}
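A sketch of the PreferArrayOverSlice behaviour that the two build-tagged files above enable (go1.5+ only, since it relies on reflect.ArrayOf); the handle choice here is arbitrary.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	h := &codec.CborHandle{}
	h.PreferArrayOverSlice = true

	var b []byte
	codec.NewEncoderBytes(&b, h).MustEncode([]int{1, 2, 3})

	var v interface{}
	codec.NewDecoderBytes(b, h).MustDecode(&v)
	fmt.Printf("%T\n", v) // expected: [3]interface {} instead of []interface {}
}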
@@ -119,6 +119,19 @@ type EncodeOptions struct {
// This is opt-in, as there may be a performance hit to checking circular references.
CheckCircularRef bool

// RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers
// when checking if a value is empty.
//
// Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls.
RecursiveEmptyCheck bool

// Raw controls whether we encode Raw values.
// This is a "dangerous" option and must be explicitly set.
// If set, we blindly encode Raw values as-is, without checking
// if they are a correct representation of a value in that format.
// If unset, we error out.
Raw bool

// AsSymbols defines what should be encoded as symbols.
//
// Encoding as symbols can reduce the encoded size significantly.

@@ -222,45 +235,57 @@ type bytesEncWriter struct {
}

func (z *bytesEncWriter) writeb(s []byte) {
if len(s) > 0 {
c := z.grow(len(s))
copy(z.b[c:], s)
if len(s) == 0 {
return
}
oc, a := z.growNoAlloc(len(s))
if a {
z.growAlloc(len(s), oc)
}
copy(z.b[oc:], s)
}

func (z *bytesEncWriter) writestr(s string) {
if len(s) > 0 {
c := z.grow(len(s))
copy(z.b[c:], s)
if len(s) == 0 {
return
}
oc, a := z.growNoAlloc(len(s))
if a {
z.growAlloc(len(s), oc)
}
copy(z.b[oc:], s)
}

func (z *bytesEncWriter) writen1(b1 byte) {
c := z.grow(1)
z.b[c] = b1
oc, a := z.growNoAlloc(1)
if a {
z.growAlloc(1, oc)
}
z.b[oc] = b1
}

func (z *bytesEncWriter) writen2(b1 byte, b2 byte) {
c := z.grow(2)
z.b[c] = b1
z.b[c+1] = b2
oc, a := z.growNoAlloc(2)
if a {
z.growAlloc(2, oc)
}
z.b[oc+1] = b2
z.b[oc] = b1
}

func (z *bytesEncWriter) atEndOfEncode() {
*(z.out) = z.b[:z.c]
}

func (z *bytesEncWriter) grow(n int) (oldcursor int) {
// have a growNoalloc(n int), which can be inlined.
// if allocation is needed, then call growAlloc(n int)

func (z *bytesEncWriter) growNoAlloc(n int) (oldcursor int, allocNeeded bool) {
oldcursor = z.c
z.c = oldcursor + n
z.c = z.c + n
if z.c > len(z.b) {
if z.c > cap(z.b) {
// appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls.
// bytes.Buffer model (2*cap + n): much better
// bs := make([]byte, 2*cap(z.b)+n)
bs := make([]byte, growCap(cap(z.b), 1, n))
copy(bs, z.b[:oldcursor])
z.b = bs
allocNeeded = true
} else {
z.b = z.b[:cap(z.b)]
}

@@ -268,6 +293,15 @@ func (z *bytesEncWriter) grow(n int) (oldcursor int) {
return
}

func (z *bytesEncWriter) growAlloc(n int, oldcursor int) {
// appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls.
// bytes.Buffer model (2*cap + n): much better
// bs := make([]byte, 2*cap(z.b)+n)
bs := make([]byte, growCap(cap(z.b), 1, n))
copy(bs, z.b[:oldcursor])
z.b = bs
}

// ---------------------------------------------

type encFnInfo struct {

@@ -282,6 +316,10 @@ func (f *encFnInfo) builtin(rv reflect.Value) {
f.e.e.EncodeBuiltin(f.ti.rtid, rv.Interface())
}

func (f *encFnInfo) raw(rv reflect.Value) {
f.e.raw(rv.Interface().(Raw))
}

func (f *encFnInfo) rawExt(rv reflect.Value) {
// rev := rv.Interface().(RawExt)
// f.e.e.EncodeRawExt(&rev, f.e)

@@ -308,7 +346,7 @@ func (f *encFnInfo) getValueForMarshalInterface(rv reflect.Value, indir int8) (v
v = rv.Interface()
} else if indir == -1 {
// If a non-pointer was passed to Encode(), then that value is not addressable.
// Take addr if addresable, else copy value to an addressable value.
// Take addr if addressable, else copy value to an addressable value.
if rv.CanAddr() {
v = rv.Addr().Interface()
} else {

@@ -524,20 +562,20 @@ func (f *encFnInfo) kStruct(rv reflect.Value) {
}
newlen = 0
var kv stringRv
recur := e.h.RecursiveEmptyCheck
for _, si := range tisfi {
kv.r = si.field(rv, false)
if toMap {
if si.omitEmpty && isEmptyValue(kv.r) {
if si.omitEmpty && isEmptyValue(kv.r, recur, recur) {
continue
}
kv.v = si.encName
} else {
// use the zero value.
// if a reference or struct, set to nil (so you do not output too much)
if si.omitEmpty && isEmptyValue(kv.r) {
if si.omitEmpty && isEmptyValue(kv.r, recur, recur) {
switch kv.r.Kind() {
case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array,
reflect.Map, reflect.Slice:
case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice:
kv.r = reflect.Value{} //encode as nil
}
}

@@ -548,7 +586,7 @@ func (f *encFnInfo) kStruct(rv reflect.Value) {

// debugf(">>>> kStruct: newlen: %v", newlen)
// sep := !e.be
ee := e.e //don't dereference everytime
ee := e.e //don't dereference every time

if toMap {
ee.EncodeMapStart(newlen)

@@ -935,7 +973,7 @@ func newEncoder(h Handle) *Encoder {

// Reset the Encoder with a new output stream.
//
// This accomodates using the state of the Encoder,
// This accommodates using the state of the Encoder,
// where it has "cached" information about sub-engines.
func (e *Encoder) Reset(w io.Writer) {
ww, ok := w.(ioEncWriterWriter)

@@ -1042,20 +1080,6 @@ func (e *Encoder) MustEncode(v interface{}) {
e.w.atEndOfEncode()
}

// comment out these (Must)Write methods. They were only put there to support cbor.
// However, users already have access to the streams, and can write directly.
//
// // Write allows users write to the Encoder stream directly.
// func (e *Encoder) Write(bs []byte) (err error) {
// defer panicToErr(&err)
// e.w.writeb(bs)
// return
// }
// // MustWrite is like write, but panics if unable to Write.
// func (e *Encoder) MustWrite(bs []byte) {
// e.w.writeb(bs)
// }

func (e *Encoder) encode(iv interface{}) {
// if ics, ok := iv.(Selfer); ok {
// ics.CodecEncodeSelf(e)

@@ -1067,7 +1091,8 @@ func (e *Encoder) encode(iv interface{}) {
e.e.EncodeNil()
case Selfer:
v.CodecEncodeSelf(e)

case Raw:
e.raw(v)
case reflect.Value:
e.encodeValue(v, nil)

@@ -1252,6 +1277,8 @@ func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCo

if checkCodecSelfer && ti.cs {
fn.f = (*encFnInfo).selferMarshal
} else if rtid == rawTypId {
fn.f = (*encFnInfo).raw
} else if rtid == rawExtTypId {
fn.f = (*encFnInfo).rawExt
} else if e.e.IsBuiltinType(rtid) {

@@ -1356,6 +1383,18 @@ func (e *Encoder) asis(v []byte) {
}
}

func (e *Encoder) raw(vv Raw) {
v := []byte(vv)
if !e.h.Raw {
e.errorf("Raw values cannot be encoded: %v", v)
}
if e.as == nil {
e.w.writeb(v)
} else {
e.as.EncodeAsis(v)
}
}

func (e *Encoder) errorf(format string, params ...interface{}) {
err := fmt.Errorf(format, params...)
panic(err)
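A sketch of the new Raw support added above (the values are made up): a codec.Raw holds pre-encoded bytes that are spliced into the output verbatim, and the encoder refuses to do so unless EncodeOptions.Raw is explicitly switched on.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	h := &codec.JsonHandle{}
	h.Raw = true // without this, encoding a codec.Raw value errors out

	pre := codec.Raw(`{"already":"encoded"}`)

	var out []byte
	codec.NewEncoderBytes(&out, h).MustEncode(map[string]codec.Raw{"payload": pre})
	fmt.Println(string(out)) // expected: {"payload":{"already":"encoded"}}
}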
@@ -23,7 +23,7 @@ package codec
// Currently support
// - slice of all builtin types,
// - map of all builtin types to string or interface value
// - symetrical maps of all builtin types (e.g. str-str, uint8-uint8)
// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
// This should provide adequate "typical" implementations.
//
// Note that fast track decode functions must handle values for which an address cannot be obtained.

@@ -38,6 +38,8 @@ import (
"sort"
)

const fastpathEnabled = true

const fastpathCheckNilFalse = false // for reflect
const fastpathCheckNilTrue = true // for type switch

@@ -81,9 +83,6 @@ var fastpathAV fastpathA

// due to possible initialization loop error, make fastpath in an init()
func init() {
if !fastpathEnabled {
return
}
i := 0
fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) {
xrt := reflect.TypeOf(v)

@@ -373,9 +372,6 @@ func init() {

// -- -- fast path type switch
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {

case []interface{}:

@@ -1741,9 +1737,6 @@ func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
}

func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {

case []interface{}:

@@ -1829,9 +1822,6 @@ func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
}

func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {

case map[interface{}]interface{}:

@@ -15954,9 +15944,6 @@ func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, checkNil bool, e *Encoder) {

// -- -- fast path type switch
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {

case []interface{}:
@@ -23,7 +23,7 @@ package codec
// Currently support
// - slice of all builtin types,
// - map of all builtin types to string or interface value
// - symetrical maps of all builtin types (e.g. str-str, uint8-uint8)
// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
// This should provide adequate "typical" implementations.
//
// Note that fast track decode functions must handle values for which an address cannot be obtained.

@@ -38,6 +38,8 @@ import (
"sort"
)

const fastpathEnabled = true

const fastpathCheckNilFalse = false // for reflect
const fastpathCheckNilTrue = true // for type switch

@@ -81,9 +83,6 @@ var fastpathAV fastpathA

// due to possible initialization loop error, make fastpath in an init()
func init() {
if !fastpathEnabled {
return
}
i := 0
fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) {
xrt := reflect.TypeOf(v)

@@ -106,9 +105,6 @@ func init() {

// -- -- fast path type switch
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
case []{{ .Elem }}:{{else}}

@@ -126,9 +122,6 @@ func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
}

func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
case []{{ .Elem }}:

@@ -144,9 +137,6 @@ func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
}

func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
case map[{{ .MapKey }}]{{ .Elem }}:

@@ -286,9 +276,6 @@ func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Ele

// -- -- fast path type switch
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
case []{{ .Elem }}:{{else}}
@@ -4,6 +4,8 @@ package codec

import "reflect"

const fastpathEnabled = false

// The generated fast-path code is very large, and adds a few seconds to the build time.
// This causes test execution, execution of small tools which use codec, etc
// to take a long time.
@@ -1,4 +1,4 @@
// //+build ignore
/* // +build ignore */

// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

@@ -17,7 +17,7 @@ import (

// This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by
// library users. They WILL change continously and without notice.
// library users. They WILL change continuously and without notice.
//
// To help enforce this, we create an unexported type with exported members.
// The only way to get the type is via the one exported type that we control (somewhat).

@@ -83,6 +83,11 @@ func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
f.e.marshal(bs, fnerr, false, c_RAW)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncRaw(iv Raw) {
f.e.raw(iv)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
if _, ok := f.e.hh.(*BincHandle); ok {

@@ -191,6 +196,11 @@ func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
}
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecRaw() []byte {
return f.d.raw()
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
if _, ok := f.d.hh.(*BincHandle); ok {
@@ -1,4 +1,4 @@
// //+build ignore
/* // +build ignore */

// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

@@ -17,7 +17,7 @@ import (

// This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by
// library users. They WILL change continously and without notice.
// library users. They WILL change continuously and without notice.
//
// To help enforce this, we create an unexported type with exported members.
// The only way to get the type is via the one exported type that we control (somewhat).

@@ -79,6 +79,10 @@ func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
f.e.marshal(bs, fnerr, false, c_RAW)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncRaw(iv Raw) {
f.e.raw(iv)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
if _, ok := f.e.hh.(*BincHandle); ok {
return timeTypId

@@ -172,6 +176,10 @@ func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
}
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecRaw() []byte {
return f.d.raw()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
if _, ok := f.d.hh.(*BincHandle); ok {
return timeTypId
@@ -27,6 +27,7 @@ import (
// ---------------------------------------------------
// codecgen supports the full cycle of reflection-based codec:
// - RawExt
// - Raw
// - Builtins
// - Extensions
// - (Binary|Text|JSON)(Unm|M)arshal

@@ -77,7 +78,7 @@ import (
// codecgen will panic if the file was generated with an old version of the library in use.
//
// Note:
// It was a concious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil.
// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil.
// This way, there isn't a function call overhead just to see that we should not enter a block of code.

// GenVersion is the current version of codecgen.

@@ -164,15 +165,9 @@ type genRunner struct {
//
// Library users: *DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.*
func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) {
// trim out all types which already implement Selfer
typ2 := make([]reflect.Type, 0, len(typ))
for _, t := range typ {
if reflect.PtrTo(t).Implements(selferTyp) || t.Implements(selferTyp) {
continue
}
typ2 = append(typ2, t)
}
typ = typ2
// All types passed to this method do not have a codec.Selfer method implemented directly.
// codecgen already checks the AST and skips any types that define the codec.Selfer methods.
// Consequently, there's no need to check and trim them if they implement codec.Selfer

if len(typ) == 0 {
return

@@ -211,7 +206,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn
x.genRefPkgs(t)
}
if buildTags != "" {
x.line("//+build " + buildTags)
x.line("// +build " + buildTags)
x.line("")
}
x.line(`

@@ -701,13 +696,17 @@ func (x *genRunner) enc(varname string, t reflect.Type) {
}

// check if
// - type is RawExt
// - type is RawExt, Raw
// - the type implements (Text|JSON|Binary)(Unm|M)arshal
x.linef("%sm%s := z.EncBinary()", genTempVarPfx, mi)
x.linef("_ = %sm%s", genTempVarPfx, mi)
x.line("if false {") //start if block
defer func() { x.line("}") }() //end if block

if t == rawTyp {
x.linef("} else { z.EncRaw(%v)", varname)
return
}
if t == rawExtTyp {
x.linef("} else { r.EncodeRawExt(%v, e)", varname)
return

@@ -987,6 +986,14 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
}

func (x *genRunner) encListFallback(varname string, t reflect.Type) {
if t.AssignableTo(uint8SliceTyp) {
x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, []byte(%s))", x.xs, varname)
return
}
if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 {
x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, ([%v]byte(%s))[:])", x.xs, t.Len(), varname)
return
}
i := x.varsfx()
g := genTempVarPfx
x.line("r.EncodeArrayStart(len(" + varname + "))")

@@ -1123,7 +1130,7 @@ func (x *genRunner) dec(varname string, t reflect.Type) {
}

// check if
// - type is RawExt
// - type is Raw, RawExt
// - the type implements (Text|JSON|Binary)(Unm|M)arshal
mi := x.varsfx()
x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi)

@@ -1131,6 +1138,10 @@ func (x *genRunner) dec(varname string, t reflect.Type) {
x.line("if false {") //start if block
defer func() { x.line("}") }() //end if block

if t == rawTyp {
x.linef("} else { *%v = z.DecRaw()", varname)
return
}
if t == rawExtTyp {
x.linef("} else { r.DecodeExt(%v, 0, nil)", varname)
return

@@ -1306,6 +1317,14 @@ func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAs
}

func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) {
if t.AssignableTo(uint8SliceTyp) {
x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false, false)")
return
}
if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 {
x.linef("r.DecodeBytes( ((*[%s]byte)(%s))[:], false, true)", t.Len(), varname)
return
}
type tstruc struct {
TempVar string
Rand string

@@ -1421,7 +1440,7 @@ func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintpt
if si.i != -1 {
t2 = t.Field(int(si.i))
} else {
//we must accomodate anonymous fields, where the embedded field is a nil pointer in the value.
//we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
// t2 = t.FieldByIndex(si.is)
t2typ := t
varname3 := varname

@@ -1509,7 +1528,7 @@ func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid
if si.i != -1 {
t2 = t.Field(int(si.i))
} else {
//we must accomodate anonymous fields, where the embedded field is a nil pointer in the value.
//we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
// t2 = t.FieldByIndex(si.is)
t2typ := t
varname3 := varname

@@ -1931,7 +1950,7 @@ func genInternalInit() {
}
var gt genInternal

// For each slice or map type, there must be a (symetrical) Encode and Decode fast-path function
// For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function
for _, s := range types {
gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]})
if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already.
@@ -0,0 +1,10 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

// +build go1.7

package codec

func init() {
genCheckVendor = true
}
@@ -38,10 +38,6 @@ package codec
// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that
// it has to be binary, and we do not even try to read separators.
//
// The only codec that may suffer (slightly) is cbor, and only when decoding indefinite-length.
// It may suffer because we treat it like a text-based codec, and read separators.
// However, this read is a no-op and the cost is insignificant.
//
// Philosophy
// ------------
// On decode, this codec will update containers appropriately:

@@ -137,17 +133,6 @@ const (
// Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
recoverPanicToErr = true

// Fast path functions try to create a fast path encode or decode implementation
// for common maps and slices, by by-passing reflection altogether.
fastpathEnabled = true

// if checkStructForEmptyValue, check structs fields to see if an empty value.
// This could be an expensive call, so possibly disable it.
checkStructForEmptyValue = false

// if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue
derefForIsEmptyValue = false

// if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first.
// Only concern is that, if the slice already contained some garbage, we will decode into that garbage.
// The chances of this are slim, so leave this "optimization".

@@ -217,16 +202,21 @@ const (
containerArrayEnd
)

type rgetPoolT struct {
encNames [8]string
fNames [8]string
etypes [8]uintptr
sfis [8]*structFieldInfo
// sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo
type sfiIdx struct {
name string
index int
}

var rgetPool = sync.Pool{
New: func() interface{} { return new(rgetPoolT) },
}
// do not recurse if a containing type refers to an embedded type
// which refers back to its containing type (via a pointer).
// The second time this back-reference happens, break out,
// so as not to cause an infinite loop.
const rgetMaxRecursion = 2

// Anecdotally, we believe most types have <= 12 fields.
// Java's PMD rules set TooManyFields threshold to 15.
const rgetPoolTArrayLen = 12

type rgetT struct {
fNames []string

@@ -235,6 +225,18 @@ type rgetT struct {
sfis []*structFieldInfo
}

type rgetPoolT struct {
fNames [rgetPoolTArrayLen]string
encNames [rgetPoolTArrayLen]string
etypes [rgetPoolTArrayLen]uintptr
sfis [rgetPoolTArrayLen]*structFieldInfo
sfiidx [rgetPoolTArrayLen]sfiIdx
}

var rgetPool = sync.Pool{
New: func() interface{} { return new(rgetPoolT) },
}

type containerStateRecv interface {
sendContainerState(containerState)
}

@@ -260,6 +262,7 @@ var (
stringTyp = reflect.TypeOf("")
timeTyp = reflect.TypeOf(time.Time{})
rawExtTyp = reflect.TypeOf(RawExt{})
rawTyp = reflect.TypeOf(Raw{})
uint8SliceTyp = reflect.TypeOf([]uint8(nil))

mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()

@@ -277,6 +280,7 @@ var (

uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer()
rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer()
rawTypId = reflect.ValueOf(rawTyp).Pointer()
intfTypId = reflect.ValueOf(intfTyp).Pointer()
timeTypId = reflect.ValueOf(timeTyp).Pointer()
stringTypId = reflect.ValueOf(stringTyp).Pointer()

@@ -357,6 +361,11 @@ type Handle interface {
isBinary() bool
}

// Raw represents raw formatted bytes.
// We "blindly" store it during encode and store the raw bytes during decode.
// Note: it is dangerous during encode, so we may gate the behaviour behind an Encode flag which must be explicitly set.
type Raw []byte

// RawExt represents raw unprocessed extension data.
// Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag.
//

@@ -367,7 +376,7 @@ type RawExt struct {
// Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types
Data []byte
// Value represents the extension, if Data is nil.
// Value is used by codecs (e.g. cbor) which use the format to do custom serialization of the types.
// Value is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types.
Value interface{}
}

@@ -545,7 +554,7 @@ func (o *extHandle) AddExt(
func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
// o is a pointer, because we may need to initialize it
if rt.PkgPath() == "" || rt.Kind() == reflect.Interface {
err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T",
err = fmt.Errorf("codec.Handle.AddExt: Takes named type, not a pointer or interface: %T",
reflect.Zero(rt).Interface())
return
}

@@ -588,7 +597,8 @@ func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn {
}

type structFieldInfo struct {
encName string // encode name
encName string // encode name
fieldName string // field name

// only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set.

@@ -849,21 +859,18 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
}

if rt.Kind() == reflect.Struct {
var siInfo *structFieldInfo
var omitEmpty bool
if f, ok := rt.FieldByName(structInfoFieldName); ok {
siInfo = parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag))
siInfo := parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag))
ti.toArray = siInfo.toArray
omitEmpty = siInfo.omitEmpty
}
pi := rgetPool.Get()
pv := pi.(*rgetPoolT)
pv.etypes[0] = ti.baseId
vv := rgetT{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]}
x.rget(rt, rtid, nil, &vv, siInfo)
ti.sfip = make([]*structFieldInfo, len(vv.sfis))
ti.sfi = make([]*structFieldInfo, len(vv.sfis))
copy(ti.sfip, vv.sfis)
sort.Sort(sfiSortedByEncName(vv.sfis))
copy(ti.sfi, vv.sfis)
x.rget(rt, rtid, omitEmpty, nil, &vv)
ti.sfip, ti.sfi = rgetResolveSFI(vv.sfis, pv.sfiidx[:0])
rgetPool.Put(pi)
}
// sfi = sfip

@@ -877,26 +884,17 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
return
}

func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr,
indexstack []int, pv *rgetT, siInfo *structFieldInfo,
func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool,
indexstack []int, pv *rgetT,
) {
// This will read up the fields and store how to access the value.
// It uses the go language's rules for embedding, as below:
// - if a field has been seen while traversing, skip it
// - if an encName has been seen while traversing, skip it
// - if an embedded type has been seen, skip it
// Read up fields and store how to access the value.
//
// Also, per Go's rules, embedded fields must be analyzed AFTER all top-level fields.
// It uses go's rules for message selectors,
// which say that the field with the shallowest depth is selected.
//
// Note: we consciously use slices, not a map, to simulate a set.
// Typically, types have < 16 fields, and iteration using equals is faster than maps there

type anonField struct {
ft reflect.Type
idx int
}

var anonFields []anonField
// Typically, types have < 16 fields,
// and iteration using equals is faster than maps there

LOOP:
for j, jlen := 0, rt.NumField(); j < jlen; j++ {

@@ -908,7 +906,8 @@ LOOP:
continue LOOP
}

// if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) {
// if r1, _ := utf8.DecodeRuneInString(f.Name);
// r1 == utf8.RuneError || !unicode.IsUpper(r1) {
if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded
continue
}

@@ -917,7 +916,8 @@ LOOP:
continue
}
var si *structFieldInfo
// if anonymous and no struct tag (or it's blank), and a struct (or pointer to struct), inline it.
// if anonymous and no struct tag (or it's blank),
// and a struct (or pointer to struct), inline it.
if f.Anonymous && fkind != reflect.Interface {
doInline := stag == ""
if !doInline {

@@ -931,8 +931,31 @@ LOOP:
ft = ft.Elem()
}
if ft.Kind() == reflect.Struct {
// handle anonymous fields after handling all the non-anon fields
anonFields = append(anonFields, anonField{ft, j})
// if etypes contains this, don't call rget again (as fields are already seen here)
ftid := reflect.ValueOf(ft).Pointer()
// We cannot recurse forever, but we need to track other field depths.
// So - we break if we see a type twice (not the first time).
// This should be sufficient to handle an embedded type that refers to its
// owning type, which then refers to its embedded type.
processIt := true
numk := 0
for _, k := range pv.etypes {
if k == ftid {
numk++
if numk == rgetMaxRecursion {
processIt = false
break
}
}
}
if processIt {
pv.etypes = append(pv.etypes, ftid)
indexstack2 := make([]int, len(indexstack)+1)
copy(indexstack2, indexstack)
indexstack2[len(indexstack)] = j
// indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
x.rget(ft, ftid, omitEmpty, indexstack2, pv)
}
continue
}
}

@@ -947,11 +970,6 @@ LOOP:
panic(noFieldNameToStructFieldInfoErr)
}

for _, k := range pv.fNames {
if k == f.Name {
continue LOOP
}
}
pv.fNames = append(pv.fNames, f.Name)

if si == nil {

@@ -959,12 +977,8 @@ LOOP:
} else if si.encName == "" {
si.encName = f.Name
}
si.fieldName = f.Name

for _, k := range pv.encNames {
if k == si.encName {
continue LOOP
}
}
pv.encNames = append(pv.encNames, si.encName)

// si.ikind = int(f.Type.Kind())

@@ -978,32 +992,60 @@ LOOP:
// si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
}

if siInfo != nil {
if siInfo.omitEmpty {
si.omitEmpty = true
}
if omitEmpty {
si.omitEmpty = true
}
pv.sfis = append(pv.sfis, si)
}
}

// now handle anonymous fields
LOOP2:
for _, af := range anonFields {
// if etypes contains this, then do not call rget again (as the fields are already seen here)
ftid := reflect.ValueOf(af.ft).Pointer()
for _, k := range pv.etypes {
if k == ftid {
continue LOOP2
// resolves the struct field info got from a call to rget.
// Returns a trimmed, unsorted and sorted []*structFieldInfo.
func rgetResolveSFI(x []*structFieldInfo, pv []sfiIdx) (y, z []*structFieldInfo) {
var n int
for i, v := range x {
xn := v.encName //TODO: fieldName or encName? use encName for now.
var found bool
for j, k := range pv {
if k.name == xn {
// one of them must be reset to nil, and the index updated appropriately to the other one
if len(v.is) == len(x[k.index].is) {
} else if len(v.is) < len(x[k.index].is) {
pv[j].index = i
if x[k.index] != nil {
x[k.index] = nil
n++
}
} else {
if x[i] != nil {
x[i] = nil
n++
}
}
found = true
break
}
}
pv.etypes = append(pv.etypes, ftid)

indexstack2 := make([]int, len(indexstack)+1)
copy(indexstack2, indexstack)
indexstack2[len(indexstack)] = af.idx
// indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
x.rget(af.ft, ftid, indexstack2, pv, siInfo)
if !found {
pv = append(pv, sfiIdx{xn, i})
}
}

// remove all the nils
y = make([]*structFieldInfo, len(x)-n)
n = 0
for _, v := range x {
if v == nil {
continue
}
y[n] = v
n++
}

z = make([]*structFieldInfo, len(y))
copy(z, y)
sort.Sort(sfiSortedByEncName(z))
return
}

func panicToErr(err *error) {
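An illustrative sketch (not taken from the diff) of the resolution rule rgetResolveSFI implements: when an outer struct and an embedded struct both expose the same encoded name, the field at the shallowest depth wins, mirroring Go's own selector rules.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type Inner struct {
	Name string `codec:"name"`
}

type Outer struct {
	Inner
	Name string `codec:"name"` // shadows Inner.Name; this is the field that gets encoded
}

func main() {
	var b []byte
	codec.NewEncoderBytes(&b, &codec.JsonHandle{}).MustEncode(Outer{
		Inner: Inner{Name: "inner"},
		Name:  "outer",
	})
	fmt.Println(string(b)) // expected: {"name":"outer"}
}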
@@ -70,8 +70,8 @@ func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
return false
}

func isEmptyValue(v reflect.Value) bool {
return hIsEmptyValue(v, derefForIsEmptyValue, checkStructForEmptyValue)
func isEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
return hIsEmptyValue(v, deref, checkStruct)
}

func pruneSignExt(v []byte, pos bool) (n int) {
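A sketch of how the reworked isEmptyValue is meant to interact with the RecursiveEmptyCheck encode option introduced earlier (the struct names are invented): with the recursive check on, a non-nil pointer to an all-zero struct counts as empty for omitempty purposes.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type Meta struct {
	Tag string `codec:"tag"`
}

type Job struct {
	Name string `codec:"name"`
	Meta *Meta  `codec:"meta,omitempty"`
}

func main() {
	h := &codec.JsonHandle{}
	h.RecursiveEmptyCheck = true

	var b []byte
	codec.NewEncoderBytes(&b, h).MustEncode(Job{Name: "x", Meta: &Meta{}})
	fmt.Println(string(b)) // expected with the recursive check: {"name":"x"}
}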
@@ -1,4 +1,4 @@
//+build !unsafe
// +build !unsafe

// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

@@ -1,4 +1,4 @@
//+build unsafe
// +build unsafe

// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
@@ -197,12 +197,20 @@ func (e *jsonEncDriver) EncodeBool(b bool) {
}

func (e *jsonEncDriver) EncodeFloat32(f float32) {
e.w.writeb(strconv.AppendFloat(e.b[:0], float64(f), 'E', -1, 32))
e.encodeFloat(float64(f), 32)
}

func (e *jsonEncDriver) EncodeFloat64(f float64) {
// e.w.writestr(strconv.FormatFloat(f, 'E', -1, 64))
e.w.writeb(strconv.AppendFloat(e.b[:0], f, 'E', -1, 64))
e.encodeFloat(f, 64)
}

func (e *jsonEncDriver) encodeFloat(f float64, numbits int) {
x := strconv.AppendFloat(e.b[:0], f, 'G', -1, numbits)
e.w.writeb(x)
if bytes.IndexByte(x, 'E') == -1 && bytes.IndexByte(x, '.') == -1 {
e.w.writen2('.', '0')
}
}

func (e *jsonEncDriver) EncodeInt(v int64) {

@@ -302,6 +310,8 @@ func (e *jsonEncDriver) quoteStr(s string) {
w.writen1('"')
start := 0
for i := 0; i < len(s); {
// encode all bytes < 0x20 (except \r, \n).
// also encode < > & to prevent security holes when served to some browsers.
if b := s[i]; b < utf8.RuneSelf {
if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
i++

@@ -323,9 +333,14 @@ func (e *jsonEncDriver) quoteStr(s string) {
w.writen2('\\', 'f')
case '\t':
w.writen2('\\', 't')
case '<', '>', '&':
if e.h.HTMLCharsAsIs {
w.writen1(b)
} else {
w.writestr(`\u00`)
w.writen2(hex[b>>4], hex[b&0xF])
}
default:
// encode all bytes < 0x20 (except \r, \n).
// also encode < > & to prevent security holes when served to some browsers.
w.writestr(`\u00`)
w.writen2(hex[b>>4], hex[b&0xF])
}

@@ -344,7 +359,7 @@ func (e *jsonEncDriver) quoteStr(s string) {
continue
}
// U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR.
// Both technically valid JSON, but bomb on JSONP, so fix here.
// Both technically valid JSON, but bomb on JSONP, so fix here unconditionally.
if c == '\u2028' || c == '\u2029' {
if start < i {
w.writestr(s[start:i])

@@ -923,6 +938,11 @@ func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [
if isstring {
return d.bs
}
// if appendStringAsBytes returned a zero-len slice, then treat as nil.
// This should only happen for null, and "".
if len(d.bs) == 0 {
return nil
}
bs0 := d.bs
slen := base64.StdEncoding.DecodedLen(len(bs0))
if slen <= cap(bs) {

@@ -960,6 +980,14 @@ func (d *jsonDecDriver) appendStringAsBytes() {
}
d.tok = b
}

// handle null as a string
if d.tok == 'n' {
d.readStrIdx(10, 13) // ull
d.bs = d.bs[:0]
return
}

if d.tok != '"' {
d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok)
}

@@ -1152,6 +1180,12 @@ type JsonHandle struct {
// containing the exact integer representation as a decimal.
// - else encode all integers as a json number (default)
IntegerAsString uint8

// HTMLCharsAsIs controls how to encode some special characters to html: < > &
//
// By default, we encode them as \uXXX
// to prevent security holes when served from some browsers.
HTMLCharsAsIs bool
}

func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
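A sketch of the float formatting change above (expected output is inferred from the code, not verified): floats now use the 'G' format, with ".0" appended when the result carries neither an exponent nor a decimal point, so the value still reads back as a float.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	h := &codec.JsonHandle{}
	for _, f := range []float64{3.0, 3.14, 1e21} {
		var b []byte
		codec.NewEncoderBytes(&b, h).MustEncode(f)
		fmt.Println(string(b))
	}
	// expected along the lines of: 3.0, 3.14, 1E+21
}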
@@ -549,7 +549,7 @@ func (d *msgpackDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOu
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
}

func (d *msgpackDecDriver) DecodeString() (s string) {

@@ -561,6 +561,13 @@ func (d *msgpackDecDriver) readNextBd() {
d.bdRead = true
}

func (d *msgpackDecDriver) uncacheRead() {
if d.bdRead {
d.r.unreadn1()
d.bdRead = false
}
}

func (d *msgpackDecDriver) ContainerType() (vt valueType) {
bd := d.bd
if bd == mpNil {
@@ -25,7 +25,7 @@ type Rpc interface {
}

// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer
// used by the rpc connection. It accomodates use-cases where the connection
// used by the rpc connection. It accommodates use-cases where the connection
// should be used by rpc and non-rpc functions, e.g. streaming a file after
// sending an rpc response.
type RpcCodecBuffered interface {
@@ -166,6 +166,13 @@ func (d *simpleDecDriver) readNextBd() {
d.bdRead = true
}

func (d *simpleDecDriver) uncacheRead() {
if d.bdRead {
d.r.unreadn1()
d.bdRead = false
}
}

func (d *simpleDecDriver) ContainerType() (vt valueType) {
if d.bd == simpleVdNil {
return valueTypeNil

@@ -340,7 +347,7 @@ func (d *simpleDecDriver) decLen() int {
}
return int(ui)
}
d.d.errorf("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8)
d.d.errorf("decLen: Cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
return -1
}

@@ -365,7 +372,7 @@ func (d *simpleDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
}

func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {

@@ -474,7 +481,7 @@ func (d *simpleDecDriver) DecodeNaked() {
// SimpleHandle is a Handle for a very simple encoding format.
//
// simple is a simplistic codec similar to binc, but not as compact.
// - Encoding of a value is always preceeded by the descriptor byte (bd)
// - Encoding of a value is always preceded by the descriptor byte (bd)
// - True, false, nil are encoded fully in 1 byte (the descriptor)
// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
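The errorf fix above is the usual fmt pitfall; a tiny standalone illustration: a literal percent sign inside a format string has to be written as %%, otherwise fmt tries to parse "%8 " as a verb.

package main

import "fmt"

func main() {
	bd := byte(0x2a)
	fmt.Printf("decLen: Cannot read length: bd%%8 must be in range 0..4. Got: %d\n", bd%8)
	// prints: decLen: Cannot read length: bd%8 must be in range 0..4. Got: 2
}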
@@ -34,7 +34,7 @@ def get_test_data_list():
True,
u"null",
None,
u"someday",
u"some&day>some<day",
1328176922000002000,
u"",
-2206187877999998000,
@@ -17,7 +17,8 @@ _run() {
zargs=""
local OPTIND
OPTIND=1
while getopts "_xurtcinsvgzmefdl" flag
# "_xurtcinsvgzmefdl" === "_cdefgilmnrtsuvxz"
while getopts "_cdefgilmnrtsuvwxz" flag
do
case "x$flag" in
'xr') ;;

@@ -29,6 +30,7 @@ _run() {
'xz') zargs="$zargs -tr" ;;
'xm') zargs="$zargs -tm" ;;
'xl') zargs="$zargs -tl" ;;
'xw') zargs="$zargs -tx=10" ;;
*) ;;
esac
done

@@ -37,7 +39,7 @@ _run() {
# echo ">>>>>>> TAGS: $ztags"

OPTIND=1
while getopts "_xurtcinsvgzmefdl" flag
while getopts "_cdefgilmnrtsuvwxz" flag
do
case "x$flag" in
'xt') printf ">>>>>>> REGULAR : "; go test "-tags=$ztags" $zargs ; sleep 2 ;;

@@ -59,10 +61,11 @@ _run() {
}

# echo ">>>>>>> RUNNING VARIATIONS OF TESTS"
if [[ "x$@" = "x" ]]; then
if [[ "x$@" = "x" || "x$@" = "x-A" ]]; then
# All: r, x, g, gu
_run "-_tcinsed_ml" # regular
_run "-_tcinsed_ml_z" # regular with reset
_run "-w_tcinsed_ml" # regular with max init len
_run "-_tcinsed_ml_f" # regular with no fastpath (notfastpath)
_run "-x_tcinsed_ml" # external
_run "-gx_tcinsed_ml" # codecgen: requires external

@@ -75,6 +78,30 @@ elif [[ "x$@" = "x-F" ]]; then
# regular with notfastpath
_run "-_tcinsed_ml_f" # regular
_run "-_tcinsed_ml_zf" # regular with reset
elif [[ "x$@" = "x-C" ]]; then
# codecgen
_run "-gx_tcinsed_ml" # codecgen: requires external
_run "-gxu_tcinsed_ml" # codecgen + unsafe
_run "-gxuw_tcinsed_ml" # codecgen + unsafe + maxinitlen
elif [[ "x$@" = "x-X" ]]; then
# external
_run "-x_tcinsed_ml" # external
elif [[ "x$@" = "x-h" || "x$@" = "x-?" ]]; then
cat <<EOF
Usage: tests.sh [options...]
-A run through all tests (regular, external, codecgen)
-Z regular tests only
-F regular tests only (without fastpath, so they run quickly)
-C codecgen only
-X external only
-h show help (usage)
-? same as -h
(no options)
same as -A
(unrecognized options)
just pass on the options from the command line
EOF
else
# e.g. ./tests.sh "-w_tcinsed_ml"
_run "$@"
fi
@@ -1125,16 +1125,16 @@
"revisionTime": "2016-09-30T03:27:40Z"
},
{
"checksumSHA1": "tHm2SMtuRfrwh6NnnymsuoJ6e0Q=",
"checksumSHA1": "CoxdaTYdPZNJXr8mJfLxye428N0=",
"path": "github.com/ugorji/go/codec",
"revision": "a396ed22fc049df733440d90efe17475e3929ccb",
"revisionTime": "2016-03-28T06:07:40Z"
"revision": "c88ee250d0221a57af388746f5cf03768c21d6e2",
"revisionTime": "2017-02-15T20:11:44Z"
},
{
"checksumSHA1": "CjQp8RP9SgJGltsq01UVaU4UYK0=",
"path": "github.com/ugorji/go/codec/codecgen",
"revision": "a396ed22fc049df733440d90efe17475e3929ccb",
"revisionTime": "2016-03-28T06:07:40Z"
"revision": "c88ee250d0221a57af388746f5cf03768c21d6e2",
"revisionTime": "2017-02-15T20:11:44Z"
},
{
"checksumSHA1": "9jjO5GjLa0XF/nfWihF02RoH4qc=",