Merge pull request #1764 from hashicorp/f-query-templates

Adds support for prepared query templates.
James Phillips 2016-03-07 13:34:58 -08:00
commit b09036efc0
216 changed files with 6274 additions and 20624 deletions
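For a quick sense of what this enables, here is a rough sketch of a template definition using the new fields (the name, regexp, service, and datacenters below are illustrative; the field names and ${...} variables come from the structs and tests in this diff):

query := &structs.PreparedQuery{
	// Any lookup whose name starts with "geo-db" matches this template;
	// an empty Name would make it a catch-all.
	Name: "geo-db",
	Template: structs.QueryTemplateOptions{
		Type:   structs.QueryTemplateTypeNamePrefixMatch,
		Regexp: "^geo-db-(.*?)-([0-9]+)$",
	},
	Service: structs.ServiceQuery{
		// HIL interpolation supports ${name.full}, ${name.prefix},
		// ${name.suffix}, and ${match(N)} for regexp capture groups.
		Service: "mysql-${match(1)}",
		Failover: structs.QueryDatacenterOptions{
			Datacenters: []string{"dc1", "dc2"},
		},
	},
}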

Godeps/Godeps.json generated
View File

@ -1,9 +1,6 @@
{
"ImportPath": "github.com/hashicorp/consul",
"GoVersion": "go1.5",
"Packages": [
"./..."
],
"GoVersion": "go1.6",
"Deps": [
{
"ImportPath": "github.com/DataDog/datadog-go/statsd",
@ -18,12 +15,12 @@
"Rev": "345426c77237ece5dab0e1605c3e4b35c3f54757"
},
{
"ImportPath": "github.com/armon/go-radix",
"Rev": "4239b77079c7b5d1243b7b4736304ce8ddb6f0f2"
"ImportPath": "github.com/armon/go-metrics/datadog",
"Rev": "345426c77237ece5dab0e1605c3e4b35c3f54757"
},
{
"ImportPath": "github.com/beorn7/perks/quantile",
"Rev": "b965b613227fddccbfffe13eae360ed3fa822f8d"
"ImportPath": "github.com/armon/go-radix",
"Rev": "4239b77079c7b5d1243b7b4736304ce8ddb6f0f2"
},
{
"ImportPath": "github.com/bgentry/speakeasy",
@ -43,8 +40,72 @@
"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
},
{
"ImportPath": "github.com/golang/protobuf/proto",
"Rev": "0dfe8f37844c14cb32c7247925270e0f7ba90973"
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus",
"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts",
"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive",
"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils",
"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir",
"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools",
"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils",
"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath",
"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools",
"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise",
"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy",
"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system",
"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/go-units",
"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp",
"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user",
"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/golang.org/x/net/context",
"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix",
"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
},
{
"ImportPath": "github.com/hashicorp/errwrap",
@ -60,11 +121,11 @@
},
{
"ImportPath": "github.com/hashicorp/go-immutable-radix",
"Rev": "12e90058b2897552deea141eff51bb7a07a09e63"
"Rev": "8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990"
},
{
"ImportPath": "github.com/hashicorp/go-memdb",
"Rev": "31949d523ade8a236956c6f1761e9dcf902d1638"
"Rev": "98f52f52d7a476958fa9da671354d270c50661a7"
},
{
"ImportPath": "github.com/hashicorp/go-msgpack/codec",
@ -90,10 +151,54 @@
"ImportPath": "github.com/hashicorp/golang-lru",
"Rev": "5c7531c003d8bf158b0fe5063649a2f41a822146"
},
{
"ImportPath": "github.com/hashicorp/golang-lru/simplelru",
"Rev": "5c7531c003d8bf158b0fe5063649a2f41a822146"
},
{
"ImportPath": "github.com/hashicorp/hcl",
"Rev": "578dd9746824a54637686b51a41bad457a56bcef"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/ast",
"Rev": "578dd9746824a54637686b51a41bad457a56bcef"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/parser",
"Rev": "578dd9746824a54637686b51a41bad457a56bcef"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/scanner",
"Rev": "578dd9746824a54637686b51a41bad457a56bcef"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/strconv",
"Rev": "578dd9746824a54637686b51a41bad457a56bcef"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/token",
"Rev": "578dd9746824a54637686b51a41bad457a56bcef"
},
{
"ImportPath": "github.com/hashicorp/hcl/json/parser",
"Rev": "578dd9746824a54637686b51a41bad457a56bcef"
},
{
"ImportPath": "github.com/hashicorp/hcl/json/scanner",
"Rev": "578dd9746824a54637686b51a41bad457a56bcef"
},
{
"ImportPath": "github.com/hashicorp/hcl/json/token",
"Rev": "578dd9746824a54637686b51a41bad457a56bcef"
},
{
"ImportPath": "github.com/hashicorp/hil",
"Rev": "0457360d54ca4d081a769eaa1617e0462153fd70"
},
{
"ImportPath": "github.com/hashicorp/hil/ast",
"Rev": "0457360d54ca4d081a769eaa1617e0462153fd70"
},
{
"ImportPath": "github.com/hashicorp/logutils",
"Rev": "0dc08b1671f34c4250ce212759ebd880f743d883"
@ -137,12 +242,24 @@
"Rev": "f693c7e88ba316d1a0ae3e205e22a01aa3ec2848"
},
{
"ImportPath": "github.com/mattn/go-isatty",
"Rev": "56b76bdf51f7708750eac80fa38b952bb9f32639"
"ImportPath": "github.com/inconshreveable/muxado/proto",
"Rev": "f693c7e88ba316d1a0ae3e205e22a01aa3ec2848"
},
{
"ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
"Rev": "d0c3fe89de86839aecf2e0579c40ba3bb336a453"
"ImportPath": "github.com/inconshreveable/muxado/proto/buffer",
"Rev": "f693c7e88ba316d1a0ae3e205e22a01aa3ec2848"
},
{
"ImportPath": "github.com/inconshreveable/muxado/proto/ext",
"Rev": "f693c7e88ba316d1a0ae3e205e22a01aa3ec2848"
},
{
"ImportPath": "github.com/inconshreveable/muxado/proto/frame",
"Rev": "f693c7e88ba316d1a0ae3e205e22a01aa3ec2848"
},
{
"ImportPath": "github.com/mattn/go-isatty",
"Rev": "56b76bdf51f7708750eac80fa38b952bb9f32639"
},
{
"ImportPath": "github.com/miekg/dns",
@ -152,35 +269,17 @@
"ImportPath": "github.com/mitchellh/cli",
"Rev": "cb6853d606ea4a12a15ac83cc43503df99fd28fb"
},
{
"ImportPath": "github.com/mitchellh/copystructure",
"Rev": "6fc66267e9da7d155a9d3bd489e00dad02666dc6"
},
{
"ImportPath": "github.com/mitchellh/mapstructure",
"Rev": "281073eb9eb092240d33ef253c404f1cca550309"
},
{
"ImportPath": "github.com/prometheus/client_golang/prometheus",
"Comment": "0.7.0-70-g15006a7",
"Rev": "15006a7ed88e73201c4e6142a2e66b54ae5fdf00"
},
{
"ImportPath": "github.com/prometheus/client_model/go",
"Comment": "model-0.0.2-12-gfa8ad6f",
"Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6"
},
{
"ImportPath": "github.com/prometheus/common/expfmt",
"Rev": "23070236b1ebff452f494ae831569545c2b61d26"
},
{
"ImportPath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
"Rev": "23070236b1ebff452f494ae831569545c2b61d26"
},
{
"ImportPath": "github.com/prometheus/common/model",
"Rev": "23070236b1ebff452f494ae831569545c2b61d26"
},
{
"ImportPath": "github.com/prometheus/procfs",
"Rev": "406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8"
"ImportPath": "github.com/mitchellh/reflectwalk",
"Rev": "eecf4c70c626c7cfbb95c90195bc34d386c74ac6"
},
{
"ImportPath": "github.com/ryanuber/columnize",

View File

@ -13,6 +13,7 @@ import (
const (
preparedQueryEndpoint = "PreparedQuery"
preparedQueryExecuteSuffix = "/execute"
preparedQueryExplainSuffix = "/explain"
)
// preparedQueryCreateResponse is used to wrap the query ID.
@ -124,6 +125,36 @@ func (s *HTTPServer) preparedQueryExecute(id string, resp http.ResponseWriter, r
return reply, nil
}
// preparedQueryExplain shows which query a name resolves to, the fully
// interpolated template (if it's a template), as well as additional info
// about the execution of a query.
func (s *HTTPServer) preparedQueryExplain(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
args := structs.PreparedQueryExecuteRequest{
QueryIDOrName: id,
}
s.parseSource(req, &args.Source)
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
return nil, nil
}
if err := parseLimit(req, &args.Limit); err != nil {
return nil, fmt.Errorf("Bad limit: %s", err)
}
var reply structs.PreparedQueryExplainResponse
endpoint := s.agent.getEndpoint(preparedQueryEndpoint)
if err := s.agent.RPC(endpoint+".Explain", &args, &reply); err != nil {
// We have to check the string since the RPC sheds
// the specific error type.
if err.Error() == consul.ErrQueryNotFound.Error() {
resp.WriteHeader(404)
resp.Write([]byte(err.Error()))
return nil, nil
}
return nil, err
}
return reply, nil
}
// preparedQueryGet returns a single prepared query.
func (s *HTTPServer) preparedQueryGet(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) {
args := structs.PreparedQuerySpecificRequest{
@ -197,16 +228,22 @@ func (s *HTTPServer) preparedQueryDelete(id string, resp http.ResponseWriter, re
// particular query.
func (s *HTTPServer) PreparedQuerySpecific(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
id := strings.TrimPrefix(req.URL.Path, "/v1/query/")
execute := false
execute, explain := false, false
if strings.HasSuffix(id, preparedQueryExecuteSuffix) {
execute = true
id = strings.TrimSuffix(id, preparedQueryExecuteSuffix)
} else if strings.HasSuffix(id, preparedQueryExplainSuffix) {
explain = true
id = strings.TrimSuffix(id, preparedQueryExplainSuffix)
}
switch req.Method {
case "GET":
if execute {
return s.preparedQueryExecute(id, resp, req)
} else if explain {
return s.preparedQueryExplain(id, resp, req)
} else {
return s.preparedQueryGet(id, resp, req)
}
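For context, a minimal client-side sketch of hitting the new explain route (the agent address and query name are placeholders; the response shape follows PreparedQueryExplainResponse, so the rendered definition comes back under "Query"):

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Ask a local agent which prepared query (or rendered template) the
	// name "prod-redis" would resolve to.
	resp, err := http.Get("http://127.0.0.1:8500/v1/query/prod-redis/explain")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The handler above writes a 404 when no query or template matches.
	if resp.StatusCode == http.StatusNotFound {
		log.Fatal("no prepared query matches that name")
	}

	var explain map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&explain); err != nil {
		log.Fatal(err)
	}
	// "Query" holds the fully interpolated definition, not execution results.
	fmt.Println(explain["Query"])
}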

View File

@ -25,6 +25,7 @@ type MockPreparedQuery struct {
getFn func(*structs.PreparedQuerySpecificRequest, *structs.IndexedPreparedQueries) error
listFn func(*structs.DCSpecificRequest, *structs.IndexedPreparedQueries) error
executeFn func(*structs.PreparedQueryExecuteRequest, *structs.PreparedQueryExecuteResponse) error
explainFn func(*structs.PreparedQueryExecuteRequest, *structs.PreparedQueryExplainResponse) error
}
func (m *MockPreparedQuery) Apply(args *structs.PreparedQueryRequest,
@ -59,6 +60,14 @@ func (m *MockPreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest,
return fmt.Errorf("should not have called Execute")
}
func (m *MockPreparedQuery) Explain(args *structs.PreparedQueryExecuteRequest,
reply *structs.PreparedQueryExplainResponse) error {
if m.explainFn != nil {
return m.explainFn(args, reply)
}
return fmt.Errorf("should not have called Explain")
}
func TestPreparedQuery_Create(t *testing.T) {
httpTest(t, func(srv *HTTPServer) {
m := MockPreparedQuery{}
@ -332,6 +341,77 @@ func TestPreparedQuery_Execute(t *testing.T) {
})
}
func TestPreparedQuery_Explain(t *testing.T) {
httpTest(t, func(srv *HTTPServer) {
m := MockPreparedQuery{}
if err := srv.agent.InjectEndpoint("PreparedQuery", &m); err != nil {
t.Fatalf("err: %v", err)
}
m.explainFn = func(args *structs.PreparedQueryExecuteRequest, reply *structs.PreparedQueryExplainResponse) error {
expected := &structs.PreparedQueryExecuteRequest{
Datacenter: "dc1",
QueryIDOrName: "my-id",
Limit: 5,
Source: structs.QuerySource{
Datacenter: "dc1",
Node: "my-node",
},
QueryOptions: structs.QueryOptions{
Token: "my-token",
RequireConsistent: true,
},
}
if !reflect.DeepEqual(args, expected) {
t.Fatalf("bad: %v", args)
}
// Just set something so we can tell this is returned.
reply.Query.Name = "hello"
return nil
}
body := bytes.NewBuffer(nil)
req, err := http.NewRequest("GET", "/v1/query/my-id/explain?token=my-token&consistent=true&near=my-node&limit=5", body)
if err != nil {
t.Fatalf("err: %v", err)
}
resp := httptest.NewRecorder()
obj, err := srv.PreparedQuerySpecific(resp, req)
if err != nil {
t.Fatalf("err: %v", err)
}
if resp.Code != 200 {
t.Fatalf("bad code: %d", resp.Code)
}
r, ok := obj.(structs.PreparedQueryExplainResponse)
if !ok {
t.Fatalf("unexpected: %T", obj)
}
if r.Query.Name != "hello" {
t.Fatalf("bad: %v", r)
}
})
httpTest(t, func(srv *HTTPServer) {
body := bytes.NewBuffer(nil)
req, err := http.NewRequest("GET", "/v1/query/not-there/explain", body)
if err != nil {
t.Fatalf("err: %v", err)
}
resp := httptest.NewRecorder()
_, err = srv.PreparedQuerySpecific(resp, req)
if err != nil {
t.Fatalf("err: %v", err)
}
if resp.Code != 404 {
t.Fatalf("bad code: %d", resp.Code)
}
})
}
func TestPreparedQuery_Get(t *testing.T) {
httpTest(t, func(srv *HTTPServer) {
m := MockPreparedQuery{}

View File

@ -610,9 +610,9 @@ func (s *consulSnapshot) persistPreparedQueries(sink raft.SnapshotSink,
return err
}
for query := queries.Next(); query != nil; query = queries.Next() {
for _, query := range queries {
sink.Write([]byte{byte(structs.PreparedQueryRequestType)})
if err := encoder.Encode(query.(*structs.PreparedQuery)); err != nil {
if err := encoder.Encode(query); err != nil {
return err
}
}

View File

@ -0,0 +1,186 @@
package prepared_query
import (
"fmt"
"reflect"
"regexp"
"strings"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/hil"
"github.com/hashicorp/hil/ast"
"github.com/mitchellh/copystructure"
)
// IsTemplate returns true if the given query is a template.
func IsTemplate(query *structs.PreparedQuery) bool {
return query.Template.Type != ""
}
// CompiledTemplate is an opaque object that can be used later to render a
// prepared query template.
type CompiledTemplate struct {
// query keeps a copy of the original query for rendering.
query *structs.PreparedQuery
// trees contains a map with paths to string fields in a structure to
// parsed syntax trees, suitable for later evaluation.
trees map[string]ast.Node
// re is the compiled regexp, if they supplied one (this can be nil).
re *regexp.Regexp
}
// Compile validates a prepared query template and returns an opaque compiled
// object that can be used later to render the template.
func Compile(query *structs.PreparedQuery) (*CompiledTemplate, error) {
// Make sure it's a type we understand.
if query.Template.Type != structs.QueryTemplateTypeNamePrefixMatch {
return nil, fmt.Errorf("Bad Template.Type '%s'", query.Template.Type)
}
// Start compile.
ct := &CompiledTemplate{
trees: make(map[string]ast.Node),
}
// Make a copy of the query to use as the basis for rendering later.
dup, err := copystructure.Copy(query)
if err != nil {
return nil, err
}
var ok bool
ct.query, ok = dup.(*structs.PreparedQuery)
if !ok {
return nil, fmt.Errorf("Failed to copy query")
}
// Walk over all the string fields in the Service sub-structure and
// parse them as HIL.
parse := func(path string, v reflect.Value) error {
tree, err := hil.Parse(v.String())
if err != nil {
return fmt.Errorf("Bad format '%s' in Service%s: %s", v.String(), path, err)
}
ct.trees[path] = tree
return nil
}
if err := walk(&ct.query.Service, parse); err != nil {
return nil, err
}
// If they supplied a regexp then compile it.
if ct.query.Template.Regexp != "" {
var err error
ct.re, err = regexp.Compile(ct.query.Template.Regexp)
if err != nil {
return nil, fmt.Errorf("Bad Regexp: %s", err)
}
}
// Finally do a test render with the supplied name prefix. This will
// help catch errors before run time, and this is the most minimal
// prefix it will be expected to run with. The results might not make
// sense or yield a valid service to look up, but it should render
// without any errors.
if _, err = ct.Render(ct.query.Name); err != nil {
return nil, err
}
return ct, nil
}
// Render takes a compiled template and renders it for the given name. For
// example, if the user looks up foobar.query.consul via DNS then we will call
// this function with "foobar" on the compiled template.
func (ct *CompiledTemplate) Render(name string) (*structs.PreparedQuery, error) {
// Make it "safe" to render a default structure.
if ct == nil {
return nil, fmt.Errorf("Cannot render an uncompiled template")
}
// Start with a fresh, detached copy of the original so we don't disturb
// the prototype.
dup, err := copystructure.Copy(ct.query)
if err != nil {
return nil, err
}
query, ok := dup.(*structs.PreparedQuery)
if !ok {
return nil, fmt.Errorf("Failed to copy query")
}
// Run the regular expression, if provided. We execute on a copy here
// to avoid internal lock contention because we expect this to be called
// from multiple goroutines.
var matches []string
if ct.re != nil {
re := ct.re.Copy()
matches = re.FindStringSubmatch(name)
}
// Create a safe match function that can't fail at run time. It will
// return an empty string for any invalid input.
match := ast.Function{
ArgTypes: []ast.Type{ast.TypeInt},
ReturnType: ast.TypeString,
Variadic: false,
Callback: func(inputs []interface{}) (interface{}, error) {
i, ok := inputs[0].(int)
if ok && i >= 0 && i < len(matches) {
return matches[i], nil
} else {
return "", nil
}
},
}
// Build up the HIL evaluation context.
config := &hil.EvalConfig{
GlobalScope: &ast.BasicScope{
VarMap: map[string]ast.Variable{
"name.full": ast.Variable{
Type: ast.TypeString,
Value: name,
},
"name.prefix": ast.Variable{
Type: ast.TypeString,
Value: query.Name,
},
"name.suffix": ast.Variable{
Type: ast.TypeString,
Value: strings.TrimPrefix(name, query.Name),
},
},
FuncMap: map[string]ast.Function{
"match": match,
},
},
}
// Run through the Service sub-structure and evaluate all the strings
// as HIL.
eval := func(path string, v reflect.Value) error {
tree, ok := ct.trees[path]
if !ok {
return nil
}
hv, ht, err := hil.Eval(tree, config)
if err != nil {
return fmt.Errorf("Bad evaluation for '%s' in Service%s: %s", v.String(), path, err)
}
if ht != ast.TypeString {
return fmt.Errorf("Expected Service%s field to be a string, got %s", path, ht)
}
v.SetString(hv.(string))
return nil
}
if err := walk(&query.Service, eval); err != nil {
return nil, err
}
return query, nil
}
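A minimal sketch of how a caller drives Compile and Render, in the same spirit as the benchmarks and tests in the next file (the template name, regexp, and lookup value here are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/consul/prepared_query"
	"github.com/hashicorp/consul/consul/structs"
)

func main() {
	tmpl := &structs.PreparedQuery{
		Name: "hello-",
		Template: structs.QueryTemplateOptions{
			Type:   structs.QueryTemplateTypeNamePrefixMatch,
			Regexp: "^hello-(.*)$",
		},
		Service: structs.ServiceQuery{
			Service: "${match(1)}",
			Tags:    []string{"${name.suffix}"},
		},
	}

	// Compile parses the HIL expressions and the regexp once, up front.
	ct, err := prepared_query.Compile(tmpl)
	if err != nil {
		log.Fatal(err)
	}

	// Render fills in name.* and match(N) for a concrete lookup; for
	// "hello-redis" this yields Service "redis" and Tags ["redis"].
	query, err := ct.Render("hello-redis")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(query.Service.Service, query.Service.Tags)
}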

View File

@ -0,0 +1,291 @@
package prepared_query
import (
"reflect"
"strings"
"testing"
"github.com/hashicorp/consul/consul/structs"
"github.com/mitchellh/copystructure"
)
var (
// bigBench is a test query that uses all the features of templates, not
// in a realistic way, but in a complete way.
bigBench = &structs.PreparedQuery{
Name: "hello",
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
Regexp: "^hello-(.*)-(.*)$",
},
Service: structs.ServiceQuery{
Service: "${name.full}",
Failover: structs.QueryDatacenterOptions{
Datacenters: []string{
"${name.full}",
"${name.prefix}",
"${name.suffix}",
"${match(0)}",
"${match(1)}",
"${match(2)}",
},
},
Tags: []string{
"${name.full}",
"${name.prefix}",
"${name.suffix}",
"${match(0)}",
"${match(1)}",
"${match(2)}",
},
},
}
// smallBench is a small prepared query just for doing geo failover. This
// is a minimal, useful configuration.
smallBench = &structs.PreparedQuery{
Name: "",
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
},
Service: structs.ServiceQuery{
Service: "${name.full}",
Failover: structs.QueryDatacenterOptions{
Datacenters: []string{
"dc1",
"dc2",
"dc3",
},
},
},
}
)
func compileBench(b *testing.B, query *structs.PreparedQuery) {
for i := 0; i < b.N; i++ {
_, err := Compile(query)
if err != nil {
b.Fatalf("err: %v", err)
}
}
}
func renderBench(b *testing.B, query *structs.PreparedQuery) {
compiled, err := Compile(query)
if err != nil {
b.Fatalf("err: %v", err)
}
for i := 0; i < b.N; i++ {
_, err := compiled.Render("hello-bench-mark")
if err != nil {
b.Fatalf("err: %v", err)
}
}
}
func BenchmarkTemplate_CompileSmall(b *testing.B) {
compileBench(b, smallBench)
}
func BenchmarkTemplate_CompileBig(b *testing.B) {
compileBench(b, bigBench)
}
func BenchmarkTemplate_RenderSmall(b *testing.B) {
renderBench(b, smallBench)
}
func BenchmarkTemplate_RenderBig(b *testing.B) {
renderBench(b, bigBench)
}
func TestTemplate_Compile(t *testing.T) {
// Start with an empty query that's not even a template.
query := &structs.PreparedQuery{}
_, err := Compile(query)
if err == nil || !strings.Contains(err.Error(), "Bad Template") {
t.Fatalf("bad: %v", err)
}
if IsTemplate(query) {
t.Fatalf("should not be a template")
}
// Make it a basic template, keeping a copy before we compile.
query.Template.Type = structs.QueryTemplateTypeNamePrefixMatch
query.Template.Regexp = "^(hello)there$"
query.Service.Service = "${name.full}"
query.Service.Tags = []string{"${match(1)}"}
backup, err := copystructure.Copy(query)
if err != nil {
t.Fatalf("err: %v", err)
}
ct, err := Compile(query)
if err != nil {
t.Fatalf("err: %v", err)
}
if !IsTemplate(query) {
t.Fatalf("should be a template")
}
// Do a sanity check render on it.
actual, err := ct.Render("hellothere")
if err != nil {
t.Fatalf("err: %v", err)
}
// See if it rendered correctly.
expected := &structs.PreparedQuery{
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
Regexp: "^(hello)there$",
},
Service: structs.ServiceQuery{
Service: "hellothere",
Tags: []string{
"hello",
},
},
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
// Prove that it didn't alter the definition we compiled.
if !reflect.DeepEqual(query, backup.(*structs.PreparedQuery)) {
t.Fatalf("bad: %#v", query)
}
// Try a bad HIL interpolation (syntax error).
query.Service.Service = "${name.full"
_, err = Compile(query)
if err == nil || !strings.Contains(err.Error(), "Bad format") {
t.Fatalf("bad: %v", err)
}
// Try a bad HIL interpolation (syntax ok but unknown variable).
query.Service.Service = "${name.nope}"
_, err = Compile(query)
if err == nil || !strings.Contains(err.Error(), "unknown variable") {
t.Fatalf("bad: %v", err)
}
// Try a bad regexp.
query.Template.Regexp = "^(nope$"
query.Service.Service = "${name.full}"
_, err = Compile(query)
if err == nil || !strings.Contains(err.Error(), "Bad Regexp") {
t.Fatalf("bad: %v", err)
}
}
func TestTemplate_Render(t *testing.T) {
// Try a noop template that is all static.
{
query := &structs.PreparedQuery{
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
},
Service: structs.ServiceQuery{
Service: "hellothere",
},
}
ct, err := Compile(query)
if err != nil {
t.Fatalf("err: %v", err)
}
actual, err := ct.Render("unused")
if err != nil {
t.Fatalf("err: %v", err)
}
if !reflect.DeepEqual(actual, query) {
t.Fatalf("bad: %#v", actual)
}
}
// Try all the variables and functions.
query := &structs.PreparedQuery{
Name: "hello-",
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
Regexp: "^(.*?)-(.*?)-(.*)$",
},
Service: structs.ServiceQuery{
Service: "${name.prefix} xxx ${name.full} xxx ${name.suffix}",
Tags: []string{
"${match(-1)}",
"${match(0)}",
"${match(1)}",
"${match(2)}",
"${match(3)}",
"${match(4)}",
"${40 + 2}",
},
},
}
ct, err := Compile(query)
if err != nil {
t.Fatalf("err: %v", err)
}
// Run a case that matches the regexp.
{
actual, err := ct.Render("hello-foo-bar-none")
if err != nil {
t.Fatalf("err: %v", err)
}
expected := &structs.PreparedQuery{
Name: "hello-",
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
Regexp: "^(.*?)-(.*?)-(.*)$",
},
Service: structs.ServiceQuery{
Service: "hello- xxx hello-foo-bar-none xxx foo-bar-none",
Tags: []string{
"",
"hello-foo-bar-none",
"hello",
"foo",
"bar-none",
"",
"42",
},
},
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
}
// Run a case that doesn't match the regexp
{
actual, err := ct.Render("hello-nope")
if err != nil {
t.Fatalf("err: %v", err)
}
expected := &structs.PreparedQuery{
Name: "hello-",
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
Regexp: "^(.*?)-(.*?)-(.*)$",
},
Service: structs.ServiceQuery{
Service: "hello- xxx hello-nope xxx nope",
Tags: []string{
"",
"",
"",
"",
"",
"",
"42",
},
},
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
}
}

View File

@ -0,0 +1,49 @@
package prepared_query
import (
"fmt"
"reflect"
)
// visitor is a function that will get called for each string element of a
// structure.
type visitor func(path string, v reflect.Value) error
// visit calls the visitor function for each string it finds, and will descend
// recursively into structures and slices. If any visitor returns an error then
// the search will stop and that error will be returned.
func visit(path string, v reflect.Value, t reflect.Type, fn visitor) error {
switch v.Kind() {
case reflect.String:
return fn(path, v)
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
vf := v.Field(i)
tf := t.Field(i)
newPath := fmt.Sprintf("%s.%s", path, tf.Name)
if err := visit(newPath, vf, tf.Type, fn); err != nil {
return err
}
}
case reflect.Slice:
for i := 0; i < v.Len(); i++ {
vi := v.Index(i)
ti := vi.Type()
newPath := fmt.Sprintf("%s[%d]", path, i)
if err := visit(newPath, vi, ti, fn); err != nil {
return err
}
}
}
return nil
}
// walk finds all the string elements of a given structure (and its sub-
// structures) and calls the visitor function. Each string found will get
// a unique path computed. If any visitor returns an error then the search
// will stop and that error will be returned.
func walk(obj interface{}, fn visitor) error {
v := reflect.ValueOf(obj).Elem()
t := v.Type()
return visit("", v, t, fn)
}

View File

@ -0,0 +1,52 @@
package prepared_query
import (
"fmt"
"reflect"
"testing"
"github.com/hashicorp/consul/consul/structs"
)
func TestWalk_ServiceQuery(t *testing.T) {
var actual []string
fn := func(path string, v reflect.Value) error {
actual = append(actual, fmt.Sprintf("%s:%s", path, v.String()))
return nil
}
service := &structs.ServiceQuery{
Service: "the-service",
Failover: structs.QueryDatacenterOptions{
Datacenters: []string{"dc1", "dc2"},
},
Tags: []string{"tag1", "tag2", "tag3"},
}
if err := walk(service, fn); err != nil {
t.Fatalf("err: %v", err)
}
expected := []string{
".Service:the-service",
".Failover.Datacenters[0]:dc1",
".Failover.Datacenters[1]:dc2",
".Tags[0]:tag1",
".Tags[1]:tag2",
".Tags[2]:tag3",
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
}
func TestWalk_Visitor_Errors(t *testing.T) {
fn := func(path string, v reflect.Value) error {
return fmt.Errorf("bad")
}
service := &structs.ServiceQuery{}
err := walk(service, fn)
if err == nil || err.Error() != "bad" {
t.Fatalf("bad: %#v", err)
}
}

View File

@ -134,6 +134,8 @@ func parseQuery(query *structs.PreparedQuery) error {
// transaction. Otherwise, people could "steal" queries that they don't
// have proper ACL rights to change.
// - Session is optional and checked for integrity during the transaction.
// - Template is checked during the transaction since that's where we
// compile it.
// Token is checked when the query is executed, but we do make sure the
// user hasn't accidentally pasted-in the special redacted token name,
@ -162,7 +164,7 @@ func parseQuery(query *structs.PreparedQuery) error {
func parseService(svc *structs.ServiceQuery) error {
// Service is required.
if svc.Service == "" {
return fmt.Errorf("Must provide a service name to query")
return fmt.Errorf("Must provide a Service name to query")
}
// NearestN can be 0 which means "don't fail over by RTT".
@ -267,6 +269,54 @@ func (p *PreparedQuery) List(args *structs.DCSpecificRequest, reply *structs.Ind
})
}
// Explain resolves a prepared query and returns the (possibly rendered) query
// to the caller. This is useful for letting operators figure out which query is
// picking up a given name. We can also add additional info about how the query
// will be executed here.
func (p *PreparedQuery) Explain(args *structs.PreparedQueryExecuteRequest,
reply *structs.PreparedQueryExplainResponse) error {
if done, err := p.srv.forward("PreparedQuery.Explain", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"consul", "prepared-query", "explain"}, time.Now())
// We have to do this ourselves since we are not doing a blocking RPC.
p.srv.setQueryMeta(&reply.QueryMeta)
if args.RequireConsistent {
if err := p.srv.consistentRead(); err != nil {
return err
}
}
// Try to locate the query.
state := p.srv.fsm.State()
_, query, err := state.PreparedQueryResolve(args.QueryIDOrName)
if err != nil {
return err
}
if query == nil {
return ErrQueryNotFound
}
// Place the query into a list so we can run the standard ACL filter on
// it.
queries := &structs.IndexedPreparedQueries{
Queries: structs.PreparedQueries{query},
}
if err := p.srv.filterACL(args.Token, queries); err != nil {
return err
}
// If the query was filtered out, return an error.
if len(queries.Queries) == 0 {
p.srv.logger.Printf("[WARN] consul.prepared_query: Explain on prepared query '%s' denied due to ACLs", query.ID)
return permissionDeniedErr
}
reply.Query = *(queries.Queries[0])
return nil
}
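Schematically, callers reach this over RPC the same way the tests below do; the reply carries the fully rendered definition rather than execution results (the codec, datacenter, and name here are placeholders in the style of the test helpers):

args := structs.PreparedQueryExecuteRequest{
	Datacenter:    "dc1",
	QueryIDOrName: "prod-redis",
	QueryOptions:  structs.QueryOptions{Token: "my-token"},
}
var reply structs.PreparedQueryExplainResponse
if err := msgpackrpc.CallWithCodec(codec, "PreparedQuery.Explain", &args, &reply); err != nil {
	// ErrQueryNotFound loses its type over RPC, so callers compare err.Error().
	log.Fatal(err)
}
// For a "prod-" template, reply.Query.Service.Service comes back rendered
// as "prod-redis".
fmt.Println(reply.Query.Service.Service)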
// Execute runs a prepared query and returns the results. This will perform the
// failover logic if no local results are available. This is typically called as
// part of a DNS lookup, or when executing prepared queries from the HTTP API.
@ -287,7 +337,7 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest,
// Try to locate the query.
state := p.srv.fsm.State()
_, query, err := state.PreparedQueryLookup(args.QueryIDOrName)
_, query, err := state.PreparedQueryResolve(args.QueryIDOrName)
if err != nil {
return err
}

View File

@ -530,7 +530,7 @@ func TestPreparedQuery_parseQuery(t *testing.T) {
query := &structs.PreparedQuery{}
err := parseQuery(query)
if err == nil || !strings.Contains(err.Error(), "Must provide a service") {
if err == nil || !strings.Contains(err.Error(), "Must provide a Service") {
t.Fatalf("bad: %v", err)
}
@ -579,6 +579,219 @@ func TestPreparedQuery_parseQuery(t *testing.T) {
}
}
func TestPreparedQuery_ACLDeny_Catchall_Template(t *testing.T) {
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.ACLDatacenter = "dc1"
c.ACLMasterToken = "root"
c.ACLDefaultPolicy = "deny"
})
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testutil.WaitForLeader(t, s1.RPC, "dc1")
// Create an ACL with write permissions for any prefix.
var token string
{
var rules = `
query "" {
policy = "write"
}
`
req := structs.ACLRequest{
Datacenter: "dc1",
Op: structs.ACLSet,
ACL: structs.ACL{
Name: "User token",
Type: structs.ACLTypeClient,
Rules: rules,
},
WriteRequest: structs.WriteRequest{Token: "root"},
}
if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token); err != nil {
t.Fatalf("err: %v", err)
}
}
// Set up a catch-all template.
query := structs.PreparedQueryRequest{
Datacenter: "dc1",
Op: structs.PreparedQueryCreate,
Query: &structs.PreparedQuery{
Name: "",
Token: "5e1e24e5-1329-f86f-18c6-3d3734edb2cd",
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
},
Service: structs.ServiceQuery{
Service: "${name.full}",
},
},
}
var reply string
// Creating without a token should fail since the default policy is to
// deny.
err := msgpackrpc.CallWithCodec(codec, "PreparedQuery.Apply", &query, &reply)
if err == nil || !strings.Contains(err.Error(), permissionDenied) {
t.Fatalf("bad: %v", err)
}
// Now add the token and try again.
query.WriteRequest.Token = token
if err = msgpackrpc.CallWithCodec(codec, "PreparedQuery.Apply", &query, &reply); err != nil {
t.Fatalf("err: %v", err)
}
// Capture the ID and read back the query to verify. Note that the token
// will be redacted since this isn't a management token.
query.Query.ID = reply
query.Query.Token = redactedToken
{
req := &structs.PreparedQuerySpecificRequest{
Datacenter: "dc1",
QueryID: query.Query.ID,
QueryOptions: structs.QueryOptions{Token: token},
}
var resp structs.IndexedPreparedQueries
if err = msgpackrpc.CallWithCodec(codec, "PreparedQuery.Get", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if len(resp.Queries) != 1 {
t.Fatalf("bad: %v", resp)
}
actual := resp.Queries[0]
if resp.Index != actual.ModifyIndex {
t.Fatalf("bad index: %d", resp.Index)
}
actual.CreateIndex, actual.ModifyIndex = 0, 0
if !reflect.DeepEqual(actual, query.Query) {
t.Fatalf("bad: %v", actual)
}
}
// Try to query by ID without a token and make sure it gets denied, even
// though this has an empty name and would normally be shown.
{
req := &structs.PreparedQuerySpecificRequest{
Datacenter: "dc1",
QueryID: query.Query.ID,
}
var resp structs.IndexedPreparedQueries
err := msgpackrpc.CallWithCodec(codec, "PreparedQuery.Get", req, &resp)
if err == nil || !strings.Contains(err.Error(), permissionDenied) {
t.Fatalf("bad: %v", err)
}
if len(resp.Queries) != 0 {
t.Fatalf("bad: %v", resp)
}
}
// We should get the same result listing all the queries without a
// token.
{
req := &structs.DCSpecificRequest{
Datacenter: "dc1",
}
var resp structs.IndexedPreparedQueries
if err = msgpackrpc.CallWithCodec(codec, "PreparedQuery.List", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if len(resp.Queries) != 0 {
t.Fatalf("bad: %v", resp)
}
}
// But a management token should be able to see it, and the real token.
query.Query.Token = "5e1e24e5-1329-f86f-18c6-3d3734edb2cd"
{
req := &structs.PreparedQuerySpecificRequest{
Datacenter: "dc1",
QueryID: query.Query.ID,
QueryOptions: structs.QueryOptions{Token: "root"},
}
var resp structs.IndexedPreparedQueries
if err = msgpackrpc.CallWithCodec(codec, "PreparedQuery.Get", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if len(resp.Queries) != 1 {
t.Fatalf("bad: %v", resp)
}
actual := resp.Queries[0]
if resp.Index != actual.ModifyIndex {
t.Fatalf("bad index: %d", resp.Index)
}
actual.CreateIndex, actual.ModifyIndex = 0, 0
if !reflect.DeepEqual(actual, query.Query) {
t.Fatalf("bad: %v", actual)
}
}
// Explaining should also be denied without a token.
{
req := &structs.PreparedQueryExecuteRequest{
Datacenter: "dc1",
QueryIDOrName: "anything",
}
var resp structs.PreparedQueryExplainResponse
err := msgpackrpc.CallWithCodec(codec, "PreparedQuery.Explain", req, &resp)
if err == nil || !strings.Contains(err.Error(), permissionDenied) {
t.Fatalf("bad: %v", err)
}
}
// The user can explain and see the redacted token.
query.Query.Token = redactedToken
query.Query.Service.Service = "anything"
{
req := &structs.PreparedQueryExecuteRequest{
Datacenter: "dc1",
QueryIDOrName: "anything",
QueryOptions: structs.QueryOptions{Token: token},
}
var resp structs.PreparedQueryExplainResponse
err := msgpackrpc.CallWithCodec(codec, "PreparedQuery.Explain", req, &resp)
if err != nil {
t.Fatalf("err: %v", err)
}
actual := &resp.Query
actual.CreateIndex, actual.ModifyIndex = 0, 0
if !reflect.DeepEqual(actual, query.Query) {
t.Fatalf("bad: %v", actual)
}
}
// Make sure the management token can also explain and see the token.
query.Query.Token = "5e1e24e5-1329-f86f-18c6-3d3734edb2cd"
query.Query.Service.Service = "anything"
{
req := &structs.PreparedQueryExecuteRequest{
Datacenter: "dc1",
QueryIDOrName: "anything",
QueryOptions: structs.QueryOptions{Token: "root"},
}
var resp structs.PreparedQueryExplainResponse
err := msgpackrpc.CallWithCodec(codec, "PreparedQuery.Explain", req, &resp)
if err != nil {
t.Fatalf("err: %v", err)
}
actual := &resp.Query
actual.CreateIndex, actual.ModifyIndex = 0, 0
if !reflect.DeepEqual(actual, query.Query) {
t.Fatalf("bad: %v", actual)
}
}
}
func TestPreparedQuery_Get(t *testing.T) {
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.ACLDatacenter = "dc1"
@ -1004,6 +1217,138 @@ func TestPreparedQuery_List(t *testing.T) {
}
}
func TestPreparedQuery_Explain(t *testing.T) {
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.ACLDatacenter = "dc1"
c.ACLMasterToken = "root"
c.ACLDefaultPolicy = "deny"
})
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testutil.WaitForLeader(t, s1.RPC, "dc1")
// Create an ACL with write permissions for prod- queries.
var token string
{
var rules = `
query "prod-" {
policy = "write"
}
`
req := structs.ACLRequest{
Datacenter: "dc1",
Op: structs.ACLSet,
ACL: structs.ACL{
Name: "User token",
Type: structs.ACLTypeClient,
Rules: rules,
},
WriteRequest: structs.WriteRequest{Token: "root"},
}
if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token); err != nil {
t.Fatalf("err: %v", err)
}
}
// Set up a template.
query := structs.PreparedQueryRequest{
Datacenter: "dc1",
Op: structs.PreparedQueryCreate,
Query: &structs.PreparedQuery{
Name: "prod-",
Token: "5e1e24e5-1329-f86f-18c6-3d3734edb2cd",
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
},
Service: structs.ServiceQuery{
Service: "${name.full}",
},
},
WriteRequest: structs.WriteRequest{Token: token},
}
var reply string
if err := msgpackrpc.CallWithCodec(codec, "PreparedQuery.Apply", &query, &reply); err != nil {
t.Fatalf("err: %v", err)
}
// Explain via the management token.
query.Query.ID = reply
query.Query.Service.Service = "prod-redis"
{
req := &structs.PreparedQueryExecuteRequest{
Datacenter: "dc1",
QueryIDOrName: "prod-redis",
QueryOptions: structs.QueryOptions{Token: "root"},
}
var resp structs.PreparedQueryExplainResponse
err := msgpackrpc.CallWithCodec(codec, "PreparedQuery.Explain", req, &resp)
if err != nil {
t.Fatalf("err: %v", err)
}
actual := &resp.Query
actual.CreateIndex, actual.ModifyIndex = 0, 0
if !reflect.DeepEqual(actual, query.Query) {
t.Fatalf("bad: %v", actual)
}
}
// Explain via the user token, which will redact the captured token.
query.Query.Token = redactedToken
query.Query.Service.Service = "prod-redis"
{
req := &structs.PreparedQueryExecuteRequest{
Datacenter: "dc1",
QueryIDOrName: "prod-redis",
QueryOptions: structs.QueryOptions{Token: token},
}
var resp structs.PreparedQueryExplainResponse
err := msgpackrpc.CallWithCodec(codec, "PreparedQuery.Explain", req, &resp)
if err != nil {
t.Fatalf("err: %v", err)
}
actual := &resp.Query
actual.CreateIndex, actual.ModifyIndex = 0, 0
if !reflect.DeepEqual(actual, query.Query) {
t.Fatalf("bad: %v", actual)
}
}
// Explaining should be denied without a token, since the user isn't
// allowed to see the query.
{
req := &structs.PreparedQueryExecuteRequest{
Datacenter: "dc1",
QueryIDOrName: "prod-redis",
}
var resp structs.PreparedQueryExplainResponse
err := msgpackrpc.CallWithCodec(codec, "PreparedQuery.Explain", req, &resp)
if err == nil || !strings.Contains(err.Error(), permissionDenied) {
t.Fatalf("bad: %v", err)
}
}
// Try to explain a bogus ID.
{
req := &structs.PreparedQueryExecuteRequest{
Datacenter: "dc1",
QueryIDOrName: generateUUID(),
QueryOptions: structs.QueryOptions{Token: "root"},
}
var resp structs.IndexedPreparedQueries
if err := msgpackrpc.CallWithCodec(codec, "PreparedQuery.Explain", req, &resp); err != nil {
if err.Error() != ErrQueryNotFound.Error() {
t.Fatalf("err: %v", err)
}
}
}
}
// This is a beast of a test, but the setup is so extensive it makes sense to
// walk through the different cases once we have it up. This is broken into
// sections so it's still pretty easy to read.
@ -1910,6 +2255,11 @@ func TestPreparedQuery_tagFilter(t *testing.T) {
if ret != "node2|node6" {
t.Fatalf("bad: %s", ret)
}
ret = stringify(tagFilter([]string{""}, testNodes()))
if ret != "" {
t.Fatalf("bad: %s", ret)
}
}
func TestPreparedQuery_Wrapper(t *testing.T) {

View File

@ -4,6 +4,7 @@ import (
"fmt"
"regexp"
"github.com/hashicorp/consul/consul/prepared_query"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/go-memdb"
)
@ -16,22 +17,60 @@ func isUUID(str string) bool {
return validUUID.MatchString(str)
}
// queryWrapper is an internal structure that is used to store a query alongside
// its compiled template, which can be nil.
type queryWrapper struct {
// We embed the PreparedQuery structure so that the UUID field indexer
// can see the ID directly.
*structs.PreparedQuery
// ct is the compiled template, or nil if the query isn't a template. The
// state store manages this and keeps it up to date every time the query
// changes.
ct *prepared_query.CompiledTemplate
}
// toPreparedQuery unwraps the internal form of a prepared query and returns
// the regular struct.
func toPreparedQuery(wrapped interface{}) *structs.PreparedQuery {
if wrapped == nil {
return nil
}
return wrapped.(*queryWrapper).PreparedQuery
}
// PreparedQueries is used to pull all the prepared queries from the snapshot.
func (s *StateSnapshot) PreparedQueries() (memdb.ResultIterator, error) {
iter, err := s.tx.Get("prepared-queries", "id")
func (s *StateSnapshot) PreparedQueries() (structs.PreparedQueries, error) {
queries, err := s.tx.Get("prepared-queries", "id")
if err != nil {
return nil, err
}
return iter, nil
var ret structs.PreparedQueries
for wrapped := queries.Next(); wrapped != nil; wrapped = queries.Next() {
ret = append(ret, toPreparedQuery(wrapped))
}
return ret, nil
}
// PreparedQuery is used when restoring from a snapshot. For general inserts,
// use PreparedQuerySet.
func (s *StateRestore) PreparedQuery(query *structs.PreparedQuery) error {
if err := s.tx.Insert("prepared-queries", query); err != nil {
return fmt.Errorf("failed restoring prepared query: %s", err)
// If this is a template, compile it, otherwise leave the compiled
// template field nil.
var ct *prepared_query.CompiledTemplate
if prepared_query.IsTemplate(query) {
var err error
ct, err = prepared_query.Compile(query)
if err != nil {
return fmt.Errorf("failed compiling template: %s", err)
}
}
// Insert the wrapped query.
if err := s.tx.Insert("prepared-queries", &queryWrapper{query, ct}); err != nil {
return fmt.Errorf("failed restoring prepared query: %s", err)
}
if err := indexUpdateMaxTxn(s.tx, query.ModifyIndex, "prepared-queries"); err != nil {
return fmt.Errorf("failed updating index: %s", err)
}
@ -62,14 +101,15 @@ func (s *StateStore) preparedQuerySetTxn(tx *memdb.Txn, idx uint64, query *struc
}
// Check for an existing query.
existing, err := tx.First("prepared-queries", "id", query.ID)
wrapped, err := tx.First("prepared-queries", "id", query.ID)
if err != nil {
return fmt.Errorf("failed prepared query lookup: %s", err)
}
existing := toPreparedQuery(wrapped)
// Set the indexes.
if existing != nil {
query.CreateIndex = existing.(*structs.PreparedQuery).CreateIndex
query.CreateIndex = existing.CreateIndex
query.ModifyIndex = idx
} else {
query.CreateIndex = idx
@ -77,16 +117,27 @@ func (s *StateStore) preparedQuerySetTxn(tx *memdb.Txn, idx uint64, query *struc
}
// Verify that the query name doesn't already exist, or that we are
// updating the same instance that has this name.
// updating the same instance that has this name. If this is a template
// and the name is empty then we make sure there's not an empty template
// already registered.
if query.Name != "" {
alias, err := tx.First("prepared-queries", "name", query.Name)
wrapped, err := tx.First("prepared-queries", "name", query.Name)
if err != nil {
return fmt.Errorf("failed prepared query lookup: %s", err)
}
if alias != nil && (existing == nil ||
existing.(*structs.PreparedQuery).ID != alias.(*structs.PreparedQuery).ID) {
other := toPreparedQuery(wrapped)
if other != nil && (existing == nil || existing.ID != other.ID) {
return fmt.Errorf("name '%s' aliases an existing query name", query.Name)
}
} else if prepared_query.IsTemplate(query) {
wrapped, err := tx.First("prepared-queries", "template", query.Name)
if err != nil {
return fmt.Errorf("failed prepared query lookup: %s", err)
}
other := toPreparedQuery(wrapped)
if other != nil && (existing == nil || existing.ID != other.ID) {
return fmt.Errorf("a query template with an empty name already exists")
}
}
// Verify that the name doesn't alias any existing ID. We allow queries
@ -99,11 +150,11 @@ func (s *StateStore) preparedQuerySetTxn(tx *memdb.Txn, idx uint64, query *struc
// index will complain if we look up something that's not formatted
// like one.
if isUUID(query.Name) {
alias, err := tx.First("prepared-queries", "id", query.Name)
wrapped, err := tx.First("prepared-queries", "id", query.Name)
if err != nil {
return fmt.Errorf("failed prepared query lookup: %s", err)
}
if alias != nil {
if wrapped != nil {
return fmt.Errorf("name '%s' aliases an existing query ID", query.Name)
}
}
@ -123,8 +174,19 @@ func (s *StateStore) preparedQuerySetTxn(tx *memdb.Txn, idx uint64, query *struc
// checked at execute time and not doing integrity checking on them
// helps avoid bootstrapping chicken and egg problems.
// Insert the query.
if err := tx.Insert("prepared-queries", query); err != nil {
// If this is a template, compile it, otherwise leave the compiled
// template field nil.
var ct *prepared_query.CompiledTemplate
if prepared_query.IsTemplate(query) {
var err error
ct, err = prepared_query.Compile(query)
if err != nil {
return fmt.Errorf("failed compiling template: %s", err)
}
}
// Insert the wrapped query.
if err := tx.Insert("prepared-queries", &queryWrapper{query, ct}); err != nil {
return fmt.Errorf("failed inserting prepared query: %s", err)
}
if err := tx.Insert("index", &IndexEntry{"prepared-queries", idx}); err != nil {
@ -155,16 +217,16 @@ func (s *StateStore) PreparedQueryDelete(idx uint64, queryID string) error {
func (s *StateStore) preparedQueryDeleteTxn(tx *memdb.Txn, idx uint64, watches *DumbWatchManager,
queryID string) error {
// Pull the query.
query, err := tx.First("prepared-queries", "id", queryID)
wrapped, err := tx.First("prepared-queries", "id", queryID)
if err != nil {
return fmt.Errorf("failed prepared query lookup: %s", err)
}
if query == nil {
if wrapped == nil {
return nil
}
// Delete the query and update the index.
if err := tx.Delete("prepared-queries", query); err != nil {
if err := tx.Delete("prepared-queries", wrapped); err != nil {
return fmt.Errorf("failed prepared query delete: %s", err)
}
if err := tx.Insert("index", &IndexEntry{"prepared-queries", idx}); err != nil {
@ -184,24 +246,22 @@ func (s *StateStore) PreparedQueryGet(queryID string) (uint64, *structs.Prepared
idx := maxIndexTxn(tx, s.getWatchTables("PreparedQueryGet")...)
// Look up the query by its ID.
query, err := tx.First("prepared-queries", "id", queryID)
wrapped, err := tx.First("prepared-queries", "id", queryID)
if err != nil {
return 0, nil, fmt.Errorf("failed prepared query lookup: %s", err)
}
if query != nil {
return idx, query.(*structs.PreparedQuery), nil
}
return idx, nil, nil
return idx, toPreparedQuery(wrapped), nil
}
// PreparedQueryLookup returns the given prepared query by looking up an ID or
// Name.
func (s *StateStore) PreparedQueryLookup(queryIDOrName string) (uint64, *structs.PreparedQuery, error) {
// PreparedQueryResolve returns the given prepared query by looking up an ID or
// Name. If the query was looked up by name and it's a template, then the
// template will be rendered before it is returned.
func (s *StateStore) PreparedQueryResolve(queryIDOrName string) (uint64, *structs.PreparedQuery, error) {
tx := s.db.Txn(false)
defer tx.Abort()
// Get the table index.
idx := maxIndexTxn(tx, s.getWatchTables("PreparedQueryLookup")...)
idx := maxIndexTxn(tx, s.getWatchTables("PreparedQueryResolve")...)
// Explicitly ban an empty query. This will never match an ID and the
// schema is set up so it will never match a query with an empty name,
@ -215,22 +275,56 @@ func (s *StateStore) PreparedQueryLookup(queryIDOrName string) (uint64, *structs
// format before trying this because the UUID index will complain if
// we look up something that's not formatted like one.
if isUUID(queryIDOrName) {
query, err := tx.First("prepared-queries", "id", queryIDOrName)
wrapped, err := tx.First("prepared-queries", "id", queryIDOrName)
if err != nil {
return 0, nil, fmt.Errorf("failed prepared query lookup: %s", err)
}
if query != nil {
return idx, query.(*structs.PreparedQuery), nil
if wrapped != nil {
query := toPreparedQuery(wrapped)
if prepared_query.IsTemplate(query) {
return idx, nil, fmt.Errorf("prepared query templates can only be resolved up by name, not by ID")
}
return idx, query, nil
}
}
// Then try by name.
query, err := tx.First("prepared-queries", "name", queryIDOrName)
if err != nil {
return 0, nil, fmt.Errorf("failed prepared query lookup: %s", err)
// prep will check to see if the query is a template and render it
// first, otherwise it will just return a regular query.
prep := func(wrapped interface{}) (uint64, *structs.PreparedQuery, error) {
wrapper := wrapped.(*queryWrapper)
if prepared_query.IsTemplate(wrapper.PreparedQuery) {
render, err := wrapper.ct.Render(queryIDOrName)
if err != nil {
return idx, nil, err
}
return idx, render, nil
} else {
return idx, wrapper.PreparedQuery, nil
}
}
if query != nil {
return idx, query.(*structs.PreparedQuery), nil
// Next, look for an exact name match. This is the common case for static
// prepared queries, and could also apply to templates.
{
wrapped, err := tx.First("prepared-queries", "name", queryIDOrName)
if err != nil {
return 0, nil, fmt.Errorf("failed prepared query lookup: %s", err)
}
if wrapped != nil {
return prep(wrapped)
}
}
// Next, look for the longest prefix match among the prepared query
// templates.
{
wrapped, err := tx.LongestPrefix("prepared-queries", "template_prefix", queryIDOrName)
if err != nil {
return 0, nil, fmt.Errorf("failed prepared query lookup: %s", err)
}
if wrapped != nil {
return prep(wrapped)
}
}
return idx, nil, nil
@ -252,8 +346,8 @@ func (s *StateStore) PreparedQueryList() (uint64, structs.PreparedQueries, error
// Go over all of the queries and build the response.
var result structs.PreparedQueries
for query := queries.Next(); query != nil; query = queries.Next() {
result = append(result, query.(*structs.PreparedQuery))
for wrapped := queries.Next(); wrapped != nil; wrapped = queries.Next() {
result = append(result, toPreparedQuery(wrapped))
}
return idx, result, nil
}
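The net effect of the resolve order above (ID for non-templates, then exact name, then longest template prefix) can be illustrated with a hypothetical set of registered queries; the names below are made up:

// Registered: a static query named "prod-redis-main", a template named
// "prod-", and a catch-all template with an empty name.
//
//   PreparedQueryResolve("prod-redis-main") -> the static query (exact name wins)
//   PreparedQueryResolve("prod-redis")      -> the "prod-" template, rendered for "prod-redis"
//   PreparedQueryResolve("staging-mysql")   -> the catch-all template, rendered for "staging-mysql"
//   PreparedQueryResolve(<template's UUID>) -> an error; templates resolve by name only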

View File

@ -0,0 +1,51 @@
package state
import (
"fmt"
"strings"
"github.com/hashicorp/consul/consul/prepared_query"
)
// PreparedQueryIndex is a custom memdb indexer used to index prepared
// query templates. None of the built-in indexers do what we need, and our
// use case is pretty specific so it's better to put the logic here.
type PreparedQueryIndex struct {
}
// FromObject is used to compute the index key when inserting or updating an
// object.
func (*PreparedQueryIndex) FromObject(obj interface{}) (bool, []byte, error) {
wrapped, ok := obj.(*queryWrapper)
if !ok {
return false, nil, fmt.Errorf("invalid object given to index as prepared query")
}
query := toPreparedQuery(wrapped)
if !prepared_query.IsTemplate(query) {
return false, nil, nil
}
// Always prepend a null so that we can represent even an empty name.
out := "\x00" + strings.ToLower(query.Name)
return true, []byte(out), nil
}
// FromArgs is used when querying for an exact match. Since we don't add any
// suffix we can just call the prefix version.
func (p *PreparedQueryIndex) FromArgs(args ...interface{}) ([]byte, error) {
return p.PrefixFromArgs(args...)
}
// PrefixFromArgs is used when doing a prefix scan for an object.
func (*PreparedQueryIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
if len(args) != 1 {
return nil, fmt.Errorf("must provide only a single argument")
}
arg, ok := args[0].(string)
if !ok {
return nil, fmt.Errorf("argument must be a string: %#v", args[0])
}
arg = "\x00" + strings.ToLower(arg)
return []byte(arg), nil
}

View File

@ -0,0 +1,111 @@
package state
import (
"testing"
"github.com/hashicorp/consul/consul/structs"
)
// Indexer is a global indexer to use for tests since there is no state.
var index PreparedQueryIndex
func TestPreparedQueryIndex_FromObject(t *testing.T) {
// We shouldn't index an object we don't understand.
if ok, _, err := index.FromObject(42); ok || err == nil {
t.Fatalf("bad: ok=%v err=%v", ok, err)
}
// We shouldn't index a non-template query.
wrapped := &queryWrapper{&structs.PreparedQuery{}, nil}
if ok, _, err := index.FromObject(wrapped); ok || err != nil {
t.Fatalf("bad: ok=%v err=%v", ok, err)
}
// Create a template with an empty name.
query := &structs.PreparedQuery{
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
},
}
ok, key, err := index.FromObject(&queryWrapper{query, nil})
if !ok || err != nil {
t.Fatalf("bad: ok=%v err=%v", ok, err)
}
if string(key) != "\x00" {
t.Fatalf("bad: %#v", key)
}
// Set the name and try again.
query.Name = "hello"
ok, key, err = index.FromObject(&queryWrapper{query, nil})
if !ok || err != nil {
t.Fatalf("bad: ok=%v err=%v", ok, err)
}
if string(key) != "\x00hello" {
t.Fatalf("bad: %#v", key)
}
// Make sure the index isn't case-sensitive.
query.Name = "HELLO"
ok, key, err = index.FromObject(&queryWrapper{query, nil})
if !ok || err != nil {
t.Fatalf("bad: ok=%v err=%v", ok, err)
}
if string(key) != "\x00hello" {
t.Fatalf("bad: %#v", key)
}
}
func TestPreparedQueryIndex_FromArgs(t *testing.T) {
// Only accept a single string arg.
if _, err := index.FromArgs(42); err == nil {
t.Fatalf("should be an error")
}
if _, err := index.FromArgs("hello", "world"); err == nil {
t.Fatalf("should be an error")
}
// Try an empty string.
if key, err := index.FromArgs(""); err != nil || string(key) != "\x00" {
t.Fatalf("bad: key=%#v err=%v", key, err)
}
// Try a non-empty string.
if key, err := index.FromArgs("hello"); err != nil ||
string(key) != "\x00hello" {
t.Fatalf("bad: key=%#v err=%v", key, err)
}
// Make sure index is not case-sensitive.
if key, err := index.FromArgs("HELLO"); err != nil ||
string(key) != "\x00hello" {
t.Fatalf("bad: key=%#v err=%v", key, err)
}
}
func TestPreparedQueryIndex_PrefixFromArgs(t *testing.T) {
// Only accept a single string arg.
if _, err := index.PrefixFromArgs(42); err == nil {
t.Fatalf("should be an error")
}
if _, err := index.PrefixFromArgs("hello", "world"); err == nil {
t.Fatalf("should be an error")
}
// Try an empty string.
if key, err := index.PrefixFromArgs(""); err != nil || string(key) != "\x00" {
t.Fatalf("bad: key=%#v err=%v", key, err)
}
// Try a non-empty string.
if key, err := index.PrefixFromArgs("hello"); err != nil ||
string(key) != "\x00hello" {
t.Fatalf("bad: key=%#v err=%v", key, err)
}
// Make sure index is not case-sensitive.
if key, err := index.PrefixFromArgs("HELLO"); err != nil ||
string(key) != "\x00hello" {
t.Fatalf("bad: key=%#v err=%v", key, err)
}
}

View File

@ -232,10 +232,186 @@ func TestStateStore_PreparedQuerySet_PreparedQueryGet(t *testing.T) {
}
}
// Try to register a template that squats on the existing query's name.
{
evil := &structs.PreparedQuery{
ID: testUUID(),
Name: query.Name,
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
},
Service: structs.ServiceQuery{
Service: "redis",
},
}
err := s.PreparedQuerySet(8, evil)
if err == nil || !strings.Contains(err.Error(), "aliases an existing query name") {
t.Fatalf("bad: %v", err)
}
// Sanity check to make sure it's not there.
idx, actual, err := s.PreparedQueryGet(evil.ID)
if err != nil {
t.Fatalf("err: %s", err)
}
if idx != 6 {
t.Fatalf("bad index: %d", idx)
}
if actual != nil {
t.Fatalf("bad: %v", actual)
}
}
// Index is not updated if nothing is saved.
if idx := s.maxIndex("prepared-queries"); idx != 6 {
t.Fatalf("bad index: %d", idx)
}
// Turn the query into a template with an empty name.
query.Name = ""
query.Template = structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
}
if err := s.PreparedQuerySet(9, query); err != nil {
t.Fatalf("err: %s", err)
}
// Make sure the index got updated.
if idx := s.maxIndex("prepared-queries"); idx != 9 {
t.Fatalf("bad index: %d", idx)
}
// Read it back and verify the data was updated as well as the index.
expected.Name = ""
expected.Template = structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
}
expected.ModifyIndex = 9
idx, actual, err = s.PreparedQueryGet(query.ID)
if err != nil {
t.Fatalf("err: %s", err)
}
if idx != 9 {
t.Fatalf("bad index: %d", idx)
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %v", actual)
}
// Try to register a template that squats on the empty prefix.
{
evil := &structs.PreparedQuery{
ID: testUUID(),
Name: "",
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
},
Service: structs.ServiceQuery{
Service: "redis",
},
}
err := s.PreparedQuerySet(10, evil)
if err == nil || !strings.Contains(err.Error(), "query template with an empty name already exists") {
t.Fatalf("bad: %v", err)
}
// Sanity check to make sure it's not there.
idx, actual, err := s.PreparedQueryGet(evil.ID)
if err != nil {
t.Fatalf("err: %s", err)
}
if idx != 9 {
t.Fatalf("bad index: %d", idx)
}
if actual != nil {
t.Fatalf("bad: %v", actual)
}
}
// Give the query template a name.
query.Name = "prefix"
if err := s.PreparedQuerySet(11, query); err != nil {
t.Fatalf("err: %s", err)
}
// Make sure the index got updated.
if idx := s.maxIndex("prepared-queries"); idx != 11 {
t.Fatalf("bad index: %d", idx)
}
// Read it back and verify the data was updated as well as the index.
expected.Name = "prefix"
expected.ModifyIndex = 11
idx, actual, err = s.PreparedQueryGet(query.ID)
if err != nil {
t.Fatalf("err: %s", err)
}
if idx != 11 {
t.Fatalf("bad index: %d", idx)
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %v", actual)
}
// Try to register a template that squats on the prefix.
{
evil := &structs.PreparedQuery{
ID: testUUID(),
Name: "prefix",
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
},
Service: structs.ServiceQuery{
Service: "redis",
},
}
err := s.PreparedQuerySet(12, evil)
if err == nil || !strings.Contains(err.Error(), "aliases an existing query name") {
t.Fatalf("bad: %v", err)
}
// Sanity check to make sure it's not there.
idx, actual, err := s.PreparedQueryGet(evil.ID)
if err != nil {
t.Fatalf("err: %s", err)
}
if idx != 11 {
t.Fatalf("bad index: %d", idx)
}
if actual != nil {
t.Fatalf("bad: %v", actual)
}
}
// Try to register a template that doesn't compile.
{
evil := &structs.PreparedQuery{
ID: testUUID(),
Name: "legit-prefix",
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
},
Service: structs.ServiceQuery{
Service: "${nope",
},
}
err := s.PreparedQuerySet(13, evil)
if err == nil || !strings.Contains(err.Error(), "failed compiling template") {
t.Fatalf("bad: %v", err)
}
// Sanity check to make sure it's not there.
idx, actual, err := s.PreparedQueryGet(evil.ID)
if err != nil {
t.Fatalf("err: %s", err)
}
if idx != 11 {
t.Fatalf("bad index: %d", idx)
}
if actual != nil {
t.Fatalf("bad: %v", actual)
}
}
}
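// The "aliases an existing query name" and "query template with an empty
// name already exists" failures above are enforced through the unique
// "template" index this change adds to the prepared-queries schema. A
// hedged sketch of the kind of guard a writer could run against that index
// before inserting a template (illustrative only; the actual set logic may
// be structured differently, tx is assumed to be a *memdb.Txn, and
// toPreparedQuery unwraps the stored object as in the session-invalidation
// code elsewhere in this change):
//
//	if wrapped, err := tx.First("prepared-queries", "template", query.Name); err != nil {
//		return fmt.Errorf("failed prepared query lookup: %s", err)
//	} else if wrapped != nil && toPreparedQuery(wrapped).ID != query.ID {
//		return fmt.Errorf("name '%s' aliases an existing query name", query.Name)
//	}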
func TestStateStore_PreparedQueryDelete(t *testing.T) {
@ -318,7 +494,7 @@ func TestStateStore_PreparedQueryDelete(t *testing.T) {
}
}
func TestStateStore_PreparedQueryLookup(t *testing.T) {
func TestStateStore_PreparedQueryResolve(t *testing.T) {
s := testStateStore(t)
// Set up our test environment.
@ -336,7 +512,7 @@ func TestStateStore_PreparedQueryLookup(t *testing.T) {
// Try to lookup a query that's not there using something that looks
// like a real ID.
idx, actual, err := s.PreparedQueryLookup(query.ID)
idx, actual, err := s.PreparedQueryResolve(query.ID)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -349,7 +525,7 @@ func TestStateStore_PreparedQueryLookup(t *testing.T) {
// Try to lookup a query that's not there using something that looks
// like a name.
idx, actual, err = s.PreparedQueryLookup(query.Name)
idx, actual, err = s.PreparedQueryResolve(query.Name)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -382,7 +558,7 @@ func TestStateStore_PreparedQueryLookup(t *testing.T) {
ModifyIndex: 3,
},
}
idx, actual, err = s.PreparedQueryLookup(query.ID)
idx, actual, err = s.PreparedQueryResolve(query.ID)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -394,7 +570,7 @@ func TestStateStore_PreparedQueryLookup(t *testing.T) {
}
// Read it back using the name and verify it again.
idx, actual, err = s.PreparedQueryLookup(query.Name)
idx, actual, err = s.PreparedQueryResolve(query.Name)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -407,7 +583,7 @@ func TestStateStore_PreparedQueryLookup(t *testing.T) {
// Make sure an empty lookup is well-behaved if there are actual queries
// in the state store.
idx, actual, err = s.PreparedQueryLookup("")
idx, actual, err = s.PreparedQueryResolve("")
if err != ErrMissingQueryID {
t.Fatalf("bad: %v ", err)
}
@ -417,6 +593,123 @@ func TestStateStore_PreparedQueryLookup(t *testing.T) {
if actual != nil {
t.Fatalf("bad: %v", actual)
}
// Create two prepared query templates, one a longer prefix of the
// other.
tmpl1 := &structs.PreparedQuery{
ID: testUUID(),
Name: "prod-",
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
},
Service: structs.ServiceQuery{
Service: "${name.suffix}",
},
}
if err := s.PreparedQuerySet(4, tmpl1); err != nil {
t.Fatalf("err: %s", err)
}
tmpl2 := &structs.PreparedQuery{
ID: testUUID(),
Name: "prod-redis",
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
Regexp: "^prod-(.*)$",
},
Service: structs.ServiceQuery{
Service: "${match(1)}-master",
},
}
if err := s.PreparedQuerySet(5, tmpl2); err != nil {
t.Fatalf("err: %s", err)
}
// Resolve the less-specific prefix.
expected = &structs.PreparedQuery{
ID: tmpl1.ID,
Name: "prod-",
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
},
Service: structs.ServiceQuery{
Service: "mongodb",
},
RaftIndex: structs.RaftIndex{
CreateIndex: 4,
ModifyIndex: 4,
},
}
idx, actual, err = s.PreparedQueryResolve("prod-mongodb")
if err != nil {
t.Fatalf("err: %s", err)
}
if idx != 5 {
t.Fatalf("bad index: %d", idx)
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %v", actual)
}
// Now resolve the more specific prefix.
expected = &structs.PreparedQuery{
ID: tmpl2.ID,
Name: "prod-redis",
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
Regexp: "^prod-(.*)$",
},
Service: structs.ServiceQuery{
Service: "redis-foobar-master",
},
RaftIndex: structs.RaftIndex{
CreateIndex: 5,
ModifyIndex: 5,
},
}
idx, actual, err = s.PreparedQueryResolve("prod-redis-foobar")
if err != nil {
t.Fatalf("err: %s", err)
}
if idx != 5 {
t.Fatalf("bad index: %d", idx)
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %v", actual)
}
// Resolve an exact-match prefix. The output of this one isn't a
// sensible service name, but the template still renders.
expected = &structs.PreparedQuery{
ID: tmpl1.ID,
Name: "prod-",
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
},
Service: structs.ServiceQuery{
Service: "",
},
RaftIndex: structs.RaftIndex{
CreateIndex: 4,
ModifyIndex: 4,
},
}
idx, actual, err = s.PreparedQueryResolve("prod-")
if err != nil {
t.Fatalf("err: %s", err)
}
if idx != 5 {
t.Fatalf("bad index: %d", idx)
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %v", actual)
}
// Make sure you can't run a prepared query template by ID, since that
// makes no sense.
_, _, err = s.PreparedQueryResolve(tmpl1.ID)
if err == nil || !strings.Contains(err.Error(), "prepared query templates can only be resolved up by name") {
t.Fatalf("bad: %v", err)
}
}
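// The assertions above pin down the rendering rules for name_prefix_match
// templates: ${name.suffix} expands to whatever follows the template's Name
// prefix, and ${match(n)} expands to capture group n of Template.Regexp
// applied to the full lookup name. A rough sketch of that substitution
// (illustrative only, not the server's actual template engine):
//
//	func renderServiceSketch(tmpl *structs.PreparedQuery, lookup string) string {
//		out := strings.Replace(tmpl.Service.Service, "${name.suffix}",
//			strings.TrimPrefix(lookup, tmpl.Name), -1)
//		if tmpl.Template.Regexp != "" {
//			if m := regexp.MustCompile(tmpl.Template.Regexp).FindStringSubmatch(lookup); m != nil {
//				for i, g := range m {
//					out = strings.Replace(out, fmt.Sprintf("${match(%d)}", i), g, -1)
//				}
//			}
//		}
//		return out
//	}
//
// With the fixtures above this yields "mongodb" for tmpl1 and "prod-mongodb",
// "redis-foobar-master" for tmpl2 and "prod-redis-foobar", and "" for tmpl1
// and "prod-", matching the expected Service values checked by the test.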
func TestStateStore_PreparedQueryList(t *testing.T) {
@ -525,9 +818,12 @@ func TestStateStore_PreparedQuery_Snapshot_Restore(t *testing.T) {
},
&structs.PreparedQuery{
ID: testUUID(),
Name: "bob",
Name: "bob-",
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
},
Service: structs.ServiceQuery{
Service: "mongodb",
Service: "${name.suffix}",
},
},
}
@ -571,9 +867,12 @@ func TestStateStore_PreparedQuery_Snapshot_Restore(t *testing.T) {
},
&structs.PreparedQuery{
ID: queries[1].ID,
Name: "bob",
Name: "bob-",
Template: structs.QueryTemplateOptions{
Type: structs.QueryTemplateTypeNamePrefixMatch,
},
Service: structs.ServiceQuery{
Service: "mongodb",
Service: "${name.suffix}",
},
RaftIndex: structs.RaftIndex{
CreateIndex: 5,
@ -581,14 +880,10 @@ func TestStateStore_PreparedQuery_Snapshot_Restore(t *testing.T) {
},
},
}
iter, err := snap.PreparedQueries()
dump, err := snap.PreparedQueries()
if err != nil {
t.Fatalf("err: %s", err)
}
var dump structs.PreparedQueries
for query := iter.Next(); query != nil; query = iter.Next() {
dump = append(dump, query.(*structs.PreparedQuery))
}
if !reflect.DeepEqual(dump, expected) {
t.Fatalf("bad: %v", dump)
}
@ -616,6 +911,19 @@ func TestStateStore_PreparedQuery_Snapshot_Restore(t *testing.T) {
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %v", actual)
}
// Make sure the second query, which is a template, was compiled
// and can be resolved.
_, query, err := s.PreparedQueryResolve("bob-backwards-is-bob")
if err != nil {
t.Fatalf("err: %s", err)
}
if query == nil {
t.Fatalf("should have resolved the query")
}
if query.Service.Service != "backwards-is-bob" {
t.Fatalf("bad: %s", query.Service.Service)
}
}()
}

View File

@ -390,6 +390,12 @@ func preparedQueriesTableSchema() *memdb.TableSchema {
Lowercase: true,
},
},
"template": &memdb.IndexSchema{
Name: "template",
AllowMissing: true,
Unique: true,
Indexer: &PreparedQueryIndex{},
},
"session": &memdb.IndexSchema{
Name: "session",
AllowMissing: true,

View File

@ -413,7 +413,7 @@ func (s *StateStore) getWatchTables(method string) []string {
return []string{"acls"}
case "Coordinates":
return []string{"coordinates"}
case "PreparedQueryGet", "PreparedQueryLookup", "PreparedQueryList":
case "PreparedQueryGet", "PreparedQueryResolve", "PreparedQueryList":
return []string{"prepared-queries"}
}
@ -2151,15 +2151,14 @@ func (s *StateStore) deleteSessionTxn(tx *memdb.Txn, idx uint64, watches *DumbWa
return fmt.Errorf("failed prepared query lookup: %s", err)
}
{
var objs []interface{}
for query := queries.Next(); query != nil; query = queries.Next() {
objs = append(objs, query)
var ids []string
for wrapped := queries.Next(); wrapped != nil; wrapped = queries.Next() {
ids = append(ids, toPreparedQuery(wrapped).ID)
}
// Do the delete in a separate loop so we don't trash the iterator.
for _, obj := range objs {
q := obj.(*structs.PreparedQuery)
if err := s.preparedQueryDeleteTxn(tx, idx, watches, q.ID); err != nil {
for _, id := range ids {
if err := s.preparedQueryDeleteTxn(tx, idx, watches, id); err != nil {
return fmt.Errorf("failed prepared query delete: %s", err)
}
}

View File

@ -40,6 +40,25 @@ type ServiceQuery struct {
Tags []string
}
const (
// QueryTemplateTypeNamePrefixMatch uses the Name field of the query as
// a prefix to select the template.
QueryTemplateTypeNamePrefixMatch = "name_prefix_match"
)
// QueryTemplateOptions controls settings if this query is a template.
type QueryTemplateOptions struct {
// Type, if non-empty, means that this query is a template. This is
// set to one of the QueryTemplateType* constants above.
Type string
// Regexp is an optional regular expression to use to parse the full
// name, once the prefix match has selected a template. This can be
// used to extract parts of the name and choose a service name, set
// tags, etc.
Regexp string
}
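// exampleTemplate is illustrative only (not part of this change): a template
// defined with these options, modeled on the state store test fixtures. Any
// query whose name starts with "prod-" selects it, and the first capture
// group of Regexp feeds the rendered service name, so resolving "prod-redis"
// would yield the service "redis-master". The ${match(n)} variable is the
// interpolation the tests exercise; the ID is a placeholder UUID.
var exampleTemplate = &PreparedQuery{
	ID:   "8f246b77-f3e1-ff88-5b48-8ec93abf3e05", // placeholder UUID
	Name: "prod-",
	Template: QueryTemplateOptions{
		Type:   QueryTemplateTypeNamePrefixMatch,
		Regexp: "^prod-(.*)$",
	},
	Service: ServiceQuery{
		Service: "${match(1)}-master",
	},
}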
// PreparedQuery defines a complete prepared query, and is the structure we
// maintain in the state store.
type PreparedQuery struct {
@ -61,6 +80,11 @@ type PreparedQuery struct {
// with management privileges, must be used to change the query later.
Token string
// Template is used to configure this query as a template, which will
// respond to queries based on the Name, and then will be rendered
// before it is executed.
Template QueryTemplateOptions
// Service defines a service query (leaving things open for other types
// later).
Service ServiceQuery
@ -76,7 +100,7 @@ type PreparedQuery struct {
// this query, and whether the prefix applies to this query. You always need to
// check the ok value before using the prefix.
func (pq *PreparedQuery) GetACLPrefix() (string, bool) {
if pq.Name != "" {
if pq.Name != "" || pq.Template.Type != "" {
return pq.Name, true
}
@ -207,3 +231,12 @@ type PreparedQueryExecuteResponse struct {
// QueryMeta has freshness information about the query.
QueryMeta
}
// PreparedQueryExplainResponse has the results when explaining a query.
type PreparedQueryExplainResponse struct {
// Query has the fully-rendered query.
Query PreparedQuery
// QueryMeta has freshness information about the query.
QueryMeta
}

View File

@ -10,8 +10,20 @@ func TestStructs_PreparedQuery_GetACLPrefix(t *testing.T) {
t.Fatalf("bad: %s", prefix)
}
named := &PreparedQuery{Name: "hello"}
named := &PreparedQuery{
Name: "hello",
}
if prefix, ok := named.GetACLPrefix(); !ok || prefix != "hello" {
t.Fatalf("bad: %#v", prefix)
t.Fatalf("bad: ok=%v, prefix=%#v", ok, prefix)
}
tmpl := &PreparedQuery{
Name: "",
Template: QueryTemplateOptions{
Type: QueryTemplateTypeNamePrefixMatch,
},
}
if prefix, ok := tmpl.GetACLPrefix(); !ok || prefix != "" {
t.Fatalf("bad: ok=%v prefix=%#v", ok, prefix)
}
}

File diff suppressed because it is too large.

View File

@ -1,292 +0,0 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile
import (
"math"
"sort"
)
// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
Value float64 `json:",string"`
Width float64 `json:",string"`
Delta float64 `json:",string"`
}
// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample
func (a Samples) Len() int { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type invariant func(s *stream, r float64) float64
// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * r
}
return newStream(ƒ)
}
// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * (s.n - r)
}
return newStream(ƒ)
}
// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targets map[float64]float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
var m = math.MaxFloat64
var f float64
for quantile, epsilon := range targets {
if quantile*s.n <= r {
f = (2 * epsilon * r) / quantile
} else {
f = (2 * epsilon * (s.n - r)) / (1 - quantile)
}
if f < m {
m = f
}
}
return m
}
return newStream(ƒ)
}
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
*stream
b Samples
sorted bool
}
func newStream(ƒ invariant) *Stream {
x := &stream{ƒ: ƒ}
return &Stream{x, make(Samples, 0, 500), true}
}
// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
s.insert(Sample{Value: v, Width: 1})
}
func (s *Stream) insert(sample Sample) {
s.b = append(s.b, sample)
s.sorted = false
if len(s.b) == cap(s.b) {
s.flush()
}
}
// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
if !s.flushed() {
// Fast path when there hasn't been enough data for a flush;
// this also yields better accuracy for small sets of data.
l := len(s.b)
if l == 0 {
return 0
}
i := int(float64(l) * q)
if i > 0 {
i -= 1
}
s.maybeSort()
return s.b[i].Value
}
s.flush()
return s.stream.query(q)
}
// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
sort.Sort(samples)
s.stream.merge(samples)
}
// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
s.stream.reset()
s.b = s.b[:0]
}
// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
if !s.flushed() {
return s.b
}
s.flush()
return s.stream.samples()
}
// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
return len(s.b) + s.stream.count()
}
func (s *Stream) flush() {
s.maybeSort()
s.stream.merge(s.b)
s.b = s.b[:0]
}
func (s *Stream) maybeSort() {
if !s.sorted {
s.sorted = true
sort.Sort(s.b)
}
}
func (s *Stream) flushed() bool {
return len(s.stream.l) > 0
}
type stream struct {
n float64
l []Sample
ƒ invariant
}
func (s *stream) reset() {
s.l = s.l[:0]
s.n = 0
}
func (s *stream) insert(v float64) {
s.merge(Samples{{v, 1, 0}})
}
func (s *stream) merge(samples Samples) {
// TODO(beorn7): This tries to merge not only individual samples, but
// whole summaries. The paper doesn't mention merging summaries at
// all. Unittests show that the merging is inaccurate. Find out how to
// do merges properly.
var r float64
i := 0
for _, sample := range samples {
for ; i < len(s.l); i++ {
c := s.l[i]
if c.Value > sample.Value {
// Insert at position i.
s.l = append(s.l, Sample{})
copy(s.l[i+1:], s.l[i:])
s.l[i] = Sample{
sample.Value,
sample.Width,
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
// TODO(beorn7): How to calculate delta correctly?
}
i++
goto inserted
}
r += c.Width
}
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
i++
inserted:
s.n += sample.Width
r += sample.Width
}
s.compress()
}
func (s *stream) count() int {
return int(s.n)
}
func (s *stream) query(q float64) float64 {
t := math.Ceil(q * s.n)
t += math.Ceil(s.ƒ(s, t) / 2)
p := s.l[0]
var r float64
for _, c := range s.l[1:] {
r += p.Width
if r+c.Width+c.Delta > t {
return p.Value
}
p = c
}
return p.Value
}
func (s *stream) compress() {
if len(s.l) < 2 {
return
}
x := s.l[len(s.l)-1]
xi := len(s.l) - 1
r := s.n - 1 - x.Width
for i := len(s.l) - 2; i >= 0; i-- {
c := s.l[i]
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
x.Width += c.Width
s.l[xi] = x
// Remove element at i.
copy(s.l[i:], s.l[i+1:])
s.l = s.l[:len(s.l)-1]
xi -= 1
} else {
x = c
xi = i
}
r -= c.Width
}
}
func (s *stream) samples() Samples {
samples := make(Samples, len(s.l))
copy(samples, s.l)
return samples
}

View File

@ -1,43 +0,0 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
install:
go install
test: install generate-test-pbs
go test
generate-test-pbs:
make install
make -C testdata
protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
make

View File

@ -1,223 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer deep copy and merge.
// TODO: RawMessage.
package proto
import (
"log"
"reflect"
"strings"
)
// Clone returns a deep copy of a protocol buffer.
func Clone(pb Message) Message {
in := reflect.ValueOf(pb)
if in.IsNil() {
return pb
}
out := reflect.New(in.Type().Elem())
// out is empty so a merge is a deep copy.
mergeStruct(out.Elem(), in.Elem())
return out.Interface().(Message)
}
// Merge merges src into dst.
// Required and optional fields that are set in src will be set to that value in dst.
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
in := reflect.ValueOf(src)
out := reflect.ValueOf(dst)
if out.IsNil() {
panic("proto: nil destination")
}
if in.Type() != out.Type() {
// Explicit test prior to mergeStruct so that mistyped nils will fail
panic("proto: type mismatch")
}
if in.IsNil() {
// Merging nil into non-nil is a quiet no-op
return
}
mergeStruct(out.Elem(), in.Elem())
}
func mergeStruct(out, in reflect.Value) {
sprop := GetProperties(in.Type())
for i := 0; i < in.NumField(); i++ {
f := in.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
}
if emIn, ok := in.Addr().Interface().(extendableProto); ok {
emOut := out.Addr().Interface().(extendableProto)
mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
}
uf := in.FieldByName("XXX_unrecognized")
if !uf.IsValid() {
return
}
uin := uf.Bytes()
if len(uin) > 0 {
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
}
}
// mergeAny performs a merge between two values of the same type.
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
// prop is set if this is a struct field (it may be nil).
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
if in.Type() == protoMessageType {
if !in.IsNil() {
if out.IsNil() {
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
} else {
Merge(out.Interface().(Message), in.Interface().(Message))
}
}
return
}
switch in.Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
if !viaPtr && isProto3Zero(in) {
return
}
out.Set(in)
case reflect.Interface:
// Probably a oneof field; copy non-nil values.
if in.IsNil() {
return
}
// Allocate destination if it is not set, or set to a different type.
// Otherwise we will merge as normal.
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
}
mergeAny(out.Elem(), in.Elem(), false, nil)
case reflect.Map:
if in.Len() == 0 {
return
}
if out.IsNil() {
out.Set(reflect.MakeMap(in.Type()))
}
// For maps with value types of *T or []byte we need to deep copy each value.
elemKind := in.Type().Elem().Kind()
for _, key := range in.MapKeys() {
var val reflect.Value
switch elemKind {
case reflect.Ptr:
val = reflect.New(in.Type().Elem().Elem())
mergeAny(val, in.MapIndex(key), false, nil)
case reflect.Slice:
val = in.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
default:
val = in.MapIndex(key)
}
out.SetMapIndex(key, val)
}
case reflect.Ptr:
if in.IsNil() {
return
}
if out.IsNil() {
out.Set(reflect.New(in.Elem().Type()))
}
mergeAny(out.Elem(), in.Elem(), true, nil)
case reflect.Slice:
if in.IsNil() {
return
}
if in.Type().Elem().Kind() == reflect.Uint8 {
// []byte is a scalar bytes field, not a repeated field.
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value, and should not
// be merged.
if prop != nil && prop.proto3 && in.Len() == 0 {
return
}
// Make a deep copy.
// Append to []byte{} instead of []byte(nil) so that we never end up
// with a nil result.
out.SetBytes(append([]byte{}, in.Bytes()...))
return
}
n := in.Len()
if out.IsNil() {
out.Set(reflect.MakeSlice(in.Type(), 0, n))
}
switch in.Type().Elem().Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
out.Set(reflect.AppendSlice(out, in))
default:
for i := 0; i < n; i++ {
x := reflect.Indirect(reflect.New(in.Type().Elem()))
mergeAny(x, in.Index(i), false, nil)
out.Set(reflect.Append(out, x))
}
}
case reflect.Struct:
mergeStruct(out, in)
default:
// unknown type, so not a protocol buffer
log.Printf("proto: don't know how to copy %v", in)
}
}
func mergeExtension(out, in map[int32]Extension) {
for extNum, eIn := range in {
eOut := Extension{desc: eIn.desc}
if eIn.value != nil {
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
eOut.value = v.Interface()
}
if eIn.enc != nil {
eOut.enc = make([]byte, len(eIn.enc))
copy(eOut.enc, eIn.enc)
}
out[extNum] = eOut
}
}

View File

@ -1,867 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for decoding protocol buffer data to construct in-memory representations.
*/
import (
"errors"
"fmt"
"io"
"os"
"reflect"
)
// errOverflow is returned when an integer is too large to be represented.
var errOverflow = errors.New("proto: integer overflow")
// ErrInternalBadWireType is returned by generated code when an incorrect
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
// The fundamental decoders that interpret bytes on the wire.
// Those that take integer types all return uint64 and are
// therefore of type valueDecoder.
// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
// x, n already 0
for shift := uint(0); shift < 64; shift += 7 {
if n >= len(buf) {
return 0, 0
}
b := uint64(buf[n])
n++
x |= (b & 0x7F) << shift
if (b & 0x80) == 0 {
return x, n
}
}
// The number is too large to represent in a 64-bit value.
return 0, 0
}
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
// x, err already 0
i := p.index
l := len(p.buf)
for shift := uint(0); shift < 64; shift += 7 {
if i >= l {
err = io.ErrUnexpectedEOF
return
}
b := p.buf[i]
i++
x |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
p.index = i
return
}
}
// The number is too large to represent in a 64-bit value.
err = errOverflow
return
}
// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
// x, err already 0
i := p.index + 8
if i < 0 || i > len(p.buf) {
err = io.ErrUnexpectedEOF
return
}
p.index = i
x = uint64(p.buf[i-8])
x |= uint64(p.buf[i-7]) << 8
x |= uint64(p.buf[i-6]) << 16
x |= uint64(p.buf[i-5]) << 24
x |= uint64(p.buf[i-4]) << 32
x |= uint64(p.buf[i-3]) << 40
x |= uint64(p.buf[i-2]) << 48
x |= uint64(p.buf[i-1]) << 56
return
}
// DecodeFixed32 reads a 32-bit integer from the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
// x, err already 0
i := p.index + 4
if i < 0 || i > len(p.buf) {
err = io.ErrUnexpectedEOF
return
}
p.index = i
x = uint64(p.buf[i-4])
x |= uint64(p.buf[i-3]) << 8
x |= uint64(p.buf[i-2]) << 16
x |= uint64(p.buf[i-1]) << 24
return
}
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
// from the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
x, err = p.DecodeVarint()
if err != nil {
return
}
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
return
}
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
// from the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
x, err = p.DecodeVarint()
if err != nil {
return
}
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
return
}
// These are not ValueDecoders: they produce an array of bytes or a string.
// bytes, embedded messages
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
n, err := p.DecodeVarint()
if err != nil {
return nil, err
}
nb := int(n)
if nb < 0 {
return nil, fmt.Errorf("proto: bad byte length %d", nb)
}
end := p.index + nb
if end < p.index || end > len(p.buf) {
return nil, io.ErrUnexpectedEOF
}
if !alloc {
// todo: check if can get more uses of alloc=false
buf = p.buf[p.index:end]
p.index += nb
return
}
buf = make([]byte, nb)
copy(buf, p.buf[p.index:])
p.index += nb
return
}
// DecodeStringBytes reads an encoded string from the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) DecodeStringBytes() (s string, err error) {
buf, err := p.DecodeRawBytes(false)
if err != nil {
return
}
return string(buf), nil
}
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
// If the protocol buffer has extensions, and the field matches, add it as an extension.
// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
oi := o.index
err := o.skip(t, tag, wire)
if err != nil {
return err
}
if !unrecField.IsValid() {
return nil
}
ptr := structPointer_Bytes(base, unrecField)
// Add the skipped field to struct field
obuf := o.buf
o.buf = *ptr
o.EncodeVarint(uint64(tag<<3 | wire))
*ptr = append(o.buf, obuf[oi:o.index]...)
o.buf = obuf
return nil
}
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
var u uint64
var err error
switch wire {
case WireVarint:
_, err = o.DecodeVarint()
case WireFixed64:
_, err = o.DecodeFixed64()
case WireBytes:
_, err = o.DecodeRawBytes(false)
case WireFixed32:
_, err = o.DecodeFixed32()
case WireStartGroup:
for {
u, err = o.DecodeVarint()
if err != nil {
break
}
fwire := int(u & 0x7)
if fwire == WireEndGroup {
break
}
ftag := int(u >> 3)
err = o.skip(t, ftag, fwire)
if err != nil {
break
}
}
default:
err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
}
return err
}
// Unmarshaler is the interface representing objects that can
// unmarshal themselves. The method should reset the receiver before
// decoding starts. The argument points to data that may be
// overwritten, so implementations should not keep references to the
// buffer.
type Unmarshaler interface {
Unmarshal([]byte) error
}
// Unmarshal parses the protocol buffer representation in buf and places the
// decoded result in pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// Unmarshal resets pb before starting to unmarshal, so any
// existing data in pb is always removed. Use UnmarshalMerge
// to preserve and append to existing data.
func Unmarshal(buf []byte, pb Message) error {
pb.Reset()
return UnmarshalMerge(buf, pb)
}
// UnmarshalMerge parses the protocol buffer representation in buf and
// writes the decoded result to pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// UnmarshalMerge merges into existing data in pb.
// Most code should use Unmarshal instead.
func UnmarshalMerge(buf []byte, pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(Unmarshaler); ok {
return u.Unmarshal(buf)
}
return NewBuffer(buf).Unmarshal(pb)
}
// DecodeMessage reads a count-delimited message from the Buffer.
func (p *Buffer) DecodeMessage(pb Message) error {
enc, err := p.DecodeRawBytes(false)
if err != nil {
return err
}
return NewBuffer(enc).Unmarshal(pb)
}
// DecodeGroup reads a tag-delimited group from the Buffer.
func (p *Buffer) DecodeGroup(pb Message) error {
typ, base, err := getbase(pb)
if err != nil {
return err
}
return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
}
// Unmarshal parses the protocol buffer representation in the
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(Unmarshaler); ok {
err := u.Unmarshal(p.buf[p.index:])
p.index = len(p.buf)
return err
}
typ, base, err := getbase(pb)
if err != nil {
return err
}
err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
if collectStats {
stats.Decode++
}
return err
}
// unmarshalType does the work of unmarshaling a structure.
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
var state errorState
required, reqFields := prop.reqCount, uint64(0)
var err error
for err == nil && o.index < len(o.buf) {
oi := o.index
var u uint64
u, err = o.DecodeVarint()
if err != nil {
break
}
wire := int(u & 0x7)
if wire == WireEndGroup {
if is_group {
return nil // input is satisfied
}
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
}
tag := int(u >> 3)
if tag <= 0 {
return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
}
fieldnum, ok := prop.decoderTags.get(tag)
if !ok {
// Maybe it's an extension?
if prop.extendable {
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
if err = o.skip(st, tag, wire); err == nil {
ext := e.ExtensionMap()[int32(tag)] // may be missing
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
e.ExtensionMap()[int32(tag)] = ext
}
continue
}
}
// Maybe it's a oneof?
if prop.oneofUnmarshaler != nil {
m := structPointer_Interface(base, st).(Message)
// First return value indicates whether tag is a oneof field.
ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
if err == ErrInternalBadWireType {
// Map the error to something more descriptive.
// Do the formatting here to save generated code space.
err = fmt.Errorf("bad wiretype for oneof field in %T", m)
}
if ok {
continue
}
}
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
continue
}
p := prop.Prop[fieldnum]
if p.dec == nil {
fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
continue
}
dec := p.dec
if wire != WireStartGroup && wire != p.WireType {
if wire == WireBytes && p.packedDec != nil {
// a packable field
dec = p.packedDec
} else {
err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
continue
}
}
decErr := dec(o, p, base)
if decErr != nil && !state.shouldContinue(decErr, p) {
err = decErr
}
if err == nil && p.Required {
// Successfully decoded a required field.
if tag <= 64 {
// use bitmap for fields 1-64 to catch field reuse.
var mask uint64 = 1 << uint64(tag-1)
if reqFields&mask == 0 {
// new required field
reqFields |= mask
required--
}
} else {
// This is imprecise. It can be fooled by a required field
// with a tag > 64 that is encoded twice; that's very rare.
// A fully correct implementation would require allocating
// a data structure, which we would like to avoid.
required--
}
}
}
if err == nil {
if is_group {
return io.ErrUnexpectedEOF
}
if state.err != nil {
return state.err
}
if required > 0 {
// Not enough information to determine the exact field. If we use extra
// CPU, we could determine the field only if the missing required field
// has a tag <= 64 and we check reqFields.
return &RequiredNotSetError{"{Unknown}"}
}
}
return err
}
// Individual type decoders
// For each,
// u is the decoded value,
// v is a pointer to the field (pointer) in the struct
// Sizes of the pools to allocate inside the Buffer.
// The goal is modest amortization and allocation
// on at least 16-byte boundaries.
const (
boolPoolSize = 16
uint32PoolSize = 8
uint64PoolSize = 4
)
// Decode a bool.
func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
if len(o.bools) == 0 {
o.bools = make([]bool, boolPoolSize)
}
o.bools[0] = u != 0
*structPointer_Bool(base, p.field) = &o.bools[0]
o.bools = o.bools[1:]
return nil
}
func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
*structPointer_BoolVal(base, p.field) = u != 0
return nil
}
// Decode an int32.
func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
return nil
}
func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
return nil
}
// Decode an int64.
func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word64_Set(structPointer_Word64(base, p.field), o, u)
return nil
}
func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
return nil
}
// Decode a string.
func (o *Buffer) dec_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
*structPointer_String(base, p.field) = &s
return nil
}
func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
*structPointer_StringVal(base, p.field) = s
return nil
}
// Decode a slice of bytes ([]byte).
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
*structPointer_Bytes(base, p.field) = b
return nil
}
// Decode a slice of bools ([]bool).
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
v := structPointer_BoolSlice(base, p.field)
*v = append(*v, u != 0)
return nil
}
// Decode a slice of bools ([]bool) in packed format.
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
v := structPointer_BoolSlice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded bools
fin := o.index + nb
if fin < o.index {
return errOverflow
}
y := *v
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
y = append(y, u != 0)
}
*v = y
return nil
}
// Decode a slice of int32s ([]int32).
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
structPointer_Word32Slice(base, p.field).Append(uint32(u))
return nil
}
// Decode a slice of int32s ([]int32) in packed format.
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
v := structPointer_Word32Slice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded int32s
fin := o.index + nb
if fin < o.index {
return errOverflow
}
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
v.Append(uint32(u))
}
return nil
}
// Decode a slice of int64s ([]int64).
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
structPointer_Word64Slice(base, p.field).Append(u)
return nil
}
// Decode a slice of int64s ([]int64) in packed format.
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
v := structPointer_Word64Slice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded int64s
fin := o.index + nb
if fin < o.index {
return errOverflow
}
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
v.Append(u)
}
return nil
}
// Decode a slice of strings ([]string).
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
v := structPointer_StringSlice(base, p.field)
*v = append(*v, s)
return nil
}
// Decode a slice of slice of bytes ([][]byte).
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
v := structPointer_BytesSlice(base, p.field)
*v = append(*v, b)
return nil
}
// Decode a map field.
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
raw, err := o.DecodeRawBytes(false)
if err != nil {
return err
}
oi := o.index // index at the end of this map entry
o.index -= len(raw) // move buffer back to start of map entry
mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
if mptr.Elem().IsNil() {
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
}
v := mptr.Elem() // map[K]V
// Prepare addressable doubly-indirect placeholders for the key and value types.
// See enc_new_map for why.
keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
keybase := toStructPointer(keyptr.Addr()) // **K
var valbase structPointer
var valptr reflect.Value
switch p.mtype.Elem().Kind() {
case reflect.Slice:
// []byte
var dummy []byte
valptr = reflect.ValueOf(&dummy) // *[]byte
valbase = toStructPointer(valptr) // *[]byte
case reflect.Ptr:
// message; valptr is **Msg; need to allocate the intermediate pointer
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
valptr.Set(reflect.New(valptr.Type().Elem()))
valbase = toStructPointer(valptr)
default:
// everything else
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
valbase = toStructPointer(valptr.Addr()) // **V
}
// Decode.
// This parses a restricted wire format, namely the encoding of a message
// with two fields. See enc_new_map for the format.
for o.index < oi {
// tagcode for key and value properties are always a single byte
// because they have tags 1 and 2.
tagcode := o.buf[o.index]
o.index++
switch tagcode {
case p.mkeyprop.tagcode[0]:
if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
return err
}
case p.mvalprop.tagcode[0]:
if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
return err
}
default:
// TODO: Should we silently skip this instead?
return fmt.Errorf("proto: bad map data tag %d", raw[0])
}
}
keyelem, valelem := keyptr.Elem(), valptr.Elem()
if !keyelem.IsValid() || !valelem.IsValid() {
// We did not decode the key or the value in the map entry.
// Either way, it's an invalid map entry.
return fmt.Errorf("proto: bad map data: missing key/val")
}
v.SetMapIndex(keyelem, valelem)
return nil
}
// Decode a group.
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
bas := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(bas) {
// allocate new nested message
bas = toStructPointer(reflect.New(p.stype))
structPointer_SetStructPointer(base, p.field, bas)
}
return o.unmarshalType(p.stype, p.sprop, true, bas)
}
// Decode an embedded message.
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
raw, e := o.DecodeRawBytes(false)
if e != nil {
return e
}
bas := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(bas) {
// allocate new nested message
bas = toStructPointer(reflect.New(p.stype))
structPointer_SetStructPointer(base, p.field, bas)
}
// If the object can unmarshal itself, let it.
if p.isUnmarshaler {
iv := structPointer_Interface(bas, p.stype)
return iv.(Unmarshaler).Unmarshal(raw)
}
obuf := o.buf
oi := o.index
o.buf = raw
o.index = 0
err = o.unmarshalType(p.stype, p.sprop, false, bas)
o.buf = obuf
o.index = oi
return err
}
// Decode a slice of embedded messages.
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
return o.dec_slice_struct(p, false, base)
}
// Decode a slice of embedded groups.
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
return o.dec_slice_struct(p, true, base)
}
// Decode a slice of structs ([]*struct).
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
v := reflect.New(p.stype)
bas := toStructPointer(v)
structPointer_StructPointerSlice(base, p.field).Append(bas)
if is_group {
err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
return err
}
raw, err := o.DecodeRawBytes(false)
if err != nil {
return err
}
// If the object can unmarshal itself, let it.
if p.isUnmarshaler {
iv := v.Interface()
return iv.(Unmarshaler).Unmarshal(raw)
}
obuf := o.buf
oi := o.index
o.buf = raw
o.index = 0
err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
o.buf = obuf
o.index = oi
return err
}

File diff suppressed because it is too large.

View File

@ -1,276 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer comparison.
package proto
import (
"bytes"
"log"
"reflect"
"strings"
)
/*
Equal returns true iff protocol buffers a and b are equal.
The arguments must both be pointers to protocol buffer structs.
Equality is defined in this way:
- Two messages are equal iff they are the same type,
corresponding fields are equal, unknown field sets
are equal, and extensions sets are equal.
- Two set scalar fields are equal iff their values are equal.
If the fields are of a floating-point type, remember that
NaN != x for all x, including NaN. If the message is defined
in a proto3 .proto file, fields are not "set"; specifically,
zero length proto3 "bytes" fields are equal (nil == {}).
- Two repeated fields are equal iff their lengths are the same,
and their corresponding elements are equal (a "bytes" field,
although represented by []byte, is not a repeated field)
- Two unset fields are equal.
- Two unknown field sets are equal if their current
encoded state is equal.
- Two extension sets are equal iff they have corresponding
elements that are pairwise equal.
- Every other combination of things are not equal.
The return value is undefined if a and b are not protocol buffers.
*/
func Equal(a, b Message) bool {
if a == nil || b == nil {
return a == b
}
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
if v1.Type() != v2.Type() {
return false
}
if v1.Kind() == reflect.Ptr {
if v1.IsNil() {
return v2.IsNil()
}
if v2.IsNil() {
return false
}
v1, v2 = v1.Elem(), v2.Elem()
}
if v1.Kind() != reflect.Struct {
return false
}
return equalStruct(v1, v2)
}
// v1 and v2 are known to have the same type.
func equalStruct(v1, v2 reflect.Value) bool {
sprop := GetProperties(v1.Type())
for i := 0; i < v1.NumField(); i++ {
f := v1.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
f1, f2 := v1.Field(i), v2.Field(i)
if f.Type.Kind() == reflect.Ptr {
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
// both unset
continue
} else if n1 != n2 {
// set/unset mismatch
return false
}
b1, ok := f1.Interface().(raw)
if ok {
b2 := f2.Interface().(raw)
// RawMessage
if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
return false
}
continue
}
f1, f2 = f1.Elem(), f2.Elem()
}
if !equalAny(f1, f2, sprop.Prop[i]) {
return false
}
}
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
em2 := v2.FieldByName("XXX_extensions")
if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
return false
}
}
uf := v1.FieldByName("XXX_unrecognized")
if !uf.IsValid() {
return true
}
u1 := uf.Bytes()
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
if !bytes.Equal(u1, u2) {
return false
}
return true
}
// v1 and v2 are known to have the same type.
// prop may be nil.
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
if v1.Type() == protoMessageType {
m1, _ := v1.Interface().(Message)
m2, _ := v2.Interface().(Message)
return Equal(m1, m2)
}
switch v1.Kind() {
case reflect.Bool:
return v1.Bool() == v2.Bool()
case reflect.Float32, reflect.Float64:
return v1.Float() == v2.Float()
case reflect.Int32, reflect.Int64:
return v1.Int() == v2.Int()
case reflect.Interface:
// Probably a oneof field; compare the inner values.
n1, n2 := v1.IsNil(), v2.IsNil()
if n1 || n2 {
return n1 == n2
}
e1, e2 := v1.Elem(), v2.Elem()
if e1.Type() != e2.Type() {
return false
}
return equalAny(e1, e2, nil)
case reflect.Map:
if v1.Len() != v2.Len() {
return false
}
for _, key := range v1.MapKeys() {
val2 := v2.MapIndex(key)
if !val2.IsValid() {
// This key was not found in the second map.
return false
}
if !equalAny(v1.MapIndex(key), val2, nil) {
return false
}
}
return true
case reflect.Ptr:
return equalAny(v1.Elem(), v2.Elem(), prop)
case reflect.Slice:
if v1.Type().Elem().Kind() == reflect.Uint8 {
// short circuit: []byte
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value.
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
return true
}
if v1.IsNil() != v2.IsNil() {
return false
}
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
}
if v1.Len() != v2.Len() {
return false
}
for i := 0; i < v1.Len(); i++ {
if !equalAny(v1.Index(i), v2.Index(i), prop) {
return false
}
}
return true
case reflect.String:
return v1.Interface().(string) == v2.Interface().(string)
case reflect.Struct:
return equalStruct(v1, v2)
case reflect.Uint32, reflect.Uint64:
return v1.Uint() == v2.Uint()
}
// unknown type, so not a protocol buffer
log.Printf("proto: don't know how to compare %v", v1)
return false
}
// base is the struct type that the extensions are based on.
// em1 and em2 are extension maps.
func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
if len(em1) != len(em2) {
return false
}
for extNum, e1 := range em1 {
e2, ok := em2[extNum]
if !ok {
return false
}
m1, m2 := e1.value, e2.value
if m1 != nil && m2 != nil {
// Both are unencoded.
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false
}
continue
}
// At least one is encoded. To do a semantically correct comparison
// we need to unmarshal them first.
var desc *ExtensionDesc
if m := extensionMaps[base]; m != nil {
desc = m[extNum]
}
if desc == nil {
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
continue
}
var err error
if m1 == nil {
m1, err = decodeExtension(e1.enc, desc)
}
if m2 == nil && err == nil {
m2, err = decodeExtension(e2.enc, desc)
}
if err != nil {
// The encoded form is invalid.
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
return false
}
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false
}
}
return true
}
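// A minimal usage sketch for the exported Equal entry point that drives the
// helpers above, assuming the pb.Test message generated from the example in
// the package documentation:
//
//	a := &pb.Test{Label: proto.String("hello"), Reps: []int64{1, 2, 3}}
//	b := &pb.Test{Label: proto.String("hello"), Reps: []int64{1, 2, 3}}
//	proto.Equal(a, b) // true: messages are compared field by field
//	b.Reps = append(b.Reps, 4)
//	proto.Equal(a, b) // false: repeated fields must match element-wise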

View File

@ -1,399 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Types and routines for supporting protocol buffer extensions.
*/
import (
"errors"
"fmt"
"reflect"
"strconv"
"sync"
)
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
var ErrMissingExtension = errors.New("proto: missing extension")
// ExtensionRange represents a range of message extensions for a protocol buffer.
// Used in code generated by the protocol compiler.
type ExtensionRange struct {
Start, End int32 // both inclusive
}
// extendableProto is an interface implemented by any protocol buffer that may be extended.
type extendableProto interface {
Message
ExtensionRangeArray() []ExtensionRange
ExtensionMap() map[int32]Extension
}
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
// ExtensionDesc represents an extension specification.
// Used in generated code from the protocol compiler.
type ExtensionDesc struct {
ExtendedType Message // nil pointer to the type that is being extended
ExtensionType interface{} // nil pointer to the extension type
Field int32 // field number
Name string // fully-qualified name of extension, for text formatting
Tag string // protobuf tag style
}
func (ed *ExtensionDesc) repeated() bool {
t := reflect.TypeOf(ed.ExtensionType)
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
}
// Extension represents an extension in a message.
type Extension struct {
// When an extension is stored in a message using SetExtension
// only desc and value are set. When the message is marshaled
// enc will be set to the encoded form of the message.
//
// When a message is unmarshaled and contains extensions, each
// extension will have only enc set. When such an extension is
// accessed using GetExtension (or GetExtensions) desc and value
// will be set.
desc *ExtensionDesc
value interface{}
enc []byte
}
// SetRawExtension is for testing only.
func SetRawExtension(base extendableProto, id int32, b []byte) {
base.ExtensionMap()[id] = Extension{enc: b}
}
// isExtensionField returns true iff the given field number is in an extension range.
func isExtensionField(pb extendableProto, field int32) bool {
for _, er := range pb.ExtensionRangeArray() {
if er.Start <= field && field <= er.End {
return true
}
}
return false
}
// checkExtensionTypes checks that the given extension is valid for pb.
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
// Check the extended type.
if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
}
// Check the range.
if !isExtensionField(pb, extension.Field) {
return errors.New("proto: bad extension number; not in declared ranges")
}
return nil
}
// extPropKey is sufficient to uniquely identify an extension.
type extPropKey struct {
base reflect.Type
field int32
}
var extProp = struct {
sync.RWMutex
m map[extPropKey]*Properties
}{
m: make(map[extPropKey]*Properties),
}
func extensionProperties(ed *ExtensionDesc) *Properties {
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
extProp.RLock()
if prop, ok := extProp.m[key]; ok {
extProp.RUnlock()
return prop
}
extProp.RUnlock()
extProp.Lock()
defer extProp.Unlock()
// Check again.
if prop, ok := extProp.m[key]; ok {
return prop
}
prop := new(Properties)
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
extProp.m[key] = prop
return prop
}
// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
func encodeExtensionMap(m map[int32]Extension) error {
for k, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
continue
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
p := NewBuffer(nil)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
if err := props.enc(p, props, toStructPointer(x)); err != nil {
return err
}
e.enc = p.buf
m[k] = e
}
return nil
}
func sizeExtensionMap(m map[int32]Extension) (n int) {
for _, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
n += len(e.enc)
continue
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
n += props.size(props, toStructPointer(x))
}
return
}
// HasExtension returns whether the given extension is present in pb.
func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
// TODO: Check types, field numbers, etc.?
_, ok := pb.ExtensionMap()[extension.Field]
return ok
}
// ClearExtension removes the given extension from pb.
func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
// TODO: Check types, field numbers, etc.?
delete(pb.ExtensionMap(), extension.Field)
}
// GetExtension parses and returns the given extension of pb.
// If the extension is not present and has no default value it returns ErrMissingExtension.
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
if err := checkExtensionTypes(pb, extension); err != nil {
return nil, err
}
emap := pb.ExtensionMap()
e, ok := emap[extension.Field]
if !ok {
// defaultExtensionValue returns the default value or
// ErrMissingExtension if there is no default.
return defaultExtensionValue(extension)
}
if e.value != nil {
// Already decoded. Check the descriptor, though.
if e.desc != extension {
// This shouldn't happen. If it does, it means that
// GetExtension was called twice with two different
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
}
return e.value, nil
}
v, err := decodeExtension(e.enc, extension)
if err != nil {
return nil, err
}
// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
e.value = v
e.desc = extension
e.enc = nil
emap[extension.Field] = e
return e.value, nil
}
// defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
sf, _, err := fieldDefault(t, props)
if err != nil {
return nil, err
}
if sf == nil || sf.value == nil {
// There is no default value.
return nil, ErrMissingExtension
}
if t.Kind() != reflect.Ptr {
// We do not need to return a Ptr, we can directly return sf.value.
return sf.value, nil
}
// We need to return an interface{} that is a pointer to sf.value.
value := reflect.New(t).Elem()
value.Set(reflect.New(value.Type().Elem()))
if sf.kind == reflect.Int32 {
// We may have an int32 or an enum, but the underlying data is int32.
// Since we can't set an int32 into a non-int32 reflect.Value directly,
// set it as an int32.
value.Elem().SetInt(int64(sf.value.(int32)))
} else {
value.Elem().Set(reflect.ValueOf(sf.value))
}
return value.Interface(), nil
}
// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
o := NewBuffer(b)
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
// t is a pointer to a struct, pointer to basic type or a slice.
// Allocate a "field" to store the pointer/slice itself; the
// pointer/slice will be stored here. We pass
// the address of this field to props.dec.
// This passes a zero field and a *t and lets props.dec
// interpret it as a *struct{ x t }.
value := reflect.New(t).Elem()
for {
// Discard wire type and field number varint. It isn't needed.
if _, err := o.DecodeVarint(); err != nil {
return nil, err
}
if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
return nil, err
}
if o.index >= len(o.buf) {
break
}
}
return value.Interface(), nil
}
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
epb, ok := pb.(extendableProto)
if !ok {
err = errors.New("proto: not an extendable proto")
return
}
extensions = make([]interface{}, len(es))
for i, e := range es {
extensions[i], err = GetExtension(epb, e)
if err == ErrMissingExtension {
err = nil
}
if err != nil {
return
}
}
return
}
// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
if err := checkExtensionTypes(pb, extension); err != nil {
return err
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
return errors.New("proto: bad extension value type")
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
// from an ErrNil due to a missing field. Extensions are
// always optional, so the encoder would just swallow the error
// and drop all the extensions from the encoded message.
if reflect.ValueOf(value).IsNil() {
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
}
pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
return nil
}
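// A minimal sketch of the extension API above, assuming a generated message
// type pb.MyMessage that declares extension ranges and a generated descriptor
// pb.E_Tag whose ExtensionType is *string (both names are illustrative):
//
//	msg := &pb.MyMessage{}
//	if err := proto.SetExtension(msg, pb.E_Tag, proto.String("blue")); err != nil {
//		log.Fatal(err)
//	}
//	if proto.HasExtension(msg, pb.E_Tag) {
//		v, _ := proto.GetExtension(msg, pb.E_Tag)
//		fmt.Println(*v.(*string)) // "blue"
//	}
//	proto.ClearExtension(msg, pb.E_Tag)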
// A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension.
var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
// RegisterExtension is called from the generated code.
func RegisterExtension(desc *ExtensionDesc) {
st := reflect.TypeOf(desc.ExtendedType).Elem()
m := extensionMaps[st]
if m == nil {
m = make(map[int32]*ExtensionDesc)
extensionMaps[st] = m
}
if _, ok := m[desc.Field]; ok {
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
}
m[desc.Field] = desc
}
// RegisteredExtensions returns a map of the registered extensions of a
// protocol buffer struct, indexed by the extension number.
// The argument pb should be a nil pointer to the struct type.
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
return extensionMaps[reflect.TypeOf(pb).Elem()]
}
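// For example, the descriptors registered for the illustrative pb.MyMessage
// type above can be listed with a nil pointer of the struct type:
//
//	for num, desc := range proto.RegisteredExtensions((*pb.MyMessage)(nil)) {
//		fmt.Println(num, desc.Name)
//	}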

View File

@ -1,894 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package proto converts data structures to and from the wire format of
protocol buffers. It works in concert with the Go source code generated
for .proto files by the protocol compiler.
A summary of the properties of the protocol buffer interface
for a protocol buffer variable v:
- Names are turned from camel_case to CamelCase for export.
- There are no methods on v to set fields; just treat
them as structure fields.
- There are getters that return a field's value if set,
and return the field's default value if unset.
The getters work even if the receiver is a nil message.
- The zero value for a struct is its correct initialization state.
All desired fields must be set before marshaling.
- A Reset() method will restore a protobuf struct to its zero state.
- Non-repeated fields are pointers to the values; nil means unset.
That is, optional or required field int32 f becomes F *int32.
- Repeated fields are slices.
- Helper functions are available to aid the setting of fields.
msg.Foo = proto.String("hello") // set field
- Constants are defined to hold the default values of all fields that
have them. They have the form Default_StructName_FieldName.
Because the getter methods handle defaulted values,
direct use of these constants should be rare.
- Enums are given type names and maps from names to values.
Enum values are prefixed by the enclosing message's name, or by the
enum's type name if it is a top-level enum. Enum types have a String
method, and an Enum method to assist in message construction.
- Nested messages, groups and enums have type names prefixed with the name of
the surrounding message type.
- Extensions are given descriptor names that start with E_,
followed by an underscore-delimited list of the nested messages
that contain it (if any) followed by the CamelCased name of the
extension field itself. HasExtension, ClearExtension, GetExtension
and SetExtension are functions for manipulating extensions.
- Oneof field sets are given a single field in their message,
with distinguished wrapper types for each possible field value.
- Marshal and Unmarshal are functions to encode and decode the wire format.
When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers.
- Getters are only generated for message and oneof fields.
- Enum types do not get an Enum method.
The simplest way to describe this is to see an example.
Given file test.proto, containing
package example;
enum FOO { X = 17; }
message Test {
required string label = 1;
optional int32 type = 2 [default=77];
repeated int64 reps = 3;
optional group OptionalGroup = 4 {
required string RequiredField = 5;
}
oneof union {
int32 number = 6;
string name = 7;
}
}
The resulting file, test.pb.go, is:
package example
import proto "github.com/golang/protobuf/proto"
import math "math"
type FOO int32
const (
FOO_X FOO = 17
)
var FOO_name = map[int32]string{
17: "X",
}
var FOO_value = map[string]int32{
"X": 17,
}
func (x FOO) Enum() *FOO {
p := new(FOO)
*p = x
return p
}
func (x FOO) String() string {
return proto.EnumName(FOO_name, int32(x))
}
func (x *FOO) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
if err != nil {
return err
}
*x = FOO(value)
return nil
}
type Test struct {
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
// Types that are valid to be assigned to Union:
// *Test_Number
// *Test_Name
Union isTest_Union `protobuf_oneof:"union"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Test) Reset() { *m = Test{} }
func (m *Test) String() string { return proto.CompactTextString(m) }
func (*Test) ProtoMessage() {}
type isTest_Union interface {
isTest_Union()
}
type Test_Number struct {
Number int32 `protobuf:"varint,6,opt,name=number"`
}
type Test_Name struct {
Name string `protobuf:"bytes,7,opt,name=name"`
}
func (*Test_Number) isTest_Union() {}
func (*Test_Name) isTest_Union() {}
func (m *Test) GetUnion() isTest_Union {
if m != nil {
return m.Union
}
return nil
}
const Default_Test_Type int32 = 77
func (m *Test) GetLabel() string {
if m != nil && m.Label != nil {
return *m.Label
}
return ""
}
func (m *Test) GetType() int32 {
if m != nil && m.Type != nil {
return *m.Type
}
return Default_Test_Type
}
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
if m != nil {
return m.Optionalgroup
}
return nil
}
type Test_OptionalGroup struct {
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
}
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
func (m *Test_OptionalGroup) GetRequiredField() string {
if m != nil && m.RequiredField != nil {
return *m.RequiredField
}
return ""
}
func (m *Test) GetNumber() int32 {
if x, ok := m.GetUnion().(*Test_Number); ok {
return x.Number
}
return 0
}
func (m *Test) GetName() string {
if x, ok := m.GetUnion().(*Test_Name); ok {
return x.Name
}
return ""
}
func init() {
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
}
To create and play with a Test object:
package main
import (
"log"
"github.com/golang/protobuf/proto"
pb "./example.pb"
)
func main() {
test := &pb.Test{
Label: proto.String("hello"),
Type: proto.Int32(17),
Reps: []int64{1, 2, 3},
Optionalgroup: &pb.Test_OptionalGroup{
RequiredField: proto.String("good bye"),
},
Union: &pb.Test_Name{"fred"},
}
data, err := proto.Marshal(test)
if err != nil {
log.Fatal("marshaling error: ", err)
}
newTest := &pb.Test{}
err = proto.Unmarshal(data, newTest)
if err != nil {
log.Fatal("unmarshaling error: ", err)
}
// Now test and newTest contain the same data.
if test.GetLabel() != newTest.GetLabel() {
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
}
// Use a type switch to determine which oneof was set.
switch u := test.Union.(type) {
case *pb.Test_Number: // u.Number contains the number.
case *pb.Test_Name: // u.Name contains the string.
}
// etc.
}
*/
package proto
import (
"encoding/json"
"fmt"
"log"
"reflect"
"sort"
"strconv"
"sync"
)
// Message is implemented by generated protocol buffer messages.
type Message interface {
Reset()
String() string
ProtoMessage()
}
// Stats records allocation details about the protocol buffer encoders
// and decoders. Useful for tuning the library itself.
type Stats struct {
Emalloc uint64 // mallocs in encode
Dmalloc uint64 // mallocs in decode
Encode uint64 // number of encodes
Decode uint64 // number of decodes
Chit uint64 // number of cache hits
Cmiss uint64 // number of cache misses
Size uint64 // number of sizes
}
// Set to true to enable stats collection.
const collectStats = false
var stats Stats
// GetStats returns a copy of the global Stats structure.
func GetStats() Stats { return stats }
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
// the global functions Marshal and Unmarshal create a
// temporary Buffer and are fine for most applications.
type Buffer struct {
buf []byte // encode/decode byte stream
index int // read point
// pools of basic types to amortize allocation.
bools []bool
uint32s []uint32
uint64s []uint64
// extra pools, only used with pointer_reflect.go
int32s []int32
int64s []int64
float32s []float32
float64s []float64
}
// NewBuffer allocates a new Buffer and initializes its internal data to
// the contents of the argument slice.
func NewBuffer(e []byte) *Buffer {
return &Buffer{buf: e}
}
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
func (p *Buffer) Reset() {
p.buf = p.buf[0:0] // for reading/writing
p.index = 0 // for reading
}
// SetBuf replaces the internal buffer with the slice,
// ready for unmarshaling the contents of the slice.
func (p *Buffer) SetBuf(s []byte) {
p.buf = s
p.index = 0
}
// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf }
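// A minimal sketch of reusing one Buffer across many encodes to cut down on
// allocations; msgs and send are placeholders, and the Marshal method on
// Buffer is defined elsewhere in this package (encode.go):
//
//	var buf proto.Buffer
//	for _, m := range msgs {
//		buf.Reset()
//		if err := buf.Marshal(m); err != nil {
//			log.Fatal(err)
//		}
//		send(buf.Bytes())
//	}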
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/
// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool {
return &v
}
// Int32 is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it.
func Int32(v int32) *int32 {
return &v
}
// Int is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it, but unlike Int32
// its argument value is an int.
func Int(v int) *int32 {
p := new(int32)
*p = int32(v)
return p
}
// Int64 is a helper routine that allocates a new int64 value
// to store v and returns a pointer to it.
func Int64(v int64) *int64 {
return &v
}
// Float32 is a helper routine that allocates a new float32 value
// to store v and returns a pointer to it.
func Float32(v float32) *float32 {
return &v
}
// Float64 is a helper routine that allocates a new float64 value
// to store v and returns a pointer to it.
func Float64(v float64) *float64 {
return &v
}
// Uint32 is a helper routine that allocates a new uint32 value
// to store v and returns a pointer to it.
func Uint32(v uint32) *uint32 {
return &v
}
// Uint64 is a helper routine that allocates a new uint64 value
// to store v and returns a pointer to it.
func Uint64(v uint64) *uint64 {
return &v
}
// String is a helper routine that allocates a new string value
// to store v and returns a pointer to it.
func String(v string) *string {
return &v
}
// EnumName is a helper function to simplify printing protocol buffer enums
// by name. Given an enum map and a value, it returns a useful string.
func EnumName(m map[int32]string, v int32) string {
s, ok := m[v]
if ok {
return s
}
return strconv.Itoa(int(v))
}
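// For example, with the FOO_name map from the package documentation:
//
//	proto.EnumName(FOO_name, 17) // "X"
//	proto.EnumName(FOO_name, 42) // "42" (unknown values fall back to the number)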
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
// from their JSON-encoded representation. Given a map from the enum's symbolic
// names to its int values, and a byte buffer containing the JSON-encoded
// value, it returns an int32 that can be cast to the enum type by the caller.
//
// The function can deal with both JSON representations, numeric and symbolic.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
if data[0] == '"' {
// New style: enums are strings.
var repr string
if err := json.Unmarshal(data, &repr); err != nil {
return -1, err
}
val, ok := m[repr]
if !ok {
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
}
return val, nil
}
// Old style: enums are ints.
var val int32
if err := json.Unmarshal(data, &val); err != nil {
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
}
return val, nil
}
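// For example, with the FOO_value map from the package documentation, both
// JSON encodings decode to the same value:
//
//	proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // 17, nil
//	proto.UnmarshalJSONEnum(FOO_value, []byte(`17`), "FOO")  // 17, nil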
// DebugPrint dumps the encoded data in b in a debugging format with a header
// including the string s. Used in testing but made available for general debugging.
func (p *Buffer) DebugPrint(s string, b []byte) {
var u uint64
obuf := p.buf
index := p.index
p.buf = b
p.index = 0
depth := 0
fmt.Printf("\n--- %s ---\n", s)
out:
for {
for i := 0; i < depth; i++ {
fmt.Print(" ")
}
index := p.index
if index == len(p.buf) {
break
}
op, err := p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: fetching op err %v\n", index, err)
break out
}
tag := op >> 3
wire := op & 7
switch wire {
default:
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
index, tag, wire)
break out
case WireBytes:
var r []byte
r, err = p.DecodeRawBytes(false)
if err != nil {
break out
}
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
if len(r) <= 6 {
for i := 0; i < len(r); i++ {
fmt.Printf(" %.2x", r[i])
}
} else {
for i := 0; i < 3; i++ {
fmt.Printf(" %.2x", r[i])
}
fmt.Printf(" ..")
for i := len(r) - 3; i < len(r); i++ {
fmt.Printf(" %.2x", r[i])
}
}
fmt.Printf("\n")
case WireFixed32:
u, err = p.DecodeFixed32()
if err != nil {
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
case WireFixed64:
u, err = p.DecodeFixed64()
if err != nil {
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
case WireVarint:
u, err = p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
case WireStartGroup:
fmt.Printf("%3d: t=%3d start\n", index, tag)
depth++
case WireEndGroup:
depth--
fmt.Printf("%3d: t=%3d end\n", index, tag)
}
}
if depth != 0 {
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
}
fmt.Printf("\n")
p.buf = obuf
p.index = index
}
// SetDefaults sets unset protocol buffer fields to their default values.
// It only modifies fields that are both unset and have defined defaults.
// It recursively sets default values in any non-nil sub-messages.
func SetDefaults(pb Message) {
setDefaults(reflect.ValueOf(pb), true, false)
}
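// A minimal sketch, assuming the pb.Test message from the package
// documentation (whose type field is declared with [default=77]):
//
//	msg := &pb.Test{Label: proto.String("hello")}
//	proto.SetDefaults(msg)
//	// msg.Type now points at 77; msg.Label is left alone because it was set.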
// v is a pointer to a struct.
func setDefaults(v reflect.Value, recur, zeros bool) {
v = v.Elem()
defaultMu.RLock()
dm, ok := defaults[v.Type()]
defaultMu.RUnlock()
if !ok {
dm = buildDefaultMessage(v.Type())
defaultMu.Lock()
defaults[v.Type()] = dm
defaultMu.Unlock()
}
for _, sf := range dm.scalars {
f := v.Field(sf.index)
if !f.IsNil() {
// field already set
continue
}
dv := sf.value
if dv == nil && !zeros {
// no explicit default, and don't want to set zeros
continue
}
fptr := f.Addr().Interface() // **T
// TODO: Consider batching the allocations we do here.
switch sf.kind {
case reflect.Bool:
b := new(bool)
if dv != nil {
*b = dv.(bool)
}
*(fptr.(**bool)) = b
case reflect.Float32:
f := new(float32)
if dv != nil {
*f = dv.(float32)
}
*(fptr.(**float32)) = f
case reflect.Float64:
f := new(float64)
if dv != nil {
*f = dv.(float64)
}
*(fptr.(**float64)) = f
case reflect.Int32:
// might be an enum
if ft := f.Type(); ft != int32PtrType {
// enum
f.Set(reflect.New(ft.Elem()))
if dv != nil {
f.Elem().SetInt(int64(dv.(int32)))
}
} else {
// int32 field
i := new(int32)
if dv != nil {
*i = dv.(int32)
}
*(fptr.(**int32)) = i
}
case reflect.Int64:
i := new(int64)
if dv != nil {
*i = dv.(int64)
}
*(fptr.(**int64)) = i
case reflect.String:
s := new(string)
if dv != nil {
*s = dv.(string)
}
*(fptr.(**string)) = s
case reflect.Uint8:
// exceptional case: []byte
var b []byte
if dv != nil {
db := dv.([]byte)
b = make([]byte, len(db))
copy(b, db)
} else {
b = []byte{}
}
*(fptr.(*[]byte)) = b
case reflect.Uint32:
u := new(uint32)
if dv != nil {
*u = dv.(uint32)
}
*(fptr.(**uint32)) = u
case reflect.Uint64:
u := new(uint64)
if dv != nil {
*u = dv.(uint64)
}
*(fptr.(**uint64)) = u
default:
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
}
}
for _, ni := range dm.nested {
f := v.Field(ni)
// f is *T or []*T or map[T]*T
switch f.Kind() {
case reflect.Ptr:
if f.IsNil() {
continue
}
setDefaults(f, recur, zeros)
case reflect.Slice:
for i := 0; i < f.Len(); i++ {
e := f.Index(i)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
case reflect.Map:
for _, k := range f.MapKeys() {
e := f.MapIndex(k)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
}
}
}
var (
// defaults maps a protocol buffer struct type to a slice of the fields,
// with its scalar fields set to their proto-declared non-zero default values.
defaultMu sync.RWMutex
defaults = make(map[reflect.Type]defaultMessage)
int32PtrType = reflect.TypeOf((*int32)(nil))
)
// defaultMessage represents information about the default values of a message.
type defaultMessage struct {
scalars []scalarField
nested []int // struct field index of nested messages
}
type scalarField struct {
index int // struct field index
kind reflect.Kind // element type (the T in *T or []T)
value interface{} // the proto-declared default value, or nil
}
// t is a struct type.
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
sprop := GetProperties(t)
for _, prop := range sprop.Prop {
fi, ok := sprop.decoderTags.get(prop.Tag)
if !ok {
// XXX_unrecognized
continue
}
ft := t.Field(fi).Type
sf, nested, err := fieldDefault(ft, prop)
switch {
case err != nil:
log.Print(err)
case nested:
dm.nested = append(dm.nested, fi)
case sf != nil:
sf.index = fi
dm.scalars = append(dm.scalars, *sf)
}
}
return dm
}
// fieldDefault returns the scalarField for field type ft.
// sf will be nil if the field cannot have a default.
// nestedMessage will be true if this is a nested message.
// Note that sf.index is not set on return.
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
var canHaveDefault bool
switch ft.Kind() {
case reflect.Ptr:
if ft.Elem().Kind() == reflect.Struct {
nestedMessage = true
} else {
canHaveDefault = true // proto2 scalar field
}
case reflect.Slice:
switch ft.Elem().Kind() {
case reflect.Ptr:
nestedMessage = true // repeated message
case reflect.Uint8:
canHaveDefault = true // bytes field
}
case reflect.Map:
if ft.Elem().Kind() == reflect.Ptr {
nestedMessage = true // map with message values
}
}
if !canHaveDefault {
if nestedMessage {
return nil, true, nil
}
return nil, false, nil
}
// We now know that ft is a pointer or slice.
sf = &scalarField{kind: ft.Elem().Kind()}
// scalar fields without defaults
if !prop.HasDefault {
return sf, false, nil
}
// a scalar field: either *T or []byte
switch ft.Elem().Kind() {
case reflect.Bool:
x, err := strconv.ParseBool(prop.Default)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Float32:
x, err := strconv.ParseFloat(prop.Default, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
}
sf.value = float32(x)
case reflect.Float64:
x, err := strconv.ParseFloat(prop.Default, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Int32:
x, err := strconv.ParseInt(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
}
sf.value = int32(x)
case reflect.Int64:
x, err := strconv.ParseInt(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.String:
sf.value = prop.Default
case reflect.Uint8:
// []byte (not *uint8)
sf.value = []byte(prop.Default)
case reflect.Uint32:
x, err := strconv.ParseUint(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
}
sf.value = uint32(x)
case reflect.Uint64:
x, err := strconv.ParseUint(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
}
sf.value = x
default:
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
}
return sf, false, nil
}
// Map fields may have key types of non-float scalars, strings and enums.
// The easiest way to sort them in some deterministic order is to use fmt.
// If this turns out to be inefficient we can always consider other options,
// such as doing a Schwartzian transform.
func mapKeys(vs []reflect.Value) sort.Interface {
s := mapKeySorter{
vs: vs,
// default Less function: textual comparison
less: func(a, b reflect.Value) bool {
return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
},
}
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
// numeric keys are sorted numerically.
if len(vs) == 0 {
return s
}
switch vs[0].Kind() {
case reflect.Int32, reflect.Int64:
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
case reflect.Uint32, reflect.Uint64:
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
}
return s
}
type mapKeySorter struct {
vs []reflect.Value
less func(a, b reflect.Value) bool
}
func (s mapKeySorter) Len() int { return len(s.vs) }
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
func (s mapKeySorter) Less(i, j int) bool {
return s.less(s.vs[i], s.vs[j])
}
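// A minimal sketch of how the encoder is expected to use mapKeys to obtain a
// deterministic ordering (v is a reflect.Value of kind Map):
//
//	keys := v.MapKeys()
//	sort.Sort(mapKeys(keys))
//	for _, k := range keys {
//		// encode v.MapIndex(k) ...
//	}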
// isProto3Zero reports whether v is a zero proto3 value.
func isProto3Zero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return !v.Bool()
case reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint32, reflect.Uint64:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.String:
return v.String() == ""
}
return false
}
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion1 = true

View File

@ -1,280 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Support for message sets.
*/
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"reflect"
"sort"
)
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
// A message type ID is required for storing a protocol buffer in a message set.
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
// The first two types (_MessageSet_Item and messageSet)
// model what the protocol compiler produces for the following protocol message:
// message MessageSet {
// repeated group Item = 1 {
// required int32 type_id = 2;
// required string message = 3;
// };
// }
// That is the MessageSet wire format. We can't use a proto to generate these
// because that would introduce a circular dependency between it and this package.
type _MessageSet_Item struct {
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
Message []byte `protobuf:"bytes,3,req,name=message"`
}
type messageSet struct {
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
XXX_unrecognized []byte
// TODO: caching?
}
// Make sure messageSet is a Message.
var _ Message = (*messageSet)(nil)
// messageTypeIder is an interface satisfied by a protocol buffer type
// that may be stored in a MessageSet.
type messageTypeIder interface {
MessageTypeId() int32
}
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
mti, ok := pb.(messageTypeIder)
if !ok {
return nil
}
id := mti.MessageTypeId()
for _, item := range ms.Item {
if *item.TypeId == id {
return item
}
}
return nil
}
func (ms *messageSet) Has(pb Message) bool {
if ms.find(pb) != nil {
return true
}
return false
}
func (ms *messageSet) Unmarshal(pb Message) error {
if item := ms.find(pb); item != nil {
return Unmarshal(item.Message, pb)
}
if _, ok := pb.(messageTypeIder); !ok {
return errNoMessageTypeID
}
return nil // TODO: return error instead?
}
func (ms *messageSet) Marshal(pb Message) error {
msg, err := Marshal(pb)
if err != nil {
return err
}
if item := ms.find(pb); item != nil {
// reuse existing item
item.Message = msg
return nil
}
mti, ok := pb.(messageTypeIder)
if !ok {
return errNoMessageTypeID
}
mtid := mti.MessageTypeId()
ms.Item = append(ms.Item, &_MessageSet_Item{
TypeId: &mtid,
Message: msg,
})
return nil
}
func (ms *messageSet) Reset() { *ms = messageSet{} }
func (ms *messageSet) String() string { return CompactTextString(ms) }
func (*messageSet) ProtoMessage() {}
// Support for the message_set_wire_format message option.
func skipVarint(buf []byte) []byte {
i := 0
for ; buf[i]&0x80 != 0; i++ {
}
return buf[i+1:]
}
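// For example:
//
//	skipVarint([]byte{0x96, 0x01, 0x0a}) // returns []byte{0x0a}
//
// 0x96 has the continuation bit set and 0x01 does not, so the two-byte
// varint (150) is dropped and the rest of the buffer is returned.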
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
if err := encodeExtensionMap(m); err != nil {
return nil, err
}
// Sort extension IDs to provide a deterministic encoding.
// See also enc_map in encode.go.
ids := make([]int, 0, len(m))
for id := range m {
ids = append(ids, int(id))
}
sort.Ints(ids)
ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
for _, id := range ids {
e := m[int32(id)]
// Remove the wire type and field number varint, as well as the length varint.
msg := skipVarint(skipVarint(e.enc))
ms.Item = append(ms.Item, &_MessageSet_Item{
TypeId: Int32(int32(id)),
Message: msg,
})
}
return Marshal(ms)
}
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
ms := new(messageSet)
if err := Unmarshal(buf, ms); err != nil {
return err
}
for _, item := range ms.Item {
id := *item.TypeId
msg := item.Message
// Restore wire type and field number varint, plus length varint.
// Be careful to preserve duplicate items.
b := EncodeVarint(uint64(id)<<3 | WireBytes)
if ext, ok := m[id]; ok {
// Existing data; rip off the tag and length varint
// so we join the new data correctly.
// We can assume that ext.enc is set because we are unmarshaling.
o := ext.enc[len(b):] // skip wire type and field number
_, n := DecodeVarint(o) // calculate length of length varint
o = o[n:] // skip length varint
msg = append(o, msg...) // join old data and new data
}
b = append(b, EncodeVarint(uint64(len(msg)))...)
b = append(b, msg...)
m[id] = Extension{enc: b}
}
return nil
}
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
var b bytes.Buffer
b.WriteByte('{')
// Process the map in key order for deterministic output.
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
for i, id := range ids {
ext := m[id]
if i > 0 {
b.WriteByte(',')
}
msd, ok := messageSetMap[id]
if !ok {
// Unknown type; we can't render it, so skip it.
continue
}
fmt.Fprintf(&b, `"[%s]":`, msd.name)
x := ext.value
if x == nil {
x = reflect.New(msd.t.Elem()).Interface()
if err := Unmarshal(ext.enc, x.(Message)); err != nil {
return nil, err
}
}
d, err := json.Marshal(x)
if err != nil {
return nil, err
}
b.Write(d)
}
b.WriteByte('}')
return b.Bytes(), nil
}
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
// Common-case fast path.
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
return nil
}
// This is fairly tricky, and it's not clear that it is needed.
return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
}
// A global registry of types that can be used in a MessageSet.
var messageSetMap = make(map[int32]messageSetDesc)
type messageSetDesc struct {
t reflect.Type // pointer to struct
name string
}
// RegisterMessageSetType is called from the generated code.
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
messageSetMap[fieldNum] = messageSetDesc{
t: reflect.TypeOf(m),
name: name,
}
}

View File

@ -1,479 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build appengine
// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
// be used on App Engine.
package proto
import (
"math"
"reflect"
)
// A structPointer is a pointer to a struct.
type structPointer struct {
v reflect.Value
}
// toStructPointer returns a structPointer equivalent to the given reflect value.
// The reflect value must itself be a pointer to a struct.
func toStructPointer(v reflect.Value) structPointer {
return structPointer{v}
}
// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
return p.v.IsNil()
}
// Interface returns the struct pointer as an interface value.
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
return p.v.Interface()
}
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return f.Index
}
// invalidField is an invalid field identifier.
var invalidField = field(nil)
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }
// field returns the given field in the struct as a reflect value.
func structPointer_field(p structPointer, f field) reflect.Value {
// Special case: an extension map entry with a value of type T
// passes a *T to the struct-handling code with a zero field,
// expecting that it will be treated as equivalent to *struct{ X T },
// which has the same memory layout. We have to handle that case
// specially, because reflect will panic if we call FieldByIndex on a
// non-struct.
if f == nil {
return p.v.Elem()
}
return p.v.Elem().FieldByIndex(f)
}
// ifield returns the given field in the struct as an interface value.
func structPointer_ifield(p structPointer, f field) interface{} {
return structPointer_field(p, f).Addr().Interface()
}
// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
return structPointer_ifield(p, f).(*[]byte)
}
// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
return structPointer_ifield(p, f).(*[][]byte)
}
// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
return structPointer_ifield(p, f).(**bool)
}
// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
return structPointer_ifield(p, f).(*bool)
}
// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
return structPointer_ifield(p, f).(*[]bool)
}
// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
return structPointer_ifield(p, f).(**string)
}
// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
return structPointer_ifield(p, f).(*string)
}
// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
return structPointer_ifield(p, f).(*[]string)
}
// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return structPointer_ifield(p, f).(*map[int32]Extension)
}
// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return structPointer_field(p, f).Addr()
}
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
structPointer_field(p, f).Set(q.v)
}
// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
return structPointer{structPointer_field(p, f)}
}
// StructPointerSlice returns the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
return structPointerSlice{structPointer_field(p, f)}
}
// A structPointerSlice represents the address of a slice of pointers to structs
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
type structPointerSlice struct {
v reflect.Value
}
func (p structPointerSlice) Len() int { return p.v.Len() }
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
func (p structPointerSlice) Append(q structPointer) {
p.v.Set(reflect.Append(p.v, q.v))
}
var (
int32Type = reflect.TypeOf(int32(0))
uint32Type = reflect.TypeOf(uint32(0))
float32Type = reflect.TypeOf(float32(0))
int64Type = reflect.TypeOf(int64(0))
uint64Type = reflect.TypeOf(uint64(0))
float64Type = reflect.TypeOf(float64(0))
)
// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
type word32 struct {
v reflect.Value
}
// IsNil reports whether p is nil.
func word32_IsNil(p word32) bool {
return p.v.IsNil()
}
// Set sets p to point at a newly allocated word with bits set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
t := p.v.Type().Elem()
switch t {
case int32Type:
if len(o.int32s) == 0 {
o.int32s = make([]int32, uint32PoolSize)
}
o.int32s[0] = int32(x)
p.v.Set(reflect.ValueOf(&o.int32s[0]))
o.int32s = o.int32s[1:]
return
case uint32Type:
if len(o.uint32s) == 0 {
o.uint32s = make([]uint32, uint32PoolSize)
}
o.uint32s[0] = x
p.v.Set(reflect.ValueOf(&o.uint32s[0]))
o.uint32s = o.uint32s[1:]
return
case float32Type:
if len(o.float32s) == 0 {
o.float32s = make([]float32, uint32PoolSize)
}
o.float32s[0] = math.Float32frombits(x)
p.v.Set(reflect.ValueOf(&o.float32s[0]))
o.float32s = o.float32s[1:]
return
}
// must be enum
p.v.Set(reflect.New(t))
p.v.Elem().SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32_Get(p word32) uint32 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
return word32{structPointer_field(p, f)}
}
// A word32Val represents a field of type int32, uint32, float32, or enum.
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
type word32Val struct {
v reflect.Value
}
// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
switch p.v.Type() {
case int32Type:
p.v.SetInt(int64(x))
return
case uint32Type:
p.v.SetUint(uint64(x))
return
case float32Type:
p.v.SetFloat(float64(math.Float32frombits(x)))
return
}
// must be enum
p.v.SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32Val_Get(p word32Val) uint32 {
elem := p.v
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32Val returns a reference to an int32, uint32, float32, or enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
return word32Val{structPointer_field(p, f)}
}
// A word32Slice is a slice of 32-bit values.
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
type word32Slice struct {
v reflect.Value
}
func (p word32Slice) Append(x uint32) {
n, m := p.v.Len(), p.v.Cap()
if n < m {
p.v.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int32:
elem.SetInt(int64(int32(x)))
case reflect.Uint32:
elem.SetUint(uint64(x))
case reflect.Float32:
elem.SetFloat(float64(math.Float32frombits(x)))
}
}
func (p word32Slice) Len() int {
return p.v.Len()
}
func (p word32Slice) Index(i int) uint32 {
elem := p.v.Index(i)
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
return word32Slice{structPointer_field(p, f)}
}
// word64 is like word32 but for 64-bit values.
type word64 struct {
v reflect.Value
}
func word64_Set(p word64, o *Buffer, x uint64) {
t := p.v.Type().Elem()
switch t {
case int64Type:
if len(o.int64s) == 0 {
o.int64s = make([]int64, uint64PoolSize)
}
o.int64s[0] = int64(x)
p.v.Set(reflect.ValueOf(&o.int64s[0]))
o.int64s = o.int64s[1:]
return
case uint64Type:
if len(o.uint64s) == 0 {
o.uint64s = make([]uint64, uint64PoolSize)
}
o.uint64s[0] = x
p.v.Set(reflect.ValueOf(&o.uint64s[0]))
o.uint64s = o.uint64s[1:]
return
case float64Type:
if len(o.float64s) == 0 {
o.float64s = make([]float64, uint64PoolSize)
}
o.float64s[0] = math.Float64frombits(x)
p.v.Set(reflect.ValueOf(&o.float64s[0]))
o.float64s = o.float64s[1:]
return
}
panic("unreachable")
}
func word64_IsNil(p word64) bool {
return p.v.IsNil()
}
func word64_Get(p word64) uint64 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
}
panic("unreachable")
}
func structPointer_Word64(p structPointer, f field) word64 {
return word64{structPointer_field(p, f)}
}
// word64Val is like word32Val but for 64-bit values.
type word64Val struct {
v reflect.Value
}
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
switch p.v.Type() {
case int64Type:
p.v.SetInt(int64(x))
return
case uint64Type:
p.v.SetUint(x)
return
case float64Type:
p.v.SetFloat(math.Float64frombits(x))
return
}
panic("unreachable")
}
func word64Val_Get(p word64Val) uint64 {
elem := p.v
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
}
panic("unreachable")
}
func structPointer_Word64Val(p structPointer, f field) word64Val {
return word64Val{structPointer_field(p, f)}
}
type word64Slice struct {
v reflect.Value
}
func (p word64Slice) Append(x uint64) {
n, m := p.v.Len(), p.v.Cap()
if n < m {
p.v.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int64:
elem.SetInt(int64(x))
case reflect.Uint64:
elem.SetUint(uint64(x))
case reflect.Float64:
elem.SetFloat(float64(math.Float64frombits(x)))
}
}
func (p word64Slice) Len() int {
return p.v.Len()
}
func (p word64Slice) Index(i int) uint64 {
elem := p.v.Index(i)
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return uint64(elem.Uint())
case reflect.Float64:
return math.Float64bits(float64(elem.Float()))
}
panic("unreachable")
}
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
return word64Slice{structPointer_field(p, f)}
}

View File

@ -1,266 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build !appengine
// This file contains the implementation of the proto field accesses using package unsafe.
package proto
import (
"reflect"
"unsafe"
)
// NOTE: These type_Foo functions would more idiomatically be methods,
// but Go does not allow methods on pointer types, and we must preserve
// some pointer type for the garbage collector. We use these
// funcs with clunky names as our poor approximation to methods.
//
// An alternative would be
// type structPointer struct { p unsafe.Pointer }
// but that does not registerize as well.
// A structPointer is a pointer to a struct.
type structPointer unsafe.Pointer
// toStructPointer returns a structPointer equivalent to the given reflect value.
func toStructPointer(v reflect.Value) structPointer {
return structPointer(unsafe.Pointer(v.Pointer()))
}
// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
return p == nil
}
// Interface returns the struct pointer, assumed to have element type t,
// as an interface value.
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
}
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return field(f.Offset)
}
// invalidField is an invalid field identifier.
const invalidField = ^field(0)
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
return f != ^field(0)
}
// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
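// Illustrative sketch (not part of the original file): how the unsafe accessors
// combine a struct pointer with a field's byte offset to reach a field without
// reflection on the hot path. The exampleMsg type and its Name field are
// hypothetical, only here to show the pattern.
type exampleMsg struct {
	Name string
	Id   int32
}

func exampleFieldAccess(m *exampleMsg) string {
	p := toStructPointer(reflect.ValueOf(m)) // unsafe pointer to the struct
	sf, _ := reflect.TypeOf(*m).FieldByName("Name")
	f := toField(&sf)                     // byte offset of Name within exampleMsg
	return *structPointer_StringVal(p, f) // reads m.Name through the offset
}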
// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
}
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
}
// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StructPointerSlice returns the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
type structPointerSlice []structPointer
func (v *structPointerSlice) Len() int { return len(*v) }
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
// A word32 is the address of a "pointer to 32-bit value" field.
type word32 **uint32
// IsNil reports whether *p is nil.
func word32_IsNil(p word32) bool {
return *p == nil
}
// Set sets *p to point at a newly allocated word set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
if len(o.uint32s) == 0 {
o.uint32s = make([]uint32, uint32PoolSize)
}
o.uint32s[0] = x
*p = &o.uint32s[0]
o.uint32s = o.uint32s[1:]
}
// Get gets the value pointed at by *p.
func word32_Get(p word32) uint32 {
return **p
}
// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// A word32Val is the address of a 32-bit value field.
type word32Val *uint32
// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
*p = x
}
// Get gets the value pointed at by p.
func word32Val_Get(p word32Val) uint32 {
return *p
}
// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// A word32Slice is a slice of 32-bit values.
type word32Slice []uint32
func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
func (v *word32Slice) Len() int { return len(*v) }
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// word64 is like word32 but for 64-bit values.
type word64 **uint64
func word64_Set(p word64, o *Buffer, x uint64) {
if len(o.uint64s) == 0 {
o.uint64s = make([]uint64, uint64PoolSize)
}
o.uint64s[0] = x
*p = &o.uint64s[0]
o.uint64s = o.uint64s[1:]
}
func word64_IsNil(p word64) bool {
return *p == nil
}
func word64_Get(p word64) uint64 {
return **p
}
func structPointer_Word64(p structPointer, f field) word64 {
return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// word64Val is like word32Val but for 64-bit values.
type word64Val *uint64
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
*p = x
}
func word64Val_Get(p word64Val) uint64 {
return *p
}
func structPointer_Word64Val(p structPointer, f field) word64Val {
return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// word64Slice is like word32Slice but for 64-bit values.
type word64Slice []uint64
func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
func (v *word64Slice) Len() int { return len(*v) }
func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}


@ -1,846 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for encoding data into the wire format for protocol buffers.
*/
import (
"fmt"
"log"
"os"
"reflect"
"sort"
"strconv"
"strings"
"sync"
)
const debug bool = false
// Constants that identify the encoding of a value on the wire.
const (
WireVarint = 0
WireFixed64 = 1
WireBytes = 2
WireStartGroup = 3
WireEndGroup = 4
WireFixed32 = 5
)
const startSize = 10 // initial slice/string sizes
// Encoders are defined in encode.go
// An encoder outputs the full representation of a field, including its
// tag and encoder type.
type encoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueEncoder encodes a single integer in a particular encoding.
type valueEncoder func(o *Buffer, x uint64) error
// Sizers are defined in encode.go
// A sizer returns the encoded size of a field, including its tag and encoder
// type.
type sizer func(prop *Properties, base structPointer) int
// A valueSizer returns the encoded size of a single integer in a particular
// encoding.
type valueSizer func(x uint64) int
// Decoders are defined in decode.go
// A decoder creates a value from its wire representation.
// Unrecognized subelements are saved in unrec.
type decoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueDecoder decodes a single integer in a particular encoding.
type valueDecoder func(o *Buffer) (x uint64, err error)
// A oneofMarshaler does the marshaling for all oneof fields in a message.
type oneofMarshaler func(Message, *Buffer) error
// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
// A oneofSizer does the sizing for all oneof fields in a message.
type oneofSizer func(Message) int
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
type tagMap struct {
fastTags []int
slowTags map[int]int
}
// tagMapFastLimit is the upper bound on the tag number that will be stored in
// the tagMap slice rather than its map.
const tagMapFastLimit = 1024
func (p *tagMap) get(t int) (int, bool) {
if t > 0 && t < tagMapFastLimit {
if t >= len(p.fastTags) {
return 0, false
}
fi := p.fastTags[t]
return fi, fi >= 0
}
fi, ok := p.slowTags[t]
return fi, ok
}
func (p *tagMap) put(t int, fi int) {
if t > 0 && t < tagMapFastLimit {
for len(p.fastTags) < t+1 {
p.fastTags = append(p.fastTags, -1)
}
p.fastTags[t] = fi
return
}
if p.slowTags == nil {
p.slowTags = make(map[int]int)
}
p.slowTags[t] = fi
}
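// Illustrative sketch (not part of the original source): how the fast path and
// the map fallback divide the key space, given the get/put semantics above.
// Tags below tagMapFastLimit land in the fastTags slice; larger tags fall back
// to the slowTags map.
func exampleTagMap() (int, int) {
	var tm tagMap
	tm.put(3, 7)    // small tag: stored at fastTags[3]
	tm.put(4096, 9) // tag >= tagMapFastLimit: stored in slowTags
	a, _ := tm.get(3)
	b, _ := tm.get(4096)
	return a, b // 7, 9
}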
// StructProperties represents properties for all the fields of a struct.
// decoderTags and decoderOrigNames should only be used by the decoder.
type StructProperties struct {
Prop []*Properties // properties for each field
reqCount int // required count
decoderTags tagMap // map from proto tag to struct field number
decoderOrigNames map[string]int // map from original name to struct field number
order []int // list of struct field numbers in tag order
unrecField field // field id of the XXX_unrecognized []byte field
extendable bool // is this an extendable proto
oneofMarshaler oneofMarshaler
oneofUnmarshaler oneofUnmarshaler
oneofSizer oneofSizer
stype reflect.Type
// OneofTypes contains information about the oneof fields in this message.
// It is keyed by the original name of a field.
OneofTypes map[string]*OneofProperties
}
// OneofProperties represents information about a specific field in a oneof.
type OneofProperties struct {
Type reflect.Type // pointer to generated struct type for this oneof field
Field int // struct field number of the containing oneof in the message
Prop *Properties
}
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
// See encode.go, (*Buffer).enc_struct.
func (sp *StructProperties) Len() int { return len(sp.order) }
func (sp *StructProperties) Less(i, j int) bool {
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
}
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
// Properties represents the protocol-specific behavior of a single struct field.
type Properties struct {
Name string // name of the field, for error messages
OrigName string // original name before protocol compiler (always set)
JSONName string // name to use for JSON; determined by protoc
Wire string
WireType int
Tag int
Required bool
Optional bool
Repeated bool
Packed bool // relevant for repeated primitives only
Enum string // set for enum types only
proto3 bool // whether this is known to be a proto3 field; set for []byte only
oneof bool // whether this is a oneof field
Default string // default value
HasDefault bool // whether an explicit default was provided
def_uint64 uint64
enc encoder
valEnc valueEncoder // set for bool and numeric types only
field field
tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
tagbuf [8]byte
stype reflect.Type // set for struct types only
sprop *StructProperties // set for struct types only
isMarshaler bool
isUnmarshaler bool
mtype reflect.Type // set for map types only
mkeyprop *Properties // set for map types only
mvalprop *Properties // set for map types only
size sizer
valSize valueSizer // set for bool and numeric types only
dec decoder
valDec valueDecoder // set for bool and numeric types only
// If this is a packable field, this will be the decoder for the packed version of the field.
packedDec decoder
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
s += ","
s += strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
}
if p.Optional {
s += ",opt"
}
if p.Repeated {
s += ",rep"
}
if p.Packed {
s += ",packed"
}
s += ",name=" + p.OrigName
if p.JSONName != p.OrigName {
s += ",json=" + p.JSONName
}
if p.proto3 {
s += ",proto3"
}
if p.oneof {
s += ",oneof"
}
if len(p.Enum) > 0 {
s += ",enum=" + p.Enum
}
if p.HasDefault {
s += ",def=" + p.Default
}
return s
}
// Parse populates p by parsing a string in the protobuf struct field tag style.
func (p *Properties) Parse(s string) {
// "bytes,49,opt,name=foo,def=hello!"
fields := strings.Split(s, ",") // breaks def=, but handled below.
if len(fields) < 2 {
fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
return
}
p.Wire = fields[0]
switch p.Wire {
case "varint":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeVarint
p.valDec = (*Buffer).DecodeVarint
p.valSize = sizeVarint
case "fixed32":
p.WireType = WireFixed32
p.valEnc = (*Buffer).EncodeFixed32
p.valDec = (*Buffer).DecodeFixed32
p.valSize = sizeFixed32
case "fixed64":
p.WireType = WireFixed64
p.valEnc = (*Buffer).EncodeFixed64
p.valDec = (*Buffer).DecodeFixed64
p.valSize = sizeFixed64
case "zigzag32":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeZigzag32
p.valDec = (*Buffer).DecodeZigzag32
p.valSize = sizeZigzag32
case "zigzag64":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeZigzag64
p.valDec = (*Buffer).DecodeZigzag64
p.valSize = sizeZigzag64
case "bytes", "group":
p.WireType = WireBytes
// no numeric converter for non-numeric types
default:
fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
return
}
var err error
p.Tag, err = strconv.Atoi(fields[1])
if err != nil {
return
}
for i := 2; i < len(fields); i++ {
f := fields[i]
switch {
case f == "req":
p.Required = true
case f == "opt":
p.Optional = true
case f == "rep":
p.Repeated = true
case f == "packed":
p.Packed = true
case strings.HasPrefix(f, "name="):
p.OrigName = f[5:]
case strings.HasPrefix(f, "json="):
p.JSONName = f[5:]
case strings.HasPrefix(f, "enum="):
p.Enum = f[5:]
case f == "proto3":
p.proto3 = true
case f == "oneof":
p.oneof = true
case strings.HasPrefix(f, "def="):
p.HasDefault = true
p.Default = f[4:] // rest of string
if i+1 < len(fields) {
// Commas aren't escaped, and def is always last.
p.Default += "," + strings.Join(fields[i+1:], ",")
break
}
}
}
}
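// Illustrative sketch (not part of the original source): the fields Parse fills
// in for a typical generated struct tag, assuming the grammar handled above.
func exampleParseTag() Properties {
	var p Properties
	p.Parse("bytes,49,opt,name=foo,def=hello!")
	// p.Wire == "bytes", p.WireType == WireBytes, p.Tag == 49,
	// p.Optional == true, p.OrigName == "foo",
	// p.HasDefault == true, p.Default == "hello!"
	return p
}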
func logNoSliceEnc(t1, t2 reflect.Type) {
fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
}
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
// Initialize the fields for encoding and decoding.
func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
p.enc = nil
p.dec = nil
p.size = nil
switch t1 := typ; t1.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
// proto3 scalar types
case reflect.Bool:
p.enc = (*Buffer).enc_proto3_bool
p.dec = (*Buffer).dec_proto3_bool
p.size = size_proto3_bool
case reflect.Int32:
p.enc = (*Buffer).enc_proto3_int32
p.dec = (*Buffer).dec_proto3_int32
p.size = size_proto3_int32
case reflect.Uint32:
p.enc = (*Buffer).enc_proto3_uint32
p.dec = (*Buffer).dec_proto3_int32 // can reuse
p.size = size_proto3_uint32
case reflect.Int64, reflect.Uint64:
p.enc = (*Buffer).enc_proto3_int64
p.dec = (*Buffer).dec_proto3_int64
p.size = size_proto3_int64
case reflect.Float32:
p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
p.dec = (*Buffer).dec_proto3_int32
p.size = size_proto3_uint32
case reflect.Float64:
p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
p.dec = (*Buffer).dec_proto3_int64
p.size = size_proto3_int64
case reflect.String:
p.enc = (*Buffer).enc_proto3_string
p.dec = (*Buffer).dec_proto3_string
p.size = size_proto3_string
case reflect.Ptr:
switch t2 := t1.Elem(); t2.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
break
case reflect.Bool:
p.enc = (*Buffer).enc_bool
p.dec = (*Buffer).dec_bool
p.size = size_bool
case reflect.Int32:
p.enc = (*Buffer).enc_int32
p.dec = (*Buffer).dec_int32
p.size = size_int32
case reflect.Uint32:
p.enc = (*Buffer).enc_uint32
p.dec = (*Buffer).dec_int32 // can reuse
p.size = size_uint32
case reflect.Int64, reflect.Uint64:
p.enc = (*Buffer).enc_int64
p.dec = (*Buffer).dec_int64
p.size = size_int64
case reflect.Float32:
p.enc = (*Buffer).enc_uint32 // can just treat them as bits
p.dec = (*Buffer).dec_int32
p.size = size_uint32
case reflect.Float64:
p.enc = (*Buffer).enc_int64 // can just treat them as bits
p.dec = (*Buffer).dec_int64
p.size = size_int64
case reflect.String:
p.enc = (*Buffer).enc_string
p.dec = (*Buffer).dec_string
p.size = size_string
case reflect.Struct:
p.stype = t1.Elem()
p.isMarshaler = isMarshaler(t1)
p.isUnmarshaler = isUnmarshaler(t1)
if p.Wire == "bytes" {
p.enc = (*Buffer).enc_struct_message
p.dec = (*Buffer).dec_struct_message
p.size = size_struct_message
} else {
p.enc = (*Buffer).enc_struct_group
p.dec = (*Buffer).dec_struct_group
p.size = size_struct_group
}
}
case reflect.Slice:
switch t2 := t1.Elem(); t2.Kind() {
default:
logNoSliceEnc(t1, t2)
break
case reflect.Bool:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_bool
p.size = size_slice_packed_bool
} else {
p.enc = (*Buffer).enc_slice_bool
p.size = size_slice_bool
}
p.dec = (*Buffer).dec_slice_bool
p.packedDec = (*Buffer).dec_slice_packed_bool
case reflect.Int32:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int32
p.size = size_slice_packed_int32
} else {
p.enc = (*Buffer).enc_slice_int32
p.size = size_slice_int32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case reflect.Uint32:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_uint32
p.size = size_slice_packed_uint32
} else {
p.enc = (*Buffer).enc_slice_uint32
p.size = size_slice_uint32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case reflect.Int64, reflect.Uint64:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int64
p.size = size_slice_packed_int64
} else {
p.enc = (*Buffer).enc_slice_int64
p.size = size_slice_int64
}
p.dec = (*Buffer).dec_slice_int64
p.packedDec = (*Buffer).dec_slice_packed_int64
case reflect.Uint8:
p.enc = (*Buffer).enc_slice_byte
p.dec = (*Buffer).dec_slice_byte
p.size = size_slice_byte
// This is a []byte, which is either a bytes field,
// or the value of a map field. In the latter case,
// we always encode an empty []byte, so we should not
// use the proto3 enc/size funcs.
// f == nil iff this is the key/value of a map field.
if p.proto3 && f != nil {
p.enc = (*Buffer).enc_proto3_slice_byte
p.size = size_proto3_slice_byte
}
case reflect.Float32, reflect.Float64:
switch t2.Bits() {
case 32:
// can just treat them as bits
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_uint32
p.size = size_slice_packed_uint32
} else {
p.enc = (*Buffer).enc_slice_uint32
p.size = size_slice_uint32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case 64:
// can just treat them as bits
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int64
p.size = size_slice_packed_int64
} else {
p.enc = (*Buffer).enc_slice_int64
p.size = size_slice_int64
}
p.dec = (*Buffer).dec_slice_int64
p.packedDec = (*Buffer).dec_slice_packed_int64
default:
logNoSliceEnc(t1, t2)
break
}
case reflect.String:
p.enc = (*Buffer).enc_slice_string
p.dec = (*Buffer).dec_slice_string
p.size = size_slice_string
case reflect.Ptr:
switch t3 := t2.Elem(); t3.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
break
case reflect.Struct:
p.stype = t2.Elem()
p.isMarshaler = isMarshaler(t2)
p.isUnmarshaler = isUnmarshaler(t2)
if p.Wire == "bytes" {
p.enc = (*Buffer).enc_slice_struct_message
p.dec = (*Buffer).dec_slice_struct_message
p.size = size_slice_struct_message
} else {
p.enc = (*Buffer).enc_slice_struct_group
p.dec = (*Buffer).dec_slice_struct_group
p.size = size_slice_struct_group
}
}
case reflect.Slice:
switch t2.Elem().Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
break
case reflect.Uint8:
p.enc = (*Buffer).enc_slice_slice_byte
p.dec = (*Buffer).dec_slice_slice_byte
p.size = size_slice_slice_byte
}
}
case reflect.Map:
p.enc = (*Buffer).enc_new_map
p.dec = (*Buffer).dec_new_map
p.size = size_new_map
p.mtype = t1
p.mkeyprop = &Properties{}
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
p.mvalprop = &Properties{}
vtype := p.mtype.Elem()
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
// The value type is not a message (*T) or bytes ([]byte),
// so we need encoders for the pointer to this type.
vtype = reflect.PtrTo(vtype)
}
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
}
// precalculate tag code
wire := p.WireType
if p.Packed {
wire = WireBytes
}
x := uint32(p.Tag)<<3 | uint32(wire)
i := 0
for i = 0; x > 127; i++ {
p.tagbuf[i] = 0x80 | uint8(x&0x7F)
x >>= 7
}
p.tagbuf[i] = uint8(x)
p.tagcode = p.tagbuf[0 : i+1]
if p.stype != nil {
if lockGetProp {
p.sprop = GetProperties(p.stype)
} else {
p.sprop = getPropertiesLocked(p.stype)
}
}
}
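// Illustrative sketch (not part of the original source): the same varint
// precalculation as the tail of setEncAndDec, pulled out as a worked example.
// For Tag=3 with WireBytes (2), x = 3<<3|2 = 26, which fits in one byte, so the
// tag code is [0x1a]. For Tag=100 with WireVarint (0), x = 800, which needs two
// varint bytes: [0xa0, 0x06].
func exampleTagcode(tag, wire int) []byte {
	x := uint32(tag)<<3 | uint32(wire)
	var buf [8]byte
	i := 0
	for ; x > 127; i++ {
		buf[i] = 0x80 | uint8(x&0x7F)
		x >>= 7
	}
	buf[i] = uint8(x)
	return buf[:i+1]
}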
var (
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
)
// isMarshaler reports whether type t implements Marshaler.
func isMarshaler(t reflect.Type) bool {
// We're checking for (likely) pointer-receiver methods
// so if t is not a pointer, something is very wrong.
// The calls above only invoke isMarshaler on pointer types.
if t.Kind() != reflect.Ptr {
panic("proto: misuse of isMarshaler")
}
return t.Implements(marshalerType)
}
// isUnmarshaler reports whether type t implements Unmarshaler.
func isUnmarshaler(t reflect.Type) bool {
// We're checking for (likely) pointer-receiver methods
// so if t is not a pointer, something is very wrong.
// The calls above only invoke isUnmarshaler on pointer types.
if t.Kind() != reflect.Ptr {
panic("proto: misuse of isUnmarshaler")
}
return t.Implements(unmarshalerType)
}
// Init populates the properties from a protocol buffer struct tag.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
p.init(typ, name, tag, f, true)
}
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
// "bytes,49,opt,def=hello!"
p.Name = name
p.OrigName = name
if f != nil {
p.field = toField(f)
}
if tag == "" {
return
}
p.Parse(tag)
p.setEncAndDec(typ, f, lockGetProp)
}
var (
propertiesMu sync.RWMutex
propertiesMap = make(map[reflect.Type]*StructProperties)
)
// GetProperties returns the list of properties for the type represented by t.
// t must represent a generated struct type of a protocol message.
func GetProperties(t reflect.Type) *StructProperties {
if t.Kind() != reflect.Struct {
panic("proto: type must have kind struct")
}
// Most calls to GetProperties in a long-running program will be
// retrieving details for types we have seen before.
propertiesMu.RLock()
sprop, ok := propertiesMap[t]
propertiesMu.RUnlock()
if ok {
if collectStats {
stats.Chit++
}
return sprop
}
propertiesMu.Lock()
sprop = getPropertiesLocked(t)
propertiesMu.Unlock()
return sprop
}
// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok {
if collectStats {
stats.Chit++
}
return prop
}
if collectStats {
stats.Cmiss++
}
prop := new(StructProperties)
// in case of recursive protos, fill this in now.
propertiesMap[t] = prop
// build properties
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
prop.unrecField = invalidField
prop.Prop = make([]*Properties, t.NumField())
prop.order = make([]int, t.NumField())
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
p := new(Properties)
name := f.Name
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
if f.Name == "XXX_extensions" { // special case
p.enc = (*Buffer).enc_map
p.dec = nil // not needed
p.size = size_map
}
if f.Name == "XXX_unrecognized" { // special case
prop.unrecField = toField(&f)
}
oneof := f.Tag.Get("protobuf_oneof") != "" // special case
prop.Prop[i] = p
prop.order[i] = i
if debug {
print(i, " ", f.Name, " ", t.String(), " ")
if p.Tag > 0 {
print(p.String())
}
print("\n")
}
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
}
}
// Re-order prop.order.
sort.Sort(prop)
type oneofMessage interface {
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
}
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
var oots []interface{}
prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
prop.stype = t
// Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
for _, oot := range oots {
oop := &OneofProperties{
Type: reflect.ValueOf(oot).Type(), // *T
Prop: new(Properties),
}
sft := oop.Type.Elem().Field(0)
oop.Prop.Name = sft.Name
oop.Prop.Parse(sft.Tag.Get("protobuf"))
// There will be exactly one interface field that
// this new value is assignable to.
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Type.Kind() != reflect.Interface {
continue
}
if !oop.Type.AssignableTo(f.Type) {
continue
}
oop.Field = i
break
}
prop.OneofTypes[oop.Prop.OrigName] = oop
}
}
// build required counts
// build tags
reqCount := 0
prop.decoderOrigNames = make(map[string]int)
for i, p := range prop.Prop {
if strings.HasPrefix(p.Name, "XXX_") {
// Internal fields should not appear in tags/origNames maps.
// They are handled specially when encoding and decoding.
continue
}
if p.Required {
reqCount++
}
prop.decoderTags.put(p.Tag, i)
prop.decoderOrigNames[p.OrigName] = i
}
prop.reqCount = reqCount
return prop
}
// Return the Properties object for the x[0]'th field of the structure.
func propByIndex(t reflect.Type, x []int) *Properties {
if len(x) != 1 {
fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
return nil
}
prop := GetProperties(t)
return prop.Prop[x[0]]
}
// Get the address and type of a pointer to a struct from an interface.
func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
if pb == nil {
err = ErrNil
return
}
// get the reflect type of the pointer to the struct.
t = reflect.TypeOf(pb)
// get the address of the struct.
value := reflect.ValueOf(pb)
b = toStructPointer(value)
return
}
// A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum.
var enumValueMaps = make(map[string]map[string]int32)
// RegisterEnum is called from the generated code to install the enum descriptor
// maps into the global table to aid parsing text format protocol buffers.
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
if _, ok := enumValueMaps[typeName]; ok {
panic("proto: duplicate enum registered: " + typeName)
}
enumValueMaps[typeName] = valueMap
}
// EnumValueMap returns the mapping from names to integers of the
// enum type enumType, or a nil if not found.
func EnumValueMap(enumType string) map[string]int32 {
return enumValueMaps[enumType]
}
// A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message").
var (
protoTypes = make(map[string]reflect.Type)
revProtoTypes = make(map[reflect.Type]string)
)
// RegisterType is called from generated code and maps from the fully qualified
// proto name to the type (pointer to struct) of the protocol buffer.
func RegisterType(x Message, name string) {
if _, ok := protoTypes[name]; ok {
// TODO: Some day, make this a panic.
log.Printf("proto: duplicate proto type registered: %s", name)
return
}
t := reflect.TypeOf(x)
protoTypes[name] = t
revProtoTypes[t] = name
}
// MessageName returns the fully-qualified proto name for the given message type.
func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
// MessageType returns the message type (pointer to struct) for a named message.
func MessageType(name string) reflect.Type { return protoTypes[name] }
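// Illustrative sketch (not part of the original source): how generated code
// uses the registry above from its init function, and how callers map a
// fully-qualified name back to a Go type. The "example.Ping" name is
// hypothetical.
func exampleRegistry(m Message) reflect.Type {
	RegisterType(m, "example.Ping") // normally called once from generated init()
	_ = MessageName(m)              // "example.Ping"
	return MessageType("example.Ping")
}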


@ -1,122 +0,0 @@
// Code generated by protoc-gen-go.
// source: proto3_proto/proto3.proto
// DO NOT EDIT!
/*
Package proto3_proto is a generated protocol buffer package.
It is generated from these files:
proto3_proto/proto3.proto
It has these top-level messages:
Message
Nested
MessageWithMap
*/
package proto3_proto
import proto "github.com/golang/protobuf/proto"
import testdata "github.com/golang/protobuf/proto/testdata"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
type Message_Humour int32
const (
Message_UNKNOWN Message_Humour = 0
Message_PUNS Message_Humour = 1
Message_SLAPSTICK Message_Humour = 2
Message_BILL_BAILEY Message_Humour = 3
)
var Message_Humour_name = map[int32]string{
0: "UNKNOWN",
1: "PUNS",
2: "SLAPSTICK",
3: "BILL_BAILEY",
}
var Message_Humour_value = map[string]int32{
"UNKNOWN": 0,
"PUNS": 1,
"SLAPSTICK": 2,
"BILL_BAILEY": 3,
}
func (x Message_Humour) String() string {
return proto.EnumName(Message_Humour_name, int32(x))
}
type Message struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"`
HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm" json:"height_in_cm,omitempty"`
Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
ResultCount int64 `protobuf:"varint,7,opt,name=result_count" json:"result_count,omitempty"`
TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman" json:"true_scotsman,omitempty"`
Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"`
Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"`
Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"`
Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"`
Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *Message) Reset() { *m = Message{} }
func (m *Message) String() string { return proto.CompactTextString(m) }
func (*Message) ProtoMessage() {}
func (m *Message) GetNested() *Nested {
if m != nil {
return m.Nested
}
return nil
}
func (m *Message) GetTerrain() map[string]*Nested {
if m != nil {
return m.Terrain
}
return nil
}
func (m *Message) GetProto2Field() *testdata.SubDefaults {
if m != nil {
return m.Proto2Field
}
return nil
}
func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults {
if m != nil {
return m.Proto2Value
}
return nil
}
type Nested struct {
Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"`
}
func (m *Nested) Reset() { *m = Nested{} }
func (m *Nested) String() string { return proto.CompactTextString(m) }
func (*Nested) ProtoMessage() {}
type MessageWithMap struct {
ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *MessageWithMap) Reset() { *m = MessageWithMap{} }
func (m *MessageWithMap) String() string { return proto.CompactTextString(m) }
func (*MessageWithMap) ProtoMessage() {}
func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
if m != nil {
return m.ByteMapping
}
return nil
}
func init() {
proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value)
}
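// Illustrative sketch (not part of the generated file): constructing the
// generated Message and round-tripping it through the proto package, assuming
// the standard Marshal/Unmarshal API.
func exampleRoundTrip() (*Message, error) {
	in := &Message{
		Name:     "alice",
		Hilarity: Message_PUNS,
		Key:      []uint64{1, 2, 3},
		Nested:   &Nested{Bunny: "flopsy"},
	}
	data, err := proto.Marshal(in)
	if err != nil {
		return nil, err
	}
	out := new(Message)
	if err := proto.Unmarshal(data, out); err != nil {
		return nil, err
	}
	return out, nil
}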


@ -1,68 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2014 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
import "testdata/test.proto";
package proto3_proto;
message Message {
enum Humour {
UNKNOWN = 0;
PUNS = 1;
SLAPSTICK = 2;
BILL_BAILEY = 3;
}
string name = 1;
Humour hilarity = 2;
uint32 height_in_cm = 3;
bytes data = 4;
int64 result_count = 7;
bool true_scotsman = 8;
float score = 9;
repeated uint64 key = 5;
Nested nested = 6;
map<string, Nested> terrain = 10;
testdata.SubDefaults proto2_field = 11;
map<string, testdata.SubDefaults> proto2_value = 13;
}
message Nested {
string bunny = 1;
}
message MessageWithMap {
map<bool, bytes> byte_mapping = 1;
}


@ -1,751 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
// Functions for writing the text protocol buffer format.
import (
"bufio"
"bytes"
"encoding"
"errors"
"fmt"
"io"
"log"
"math"
"reflect"
"sort"
"strings"
)
var (
newline = []byte("\n")
spaces = []byte(" ")
gtNewline = []byte(">\n")
endBraceNewline = []byte("}\n")
backslashN = []byte{'\\', 'n'}
backslashR = []byte{'\\', 'r'}
backslashT = []byte{'\\', 't'}
backslashDQ = []byte{'\\', '"'}
backslashBS = []byte{'\\', '\\'}
posInf = []byte("inf")
negInf = []byte("-inf")
nan = []byte("nan")
)
type writer interface {
io.Writer
WriteByte(byte) error
}
// textWriter is an io.Writer that tracks its indentation level.
type textWriter struct {
ind int
complete bool // if the current position is a complete line
compact bool // whether to write out as a one-liner
w writer
}
func (w *textWriter) WriteString(s string) (n int, err error) {
if !strings.Contains(s, "\n") {
if !w.compact && w.complete {
w.writeIndent()
}
w.complete = false
return io.WriteString(w.w, s)
}
// WriteString is typically called without newlines, so this
// codepath and its copy are rare. We copy to avoid
// duplicating all of Write's logic here.
return w.Write([]byte(s))
}
func (w *textWriter) Write(p []byte) (n int, err error) {
newlines := bytes.Count(p, newline)
if newlines == 0 {
if !w.compact && w.complete {
w.writeIndent()
}
n, err = w.w.Write(p)
w.complete = false
return n, err
}
frags := bytes.SplitN(p, newline, newlines+1)
if w.compact {
for i, frag := range frags {
if i > 0 {
if err := w.w.WriteByte(' '); err != nil {
return n, err
}
n++
}
nn, err := w.w.Write(frag)
n += nn
if err != nil {
return n, err
}
}
return n, nil
}
for i, frag := range frags {
if w.complete {
w.writeIndent()
}
nn, err := w.w.Write(frag)
n += nn
if err != nil {
return n, err
}
if i+1 < len(frags) {
if err := w.w.WriteByte('\n'); err != nil {
return n, err
}
n++
}
}
w.complete = len(frags[len(frags)-1]) == 0
return n, nil
}
func (w *textWriter) WriteByte(c byte) error {
if w.compact && c == '\n' {
c = ' '
}
if !w.compact && w.complete {
w.writeIndent()
}
err := w.w.WriteByte(c)
w.complete = c == '\n'
return err
}
func (w *textWriter) indent() { w.ind++ }
func (w *textWriter) unindent() {
if w.ind == 0 {
log.Printf("proto: textWriter unindented too far")
return
}
w.ind--
}
func writeName(w *textWriter, props *Properties) error {
if _, err := w.WriteString(props.OrigName); err != nil {
return err
}
if props.Wire != "group" {
return w.WriteByte(':')
}
return nil
}
// raw is the interface satisfied by RawMessage.
type raw interface {
Bytes() []byte
}
func writeStruct(w *textWriter, sv reflect.Value) error {
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < sv.NumField(); i++ {
fv := sv.Field(i)
props := sprops.Prop[i]
name := st.Field(i).Name
if strings.HasPrefix(name, "XXX_") {
// There are two XXX_ fields:
// XXX_unrecognized []byte
// XXX_extensions map[int32]proto.Extension
// The first is handled here;
// the second is handled at the bottom of this function.
if name == "XXX_unrecognized" && !fv.IsNil() {
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
return err
}
}
continue
}
if fv.Kind() == reflect.Ptr && fv.IsNil() {
// Field not filled in. This could be an optional field or
// a required field that wasn't filled in. Either way, there
// isn't anything we can show for it.
continue
}
if fv.Kind() == reflect.Slice && fv.IsNil() {
// Repeated field that is empty, or a bytes field that is unused.
continue
}
if props.Repeated && fv.Kind() == reflect.Slice {
// Repeated field.
for j := 0; j < fv.Len(); j++ {
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
v := fv.Index(j)
if v.Kind() == reflect.Ptr && v.IsNil() {
// A nil message in a repeated field is not valid,
// but we can handle that more gracefully than panicking.
if _, err := w.Write([]byte("<nil>\n")); err != nil {
return err
}
continue
}
if err := writeAny(w, v, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
continue
}
if fv.Kind() == reflect.Map {
// Map fields are rendered as a repeated struct with key/value fields.
keys := fv.MapKeys()
sort.Sort(mapKeys(keys))
for _, key := range keys {
val := fv.MapIndex(key)
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
// open struct
if err := w.WriteByte('<'); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
// key
if _, err := w.WriteString("key:"); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := writeAny(w, key, props.mkeyprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
// nil values aren't legal, but we can avoid panicking because of them.
if val.Kind() != reflect.Ptr || !val.IsNil() {
// value
if _, err := w.WriteString("value:"); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := writeAny(w, val, props.mvalprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
// close struct
w.unindent()
if err := w.WriteByte('>'); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
continue
}
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
// empty bytes field
continue
}
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
// proto3 non-repeated scalar field; skip if zero value
if isProto3Zero(fv) {
continue
}
}
if fv.Kind() == reflect.Interface {
// Check if it is a oneof.
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
// fv is nil, or holds a pointer to generated struct.
// That generated struct has exactly one field,
// which has a protobuf struct tag.
if fv.IsNil() {
continue
}
inner := fv.Elem().Elem() // interface -> *T -> T
tag := inner.Type().Field(0).Tag.Get("protobuf")
props = new(Properties) // Overwrite the outer props var, but not its pointee.
props.Parse(tag)
// Write the value in the oneof, not the oneof itself.
fv = inner.Field(0)
// Special case to cope with malformed messages gracefully:
// If the value in the oneof is a nil pointer, don't panic
// in writeAny.
if fv.Kind() == reflect.Ptr && fv.IsNil() {
// Use errors.New so writeAny won't render quotes.
msg := errors.New("/* nil */")
fv = reflect.ValueOf(&msg).Elem()
}
}
}
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if b, ok := fv.Interface().(raw); ok {
if err := writeRaw(w, b.Bytes()); err != nil {
return err
}
continue
}
// Enums have a String method, so writeAny will work fine.
if err := writeAny(w, fv, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
// Extensions (the XXX_extensions field).
pv := sv.Addr()
if pv.Type().Implements(extendableProtoType) {
if err := writeExtensions(w, pv); err != nil {
return err
}
}
return nil
}
// writeRaw writes an uninterpreted raw message.
func writeRaw(w *textWriter, b []byte) error {
if err := w.WriteByte('<'); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
if err := writeUnknownStruct(w, b); err != nil {
return err
}
w.unindent()
if err := w.WriteByte('>'); err != nil {
return err
}
return nil
}
// writeAny writes an arbitrary field.
func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v)
// Floats have special cases.
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
x := v.Float()
var b []byte
switch {
case math.IsInf(x, 1):
b = posInf
case math.IsInf(x, -1):
b = negInf
case math.IsNaN(x):
b = nan
}
if b != nil {
_, err := w.Write(b)
return err
}
// Other values are handled below.
}
// We don't attempt to serialise every possible value type; only those
// that can occur in protocol buffers.
switch v.Kind() {
case reflect.Slice:
// Should only be a []byte; repeated fields are handled in writeStruct.
if err := writeString(w, string(v.Interface().([]byte))); err != nil {
return err
}
case reflect.String:
if err := writeString(w, v.String()); err != nil {
return err
}
case reflect.Struct:
// Required/optional group/message.
var bra, ket byte = '<', '>'
if props != nil && props.Wire == "group" {
bra, ket = '{', '}'
}
if err := w.WriteByte(bra); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
text, err := tm.MarshalText()
if err != nil {
return err
}
if _, err = w.Write(text); err != nil {
return err
}
} else if err := writeStruct(w, v); err != nil {
return err
}
w.unindent()
if err := w.WriteByte(ket); err != nil {
return err
}
default:
_, err := fmt.Fprint(w, v.Interface())
return err
}
return nil
}
// equivalent to C's isprint.
func isprint(c byte) bool {
return c >= 0x20 && c < 0x7f
}
// writeString writes a string in the protocol buffer text format.
// It is similar to strconv.Quote except we don't use Go escape sequences,
// we treat the string as a byte sequence, and we use octal escapes.
// These differences are to maintain interoperability with the other
// languages' implementations of the text format.
func writeString(w *textWriter, s string) error {
// use WriteByte here to get any needed indent
if err := w.WriteByte('"'); err != nil {
return err
}
// Loop over the bytes, not the runes.
for i := 0; i < len(s); i++ {
var err error
// Divergence from C++: we don't escape apostrophes.
// There's no need to escape them, and the C++ parser
// copes with a naked apostrophe.
switch c := s[i]; c {
case '\n':
_, err = w.w.Write(backslashN)
case '\r':
_, err = w.w.Write(backslashR)
case '\t':
_, err = w.w.Write(backslashT)
case '"':
_, err = w.w.Write(backslashDQ)
case '\\':
_, err = w.w.Write(backslashBS)
default:
if isprint(c) {
err = w.w.WriteByte(c)
} else {
_, err = fmt.Fprintf(w.w, "\\%03o", c)
}
}
if err != nil {
return err
}
}
return w.WriteByte('"')
}
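// Illustrative sketch (not part of the original source): what writeString
// produces for a string containing a newline and a non-printable byte, given
// the escaping rules above. The newline becomes a literal backslash-n and the
// 0x01 byte becomes the three-digit octal escape \001, all wrapped in quotes.
func exampleWriteString() (string, error) {
	var buf bytes.Buffer
	w := &textWriter{w: &buf, compact: true}
	if err := writeString(w, "a\n\x01"); err != nil {
		return "", err
	}
	return buf.String(), nil // "a\n\001" (quotes included, escapes literal)
}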
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
if !w.compact {
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
return err
}
}
b := NewBuffer(data)
for b.index < len(b.buf) {
x, err := b.DecodeVarint()
if err != nil {
_, err := fmt.Fprintf(w, "/* %v */\n", err)
return err
}
wire, tag := x&7, x>>3
if wire == WireEndGroup {
w.unindent()
if _, err := w.Write(endBraceNewline); err != nil {
return err
}
continue
}
if _, err := fmt.Fprint(w, tag); err != nil {
return err
}
if wire != WireStartGroup {
if err := w.WriteByte(':'); err != nil {
return err
}
}
if !w.compact || wire == WireStartGroup {
if err := w.WriteByte(' '); err != nil {
return err
}
}
switch wire {
case WireBytes:
buf, e := b.DecodeRawBytes(false)
if e == nil {
_, err = fmt.Fprintf(w, "%q", buf)
} else {
_, err = fmt.Fprintf(w, "/* %v */", e)
}
case WireFixed32:
x, err = b.DecodeFixed32()
err = writeUnknownInt(w, x, err)
case WireFixed64:
x, err = b.DecodeFixed64()
err = writeUnknownInt(w, x, err)
case WireStartGroup:
err = w.WriteByte('{')
w.indent()
case WireVarint:
x, err = b.DecodeVarint()
err = writeUnknownInt(w, x, err)
default:
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
}
if err != nil {
return err
}
if err = w.WriteByte('\n'); err != nil {
return err
}
}
return nil
}
func writeUnknownInt(w *textWriter, x uint64, err error) error {
if err == nil {
_, err = fmt.Fprint(w, x)
} else {
_, err = fmt.Fprintf(w, "/* %v */", err)
}
return err
}
type int32Slice []int32
func (s int32Slice) Len() int { return len(s) }
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// writeExtensions writes all the extensions in pv.
// pv is assumed to be a pointer to a protocol message struct that is extendable.
func writeExtensions(w *textWriter, pv reflect.Value) error {
emap := extensionMaps[pv.Type().Elem()]
ep := pv.Interface().(extendableProto)
// Order the extensions by ID.
// This isn't strictly necessary, but it will give us
// canonical output, which will also make testing easier.
m := ep.ExtensionMap()
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids))
for _, extNum := range ids {
ext := m[extNum]
var desc *ExtensionDesc
if emap != nil {
desc = emap[extNum]
}
if desc == nil {
// Unknown extension.
if err := writeUnknownStruct(w, ext.enc); err != nil {
return err
}
continue
}
pb, err := GetExtension(ep, desc)
if err != nil {
return fmt.Errorf("failed getting extension: %v", err)
}
// Repeated extensions will appear as a slice.
if !desc.repeated() {
if err := writeExtension(w, desc.Name, pb); err != nil {
return err
}
} else {
v := reflect.ValueOf(pb)
for i := 0; i < v.Len(); i++ {
if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
return err
}
}
}
}
return nil
}
func writeExtension(w *textWriter, name string, pb interface{}) error {
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
return nil
}
func (w *textWriter) writeIndent() {
if !w.complete {
return
}
remain := w.ind * 2
for remain > 0 {
n := remain
if n > len(spaces) {
n = len(spaces)
}
w.w.Write(spaces[:n])
remain -= n
}
w.complete = false
}
func marshalText(w io.Writer, pb Message, compact bool) error {
val := reflect.ValueOf(pb)
if pb == nil || val.IsNil() {
w.Write([]byte("<nil>"))
return nil
}
var bw *bufio.Writer
ww, ok := w.(writer)
if !ok {
bw = bufio.NewWriter(w)
ww = bw
}
aw := &textWriter{
w: ww,
complete: true,
compact: compact,
}
if tm, ok := pb.(encoding.TextMarshaler); ok {
text, err := tm.MarshalText()
if err != nil {
return err
}
if _, err = aw.Write(text); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// Dereference the received pointer so we don't have outer < and >.
v := reflect.Indirect(val)
if err := writeStruct(aw, v); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// MarshalText writes a given protocol buffer in text format.
// The only errors returned are from w.
func MarshalText(w io.Writer, pb Message) error {
return marshalText(w, pb, false)
}
// MarshalTextString is the same as MarshalText, but returns the string directly.
func MarshalTextString(pb Message) string {
var buf bytes.Buffer
marshalText(&buf, pb, false)
return buf.String()
}
// CompactText writes a given protocol buffer in compact text format (one line).
func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }
// CompactTextString is the same as CompactText, but returns the string directly.
func CompactTextString(pb Message) string {
var buf bytes.Buffer
marshalText(&buf, pb, true)
return buf.String()
}


@ -1,806 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
// Functions for parsing the Text protocol buffer format.
// TODO: message sets.
import (
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"unicode/utf8"
)
type ParseError struct {
Message string
Line int // 1-based line number
Offset int // 0-based byte offset from start of input
}
func (p *ParseError) Error() string {
if p.Line == 1 {
// show offset only for first line
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
}
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
}
type token struct {
value string
err *ParseError
line int // line number
offset int // byte number from start of input, not start of line
unquoted string // the unquoted version of value, if it was a quoted string
}
func (t *token) String() string {
if t.err == nil {
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
}
return fmt.Sprintf("parse error: %v", t.err)
}
type textParser struct {
s string // remaining input
done bool // whether the parsing is finished (success or error)
backed bool // whether back() was called
offset, line int
cur token
}
func newTextParser(s string) *textParser {
p := new(textParser)
p.s = s
p.line = 1
p.cur.line = 1
return p
}
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
p.cur.err = pe
p.done = true
return pe
}
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
func isIdentOrNumberChar(c byte) bool {
switch {
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
return true
case '0' <= c && c <= '9':
return true
}
switch c {
case '-', '+', '.', '_':
return true
}
return false
}
func isWhitespace(c byte) bool {
switch c {
case ' ', '\t', '\n', '\r':
return true
}
return false
}
func isQuote(c byte) bool {
switch c {
case '"', '\'':
return true
}
return false
}
func (p *textParser) skipWhitespace() {
i := 0
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
if p.s[i] == '#' {
// comment; skip to end of line or input
for i < len(p.s) && p.s[i] != '\n' {
i++
}
if i == len(p.s) {
break
}
}
if p.s[i] == '\n' {
p.line++
}
i++
}
p.offset += i
p.s = p.s[i:len(p.s)]
if len(p.s) == 0 {
p.done = true
}
}
func (p *textParser) advance() {
// Skip whitespace
p.skipWhitespace()
if p.done {
return
}
// Start of non-whitespace
p.cur.err = nil
p.cur.offset, p.cur.line = p.offset, p.line
p.cur.unquoted = ""
switch p.s[0] {
case '<', '>', '{', '}', ':', '[', ']', ';', ',':
// Single symbol
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
case '"', '\'':
// Quoted string
i := 1
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
if p.s[i] == '\\' && i+1 < len(p.s) {
// skip escaped char
i++
}
i++
}
if i >= len(p.s) || p.s[i] != p.s[0] {
p.errorf("unmatched quote")
return
}
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
if err != nil {
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
return
}
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
p.cur.unquoted = unq
default:
i := 0
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
i++
}
if i == 0 {
p.errorf("unexpected byte %#x", p.s[0])
return
}
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
}
p.offset += len(p.cur.value)
}
var (
errBadUTF8 = errors.New("proto: bad UTF-8")
errBadHex = errors.New("proto: bad hexadecimal")
)
func unquoteC(s string, quote rune) (string, error) {
// This is based on C++'s tokenizer.cc.
// Despite its name, this is *not* parsing C syntax.
// For instance, "\0" is an invalid quoted string.
// Avoid allocation in trivial cases.
simple := true
for _, r := range s {
if r == '\\' || r == quote {
simple = false
break
}
}
if simple {
return s, nil
}
buf := make([]byte, 0, 3*len(s)/2)
for len(s) > 0 {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", errBadUTF8
}
s = s[n:]
if r != '\\' {
if r < utf8.RuneSelf {
buf = append(buf, byte(r))
} else {
buf = append(buf, string(r)...)
}
continue
}
ch, tail, err := unescape(s)
if err != nil {
return "", err
}
buf = append(buf, ch...)
s = tail
}
return string(buf), nil
}
func unescape(s string) (ch string, tail string, err error) {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", "", errBadUTF8
}
s = s[n:]
switch r {
case 'a':
return "\a", s, nil
case 'b':
return "\b", s, nil
case 'f':
return "\f", s, nil
case 'n':
return "\n", s, nil
case 'r':
return "\r", s, nil
case 't':
return "\t", s, nil
case 'v':
return "\v", s, nil
case '?':
return "?", s, nil // trigraph workaround
case '\'', '"', '\\':
return string(r), s, nil
case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
if len(s) < 2 {
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
}
base := 8
ss := s[:2]
s = s[2:]
if r == 'x' || r == 'X' {
base = 16
} else {
ss = string(r) + ss
}
i, err := strconv.ParseUint(ss, base, 8)
if err != nil {
return "", "", err
}
return string([]byte{byte(i)}), s, nil
case 'u', 'U':
n := 4
if r == 'U' {
n = 8
}
if len(s) < n {
return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
}
bs := make([]byte, n/2)
for i := 0; i < n; i += 2 {
a, ok1 := unhex(s[i])
b, ok2 := unhex(s[i+1])
if !ok1 || !ok2 {
return "", "", errBadHex
}
bs[i/2] = a<<4 | b
}
s = s[n:]
return string(bs), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
// Adapted from src/pkg/strconv/quote.go.
func unhex(b byte) (v byte, ok bool) {
switch {
case '0' <= b && b <= '9':
return b - '0', true
case 'a' <= b && b <= 'f':
return b - 'a' + 10, true
case 'A' <= b && b <= 'F':
return b - 'A' + 10, true
}
return 0, false
}
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }
// Advances the parser and returns the new current token.
func (p *textParser) next() *token {
if p.backed || p.done {
p.backed = false
return &p.cur
}
p.advance()
if p.done {
p.cur.value = ""
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
// Look for multiple quoted strings separated by whitespace,
// and concatenate them.
cat := p.cur
for {
p.skipWhitespace()
if p.done || !isQuote(p.s[0]) {
break
}
p.advance()
if p.cur.err != nil {
return &p.cur
}
cat.value += " " + p.cur.value
cat.unquoted += p.cur.unquoted
}
p.done = false // parser may have seen EOF, but we want to return cat
p.cur = cat
}
return &p.cur
}
func (p *textParser) consumeToken(s string) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != s {
p.back()
return p.errorf("expected %q, found %q", s, tok.value)
}
return nil
}
// Return a RequiredNotSetError indicating which required field was not set.
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < st.NumField(); i++ {
if !isNil(sv.Field(i)) {
continue
}
props := sprops.Prop[i]
if props.Required {
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
}
}
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
}
// Returns the index in the struct for the named field, as well as the parsed tag properties.
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
i, ok := sprops.decoderOrigNames[name]
if ok {
return i, sprops.Prop[i], true
}
return -1, nil, false
}
// Consume a ':' from the input stream (if the next token is a colon),
// returning an error if a colon is needed but not present.
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ":" {
// Colon is optional when the field is a group or message.
needColon := true
switch props.Wire {
case "group":
needColon = false
case "bytes":
// A "bytes" field is either a message, a string, or a repeated field;
// those three become *T, *string and []T respectively, so we can check for
// this field being a pointer to a non-string.
if typ.Kind() == reflect.Ptr {
// *T or *string
if typ.Elem().Kind() == reflect.String {
break
}
} else if typ.Kind() == reflect.Slice {
// []T or []*T
if typ.Elem().Kind() != reflect.Ptr {
break
}
} else if typ.Kind() == reflect.String {
// The proto3 exception is for a string field,
// which requires a colon.
break
}
needColon = false
}
if needColon {
return p.errorf("expected ':', found %q", tok.value)
}
p.back()
}
return nil
}
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
st := sv.Type()
sprops := GetProperties(st)
reqCount := sprops.reqCount
var reqFieldErr error
fieldSet := make(map[string]bool)
// A struct is a sequence of "name: value", terminated by one of
// '>' or '}', or the end of the input. A name may also be
// "[extension]".
for {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == terminator {
break
}
if tok.value == "[" {
// Looks like an extension.
//
// TODO: Check whether we need to handle
// namespace rooted names (e.g. ".something.Foo").
tok = p.next()
if tok.err != nil {
return tok.err
}
var desc *ExtensionDesc
// This could be faster, but it's functional.
// TODO: Do something smarter than a linear scan.
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
if d.Name == tok.value {
desc = d
break
}
}
if desc == nil {
return p.errorf("unrecognized extension %q", tok.value)
}
// Check the extension terminator.
tok = p.next()
if tok.err != nil {
return tok.err
}
if tok.value != "]" {
return p.errorf("unrecognized extension terminator %q", tok.value)
}
props := &Properties{}
props.Parse(desc.Tag)
typ := reflect.TypeOf(desc.ExtensionType)
if err := p.checkForColon(props, typ); err != nil {
return err
}
rep := desc.repeated()
// Read the extension structure, and set it in
// the value we're constructing.
var ext reflect.Value
if !rep {
ext = reflect.New(typ).Elem()
} else {
ext = reflect.New(typ.Elem()).Elem()
}
if err := p.readAny(ext, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
}
ep := sv.Addr().Interface().(extendableProto)
if !rep {
SetExtension(ep, desc, ext.Interface())
} else {
old, err := GetExtension(ep, desc)
var sl reflect.Value
if err == nil {
sl = reflect.ValueOf(old) // existing slice
} else {
sl = reflect.MakeSlice(typ, 0, 1)
}
sl = reflect.Append(sl, ext)
SetExtension(ep, desc, sl.Interface())
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
continue
}
// This is a normal, non-extension field.
name := tok.value
var dst reflect.Value
fi, props, ok := structFieldByName(sprops, name)
if ok {
dst = sv.Field(fi)
} else if oop, ok := sprops.OneofTypes[name]; ok {
// It is a oneof.
props = oop.Prop
nv := reflect.New(oop.Type.Elem())
dst = nv.Elem().Field(0)
sv.Field(oop.Field).Set(nv)
}
if !dst.IsValid() {
return p.errorf("unknown field name %q in %v", name, st)
}
if dst.Kind() == reflect.Map {
// Consume any colon.
if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
// Construct the map if it doesn't already exist.
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
}
key := reflect.New(dst.Type().Key()).Elem()
val := reflect.New(dst.Type().Elem()).Elem()
// The map entry should be this sequence of tokens:
// < key : KEY value : VALUE >
// Technically the "key" and "value" could come in any order,
// but in practice they won't.
tok := p.next()
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
if err := p.consumeToken("key"); err != nil {
return err
}
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(key, props.mkeyprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
if err := p.consumeToken("value"); err != nil {
return err
}
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
return err
}
if err := p.readAny(val, props.mvalprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
if err := p.consumeToken(terminator); err != nil {
return err
}
dst.SetMapIndex(key, val)
continue
}
// Check that it's not already set if it's not a repeated field.
if !props.Repeated && fieldSet[name] {
return p.errorf("non-repeated field %q was repeated", name)
}
if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
// Parse into the field.
fieldSet[name] = true
if err := p.readAny(dst, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
} else if props.Required {
reqCount--
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
}
if reqCount > 0 {
return p.missingRequiredFieldError(sv)
}
return reqFieldErr
}
// consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in readStruct to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ";" && tok.value != "," {
p.back()
}
return nil
}
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == "" {
return p.errorf("unexpected EOF")
}
switch fv := v; fv.Kind() {
case reflect.Slice:
at := v.Type()
if at.Elem().Kind() == reflect.Uint8 {
// Special case for []byte
if tok.value[0] != '"' && tok.value[0] != '\'' {
// Deliberately written out here, as the error after
// this switch statement would write "invalid []byte: ...",
// which is not as user-friendly.
return p.errorf("invalid string: %v", tok.value)
}
bytes := []byte(tok.unquoted)
fv.Set(reflect.ValueOf(bytes))
return nil
}
// Repeated field.
if tok.value == "[" {
// Repeated field with list notation, like [1,2,3].
for {
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
err := p.readAny(fv.Index(fv.Len()-1), props)
if err != nil {
return err
}
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == "]" {
break
}
if tok.value != "," {
return p.errorf("Expected ']' or ',' found %q", tok.value)
}
}
return nil
}
// One value of the repeated field.
p.back()
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
return p.readAny(fv.Index(fv.Len()-1), props)
case reflect.Bool:
// Either "true", "false", 1 or 0.
switch tok.value {
case "true", "1":
fv.SetBool(true)
return nil
case "false", "0":
fv.SetBool(false)
return nil
}
case reflect.Float32, reflect.Float64:
v := tok.value
// Ignore 'f' for compatibility with output generated by C++, but don't
// remove 'f' when the value is "-inf" or "inf".
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
v = v[:len(v)-1]
}
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
fv.SetFloat(f)
return nil
}
case reflect.Int32:
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
fv.SetInt(x)
return nil
}
if len(props.Enum) == 0 {
break
}
m, ok := enumValueMaps[props.Enum]
if !ok {
break
}
x, ok := m[tok.value]
if !ok {
break
}
fv.SetInt(int64(x))
return nil
case reflect.Int64:
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
fv.SetInt(x)
return nil
}
case reflect.Ptr:
// A basic field (indirected through pointer), or a repeated message/group
p.back()
fv.Set(reflect.New(fv.Type().Elem()))
return p.readAny(fv.Elem(), props)
case reflect.String:
if tok.value[0] == '"' || tok.value[0] == '\'' {
fv.SetString(tok.unquoted)
return nil
}
case reflect.Struct:
var terminator string
switch tok.value {
case "{":
terminator = "}"
case "<":
terminator = ">"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
return p.readStruct(fv, terminator)
case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
fv.SetUint(uint64(x))
return nil
}
case reflect.Uint64:
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
fv.SetUint(x)
return nil
}
}
return p.errorf("invalid %v: %v", v.Type(), tok.value)
}
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
// before starting to unmarshal, so any existing data in pb is always removed.
// If a required field is not set and no other error occurs,
// UnmarshalText returns *RequiredNotSetError.
func UnmarshalText(s string, pb Message) error {
if um, ok := pb.(encoding.TextUnmarshaler); ok {
err := um.UnmarshalText([]byte(s))
return err
}
pb.Reset()
v := reflect.ValueOf(pb)
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
return pe
}
return nil
}

View File

@ -41,8 +41,15 @@ func (n *Node) isLeaf() bool {
}
func (n *Node) addEdge(e edge) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= e.label
})
n.edges = append(n.edges, e)
n.edges.Sort()
if idx != num {
copy(n.edges[idx+1:], n.edges[idx:num])
n.edges[idx] = e
}
}
func (n *Node) replaceEdge(e edge) {

View File

@ -291,10 +291,6 @@ func (c *CompoundIndex) FromArgs(args ...interface{}) ([]byte, error) {
if len(args) != len(c.Indexes) {
return nil, fmt.Errorf("less arguments than index fields")
}
return c.PrefixFromArgs(args...)
}
func (c *CompoundIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
var out []byte
for i, arg := range args {
val, err := c.Indexes[i].FromArgs(arg)
@ -305,3 +301,30 @@ func (c *CompoundIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
}
return out, nil
}
func (c *CompoundIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
if len(args) > len(c.Indexes) {
return nil, fmt.Errorf("more arguments than index fields")
}
var out []byte
for i, arg := range args {
if i+1 < len(args) {
val, err := c.Indexes[i].FromArgs(arg)
if err != nil {
return nil, fmt.Errorf("sub-index %d error: %v", i, err)
}
out = append(out, val...)
} else {
prefixIndexer, ok := c.Indexes[i].(PrefixIndexer)
if !ok {
return nil, fmt.Errorf("sub-index %d does not support prefix scanning", i)
}
val, err := prefixIndexer.PrefixFromArgs(arg)
if err != nil {
return nil, fmt.Errorf("sub-index %d error: %v", i, err)
}
out = append(out, val...)
}
}
return out, nil
}

View File

@ -2,6 +2,8 @@ package memdb
import (
"sync"
"sync/atomic"
"unsafe"
"github.com/hashicorp/go-immutable-radix"
)
@ -12,7 +14,7 @@ import (
// transactions and MVCC.
type MemDB struct {
schema *DBSchema
root *iradix.Tree
root unsafe.Pointer // *iradix.Tree underneath
// There can only be a single writer at once
writer sync.Mutex
@ -28,7 +30,7 @@ func NewMemDB(schema *DBSchema) (*MemDB, error) {
// Create the MemDB
db := &MemDB{
schema: schema,
root: iradix.New(),
root: unsafe.Pointer(iradix.New()),
}
if err := db.initialize(); err != nil {
return nil, err
@ -36,6 +38,12 @@ func NewMemDB(schema *DBSchema) (*MemDB, error) {
return db, nil
}
// getRoot is used to do an atomic load of the root pointer
func (db *MemDB) getRoot() *iradix.Tree {
root := (*iradix.Tree)(atomic.LoadPointer(&db.root))
return root
}
// Txn is used to start a new transaction, in either read or write mode.
// There can only be a single concurrent writer, but any number of readers.
func (db *MemDB) Txn(write bool) *Txn {
@ -45,7 +53,7 @@ func (db *MemDB) Txn(write bool) *Txn {
txn := &Txn{
db: db,
write: write,
rootTxn: db.root.Txn(),
rootTxn: db.getRoot().Txn(),
}
return txn
}
@ -56,20 +64,22 @@ func (db *MemDB) Txn(write bool) *Txn {
func (db *MemDB) Snapshot() *MemDB {
clone := &MemDB{
schema: db.schema,
root: db.root,
root: unsafe.Pointer(db.getRoot()),
}
return clone
}
// initialize is used to set up the DB for use after creation
func (db *MemDB) initialize() error {
root := db.getRoot()
for tName, tableSchema := range db.schema.Tables {
for iName, _ := range tableSchema.Indexes {
index := iradix.New()
path := indexPath(tName, iName)
db.root, _, _ = db.root.Insert(path, index)
root, _, _ = root.Insert(path, index)
}
}
db.root = unsafe.Pointer(root)
return nil
}

View File

@ -1,8 +1,11 @@
package memdb
import (
"bytes"
"fmt"
"strings"
"sync/atomic"
"unsafe"
"github.com/hashicorp/go-immutable-radix"
)
@ -10,6 +13,7 @@ import (
const (
id = "id"
)
// tableIndex is a tuple of (Table, Index) used for lookups
type tableIndex struct {
Table string
@ -113,7 +117,8 @@ func (txn *Txn) Commit() {
}
// Update the root of the DB
txn.db.root = txn.rootTxn.Commit()
newRoot := txn.rootTxn.Commit()
atomic.StorePointer(&txn.db.root, unsafe.Pointer(newRoot))
// Clear the txn
txn.rootTxn = nil
@ -161,28 +166,44 @@ func (txn *Txn) Insert(table string, obj interface{}) error {
for name, indexSchema := range tableSchema.Indexes {
indexTxn := txn.writableIndex(table, name)
// Handle the update by deleting from the index first
if update {
ok, val, err := indexSchema.Indexer.FromObject(existing)
if err != nil {
return fmt.Errorf("failed to build index '%s': %v", name, err)
}
if ok {
// Handle non-unique index by computing a unique index.
// This is done by appending the primary key which must
// be unique anyways.
if !indexSchema.Unique {
val = append(val, idVal...)
}
indexTxn.Delete(val)
}
}
// Handle the insert after the update
// Determine the new index value
ok, val, err := indexSchema.Indexer.FromObject(obj)
if err != nil {
return fmt.Errorf("failed to build index '%s': %v", name, err)
}
// Handle non-unique index by computing a unique index.
// This is done by appending the primary key which must
// be unique anyways.
if ok && !indexSchema.Unique {
val = append(val, idVal...)
}
// Handle the update by deleting from the index first
if update {
okExist, valExist, err := indexSchema.Indexer.FromObject(existing)
if err != nil {
return fmt.Errorf("failed to build index '%s': %v", name, err)
}
if okExist {
// Handle non-unique index by computing a unique index.
// This is done by appending the primary key which must
// be unique anyways.
if !indexSchema.Unique {
valExist = append(valExist, idVal...)
}
// If we are writing to the same index with the same value,
// we can avoid the delete as the insert will overwrite the
// value anyways.
if !bytes.Equal(valExist, val) {
indexTxn.Delete(valExist)
}
}
}
// If there is no index value, either this is an error or an expected
// case and we can skip updating
if !ok {
if indexSchema.AllowMissing {
continue
@ -191,12 +212,7 @@ func (txn *Txn) Insert(table string, obj interface{}) error {
}
}
// Handle non-unique index by computing a unique index.
// This is done by appending the primary key which must
// be unique anyways.
if !indexSchema.Unique {
val = append(val, idVal...)
}
// Update the value of the index
indexTxn.Insert(val, obj)
}
return nil
@ -281,7 +297,7 @@ func (txn *Txn) DeleteAll(table, index string, args ...interface{}) (int, error)
// Do the deletes
num := 0
for _, obj := range(objs) {
for _, obj := range objs {
if err := txn.Delete(table, obj); err != nil {
return num, err
}
@ -318,6 +334,39 @@ func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, er
return value, nil
}
// LongestPrefix is used to fetch the longest prefix match for the given
// constraints on the index. Note that this will not work with the memdb
// StringFieldIndex because it adds null terminators which prevent the
// algorithm from correctly finding a match (it will get to right before the
// null and fail to find a leaf node). This should only be used where the prefix
// given is capable of matching indexed entries directly, which typically only
// applies to a custom indexer. See the unit test for an example.
func (txn *Txn) LongestPrefix(table, index string, args ...interface{}) (interface{}, error) {
// Enforce that this only works on prefix indexes.
if !strings.HasSuffix(index, "_prefix") {
return nil, fmt.Errorf("must use '%s_prefix' on index", index)
}
// Get the index value.
indexSchema, val, err := txn.getIndexValue(table, index, args...)
if err != nil {
return nil, err
}
// This algorithm only makes sense against a unique index, otherwise the
// index keys will have the IDs appended to them.
if !indexSchema.Unique {
return nil, fmt.Errorf("index '%s' is not unique", index)
}
// Find the longest prefix match with the given index.
indexTxn := txn.readableIndex(table, indexSchema.Name)
if _, value, ok := indexTxn.Root().LongestPrefix(val); ok {
return value, nil
}
return nil, nil
}
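// exampleLongestPrefix is a hypothetical sketch, not part of the upstream
// file, showing how LongestPrefix is meant to be called. It assumes a schema,
// defined elsewhere, whose unique "id_prefix" index is backed by a custom
// indexer that does not null-terminate its keys (per the caveat above,
// StringFieldIndex would not work here). Table and key values are
// illustrative only.
func exampleLongestPrefix(db *MemDB) (interface{}, error) {
	// Read-only transaction; Abort releases it when we are done.
	txn := db.Txn(false)
	defer txn.Abort()
	// Returns the entry whose indexed key is the longest prefix of the
	// argument, or nil if nothing matches.
	return txn.LongestPrefix("routes", "id_prefix", "10.1.2.3")
}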
// getIndexValue is used to get the IndexSchema and the value
// used to scan the index given the parameters. This handles prefix based
// scans when the index has the "_prefix" suffix. The index must support

View File

@ -1,4 +0,0 @@
foo = [
"1",
"2", # comment
]

View File

@ -1,6 +0,0 @@
provisioner "remote-exec" {
scripts = [
"${path.module}/scripts/install-consul.sh" // missing comma
"${path.module}/scripts/install-haproxy.sh"
]
}

View File

@ -1,6 +0,0 @@
resource = [{
"foo": {
"bar": {},
"baz": [1, 2, "foo"],
}
}]

View File

@ -1,5 +0,0 @@
resource = [{
foo = [{
bar = {}
}]
}]

View File

@ -1,15 +0,0 @@
// Foo
/* Bar */
/*
/*
Baz
*/
# Another
# Multiple
# Lines
foo = "bar"

View File

@ -1 +0,0 @@
#foo

View File

@ -1 +0,0 @@
# Hello

View File

@ -1,42 +0,0 @@
variable "foo" {
default = "bar"
description = "bar"
}
variable "groups" { }
provider "aws" {
access_key = "foo"
secret_key = "bar"
}
provider "do" {
api_key = "${var.foo}"
}
resource "aws_security_group" "firewall" {
count = 5
}
resource aws_instance "web" {
ami = "${var.foo}"
security_groups = [
"foo",
"${aws_security_group.firewall.foo}",
"${element(split(\",\", var.groups)}",
]
network_interface = {
device_index = 0
description = "Main network interface"
}
}
resource "aws_instance" "db" {
security_groups = "${aws_security_group.firewall.*.id}"
VPC = "foo"
depends_on = ["aws_instance.web"]
}
output "web_ip" {
value = "${aws_instance.web.private_ip}"
}

View File

@ -1 +0,0 @@
foo.bar = "baz"

View File

@ -1 +0,0 @@
foo = [1, 2, "foo"]

View File

@ -1 +0,0 @@
foo = [1, 2, "foo",]

View File

@ -1,4 +0,0 @@
# should error, but not crash
resource "template_file" "cloud_config" {
template = "$file("${path.module}/some/path")"
}

View File

@ -1,2 +0,0 @@
foo = "bar"
key = 7

View File

@ -1,3 +0,0 @@
default = {
"eu-west-1": "ami-b1cf19c6",
}

View File

@ -1,5 +0,0 @@
// This is a test structure for the lexer
foo bar "baz" {
key = 7
foo = "bar"
}

View File

@ -1,5 +0,0 @@
foo {
value = 7
"value" = 8
"complex::value" = 9
}

View File

@ -1 +0,0 @@
resource "foo" "bar" {}

View File

@ -1,7 +0,0 @@
foo = "bar"
bar = 7
baz = [1,2,3]
foo = -12
bar = 3.14159
foo = true
bar = false

View File

@ -1,4 +0,0 @@
{
"foo": [1, 2, "bar"],
"bar": "baz"
}

View File

@ -1,3 +0,0 @@
{
"foo": "bar"
}

View File

@ -1,5 +0,0 @@
{
"foo": {
"bar": [1,2]
}
}

View File

@ -1,10 +0,0 @@
{
"foo": "bar",
"bar": 7,
"baz": [1,2,3],
"foo": -12,
"bar": 3.14159,
"foo": true,
"bar": false,
"foo": null
}

3
vendor/github.com/hashicorp/hil/.gitignore generated vendored Normal file
View File

@ -0,0 +1,3 @@
.DS_Store
.idea
*.iml

3
vendor/github.com/hashicorp/hil/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,3 @@
sudo: false
language: go
go: 1.5

102
vendor/github.com/hashicorp/hil/README.md generated vendored Normal file
View File

@ -0,0 +1,102 @@
# HIL
[![GoDoc](https://godoc.org/github.com/hashicorp/hil?status.png)](https://godoc.org/github.com/hashicorp/hil) [![Build Status](https://travis-ci.org/hashicorp/hil.svg?branch=master)](https://travis-ci.org/hashicorp/hil)
HIL (HashiCorp Interpolation Language) is a lightweight embedded language used
primarily for configuration interpolation. The goal of HIL is to make a simple
language for interpolations in the various configurations of HashiCorp tools.
HIL is built to interpolate any string, but is in use by HashiCorp primarily
with [HCL](https://github.com/hashicorp/hcl). HCL is _not required_ in any
way for use with HIL.
HIL isn't meant to be a general purpose language. It was built for basic
configuration interpolations. Therefore, you can't currently write functions,
have conditionals, set intermediary variables, etc. within HIL itself. It is
possible some of these may be added later but the right use case must exist.
## Why?
Many of our tools have support for something similar to templates, but
within the configuration itself. The most prominent requirement was in
[Terraform](https://github.com/hashicorp/terraform) where we wanted the
configuration to be able to reference values from elsewhere in the
configuration. Example:
foo = "hi ${var.world}"
We originally used a full templating language for this, but found it
was too heavyweight. Additionally, many full languages required bindings
to C (and thus the usage of cgo) which we try to avoid to make cross-compilation
easier. We then moved to very basic regular expression based
string replacement, but found the need for basic arithmetic and function
calls resulting in overly complex regular expressions.
Ultimately, we wrote our own mini-language within Terraform itself. As
we built other projects such as [Nomad](https://nomadproject.io) and
[Otto](https://ottoproject.io), the need for basic interpolations arose
again.
Thus HIL was born. It is extracted from Terraform, cleaned up, and
better tested for general purpose use.
## Syntax
For a complete grammar, please see the parser itself. A high-level overview
of the syntax and grammar is listed here.
Code begins within `${` and `}`. Outside of this, text is treated
literally. For example, `foo` is a valid HIL program that is just the
string "foo", but `foo ${bar}` is an HIL program that is the string "foo "
concatenated with the value of `bar`. For the remainder of the syntax
docs, we'll assume you're within `${}`.
* Identifiers are any text in the format of `[a-zA-Z0-9-.]`. Example
identifiers: `foo`, `var.foo`, `foo-bar`.
* Strings are double quoted and can contain any UTF-8 characters.
Example: `"Hello, World"`
* Numbers are assumed to be base 10. If you prefix a number with 0x,
it is treated as a hexadecimal. If it is prefixed with 0, it is
treated as an octal. Numbers can be in scientific notation: "1e10".
* Unary `-` can be used for negative numbers. Example: `-10` or `-0.2`
* Boolean values: `true`, `false`
* The following arithmetic operations are allowed: +, -, *, /, %.
* Function calls are in the form of `name(arg1, arg2, ...)`. Example:
`add(1, 5)`. Arguments can be any valid HIL expression, example:
`add(1, var.foo)` or even nested function calls:
`add(1, get("some value"))`.
* Within strings, further interpolations can be opened with `${}`.
Example: `"Hello ${nested}"`. A full example including the
original `${}` (remember this list assumes we're inside of one
already) could be: `foo ${func("hello ${var.foo}")}`.
## Language Changes
We've used this mini-language in Terraform for years. For backwards compatibility
reasons, we're unlikely to make an incompatible change to the language but
we're not currently making that promise, either.
The internal API of this project may very well change as we evolve it
to work with more of our projects. We recommend using some sort of dependency
management solution with this package.
## Future Changes
The following changes are already planned to be made at some point:
* Richer types: lists, maps, etc.
* Convert to a more standard Go parser structure similar to HCL. This
will improve our error messaging as well as allow us to have automatic
formatting.
* Allow interpolations to result in more types than just a string. While
within the interpolation basic types are honored, the result is always
a string.

43
vendor/github.com/hashicorp/hil/ast/arithmetic.go generated vendored Normal file
View File

@ -0,0 +1,43 @@
package ast
import (
"bytes"
"fmt"
)
// Arithmetic represents a node where the result is arithmetic of
// two or more operands in the order given.
type Arithmetic struct {
Op ArithmeticOp
Exprs []Node
Posx Pos
}
func (n *Arithmetic) Accept(v Visitor) Node {
for i, expr := range n.Exprs {
n.Exprs[i] = expr.Accept(v)
}
return v(n)
}
func (n *Arithmetic) Pos() Pos {
return n.Posx
}
func (n *Arithmetic) GoString() string {
return fmt.Sprintf("*%#v", *n)
}
func (n *Arithmetic) String() string {
var b bytes.Buffer
for _, expr := range n.Exprs {
b.WriteString(fmt.Sprintf("%s", expr))
}
return b.String()
}
func (n *Arithmetic) Type(Scope) (Type, error) {
return TypeInt, nil
}

13
vendor/github.com/hashicorp/hil/ast/arithmetic_op.go generated vendored Normal file
View File

@ -0,0 +1,13 @@
package ast
// ArithmeticOp is the operation to use for the math.
type ArithmeticOp int
const (
ArithmeticOpInvalid ArithmeticOp = 0
ArithmeticOpAdd ArithmeticOp = iota
ArithmeticOpSub
ArithmeticOpMul
ArithmeticOpDiv
ArithmeticOpMod
)

56
vendor/github.com/hashicorp/hil/ast/ast.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
package ast
import (
"fmt"
)
// Node is the interface that all AST nodes must implement.
type Node interface {
// Accept is called to dispatch to the visitors. It must return the
// resulting Node (which might be different in an AST transform).
Accept(Visitor) Node
// Pos returns the position of this node in some source.
Pos() Pos
// Type returns the type of this node for the given context.
Type(Scope) (Type, error)
}
// Pos is the starting position of an AST node
type Pos struct {
Column, Line int // Column/Line number, starting at 1
}
func (p Pos) String() string {
return fmt.Sprintf("%d:%d", p.Line, p.Column)
}
// Visitors are just implementations of this function.
//
// The function must return the Node to replace this node with. "nil" is
// _not_ a valid return value. If there is no replacement, the original node
// should be returned. We build this replacement directly into the visitor
// pattern since AST transformations are a common and useful tool and
// building it into the AST itself makes it required for future Node
// implementations and very easy to do.
//
// Note that this isn't a true implementation of the visitor pattern, which
// generally requires proper type dispatch on the function. However,
// implementing this basic visitor pattern style is still very useful even
// if you have to type switch.
type Visitor func(Node) Node
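// countLiterals is a hypothetical sketch, not part of the upstream file,
// illustrating the visitor contract described above: every node is visited
// and must be returned (unchanged here, since we only count).
func countLiterals(root Node) int {
	count := 0
	root.Accept(func(n Node) Node {
		if _, ok := n.(*LiteralNode); ok {
			count++
		}
		return n
	})
	return count
}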
//go:generate stringer -type=Type
// Type is the type of any value.
type Type uint32
const (
TypeInvalid Type = 0
TypeAny Type = 1 << iota
TypeString
TypeInt
TypeFloat
TypeList
)

47
vendor/github.com/hashicorp/hil/ast/call.go generated vendored Normal file
View File

@ -0,0 +1,47 @@
package ast
import (
"fmt"
"strings"
)
// Call represents a function call.
type Call struct {
Func string
Args []Node
Posx Pos
}
func (n *Call) Accept(v Visitor) Node {
for i, a := range n.Args {
n.Args[i] = a.Accept(v)
}
return v(n)
}
func (n *Call) Pos() Pos {
return n.Posx
}
func (n *Call) String() string {
args := make([]string, len(n.Args))
for i, arg := range n.Args {
args[i] = fmt.Sprintf("%s", arg)
}
return fmt.Sprintf("Call(%s, %s)", n.Func, strings.Join(args, ", "))
}
func (n *Call) Type(s Scope) (Type, error) {
f, ok := s.LookupFunc(n.Func)
if !ok {
return TypeInvalid, fmt.Errorf("unknown function: %s", n.Func)
}
return f.ReturnType, nil
}
func (n *Call) GoString() string {
return fmt.Sprintf("*%#v", *n)
}

42
vendor/github.com/hashicorp/hil/ast/concat.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
package ast
import (
"bytes"
"fmt"
)
// Concat represents a node where the result of two or more expressions are
// concatenated. The result of all expressions must be a string.
type Concat struct {
Exprs []Node
Posx Pos
}
func (n *Concat) Accept(v Visitor) Node {
for i, expr := range n.Exprs {
n.Exprs[i] = expr.Accept(v)
}
return v(n)
}
func (n *Concat) Pos() Pos {
return n.Posx
}
func (n *Concat) GoString() string {
return fmt.Sprintf("*%#v", *n)
}
func (n *Concat) String() string {
var b bytes.Buffer
for _, expr := range n.Exprs {
b.WriteString(fmt.Sprintf("%s", expr))
}
return b.String()
}
func (n *Concat) Type(Scope) (Type, error) {
return TypeString, nil
}

68
vendor/github.com/hashicorp/hil/ast/index.go generated vendored Normal file
View File

@ -0,0 +1,68 @@
package ast
import (
"fmt"
"strings"
)
// Index represents an indexing operation into another data structure
type Index struct {
Target Node
Key Node
Posx Pos
}
func (n *Index) Accept(v Visitor) Node {
return v(n)
}
func (n *Index) Pos() Pos {
return n.Posx
}
func (n *Index) String() string {
return fmt.Sprintf("Index(%s, %s)", n.Target, n.Key)
}
func (n *Index) Type(s Scope) (Type, error) {
variableAccess, ok := n.Target.(*VariableAccess)
if !ok {
return TypeInvalid, fmt.Errorf("target is not a variable")
}
variable, ok := s.LookupVar(variableAccess.Name)
if !ok {
return TypeInvalid, fmt.Errorf("unknown variable accessed: %s", variableAccess.Name)
}
if variable.Type != TypeList {
return TypeInvalid, fmt.Errorf("invalid index operation into non-indexable type: %s", variable.Type)
}
list := variable.Value.([]Variable)
// Ensure that the types of the list elements are homogenous
listTypes := make(map[Type]struct{})
for _, v := range list {
if _, ok := listTypes[v.Type]; ok {
continue
}
listTypes[v.Type] = struct{}{}
}
if len(listTypes) != 1 {
typesFound := make([]string, len(listTypes))
i := 0
for k, _ := range listTypes {
typesFound[i] = k.String()
i++
}
types := strings.Join(typesFound, ", ")
return TypeInvalid, fmt.Errorf("list %q does not have homogenous types. found %s", variableAccess.Name, types)
}
return list[0].Type, nil
}
func (n *Index) GoString() string {
return fmt.Sprintf("*%#v", *n)
}

33
vendor/github.com/hashicorp/hil/ast/literal.go generated vendored Normal file
View File

@ -0,0 +1,33 @@
package ast
import (
"fmt"
)
// LiteralNode represents a single literal value, such as "foo" or
// 42 or 3.14159. Based on the Type, the Value can be safely cast.
type LiteralNode struct {
Value interface{}
Typex Type
Posx Pos
}
func (n *LiteralNode) Accept(v Visitor) Node {
return v(n)
}
func (n *LiteralNode) Pos() Pos {
return n.Posx
}
func (n *LiteralNode) GoString() string {
return fmt.Sprintf("*%#v", *n)
}
func (n *LiteralNode) String() string {
return fmt.Sprintf("Literal(%s, %v)", n.Typex, n.Value)
}
func (n *LiteralNode) Type(Scope) (Type, error) {
return n.Typex, nil
}

90
vendor/github.com/hashicorp/hil/ast/scope.go generated vendored Normal file
View File

@ -0,0 +1,90 @@
package ast
import (
"fmt"
"reflect"
)
// Scope is the interface used to look up variables and functions while
// evaluating. How these functions/variables are defined are up to the caller.
type Scope interface {
LookupFunc(string) (Function, bool)
LookupVar(string) (Variable, bool)
}
// Variable is a variable value for execution given as input to the engine.
// It records the value of a variable along with its type.
type Variable struct {
Value interface{}
Type Type
}
// NewVariable creates a new Variable for the given value. This will
// attempt to infer the correct type. If it can't, an error will be returned.
func NewVariable(v interface{}) (result Variable, err error) {
switch v := reflect.ValueOf(v); v.Kind() {
case reflect.String:
result.Type = TypeString
default:
err = fmt.Errorf("Unknown type: %s", v.Kind())
}
result.Value = v
return
}
// String implements Stringer on Variable, displaying the type and value
// of the Variable.
func (v Variable) String() string {
return fmt.Sprintf("{Variable (%s): %+v}", v.Type, v.Value)
}
// Function defines a function that can be executed by the engine.
// The type checker will validate that the proper types will be called
// to the callback.
type Function struct {
// ArgTypes is the list of types in argument order. These are the
// required arguments.
//
// ReturnType is the type of the returned value. The Callback MUST
// return this type.
ArgTypes []Type
ReturnType Type
// Variadic, if true, says that this function is variadic, meaning
// it takes a variable number of arguments. In this case, the
// VariadicType must be set.
Variadic bool
VariadicType Type
// Callback is the function called for a function. The argument
// types are guaranteed to match the spec above by the type checker.
// The length of the args is strictly == len(ArgTypes) unless Variadic
// is true, in which case it's >= len(ArgTypes).
Callback func([]interface{}) (interface{}, error)
}
// BasicScope is a simple scope that looks up variables and functions
// using a map.
type BasicScope struct {
FuncMap map[string]Function
VarMap map[string]Variable
}
func (s *BasicScope) LookupFunc(n string) (Function, bool) {
if s == nil {
return Function{}, false
}
v, ok := s.FuncMap[n]
return v, ok
}
func (s *BasicScope) LookupVar(n string) (Variable, bool) {
if s == nil {
return Variable{}, false
}
v, ok := s.VarMap[n]
return v, ok
}
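// exampleScope is a hypothetical sketch, not part of the upstream file,
// showing how the pieces above fit together: a BasicScope exposing one
// variable and one variadic function. The names "var.name" and "join" are
// illustrative only; callers define their own.
func exampleScope() *BasicScope {
	return &BasicScope{
		VarMap: map[string]Variable{
			"var.name": {Value: "world", Type: TypeString},
		},
		FuncMap: map[string]Function{
			"join": {
				ArgTypes:     []Type{TypeString},
				ReturnType:   TypeString,
				Variadic:     true,
				VariadicType: TypeString,
				Callback: func(args []interface{}) (interface{}, error) {
					// args[0] is the separator; the remaining (variadic)
					// arguments are joined with it.
					sep := args[0].(string)
					out := ""
					for i, raw := range args[1:] {
						if i > 0 {
							out += sep
						}
						out += raw.(string)
					}
					return out, nil
				},
			},
		},
	}
}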

25
vendor/github.com/hashicorp/hil/ast/stack.go generated vendored Normal file
View File

@ -0,0 +1,25 @@
package ast
// Stack is a stack of Node.
type Stack struct {
stack []Node
}
func (s *Stack) Len() int {
return len(s.stack)
}
func (s *Stack) Push(n Node) {
s.stack = append(s.stack, n)
}
func (s *Stack) Pop() Node {
x := s.stack[len(s.stack)-1]
s.stack[len(s.stack)-1] = nil
s.stack = s.stack[:len(s.stack)-1]
return x
}
func (s *Stack) Reset() {
s.stack = nil
}

42
vendor/github.com/hashicorp/hil/ast/type_string.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
// Code generated by "stringer -type=Type"; DO NOT EDIT
package ast
import "fmt"
const (
_Type_name_0 = "TypeInvalid"
_Type_name_1 = "TypeAny"
_Type_name_2 = "TypeString"
_Type_name_3 = "TypeInt"
_Type_name_4 = "TypeFloat"
_Type_name_5 = "TypeList"
)
var (
_Type_index_0 = [...]uint8{0, 11}
_Type_index_1 = [...]uint8{0, 7}
_Type_index_2 = [...]uint8{0, 10}
_Type_index_3 = [...]uint8{0, 7}
_Type_index_4 = [...]uint8{0, 9}
_Type_index_5 = [...]uint8{0, 8}
)
func (i Type) String() string {
switch {
case i == 0:
return _Type_name_0
case i == 2:
return _Type_name_1
case i == 4:
return _Type_name_2
case i == 8:
return _Type_name_3
case i == 16:
return _Type_name_4
case i == 32:
return _Type_name_5
default:
return fmt.Sprintf("Type(%d)", i)
}
}

36
vendor/github.com/hashicorp/hil/ast/variable_access.go generated vendored Normal file
View File

@ -0,0 +1,36 @@
package ast
import (
"fmt"
)
// VariableAccess represents a variable access.
type VariableAccess struct {
Name string
Posx Pos
}
func (n *VariableAccess) Accept(v Visitor) Node {
return v(n)
}
func (n *VariableAccess) Pos() Pos {
return n.Posx
}
func (n *VariableAccess) GoString() string {
return fmt.Sprintf("*%#v", *n)
}
func (n *VariableAccess) String() string {
return fmt.Sprintf("Variable(%s)", n.Name)
}
func (n *VariableAccess) Type(s Scope) (Type, error) {
v, ok := s.LookupVar(n.Name)
if !ok {
return TypeInvalid, fmt.Errorf("unknown variable: %s", n.Name)
}
return v.Type, nil
}

144
vendor/github.com/hashicorp/hil/builtins.go generated vendored Normal file
View File

@ -0,0 +1,144 @@
package hil
import (
"strconv"
"github.com/hashicorp/hil/ast"
)
// NOTE: All builtins are tested in engine_test.go
func registerBuiltins(scope *ast.BasicScope) *ast.BasicScope {
if scope == nil {
scope = new(ast.BasicScope)
}
if scope.FuncMap == nil {
scope.FuncMap = make(map[string]ast.Function)
}
// Implicit conversions
scope.FuncMap["__builtin_FloatToInt"] = builtinFloatToInt()
scope.FuncMap["__builtin_FloatToString"] = builtinFloatToString()
scope.FuncMap["__builtin_IntToFloat"] = builtinIntToFloat()
scope.FuncMap["__builtin_IntToString"] = builtinIntToString()
scope.FuncMap["__builtin_StringToInt"] = builtinStringToInt()
// Math operations
scope.FuncMap["__builtin_IntMath"] = builtinIntMath()
scope.FuncMap["__builtin_FloatMath"] = builtinFloatMath()
return scope
}
func builtinFloatMath() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeInt},
Variadic: true,
VariadicType: ast.TypeFloat,
ReturnType: ast.TypeFloat,
Callback: func(args []interface{}) (interface{}, error) {
op := args[0].(ast.ArithmeticOp)
result := args[1].(float64)
for _, raw := range args[2:] {
arg := raw.(float64)
switch op {
case ast.ArithmeticOpAdd:
result += arg
case ast.ArithmeticOpSub:
result -= arg
case ast.ArithmeticOpMul:
result *= arg
case ast.ArithmeticOpDiv:
result /= arg
}
}
return result, nil
},
}
}
func builtinIntMath() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeInt},
Variadic: true,
VariadicType: ast.TypeInt,
ReturnType: ast.TypeInt,
Callback: func(args []interface{}) (interface{}, error) {
op := args[0].(ast.ArithmeticOp)
result := args[1].(int)
for _, raw := range args[2:] {
arg := raw.(int)
switch op {
case ast.ArithmeticOpAdd:
result += arg
case ast.ArithmeticOpSub:
result -= arg
case ast.ArithmeticOpMul:
result *= arg
case ast.ArithmeticOpDiv:
result /= arg
case ast.ArithmeticOpMod:
result = result % arg
}
}
return result, nil
},
}
}
func builtinFloatToInt() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeFloat},
ReturnType: ast.TypeInt,
Callback: func(args []interface{}) (interface{}, error) {
return int(args[0].(float64)), nil
},
}
}
func builtinFloatToString() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeFloat},
ReturnType: ast.TypeString,
Callback: func(args []interface{}) (interface{}, error) {
return strconv.FormatFloat(
args[0].(float64), 'g', -1, 64), nil
},
}
}
func builtinIntToFloat() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeInt},
ReturnType: ast.TypeFloat,
Callback: func(args []interface{}) (interface{}, error) {
return float64(args[0].(int)), nil
},
}
}
func builtinIntToString() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeInt},
ReturnType: ast.TypeString,
Callback: func(args []interface{}) (interface{}, error) {
return strconv.FormatInt(int64(args[0].(int)), 10), nil
},
}
}
func builtinStringToInt() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeString},
ReturnType: ast.TypeInt,
Callback: func(args []interface{}) (interface{}, error) {
v, err := strconv.ParseInt(args[0].(string), 0, 0)
if err != nil {
return nil, err
}
return int(v), nil
},
}
}

88
vendor/github.com/hashicorp/hil/check_identifier.go generated vendored Normal file
View File

@ -0,0 +1,88 @@
package hil
import (
"fmt"
"sync"
"github.com/hashicorp/hil/ast"
)
// IdentifierCheck is a SemanticCheck that checks that all identifiers
// resolve properly and that the right number of arguments are passed
// to functions.
type IdentifierCheck struct {
Scope ast.Scope
err error
lock sync.Mutex
}
func (c *IdentifierCheck) Visit(root ast.Node) error {
c.lock.Lock()
defer c.lock.Unlock()
defer c.reset()
root.Accept(c.visit)
return c.err
}
func (c *IdentifierCheck) visit(raw ast.Node) ast.Node {
if c.err != nil {
return raw
}
switch n := raw.(type) {
case *ast.Call:
c.visitCall(n)
case *ast.VariableAccess:
c.visitVariableAccess(n)
case *ast.Concat:
// Ignore
case *ast.LiteralNode:
// Ignore
default:
// Ignore
}
// We never do replacement with this visitor
return raw
}
func (c *IdentifierCheck) visitCall(n *ast.Call) {
// Look up the function in the map
function, ok := c.Scope.LookupFunc(n.Func)
if !ok {
c.createErr(n, fmt.Sprintf("unknown function called: %s", n.Func))
return
}
// Break up the args into what is variadic and what is required
args := n.Args
if function.Variadic && len(args) > len(function.ArgTypes) {
args = n.Args[:len(function.ArgTypes)]
}
// Verify the number of arguments
if len(args) != len(function.ArgTypes) {
c.createErr(n, fmt.Sprintf(
"%s: expected %d arguments, got %d",
n.Func, len(function.ArgTypes), len(n.Args)))
return
}
}
func (c *IdentifierCheck) visitVariableAccess(n *ast.VariableAccess) {
// Look up the variable in the map
if _, ok := c.Scope.LookupVar(n.Name); !ok {
c.createErr(n, fmt.Sprintf(
"unknown variable accessed: %s", n.Name))
return
}
}
func (c *IdentifierCheck) createErr(n ast.Node, str string) {
c.err = fmt.Errorf("%s: %s", n.Pos(), str)
}
func (c *IdentifierCheck) reset() {
c.err = nil
}

376
vendor/github.com/hashicorp/hil/check_types.go generated vendored Normal file
View File

@ -0,0 +1,376 @@
package hil
import (
"fmt"
"sync"
"github.com/hashicorp/hil/ast"
)
// TypeCheck implements ast.Visitor for type checking an AST tree.
// It requires some configuration to look up the type of nodes.
//
// It also optionally will not raise a type error and will insert implicit
// type conversions for specific types if specified by the Implicit
// field. Note that this is kind of organizationally weird to put into
// this structure but we'd rather do that than duplicate the type checking
// logic multiple times.
type TypeCheck struct {
Scope ast.Scope
// Implicit is a map of implicit type conversions that we can do,
// and that shouldn't error. The key of the first map is the from type,
// the key of the second map is the to type, and the final string
// value is the function to call (which must be registered in the Scope).
Implicit map[ast.Type]map[ast.Type]string
// Stack of types. This shouldn't be used directly except by implementations
// of TypeCheckNode.
Stack []ast.Type
err error
lock sync.Mutex
}
// TypeCheckNode is the interface that must be implemented by any
// ast.Node that wants to support type-checking. If the type checker
// encounters a node that doesn't implement this, it will error.
type TypeCheckNode interface {
TypeCheck(*TypeCheck) (ast.Node, error)
}
func (v *TypeCheck) Visit(root ast.Node) error {
v.lock.Lock()
defer v.lock.Unlock()
defer v.reset()
root.Accept(v.visit)
return v.err
}
func (v *TypeCheck) visit(raw ast.Node) ast.Node {
if v.err != nil {
return raw
}
var result ast.Node
var err error
switch n := raw.(type) {
case *ast.Arithmetic:
tc := &typeCheckArithmetic{n}
result, err = tc.TypeCheck(v)
case *ast.Call:
tc := &typeCheckCall{n}
result, err = tc.TypeCheck(v)
case *ast.Index:
tc := &typeCheckIndex{n}
result, err = tc.TypeCheck(v)
case *ast.Concat:
tc := &typeCheckConcat{n}
result, err = tc.TypeCheck(v)
case *ast.LiteralNode:
tc := &typeCheckLiteral{n}
result, err = tc.TypeCheck(v)
case *ast.VariableAccess:
tc := &typeCheckVariableAccess{n}
result, err = tc.TypeCheck(v)
default:
tc, ok := raw.(TypeCheckNode)
if !ok {
err = fmt.Errorf("unknown node for type check: %#v", raw)
break
}
result, err = tc.TypeCheck(v)
}
if err != nil {
pos := raw.Pos()
v.err = fmt.Errorf("At column %d, line %d: %s",
pos.Column, pos.Line, err)
}
return result
}
type typeCheckArithmetic struct {
n *ast.Arithmetic
}
func (tc *typeCheckArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) {
// The arguments are on the stack in reverse order, so pop them off.
exprs := make([]ast.Type, len(tc.n.Exprs))
for i, _ := range tc.n.Exprs {
exprs[len(tc.n.Exprs)-1-i] = v.StackPop()
}
// Determine the resulting type we want. We do this by going over
// every expression until we find one with a type we recognize.
// We do this because the first expr might be a string ("var.foo")
// and we need to know what type to implicitly convert to.
mathFunc := "__builtin_IntMath"
mathType := ast.TypeInt
for _, v := range exprs {
exit := true
switch v {
case ast.TypeInt:
mathFunc = "__builtin_IntMath"
mathType = v
case ast.TypeFloat:
mathFunc = "__builtin_FloatMath"
mathType = v
default:
exit = false
}
// We found the type, so leave
if exit {
break
}
}
// Verify the args
for i, arg := range exprs {
if arg != mathType {
cn := v.ImplicitConversion(exprs[i], mathType, tc.n.Exprs[i])
if cn != nil {
tc.n.Exprs[i] = cn
continue
}
return nil, fmt.Errorf(
"operand %d should be %s, got %s",
i+1, mathType, arg)
}
}
// Modulo doesn't work for floats
if mathType == ast.TypeFloat && tc.n.Op == ast.ArithmeticOpMod {
return nil, fmt.Errorf("modulo cannot be used with floats")
}
// Return type
v.StackPush(mathType)
// Replace our node with a call to the proper function. This isn't
// type checked but we already verified types.
args := make([]ast.Node, len(tc.n.Exprs)+1)
args[0] = &ast.LiteralNode{
Value: tc.n.Op,
Typex: ast.TypeInt,
Posx: tc.n.Pos(),
}
copy(args[1:], tc.n.Exprs)
return &ast.Call{
Func: mathFunc,
Args: args,
Posx: tc.n.Pos(),
}, nil
}
type typeCheckCall struct {
n *ast.Call
}
func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) {
// Look up the function in the map
function, ok := v.Scope.LookupFunc(tc.n.Func)
if !ok {
return nil, fmt.Errorf("unknown function called: %s", tc.n.Func)
}
// The arguments are on the stack in reverse order, so pop them off.
args := make([]ast.Type, len(tc.n.Args))
for i, _ := range tc.n.Args {
args[len(tc.n.Args)-1-i] = v.StackPop()
}
// Verify the args
for i, expected := range function.ArgTypes {
if expected == ast.TypeAny {
continue
}
if args[i] != expected {
cn := v.ImplicitConversion(args[i], expected, tc.n.Args[i])
if cn != nil {
tc.n.Args[i] = cn
continue
}
return nil, fmt.Errorf(
"%s: argument %d should be %s, got %s",
tc.n.Func, i+1, expected, args[i])
}
}
// If we're variadic, then verify the types there
if function.Variadic && function.VariadicType != ast.TypeAny {
args = args[len(function.ArgTypes):]
for i, t := range args {
if t != function.VariadicType {
realI := i + len(function.ArgTypes)
cn := v.ImplicitConversion(
t, function.VariadicType, tc.n.Args[realI])
if cn != nil {
tc.n.Args[realI] = cn
continue
}
return nil, fmt.Errorf(
"%s: argument %d should be %s, got %s",
tc.n.Func, realI,
function.VariadicType, t)
}
}
}
// Return type
v.StackPush(function.ReturnType)
return tc.n, nil
}
type typeCheckConcat struct {
n *ast.Concat
}
func (tc *typeCheckConcat) TypeCheck(v *TypeCheck) (ast.Node, error) {
n := tc.n
types := make([]ast.Type, len(n.Exprs))
for i, _ := range n.Exprs {
types[len(n.Exprs)-1-i] = v.StackPop()
}
// All concat args must be strings, so validate that
for i, t := range types {
if t != ast.TypeString {
cn := v.ImplicitConversion(t, ast.TypeString, n.Exprs[i])
if cn != nil {
n.Exprs[i] = cn
continue
}
return nil, fmt.Errorf(
"argument %d must be a string", i+1)
}
}
// This always results in type string
v.StackPush(ast.TypeString)
return n, nil
}
type typeCheckLiteral struct {
n *ast.LiteralNode
}
func (tc *typeCheckLiteral) TypeCheck(v *TypeCheck) (ast.Node, error) {
v.StackPush(tc.n.Typex)
return tc.n, nil
}
type typeCheckVariableAccess struct {
n *ast.VariableAccess
}
func (tc *typeCheckVariableAccess) TypeCheck(v *TypeCheck) (ast.Node, error) {
// Look up the variable in the map
variable, ok := v.Scope.LookupVar(tc.n.Name)
if !ok {
return nil, fmt.Errorf(
"unknown variable accessed: %s", tc.n.Name)
}
// Add the type to the stack
v.StackPush(variable.Type)
return tc.n, nil
}
type typeCheckIndex struct {
n *ast.Index
}
func (tc *typeCheckIndex) TypeCheck(v *TypeCheck) (ast.Node, error) {
value, err := tc.n.Key.Type(v.Scope)
if err != nil {
return nil, err
}
if value != ast.TypeInt {
return nil, fmt.Errorf("key of an index must be an int, was %s", value)
}
// Ensure we have a VariableAccess as the target
varAccessNode, ok := tc.n.Target.(*ast.VariableAccess)
if !ok {
return nil, fmt.Errorf("target of an index must be a VariableAccess node, was %T", tc.n.Target)
}
// Get the variable
variable, ok := v.Scope.LookupVar(varAccessNode.Name)
if !ok {
return nil, fmt.Errorf("unknown variable accessed: %s", varAccessNode.Name)
}
if variable.Type != ast.TypeList {
return nil, fmt.Errorf("invalid index operation into non-indexable type: %s", variable.Type)
}
list := variable.Value.([]ast.Variable)
// Ensure that the types of the list elements are homogenous
listTypes := make(map[ast.Type]struct{})
for _, v := range list {
if _, ok := listTypes[v.Type]; ok {
continue
}
listTypes[v.Type] = struct{}{}
}
if len(listTypes) != 1 {
return nil, fmt.Errorf("list %q does not have homogenous types (%s)", varAccessNode.Name)
}
// This is the type since the list is homogenous in type
v.StackPush(list[0].Type)
return tc.n, nil
}
func (v *TypeCheck) ImplicitConversion(
actual ast.Type, expected ast.Type, n ast.Node) ast.Node {
if v.Implicit == nil {
return nil
}
fromMap, ok := v.Implicit[actual]
if !ok {
return nil
}
toFunc, ok := fromMap[expected]
if !ok {
return nil
}
return &ast.Call{
Func: toFunc,
Args: []ast.Node{n},
Posx: n.Pos(),
}
}
func (v *TypeCheck) reset() {
v.Stack = nil
v.err = nil
}
func (v *TypeCheck) StackPush(t ast.Type) {
v.Stack = append(v.Stack, t)
}
func (v *TypeCheck) StackPop() ast.Type {
var x ast.Type
x, v.Stack = v.Stack[len(v.Stack)-1], v.Stack[:len(v.Stack)-1]
return x
}

271
vendor/github.com/hashicorp/hil/eval.go generated vendored Normal file
View File

@ -0,0 +1,271 @@
package hil
import (
"bytes"
"fmt"
"sync"
"github.com/hashicorp/hil/ast"
)
// EvalConfig is the configuration for evaluating.
type EvalConfig struct {
// GlobalScope is the global scope of execution for evaluation.
GlobalScope *ast.BasicScope
// SemanticChecks is a list of additional semantic checks that will be run
// on the tree prior to evaluating it. The type checker, identifier checker,
// etc. will be run before these automatically.
SemanticChecks []SemanticChecker
}
// SemanticChecker is the type that must be implemented to do a
// semantic check on an AST tree. This will be called with the root node.
type SemanticChecker func(ast.Node) error
// Eval evaluates the given AST tree and returns its output value, the type
// of the output, and any error that occurred.
func Eval(root ast.Node, config *EvalConfig) (interface{}, ast.Type, error) {
// Copy the scope so we can add our builtins
if config == nil {
config = new(EvalConfig)
}
scope := registerBuiltins(config.GlobalScope)
implicitMap := map[ast.Type]map[ast.Type]string{
ast.TypeFloat: {
ast.TypeInt: "__builtin_FloatToInt",
ast.TypeString: "__builtin_FloatToString",
},
ast.TypeInt: {
ast.TypeFloat: "__builtin_IntToFloat",
ast.TypeString: "__builtin_IntToString",
},
ast.TypeString: {
ast.TypeInt: "__builtin_StringToInt",
},
}
// Build our own semantic checks that we always run
tv := &TypeCheck{Scope: scope, Implicit: implicitMap}
ic := &IdentifierCheck{Scope: scope}
// Build up the semantic checks for execution
checks := make(
[]SemanticChecker,
len(config.SemanticChecks),
len(config.SemanticChecks)+2)
copy(checks, config.SemanticChecks)
checks = append(checks, ic.Visit)
checks = append(checks, tv.Visit)
// Run the semantic checks
for _, check := range checks {
if err := check(root); err != nil {
return nil, ast.TypeInvalid, err
}
}
// Execute
v := &evalVisitor{Scope: scope}
return v.Visit(root)
}
// EvalNode is the interface that must be implemented by any ast.Node
// to support evaluation. This will be called in visitor pattern order.
// The result of each call to Eval is automatically pushed onto the
// stack as a LiteralNode. Pop elements off the stack to get child
// values.
type EvalNode interface {
Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error)
}
type evalVisitor struct {
Scope ast.Scope
Stack ast.Stack
err error
lock sync.Mutex
}
func (v *evalVisitor) Visit(root ast.Node) (interface{}, ast.Type, error) {
// Run the actual visitor pattern
root.Accept(v.visit)
// Get our result and clear out everything else
var result *ast.LiteralNode
if v.Stack.Len() > 0 {
result = v.Stack.Pop().(*ast.LiteralNode)
} else {
result = new(ast.LiteralNode)
}
resultErr := v.err
// Clear everything else so we aren't just dangling
v.Stack.Reset()
v.err = nil
t, err := result.Type(v.Scope)
if err != nil {
return nil, ast.TypeInvalid, err
}
return result.Value, t, resultErr
}
func (v *evalVisitor) visit(raw ast.Node) ast.Node {
if v.err != nil {
return raw
}
en, err := evalNode(raw)
if err != nil {
v.err = err
return raw
}
out, outType, err := en.Eval(v.Scope, &v.Stack)
if err != nil {
v.err = err
return raw
}
v.Stack.Push(&ast.LiteralNode{
Value: out,
Typex: outType,
})
return raw
}
// evalNode is a private function that returns an EvalNode for built-in
// types as well as any other EvalNode implementations.
func evalNode(raw ast.Node) (EvalNode, error) {
switch n := raw.(type) {
case *ast.Index:
return &evalIndex{n}, nil
case *ast.Call:
return &evalCall{n}, nil
case *ast.Concat:
return &evalConcat{n}, nil
case *ast.LiteralNode:
return &evalLiteralNode{n}, nil
case *ast.VariableAccess:
return &evalVariableAccess{n}, nil
default:
en, ok := n.(EvalNode)
if !ok {
return nil, fmt.Errorf("node doesn't support evaluation: %#v", raw)
}
return en, nil
}
}
type evalCall struct{ *ast.Call }
func (v *evalCall) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
// Look up the function in the map
function, ok := s.LookupFunc(v.Func)
if !ok {
return nil, ast.TypeInvalid, fmt.Errorf(
"unknown function called: %s", v.Func)
}
// The arguments are on the stack in reverse order, so pop them off.
args := make([]interface{}, len(v.Args))
for i := range v.Args {
node := stack.Pop().(*ast.LiteralNode)
args[len(v.Args)-1-i] = node.Value
}
// Call the function
result, err := function.Callback(args)
if err != nil {
return nil, ast.TypeInvalid, fmt.Errorf("%s: %s", v.Func, err)
}
return result, function.ReturnType, nil
}
type evalIndex struct{ *ast.Index }
func (v *evalIndex) Eval(scope ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
evalVarAccess, err := evalNode(v.Target)
if err != nil {
return nil, ast.TypeInvalid, err
}
target, targetType, err := evalVarAccess.Eval(scope, stack)
if err != nil {
return nil, ast.TypeInvalid, err
}
evalKey, err := evalNode(v.Key)
if err != nil {
return nil, ast.TypeInvalid, err
}
key, keyType, err := evalKey.Eval(scope, stack)
if err != nil {
return nil, ast.TypeInvalid, err
}
// Last sanity check
if targetType != ast.TypeList {
return nil, ast.TypeInvalid, fmt.Errorf("target for indexing must be ast.TypeList, is %s", targetType)
}
if keyType != ast.TypeInt {
return nil, ast.TypeInvalid, fmt.Errorf("key for indexing must be ast.TypeInt, is %s", keyType)
}
list, ok := target.([]ast.Variable)
if !ok {
return nil, ast.TypeInvalid, fmt.Errorf("cannot cast target to []Variable")
}
keyInt, ok := key.(int)
if !ok {
return nil, ast.TypeInvalid, fmt.Errorf("cannot cast key to int")
}
if len(list) == 0 {
return nil, ast.TypeInvalid, fmt.Errorf("list is empty")
}
if keyInt < 0 || len(list) < keyInt+1 {
return nil, ast.TypeInvalid, fmt.Errorf("index %d out of range (max %d)", keyInt, len(list))
}
returnVal := list[keyInt].Value
returnType := list[keyInt].Type
return returnVal, returnType, nil
}
type evalConcat struct{ *ast.Concat }
func (v *evalConcat) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
// The expressions should all be on the stack in reverse
// order. So pop them off, reverse their order, and concatenate.
nodes := make([]*ast.LiteralNode, 0, len(v.Exprs))
for range v.Exprs {
nodes = append(nodes, stack.Pop().(*ast.LiteralNode))
}
var buf bytes.Buffer
for i := len(nodes) - 1; i >= 0; i-- {
buf.WriteString(nodes[i].Value.(string))
}
return buf.String(), ast.TypeString, nil
}
type evalLiteralNode struct{ *ast.LiteralNode }
func (v *evalLiteralNode) Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error) {
return v.Value, v.Typex, nil
}
type evalVariableAccess struct{ *ast.VariableAccess }
func (v *evalVariableAccess) Eval(scope ast.Scope, _ *ast.Stack) (interface{}, ast.Type, error) {
// Look up the variable in the map
variable, ok := scope.LookupVar(v.Name)
if !ok {
return nil, ast.TypeInvalid, fmt.Errorf(
"unknown variable accessed: %s", v.Name)
}
return variable.Value, variable.Type, nil
}
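The Eval entry point above pairs with the Parse function defined in parse.go later in this change. The following is a minimal usage sketch and is not part of the vendored file; the variable name, its value, and the VarMap field assumed on ast.BasicScope are illustrative assumptions about the hil ast package.
package main

import (
	"fmt"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/ast"
)

func main() {
	// Parse the interpolation string into an AST.
	tree, err := hil.Parse("hello ${name}")
	if err != nil {
		panic(err)
	}

	// Supply a scope containing the variable referenced above. VarMap is
	// assumed to be the variable map field on ast.BasicScope.
	config := &hil.EvalConfig{
		GlobalScope: &ast.BasicScope{
			VarMap: map[string]ast.Variable{
				"name": {Type: ast.TypeString, Value: "world"},
			},
		},
	}

	value, valueType, err := hil.Eval(tree, config)
	if err != nil {
		panic(err)
	}
	fmt.Println(valueType, value) // the string type and "hello world"
}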

196
vendor/github.com/hashicorp/hil/lang.y generated vendored Normal file
View File

@ -0,0 +1,196 @@
// This is the yacc input for creating the parser for interpolation
// expressions in Go. To build it, just run `go generate` on this
// package, as the lexer has the go generate pragma within it.
%{
package hil
import (
"github.com/hashicorp/hil/ast"
)
%}
%union {
node ast.Node
nodeList []ast.Node
str string
token *parserToken
}
%token <str> PROGRAM_BRACKET_LEFT PROGRAM_BRACKET_RIGHT
%token <str> PROGRAM_STRING_START PROGRAM_STRING_END
%token <str> PAREN_LEFT PAREN_RIGHT COMMA
%token <str> SQUARE_BRACKET_LEFT SQUARE_BRACKET_RIGHT
%token <token> ARITH_OP IDENTIFIER INTEGER FLOAT STRING
%type <node> expr interpolation literal literalModeTop literalModeValue
%type <nodeList> args
%left ARITH_OP
%%
top:
{
parserResult = &ast.LiteralNode{
Value: "",
Typex: ast.TypeString,
Posx: ast.Pos{Column: 1, Line: 1},
}
}
| literalModeTop
{
parserResult = $1
// We want to make sure that the top value is always a Concat
// so that the return value is always a string type from an
// interpolation.
//
// The logic for checking for a LiteralNode is a little annoying
// because functionally the AST is the same, but we do that because
// it makes for an easy literal check later (to check if a string
// has any interpolations).
if _, ok := $1.(*ast.Concat); !ok {
if n, ok := $1.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString {
parserResult = &ast.Concat{
Exprs: []ast.Node{$1},
Posx: $1.Pos(),
}
}
}
}
literalModeTop:
literalModeValue
{
$$ = $1
}
| literalModeTop literalModeValue
{
var result []ast.Node
if c, ok := $1.(*ast.Concat); ok {
result = append(c.Exprs, $2)
} else {
result = []ast.Node{$1, $2}
}
$$ = &ast.Concat{
Exprs: result,
Posx: result[0].Pos(),
}
}
literalModeValue:
literal
{
$$ = $1
}
| interpolation
{
$$ = $1
}
interpolation:
PROGRAM_BRACKET_LEFT expr PROGRAM_BRACKET_RIGHT
{
$$ = $2
}
expr:
PAREN_LEFT expr PAREN_RIGHT
{
$$ = $2
}
| literalModeTop
{
$$ = $1
}
| INTEGER
{
$$ = &ast.LiteralNode{
Value: $1.Value.(int),
Typex: ast.TypeInt,
Posx: $1.Pos,
}
}
| FLOAT
{
$$ = &ast.LiteralNode{
Value: $1.Value.(float64),
Typex: ast.TypeFloat,
Posx: $1.Pos,
}
}
| ARITH_OP expr
{
// This is REALLY jank. We assume that a singular ARITH_OP
// means 0 ARITH_OP expr, which... is weird. We don't want to
// support *, /, etc., only -. We should fix this later with a pure
// Go scanner/parser.
if $1.Value.(ast.ArithmeticOp) != ast.ArithmeticOpSub {
panic("Unary - is only allowed")
}
$$ = &ast.Arithmetic{
Op: $1.Value.(ast.ArithmeticOp),
Exprs: []ast.Node{
&ast.LiteralNode{Value: 0, Typex: ast.TypeInt},
$2,
},
Posx: $2.Pos(),
}
}
| expr ARITH_OP expr
{
$$ = &ast.Arithmetic{
Op: $2.Value.(ast.ArithmeticOp),
Exprs: []ast.Node{$1, $3},
Posx: $1.Pos(),
}
}
| IDENTIFIER
{
$$ = &ast.VariableAccess{Name: $1.Value.(string), Posx: $1.Pos}
}
| IDENTIFIER PAREN_LEFT args PAREN_RIGHT
{
$$ = &ast.Call{Func: $1.Value.(string), Args: $3, Posx: $1.Pos}
}
| IDENTIFIER SQUARE_BRACKET_LEFT expr SQUARE_BRACKET_RIGHT
{
$$ = &ast.Index{
Target: &ast.VariableAccess{
Name: $1.Value.(string),
Posx: $1.Pos,
},
Key: $3,
Posx: $1.Pos,
}
}
args:
{
$$ = nil
}
| args COMMA expr
{
$$ = append($1, $3)
}
| expr
{
$$ = append($$, $1)
}
literal:
STRING
{
$$ = &ast.LiteralNode{
Value: $1.Value.(string),
Typex: ast.TypeString,
Posx: $1.Pos,
}
}
%%

407
vendor/github.com/hashicorp/hil/lex.go generated vendored Normal file
View File

@ -0,0 +1,407 @@
package hil
import (
"bytes"
"fmt"
"strconv"
"unicode"
"unicode/utf8"
"github.com/hashicorp/hil/ast"
)
//go:generate go tool yacc -p parser lang.y
// The parser expects the lexer to return 0 on EOF.
const lexEOF = 0
// The parser uses the type <prefix>Lex as a lexer. It must provide
// the methods Lex(*<prefix>SymType) int and Error(string).
type parserLex struct {
Err error
Input string
mode parserMode
interpolationDepth int
pos int
width int
col, line int
lastLine int
astPos *ast.Pos
}
// parserToken is the token yielded to the parser. The value can be
// determined within the parser type based on the enum value returned
// from Lex.
type parserToken struct {
Value interface{}
Pos ast.Pos
}
// parserMode keeps track of what mode we're in for the parser. We have
// two modes: literal and interpolation. Literal mode is when strings
// don't have to be quoted, and interpolations are defined as ${foo}.
// Interpolation mode means that strings have to be quoted and unquoted
// things are identifiers, such as foo("bar").
type parserMode uint8
const (
parserModeInvalid parserMode = 0
parserModeLiteral = 1 << iota
parserModeInterpolation
)
// The parser calls this method to get each new token.
func (x *parserLex) Lex(yylval *parserSymType) int {
// We always start in literal mode, since programs don't start
// in an interpolation. ex. "foo ${bar}" vs "bar" (and assuming interp.)
if x.mode == parserModeInvalid {
x.mode = parserModeLiteral
}
// Defer an update to set the proper column/line we read the next token.
defer func() {
if yylval.token != nil && yylval.token.Pos.Column == 0 {
yylval.token.Pos = *x.astPos
}
}()
x.astPos = nil
return x.lex(yylval)
}
func (x *parserLex) lex(yylval *parserSymType) int {
switch x.mode {
case parserModeLiteral:
return x.lexModeLiteral(yylval)
case parserModeInterpolation:
return x.lexModeInterpolation(yylval)
default:
x.Error(fmt.Sprintf("Unknown parse mode: %d", x.mode))
return lexEOF
}
}
func (x *parserLex) lexModeLiteral(yylval *parserSymType) int {
for {
c := x.next()
if c == lexEOF {
return lexEOF
}
// Are we starting an interpolation?
if c == '$' && x.peek() == '{' {
x.next()
x.interpolationDepth++
x.mode = parserModeInterpolation
return PROGRAM_BRACKET_LEFT
}
// We're just a normal string that isn't part of any interpolation yet.
x.backup()
result, terminated := x.lexString(yylval, x.interpolationDepth > 0)
// If the string terminated and we're within an interpolation already
// then that means that we finished a nested string, so pop
// back out to interpolation mode.
if terminated && x.interpolationDepth > 0 {
x.mode = parserModeInterpolation
// If the string is empty, just skip it. We're still in
// an interpolation so we do this to avoid empty nodes.
if yylval.token.Value.(string) == "" {
return x.lex(yylval)
}
}
return result
}
}
func (x *parserLex) lexModeInterpolation(yylval *parserSymType) int {
for {
c := x.next()
if c == lexEOF {
return lexEOF
}
// Ignore all whitespace
if unicode.IsSpace(c) {
continue
}
// If we see a double quote then we're lexing a string since
// we're in interpolation mode.
if c == '"' {
result, terminated := x.lexString(yylval, true)
if !terminated {
// The string didn't end, which means that we're in the
// middle of starting another interpolation.
x.mode = parserModeLiteral
// If the string is empty and we're starting an interpolation,
// then just skip it to avoid empty string AST nodes
if yylval.token.Value.(string) == "" {
return x.lex(yylval)
}
}
return result
}
// A digit marks the start of a number. Lex it.
if c >= '0' && c <= '9' {
x.backup()
return x.lexNumber(yylval)
}
switch c {
case '}':
// '}' means we ended the interpolation. Pop back into
// literal mode and reduce our interpolation depth.
x.interpolationDepth--
x.mode = parserModeLiteral
return PROGRAM_BRACKET_RIGHT
case '(':
return PAREN_LEFT
case ')':
return PAREN_RIGHT
case '[':
return SQUARE_BRACKET_LEFT
case ']':
return SQUARE_BRACKET_RIGHT
case ',':
return COMMA
case '+':
yylval.token = &parserToken{Value: ast.ArithmeticOpAdd}
return ARITH_OP
case '-':
yylval.token = &parserToken{Value: ast.ArithmeticOpSub}
return ARITH_OP
case '*':
yylval.token = &parserToken{Value: ast.ArithmeticOpMul}
return ARITH_OP
case '/':
yylval.token = &parserToken{Value: ast.ArithmeticOpDiv}
return ARITH_OP
case '%':
yylval.token = &parserToken{Value: ast.ArithmeticOpMod}
return ARITH_OP
default:
x.backup()
return x.lexId(yylval)
}
}
}
func (x *parserLex) lexId(yylval *parserSymType) int {
var b bytes.Buffer
var last rune
for {
c := x.next()
if c == lexEOF {
break
}
// We only allow '*' after a '.' for resource splats: type.name.*.id
// Otherwise, it's probably multiplication.
if c == '*' && last != '.' {
x.backup()
break
}
// If this isn't a character we want in an ID, return out.
// One day we should make this a regexp.
if c != '_' &&
c != '-' &&
c != '.' &&
c != '*' &&
!unicode.IsLetter(c) &&
!unicode.IsNumber(c) {
x.backup()
break
}
if _, err := b.WriteRune(c); err != nil {
x.Error(err.Error())
return lexEOF
}
last = c
}
yylval.token = &parserToken{Value: b.String()}
return IDENTIFIER
}
// lexNumber lexes out a number: an integer or a float.
func (x *parserLex) lexNumber(yylval *parserSymType) int {
var b bytes.Buffer
gotPeriod := false
for {
c := x.next()
if c == lexEOF {
break
}
// If we see a period, we might be getting a float.
if c == '.' {
// If we've already seen a period, then ignore it, and
// exit. This will probably result in a syntax error later.
if gotPeriod {
x.backup()
break
}
gotPeriod = true
} else if c < '0' || c > '9' {
// If we're not seeing a number, then also exit.
x.backup()
break
}
if _, err := b.WriteRune(c); err != nil {
x.Error(fmt.Sprintf("internal error: %s", err))
return lexEOF
}
}
// If we didn't see a period, it is an int
if !gotPeriod {
v, err := strconv.ParseInt(b.String(), 0, 0)
if err != nil {
x.Error(fmt.Sprintf("expected number: %s", err))
return lexEOF
}
yylval.token = &parserToken{Value: int(v)}
return INTEGER
}
// If we did see a period, it is a float
f, err := strconv.ParseFloat(b.String(), 64)
if err != nil {
x.Error(fmt.Sprintf("expected float: %s", err))
return lexEOF
}
yylval.token = &parserToken{Value: f}
return FLOAT
}
func (x *parserLex) lexString(yylval *parserSymType, quoted bool) (int, bool) {
var b bytes.Buffer
terminated := false
for {
c := x.next()
if c == lexEOF {
if quoted {
x.Error("unterminated string")
}
break
}
// Behavior is a bit different if we're lexing within a quoted string.
if quoted {
// If it's a double quote, we've reached the end of the string
if c == '"' {
terminated = true
break
}
// Let's check to see if we're escaping anything.
if c == '\\' {
switch n := x.next(); n {
case '\\', '"':
c = n
case 'n':
c = '\n'
default:
x.backup()
}
}
}
// If we hit a dollar sign, then check if we're starting
// another interpolation. If so, then we're done.
if c == '$' {
n := x.peek()
// If it is '{', then we're starting another interpolation
if n == '{' {
x.backup()
break
}
// If it is '$', then we're escaping a dollar sign
if n == '$' {
x.next()
}
}
if _, err := b.WriteRune(c); err != nil {
x.Error(err.Error())
return lexEOF, false
}
}
yylval.token = &parserToken{Value: b.String()}
return STRING, terminated
}
// Return the next rune for the lexer.
func (x *parserLex) next() rune {
if int(x.pos) >= len(x.Input) {
x.width = 0
return lexEOF
}
r, w := utf8.DecodeRuneInString(x.Input[x.pos:])
x.width = w
x.pos += x.width
if x.line == 0 {
x.line = 1
x.col = 1
} else {
x.col += 1
}
if r == '\n' {
x.lastLine = x.col
x.line += 1
x.col = 1
}
if x.astPos == nil {
x.astPos = &ast.Pos{Column: x.col, Line: x.line}
}
return r
}
// peek returns but does not consume the next rune in the input
func (x *parserLex) peek() rune {
r := x.next()
x.backup()
return r
}
// backup steps back one rune. Can only be called once per next.
func (x *parserLex) backup() {
x.pos -= x.width
x.col -= 1
// If we are at column 0, we're backing up across a line boundary
// so we need to be careful to get the proper value.
if x.col == 0 {
x.col = x.lastLine
x.line -= 1
}
}
// The parser calls this method on a parse error.
func (x *parserLex) Error(s string) {
x.Err = fmt.Errorf("parse error: %s", s)
}

30
vendor/github.com/hashicorp/hil/parse.go generated vendored Normal file
View File

@ -0,0 +1,30 @@
package hil
import (
"sync"
"github.com/hashicorp/hil/ast"
)
var parserLock sync.Mutex
var parserResult ast.Node
// Parse parses the given program and returns an executable AST tree.
func Parse(v string) (ast.Node, error) {
// Unfortunately due to the way that goyacc generated parsers are
// formatted, we can only do a single parse at a time without a lot
// of extra work. In the future we can remove this limitation.
parserLock.Lock()
defer parserLock.Unlock()
// Reset our globals
parserResult = nil
// Create the lexer
lex := &parserLex{Input: v}
// Parse!
parserParse(lex)
return parserResult, lex.Err
}

29
vendor/github.com/hashicorp/hil/transform_fixed.go generated vendored Normal file
View File

@ -0,0 +1,29 @@
package hil
import (
"github.com/hashicorp/hil/ast"
)
// FixedValueTransform transforms an AST to return a fixed value for
// all interpolations. i.e. you can make "hi ${anything}" always
// turn into "hi foo".
//
// The primary use case for this is for config validations where you can
// verify that interpolations result in a certain type of string.
func FixedValueTransform(root ast.Node, Value *ast.LiteralNode) ast.Node {
// We visit the nodes in top-down order
result := root
switch n := result.(type) {
case *ast.Concat:
for i, v := range n.Exprs {
n.Exprs[i] = FixedValueTransform(v, Value)
}
case *ast.LiteralNode:
// We keep it as-is
default:
// Anything else we replace
result = Value
}
return result
}
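As a rough illustration of the use case described in the comment above, the fragment below forces every interpolation to a fixed literal before evaluating. It is not part of the vendored file, it reuses the hil and ast imports from the earlier sketch, and it assumes only the Parse, Eval and FixedValueTransform functions shown in this change.
root, err := hil.Parse("hi ${anything}")
if err != nil {
	panic(err)
}

// Replace everything that is not a literal with the fixed string "foo".
fixed := hil.FixedValueTransform(root, &ast.LiteralNode{
	Value: "foo",
	Typex: ast.TypeString,
})

// Evaluating the transformed tree yields "hi foo" regardless of scope.
value, _, err := hil.Eval(fixed, &hil.EvalConfig{GlobalScope: &ast.BasicScope{}})
if err != nil {
	panic(err)
}
fmt.Println(value) // "hi foo"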

266
vendor/github.com/hashicorp/hil/walk.go generated vendored Normal file
View File

@ -0,0 +1,266 @@
package hil
import (
"fmt"
"reflect"
"strings"
"github.com/hashicorp/hil/ast"
"github.com/mitchellh/reflectwalk"
)
// WalkFn is the type of function to pass to Walk. Modify fields within
// WalkData to control whether replacement happens.
type WalkFn func(*WalkData) error
// WalkData is the structure passed to the callback of the Walk function.
//
// This structure contains data passed in as well as fields that are expected
// to be written by the caller as a result. Please see the documentation for
// each field for more information.
type WalkData struct {
// Root is the parsed root of this HIL program
Root ast.Node
// Location is the location within the structure where this
// value was found. This can be used to modify behavior within
// slices and so on.
Location reflectwalk.Location
// The below two values must be set by the callback to have any effect.
//
// Replace, if true, will replace the value in the structure with
// ReplaceValue. It is up to the caller to make sure this is a string.
Replace bool
ReplaceValue string
}
// Walk will walk an arbitrary Go structure and parse any string as an
// HIL program and call the callback cb to determine what to replace it
// with.
//
// This function is very useful for arbitrary HIL program interpolation
// across a complex configuration structure. Due to the heavy use of
// reflection in this function, it is recommended to write many unit tests
// with your typical configuration structures to help mitigate the risk
// of panics.
func Walk(v interface{}, cb WalkFn) error {
walker := &interpolationWalker{F: cb}
return reflectwalk.Walk(v, walker)
}
// interpolationWalker implements interfaces for the reflectwalk package
// (github.com/mitchellh/reflectwalk) that can be used to automatically
// execute a callback for an interpolation.
type interpolationWalker struct {
F WalkFn
key []string
lastValue reflect.Value
loc reflectwalk.Location
cs []reflect.Value
csKey []reflect.Value
csData interface{}
sliceIndex int
unknownKeys []string
}
func (w *interpolationWalker) Enter(loc reflectwalk.Location) error {
w.loc = loc
return nil
}
func (w *interpolationWalker) Exit(loc reflectwalk.Location) error {
w.loc = reflectwalk.None
switch loc {
case reflectwalk.Map:
w.cs = w.cs[:len(w.cs)-1]
case reflectwalk.MapValue:
w.key = w.key[:len(w.key)-1]
w.csKey = w.csKey[:len(w.csKey)-1]
case reflectwalk.Slice:
// Split any values that need to be split
w.splitSlice()
w.cs = w.cs[:len(w.cs)-1]
case reflectwalk.SliceElem:
w.csKey = w.csKey[:len(w.csKey)-1]
}
return nil
}
func (w *interpolationWalker) Map(m reflect.Value) error {
w.cs = append(w.cs, m)
return nil
}
func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error {
w.csData = k
w.csKey = append(w.csKey, k)
w.key = append(w.key, k.String())
w.lastValue = v
return nil
}
func (w *interpolationWalker) Slice(s reflect.Value) error {
w.cs = append(w.cs, s)
return nil
}
func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error {
w.csKey = append(w.csKey, reflect.ValueOf(i))
w.sliceIndex = i
return nil
}
func (w *interpolationWalker) Primitive(v reflect.Value) error {
setV := v
// We only care about strings
if v.Kind() == reflect.Interface {
setV = v
v = v.Elem()
}
if v.Kind() != reflect.String {
return nil
}
astRoot, err := Parse(v.String())
if err != nil {
return err
}
// If the AST we got is just a literal string value with the same
// value then we ignore it. We have to check if it's the same value
// because it is possible to input a string, get out a string, and
// have it be different. For example: "foo-$${bar}" turns into
// "foo-${bar}"
if n, ok := astRoot.(*ast.LiteralNode); ok {
if s, ok := n.Value.(string); ok && s == v.String() {
return nil
}
}
if w.F == nil {
return nil
}
data := WalkData{Root: astRoot, Location: w.loc}
if err := w.F(&data); err != nil {
return fmt.Errorf(
"%s in:\n\n%s",
err, v.String())
}
if data.Replace {
/*
if remove {
w.removeCurrent()
return nil
}
*/
resultVal := reflect.ValueOf(data.ReplaceValue)
switch w.loc {
case reflectwalk.MapKey:
m := w.cs[len(w.cs)-1]
// Delete the old value
var zero reflect.Value
m.SetMapIndex(w.csData.(reflect.Value), zero)
// Set the new key with the existing value
m.SetMapIndex(resultVal, w.lastValue)
// Set the key to be the new key
w.csData = resultVal
case reflectwalk.MapValue:
// If we're in a map, then the only way to set a map value is
// to set it directly.
m := w.cs[len(w.cs)-1]
mk := w.csData.(reflect.Value)
m.SetMapIndex(mk, resultVal)
default:
// Otherwise, we should be addressable
setV.Set(resultVal)
}
}
return nil
}
func (w *interpolationWalker) removeCurrent() {
// Append the key to the unknown keys
w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, "."))
for i := 1; i <= len(w.cs); i++ {
c := w.cs[len(w.cs)-i]
switch c.Kind() {
case reflect.Map:
// Zero value so that we delete the map key
var val reflect.Value
// Get the key and delete it
k := w.csData.(reflect.Value)
c.SetMapIndex(k, val)
return
}
}
panic("No container found for removeCurrent")
}
func (w *interpolationWalker) replaceCurrent(v reflect.Value) {
c := w.cs[len(w.cs)-2]
switch c.Kind() {
case reflect.Map:
// Get the key and delete it
k := w.csKey[len(w.csKey)-1]
c.SetMapIndex(k, v)
}
}
func (w *interpolationWalker) splitSlice() {
// Get the []interface{} slice so we can do some operations on
// it without dealing with reflection. We'll document each step
// here to be clear.
var s []interface{}
raw := w.cs[len(w.cs)-1]
switch v := raw.Interface().(type) {
case []interface{}:
s = v
case []map[string]interface{}:
return
default:
panic("Unknown kind: " + raw.Kind().String())
}
// Check if we have any elements that we need to split. If not, then
// just return since we're done.
split := false
if !split {
return
}
// Make a new result slice that is twice the capacity to fit our growth.
result := make([]interface{}, 0, len(s)*2)
// Go over each element of the original slice and start building up
// the resulting slice by splitting where we have to.
for _, v := range s {
sv, ok := v.(string)
if !ok {
// Not a string, so just set it
result = append(result, v)
continue
}
// A plain string that needs no splitting, so just append it
result = append(result, sv)
}
// Our slice is now done, we have to replace the slice now
// with this new one that we have.
w.replaceCurrent(reflect.ValueOf(result))
}
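A short sketch of the Walk callback contract described above, separate from the vendored file; the map keys and the replacement marker are illustrative only.
package main

import (
	"fmt"

	"github.com/hashicorp/hil"
)

func main() {
	config := map[string]interface{}{
		"greeting": "hello ${name}",
		"plain":    "no interpolation here",
	}

	// Replace every HIL program found in the structure with a fixed marker
	// instead of evaluating it.
	err := hil.Walk(config, func(d *hil.WalkData) error {
		d.Replace = true
		d.ReplaceValue = "<computed>"
		return nil
	})
	if err != nil {
		panic(err)
	}

	fmt.Println(config["greeting"]) // <computed>
	fmt.Println(config["plain"])    // unchanged, since it parses to a plain literal
}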

665
vendor/github.com/hashicorp/hil/y.go generated vendored Normal file
View File

@ -0,0 +1,665 @@
//line lang.y:6
package hil
import __yyfmt__ "fmt"
//line lang.y:6
import (
"github.com/hashicorp/hil/ast"
)
//line lang.y:14
type parserSymType struct {
yys int
node ast.Node
nodeList []ast.Node
str string
token *parserToken
}
const PROGRAM_BRACKET_LEFT = 57346
const PROGRAM_BRACKET_RIGHT = 57347
const PROGRAM_STRING_START = 57348
const PROGRAM_STRING_END = 57349
const PAREN_LEFT = 57350
const PAREN_RIGHT = 57351
const COMMA = 57352
const SQUARE_BRACKET_LEFT = 57353
const SQUARE_BRACKET_RIGHT = 57354
const ARITH_OP = 57355
const IDENTIFIER = 57356
const INTEGER = 57357
const FLOAT = 57358
const STRING = 57359
var parserToknames = [...]string{
"$end",
"error",
"$unk",
"PROGRAM_BRACKET_LEFT",
"PROGRAM_BRACKET_RIGHT",
"PROGRAM_STRING_START",
"PROGRAM_STRING_END",
"PAREN_LEFT",
"PAREN_RIGHT",
"COMMA",
"SQUARE_BRACKET_LEFT",
"SQUARE_BRACKET_RIGHT",
"ARITH_OP",
"IDENTIFIER",
"INTEGER",
"FLOAT",
"STRING",
}
var parserStatenames = [...]string{}
const parserEofCode = 1
const parserErrCode = 2
const parserMaxDepth = 200
//line lang.y:196
//line yacctab:1
var parserExca = [...]int{
-1, 1,
1, -1,
-2, 0,
}
const parserNprod = 21
const parserPrivate = 57344
var parserTokenNames []string
var parserStates []string
const parserLast = 37
var parserAct = [...]int{
9, 7, 29, 17, 23, 16, 17, 3, 17, 20,
8, 18, 21, 17, 6, 19, 27, 28, 22, 8,
1, 25, 26, 7, 11, 2, 24, 10, 4, 30,
5, 0, 14, 15, 12, 13, 6,
}
var parserPact = [...]int{
-3, -1000, -3, -1000, -1000, -1000, -1000, 19, -1000, 0,
19, -3, -1000, -1000, 19, 1, -1000, 19, -5, -1000,
19, 19, -1000, -1000, 7, -7, -10, -1000, 19, -1000,
-7,
}
var parserPgo = [...]int{
0, 0, 30, 28, 24, 7, 26, 20,
}
var parserR1 = [...]int{
0, 7, 7, 4, 4, 5, 5, 2, 1, 1,
1, 1, 1, 1, 1, 1, 1, 6, 6, 6,
3,
}
var parserR2 = [...]int{
0, 0, 1, 1, 2, 1, 1, 3, 3, 1,
1, 1, 2, 3, 1, 4, 4, 0, 3, 1,
1,
}
var parserChk = [...]int{
-1000, -7, -4, -5, -3, -2, 17, 4, -5, -1,
8, -4, 15, 16, 13, 14, 5, 13, -1, -1,
8, 11, -1, 9, -6, -1, -1, 9, 10, 12,
-1,
}
var parserDef = [...]int{
1, -2, 2, 3, 5, 6, 20, 0, 4, 0,
0, 9, 10, 11, 0, 14, 7, 0, 0, 12,
17, 0, 13, 8, 0, 19, 0, 15, 0, 16,
18,
}
var parserTok1 = [...]int{
1,
}
var parserTok2 = [...]int{
2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17,
}
var parserTok3 = [...]int{
0,
}
var parserErrorMessages = [...]struct {
state int
token int
msg string
}{}
//line yaccpar:1
/* parser for yacc output */
var (
parserDebug = 0
parserErrorVerbose = false
)
type parserLexer interface {
Lex(lval *parserSymType) int
Error(s string)
}
type parserParser interface {
Parse(parserLexer) int
Lookahead() int
}
type parserParserImpl struct {
lookahead func() int
}
func (p *parserParserImpl) Lookahead() int {
return p.lookahead()
}
func parserNewParser() parserParser {
p := &parserParserImpl{
lookahead: func() int { return -1 },
}
return p
}
const parserFlag = -1000
func parserTokname(c int) string {
if c >= 1 && c-1 < len(parserToknames) {
if parserToknames[c-1] != "" {
return parserToknames[c-1]
}
}
return __yyfmt__.Sprintf("tok-%v", c)
}
func parserStatname(s int) string {
if s >= 0 && s < len(parserStatenames) {
if parserStatenames[s] != "" {
return parserStatenames[s]
}
}
return __yyfmt__.Sprintf("state-%v", s)
}
func parserErrorMessage(state, lookAhead int) string {
const TOKSTART = 4
if !parserErrorVerbose {
return "syntax error"
}
for _, e := range parserErrorMessages {
if e.state == state && e.token == lookAhead {
return "syntax error: " + e.msg
}
}
res := "syntax error: unexpected " + parserTokname(lookAhead)
// To match Bison, suggest at most four expected tokens.
expected := make([]int, 0, 4)
// Look for shiftable tokens.
base := parserPact[state]
for tok := TOKSTART; tok-1 < len(parserToknames); tok++ {
if n := base + tok; n >= 0 && n < parserLast && parserChk[parserAct[n]] == tok {
if len(expected) == cap(expected) {
return res
}
expected = append(expected, tok)
}
}
if parserDef[state] == -2 {
i := 0
for parserExca[i] != -1 || parserExca[i+1] != state {
i += 2
}
// Look for tokens that we accept or reduce.
for i += 2; parserExca[i] >= 0; i += 2 {
tok := parserExca[i]
if tok < TOKSTART || parserExca[i+1] == 0 {
continue
}
if len(expected) == cap(expected) {
return res
}
expected = append(expected, tok)
}
// If the default action is to accept or reduce, give up.
if parserExca[i+1] != 0 {
return res
}
}
for i, tok := range expected {
if i == 0 {
res += ", expecting "
} else {
res += " or "
}
res += parserTokname(tok)
}
return res
}
func parserlex1(lex parserLexer, lval *parserSymType) (char, token int) {
token = 0
char = lex.Lex(lval)
if char <= 0 {
token = parserTok1[0]
goto out
}
if char < len(parserTok1) {
token = parserTok1[char]
goto out
}
if char >= parserPrivate {
if char < parserPrivate+len(parserTok2) {
token = parserTok2[char-parserPrivate]
goto out
}
}
for i := 0; i < len(parserTok3); i += 2 {
token = parserTok3[i+0]
if token == char {
token = parserTok3[i+1]
goto out
}
}
out:
if token == 0 {
token = parserTok2[1] /* unknown char */
}
if parserDebug >= 3 {
__yyfmt__.Printf("lex %s(%d)\n", parserTokname(token), uint(char))
}
return char, token
}
func parserParse(parserlex parserLexer) int {
return parserNewParser().Parse(parserlex)
}
func (parserrcvr *parserParserImpl) Parse(parserlex parserLexer) int {
var parsern int
var parserlval parserSymType
var parserVAL parserSymType
var parserDollar []parserSymType
_ = parserDollar // silence set and not used
parserS := make([]parserSymType, parserMaxDepth)
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
parserstate := 0
parserchar := -1
parsertoken := -1 // parserchar translated into internal numbering
parserrcvr.lookahead = func() int { return parserchar }
defer func() {
// Make sure we report no lookahead when not parsing.
parserstate = -1
parserchar = -1
parsertoken = -1
}()
parserp := -1
goto parserstack
ret0:
return 0
ret1:
return 1
parserstack:
/* put a state and value onto the stack */
if parserDebug >= 4 {
__yyfmt__.Printf("char %v in %v\n", parserTokname(parsertoken), parserStatname(parserstate))
}
parserp++
if parserp >= len(parserS) {
nyys := make([]parserSymType, len(parserS)*2)
copy(nyys, parserS)
parserS = nyys
}
parserS[parserp] = parserVAL
parserS[parserp].yys = parserstate
parsernewstate:
parsern = parserPact[parserstate]
if parsern <= parserFlag {
goto parserdefault /* simple state */
}
if parserchar < 0 {
parserchar, parsertoken = parserlex1(parserlex, &parserlval)
}
parsern += parsertoken
if parsern < 0 || parsern >= parserLast {
goto parserdefault
}
parsern = parserAct[parsern]
if parserChk[parsern] == parsertoken { /* valid shift */
parserchar = -1
parsertoken = -1
parserVAL = parserlval
parserstate = parsern
if Errflag > 0 {
Errflag--
}
goto parserstack
}
parserdefault:
/* default state action */
parsern = parserDef[parserstate]
if parsern == -2 {
if parserchar < 0 {
parserchar, parsertoken = parserlex1(parserlex, &parserlval)
}
/* look through exception table */
xi := 0
for {
if parserExca[xi+0] == -1 && parserExca[xi+1] == parserstate {
break
}
xi += 2
}
for xi += 2; ; xi += 2 {
parsern = parserExca[xi+0]
if parsern < 0 || parsern == parsertoken {
break
}
}
parsern = parserExca[xi+1]
if parsern < 0 {
goto ret0
}
}
if parsern == 0 {
/* error ... attempt to resume parsing */
switch Errflag {
case 0: /* brand new error */
parserlex.Error(parserErrorMessage(parserstate, parsertoken))
Nerrs++
if parserDebug >= 1 {
__yyfmt__.Printf("%s", parserStatname(parserstate))
__yyfmt__.Printf(" saw %s\n", parserTokname(parsertoken))
}
fallthrough
case 1, 2: /* incompletely recovered error ... try again */
Errflag = 3
/* find a state where "error" is a legal shift action */
for parserp >= 0 {
parsern = parserPact[parserS[parserp].yys] + parserErrCode
if parsern >= 0 && parsern < parserLast {
parserstate = parserAct[parsern] /* simulate a shift of "error" */
if parserChk[parserstate] == parserErrCode {
goto parserstack
}
}
/* the current p has no shift on "error", pop stack */
if parserDebug >= 2 {
__yyfmt__.Printf("error recovery pops state %d\n", parserS[parserp].yys)
}
parserp--
}
/* there is no state on the stack with an error shift ... abort */
goto ret1
case 3: /* no shift yet; clobber input char */
if parserDebug >= 2 {
__yyfmt__.Printf("error recovery discards %s\n", parserTokname(parsertoken))
}
if parsertoken == parserEofCode {
goto ret1
}
parserchar = -1
parsertoken = -1
goto parsernewstate /* try again in the same state */
}
}
/* reduction by production parsern */
if parserDebug >= 2 {
__yyfmt__.Printf("reduce %v in:\n\t%v\n", parsern, parserStatname(parserstate))
}
parsernt := parsern
parserpt := parserp
_ = parserpt // guard against "declared and not used"
parserp -= parserR2[parsern]
// parserp is now the index of $0. Perform the default action. Iff the
// reduced production is ε, $1 is possibly out of range.
if parserp+1 >= len(parserS) {
nyys := make([]parserSymType, len(parserS)*2)
copy(nyys, parserS)
parserS = nyys
}
parserVAL = parserS[parserp+1]
/* consult goto table to find next state */
parsern = parserR1[parsern]
parserg := parserPgo[parsern]
parserj := parserg + parserS[parserp].yys + 1
if parserj >= parserLast {
parserstate = parserAct[parserg]
} else {
parserstate = parserAct[parserj]
if parserChk[parserstate] != -parsern {
parserstate = parserAct[parserg]
}
}
// dummy call; replaced with literal code
switch parsernt {
case 1:
parserDollar = parserS[parserpt-0 : parserpt+1]
//line lang.y:36
{
parserResult = &ast.LiteralNode{
Value: "",
Typex: ast.TypeString,
Posx: ast.Pos{Column: 1, Line: 1},
}
}
case 2:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:44
{
parserResult = parserDollar[1].node
// We want to make sure that the top value is always a Concat
// so that the return value is always a string type from an
// interpolation.
//
// The logic for checking for a LiteralNode is a little annoying
// because functionally the AST is the same, but we do that because
// it makes for an easy literal check later (to check if a string
// has any interpolations).
if _, ok := parserDollar[1].node.(*ast.Concat); !ok {
if n, ok := parserDollar[1].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString {
parserResult = &ast.Concat{
Exprs: []ast.Node{parserDollar[1].node},
Posx: parserDollar[1].node.Pos(),
}
}
}
}
case 3:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:67
{
parserVAL.node = parserDollar[1].node
}
case 4:
parserDollar = parserS[parserpt-2 : parserpt+1]
//line lang.y:71
{
var result []ast.Node
if c, ok := parserDollar[1].node.(*ast.Concat); ok {
result = append(c.Exprs, parserDollar[2].node)
} else {
result = []ast.Node{parserDollar[1].node, parserDollar[2].node}
}
parserVAL.node = &ast.Concat{
Exprs: result,
Posx: result[0].Pos(),
}
}
case 5:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:87
{
parserVAL.node = parserDollar[1].node
}
case 6:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:91
{
parserVAL.node = parserDollar[1].node
}
case 7:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:97
{
parserVAL.node = parserDollar[2].node
}
case 8:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:103
{
parserVAL.node = parserDollar[2].node
}
case 9:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:107
{
parserVAL.node = parserDollar[1].node
}
case 10:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:111
{
parserVAL.node = &ast.LiteralNode{
Value: parserDollar[1].token.Value.(int),
Typex: ast.TypeInt,
Posx: parserDollar[1].token.Pos,
}
}
case 11:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:119
{
parserVAL.node = &ast.LiteralNode{
Value: parserDollar[1].token.Value.(float64),
Typex: ast.TypeFloat,
Posx: parserDollar[1].token.Pos,
}
}
case 12:
parserDollar = parserS[parserpt-2 : parserpt+1]
//line lang.y:127
{
// This is REALLY jank. We assume that a singular ARITH_OP
// means 0 ARITH_OP expr, which... is weird. We don't want to
// support *, /, etc., only -. We should fix this later with a pure
// Go scanner/parser.
if parserDollar[1].token.Value.(ast.ArithmeticOp) != ast.ArithmeticOpSub {
panic("Unary - is only allowed")
}
parserVAL.node = &ast.Arithmetic{
Op: parserDollar[1].token.Value.(ast.ArithmeticOp),
Exprs: []ast.Node{
&ast.LiteralNode{Value: 0, Typex: ast.TypeInt},
parserDollar[2].node,
},
Posx: parserDollar[2].node.Pos(),
}
}
case 13:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:146
{
parserVAL.node = &ast.Arithmetic{
Op: parserDollar[2].token.Value.(ast.ArithmeticOp),
Exprs: []ast.Node{parserDollar[1].node, parserDollar[3].node},
Posx: parserDollar[1].node.Pos(),
}
}
case 14:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:154
{
parserVAL.node = &ast.VariableAccess{Name: parserDollar[1].token.Value.(string), Posx: parserDollar[1].token.Pos}
}
case 15:
parserDollar = parserS[parserpt-4 : parserpt+1]
//line lang.y:158
{
parserVAL.node = &ast.Call{Func: parserDollar[1].token.Value.(string), Args: parserDollar[3].nodeList, Posx: parserDollar[1].token.Pos}
}
case 16:
parserDollar = parserS[parserpt-4 : parserpt+1]
//line lang.y:162
{
parserVAL.node = &ast.Index{
Target: &ast.VariableAccess{
Name: parserDollar[1].token.Value.(string),
Posx: parserDollar[1].token.Pos,
},
Key: parserDollar[3].node,
Posx: parserDollar[1].token.Pos,
}
}
case 17:
parserDollar = parserS[parserpt-0 : parserpt+1]
//line lang.y:174
{
parserVAL.nodeList = nil
}
case 18:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:178
{
parserVAL.nodeList = append(parserDollar[1].nodeList, parserDollar[3].node)
}
case 19:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:182
{
parserVAL.nodeList = append(parserVAL.nodeList, parserDollar[1].node)
}
case 20:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:188
{
parserVAL.node = &ast.LiteralNode{
Value: parserDollar[1].token.Value.(string),
Typex: ast.TypeString,
Posx: parserDollar[1].token.Pos,
}
}
}
goto parserstack /* stack new state and value */
}

328
vendor/github.com/hashicorp/hil/y.output generated vendored Normal file
View File

@ -0,0 +1,328 @@
state 0
$accept: .top $end
top: . (1)
PROGRAM_BRACKET_LEFT shift 7
STRING shift 6
. reduce 1 (src line 35)
interpolation goto 5
literal goto 4
literalModeTop goto 2
literalModeValue goto 3
top goto 1
state 1
$accept: top.$end
$end accept
. error
state 2
top: literalModeTop. (2)
literalModeTop: literalModeTop.literalModeValue
PROGRAM_BRACKET_LEFT shift 7
STRING shift 6
. reduce 2 (src line 43)
interpolation goto 5
literal goto 4
literalModeValue goto 8
state 3
literalModeTop: literalModeValue. (3)
. reduce 3 (src line 65)
state 4
literalModeValue: literal. (5)
. reduce 5 (src line 85)
state 5
literalModeValue: interpolation. (6)
. reduce 6 (src line 90)
state 6
literal: STRING. (20)
. reduce 20 (src line 186)
state 7
interpolation: PROGRAM_BRACKET_LEFT.expr PROGRAM_BRACKET_RIGHT
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
expr goto 9
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
state 8
literalModeTop: literalModeTop literalModeValue. (4)
. reduce 4 (src line 70)
state 9
interpolation: PROGRAM_BRACKET_LEFT expr.PROGRAM_BRACKET_RIGHT
expr: expr.ARITH_OP expr
PROGRAM_BRACKET_RIGHT shift 16
ARITH_OP shift 17
. error
state 10
expr: PAREN_LEFT.expr PAREN_RIGHT
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
expr goto 18
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
state 11
literalModeTop: literalModeTop.literalModeValue
expr: literalModeTop. (9)
PROGRAM_BRACKET_LEFT shift 7
STRING shift 6
. reduce 9 (src line 106)
interpolation goto 5
literal goto 4
literalModeValue goto 8
state 12
expr: INTEGER. (10)
. reduce 10 (src line 110)
state 13
expr: FLOAT. (11)
. reduce 11 (src line 118)
state 14
expr: ARITH_OP.expr
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
expr goto 19
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
state 15
expr: IDENTIFIER. (14)
expr: IDENTIFIER.PAREN_LEFT args PAREN_RIGHT
expr: IDENTIFIER.SQUARE_BRACKET_LEFT expr SQUARE_BRACKET_RIGHT
PAREN_LEFT shift 20
SQUARE_BRACKET_LEFT shift 21
. reduce 14 (src line 153)
state 16
interpolation: PROGRAM_BRACKET_LEFT expr PROGRAM_BRACKET_RIGHT. (7)
. reduce 7 (src line 95)
state 17
expr: expr ARITH_OP.expr
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
expr goto 22
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
state 18
expr: PAREN_LEFT expr.PAREN_RIGHT
expr: expr.ARITH_OP expr
PAREN_RIGHT shift 23
ARITH_OP shift 17
. error
state 19
expr: ARITH_OP expr. (12)
expr: expr.ARITH_OP expr
. reduce 12 (src line 126)
state 20
expr: IDENTIFIER PAREN_LEFT.args PAREN_RIGHT
args: . (17)
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. reduce 17 (src line 173)
expr goto 25
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
args goto 24
state 21
expr: IDENTIFIER SQUARE_BRACKET_LEFT.expr SQUARE_BRACKET_RIGHT
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
expr goto 26
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
state 22
expr: expr.ARITH_OP expr
expr: expr ARITH_OP expr. (13)
. reduce 13 (src line 145)
state 23
expr: PAREN_LEFT expr PAREN_RIGHT. (8)
. reduce 8 (src line 101)
state 24
expr: IDENTIFIER PAREN_LEFT args.PAREN_RIGHT
args: args.COMMA expr
PAREN_RIGHT shift 27
COMMA shift 28
. error
state 25
expr: expr.ARITH_OP expr
args: expr. (19)
ARITH_OP shift 17
. reduce 19 (src line 181)
state 26
expr: expr.ARITH_OP expr
expr: IDENTIFIER SQUARE_BRACKET_LEFT expr.SQUARE_BRACKET_RIGHT
SQUARE_BRACKET_RIGHT shift 29
ARITH_OP shift 17
. error
state 27
expr: IDENTIFIER PAREN_LEFT args PAREN_RIGHT. (15)
. reduce 15 (src line 157)
state 28
args: args COMMA.expr
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
expr goto 30
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
state 29
expr: IDENTIFIER SQUARE_BRACKET_LEFT expr SQUARE_BRACKET_RIGHT. (16)
. reduce 16 (src line 161)
state 30
expr: expr.ARITH_OP expr
args: args COMMA expr. (18)
ARITH_OP shift 17
. reduce 18 (src line 177)
17 terminals, 8 nonterminals
21 grammar rules, 31/2000 states
0 shift/reduce, 0 reduce/reduce conflicts reported
57 working sets used
memory: parser 45/30000
26 extra closures
67 shift entries, 1 exceptions
16 goto entries
31 entries saved by goto default
Optimizer space used: output 37/30000
37 table entries, 1 zero
maximum spread: 17, maximum offset: 28

View File

@ -1,75 +0,0 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pbutil
import (
"encoding/binary"
"errors"
"io"
"github.com/golang/protobuf/proto"
)
var errInvalidVarint = errors.New("invalid varint32 encountered")
// ReadDelimited decodes a message from the provided length-delimited stream,
// where the length is encoded as 32-bit varint prefix to the message body.
// It returns the total number of bytes read and any applicable error. This is
// roughly equivalent to the companion Java API's
// MessageLite#parseDelimitedFrom. As per the reader contract, this function
// calls r.Read repeatedly as required until exactly one message including its
// prefix is read and decoded (or an error has occurred). The function never
// reads more bytes from the stream than required. The function never returns
// an error if a message has been read and decoded correctly, even if the end
// of the stream has been reached in doing so. In that case, any subsequent
// calls return (0, io.EOF).
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
// Per AbstractParser#parsePartialDelimitedFrom with
// CodedInputStream#readRawVarint32.
headerBuf := make([]byte, binary.MaxVarintLen32)
var bytesRead, varIntBytes int
var messageLength uint64
for varIntBytes == 0 { // i.e. no varint has been decoded yet.
if bytesRead >= len(headerBuf) {
return bytesRead, errInvalidVarint
}
// We have to read byte by byte here to avoid reading more bytes
// than required. Each read byte is appended to what we have
// read before.
newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
if newBytesRead == 0 {
if err != nil {
return bytesRead, err
}
// A Reader should not return (0, nil), but if it does,
// it should be treated as no-op (according to the
// Reader contract). So let's go on...
continue
}
bytesRead += newBytesRead
// Now present everything read so far to the varint decoder and
// see if a varint can be decoded already.
messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
}
messageBuf := make([]byte, messageLength)
newBytesRead, err := io.ReadFull(r, messageBuf)
bytesRead += newBytesRead
if err != nil {
return bytesRead, err
}
return bytesRead, proto.Unmarshal(messageBuf, m)
}

View File

@ -1,16 +0,0 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package pbutil provides record length-delimited Protocol Buffer streaming.
package pbutil

View File

@ -1,46 +0,0 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pbutil
import (
"encoding/binary"
"io"
"github.com/golang/protobuf/proto"
)
// WriteDelimited encodes and dumps a message to the provided writer prefixed
// with a 32-bit varint indicating the length of the encoded message, producing
// a length-delimited record stream, which can be used to chain together
// encoded messages of the same type together in a file. It returns the total
// number of bytes written and any applicable error. This is roughly
// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
buffer, err := proto.Marshal(m)
if err != nil {
return 0, err
}
buf := make([]byte, binary.MaxVarintLen32)
encodedLength := binary.PutUvarint(buf, uint64(len(buffer)))
sync, err := w.Write(buf[:encodedLength])
if err != nil {
return sync, err
}
n, err = w.Write(buffer)
return n + sync, err
}

21
vendor/github.com/mitchellh/copystructure/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Mitchell Hashimoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

21
vendor/github.com/mitchellh/copystructure/README.md generated vendored Normal file
View File

@ -0,0 +1,21 @@
# copystructure
copystructure is a Go library for deep copying values in Go.
This allows you to copy Go values that may contain reference values
such as maps, slices, or pointers, and copy their data as well instead
of just their references.
## Installation
Standard `go get`:
```
$ go get github.com/mitchellh/copystructure
```
## Usage & Example
For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure).
The `Copy` function has examples associated with it there.
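A minimal sketch of that usage with illustrative values (the Godoc examples remain the authoritative reference):
package main

import (
	"fmt"

	"github.com/mitchellh/copystructure"
)

func main() {
	input := map[string]interface{}{
		"names": []string{"foo", "bar"},
	}

	dup, err := copystructure.Copy(input)
	if err != nil {
		panic(err)
	}

	// The copy is deep: mutating the copied slice leaves the original intact.
	copied := dup.(map[string]interface{})
	copied["names"].([]string)[0] = "baz"
	fmt.Println(input["names"], copied["names"]) // [foo bar] [baz bar]
}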

View File

@ -0,0 +1,15 @@
package copystructure
import (
"reflect"
"time"
)
func init() {
Copiers[reflect.TypeOf(time.Time{})] = timeCopier
}
func timeCopier(v interface{}) (interface{}, error) {
// Just... copy it.
return v.(time.Time), nil
}

View File

@ -0,0 +1,279 @@
package copystructure
import (
"reflect"
"github.com/mitchellh/reflectwalk"
)
// Copy returns a deep copy of v.
func Copy(v interface{}) (interface{}, error) {
w := new(walker)
err := reflectwalk.Walk(v, w)
if err != nil {
return nil, err
}
// Get the result. If the result is nil, then we want to turn it
// into a typed nil if we can.
result := w.Result
if result == nil {
val := reflect.ValueOf(v)
result = reflect.Indirect(reflect.New(val.Type())).Interface()
}
return result, nil
}
// CopierFunc is a function that knows how to deep copy a specific type.
// Register these globally with the Copiers variable.
type CopierFunc func(interface{}) (interface{}, error)
// Copiers is a map of types that behave specially when they are copied.
// If a type is found in this map while deep copying, this function
// will be called to copy it instead of attempting to copy all fields.
//
// The key should be the type, obtained using: reflect.TypeOf(value with type).
//
// It is unsafe to write to this map after Copies have started. If you
// are writing to this map while also copying, wrap all modifications to
// this map as well as to Copy in a mutex.
var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc)
type walker struct {
Result interface{}
depth int
ignoreDepth int
vals []reflect.Value
cs []reflect.Value
ps []bool
}
func (w *walker) Enter(l reflectwalk.Location) error {
w.depth++
return nil
}
func (w *walker) Exit(l reflectwalk.Location) error {
w.depth--
if w.ignoreDepth > w.depth {
w.ignoreDepth = 0
}
if w.ignoring() {
return nil
}
switch l {
case reflectwalk.Map:
fallthrough
case reflectwalk.Slice:
// Pop map off our container
w.cs = w.cs[:len(w.cs)-1]
case reflectwalk.MapValue:
// Pop off the key and value
mv := w.valPop()
mk := w.valPop()
m := w.cs[len(w.cs)-1]
m.SetMapIndex(mk, mv)
case reflectwalk.SliceElem:
// Pop off the value and the index and set it on the slice
v := w.valPop()
i := w.valPop().Interface().(int)
s := w.cs[len(w.cs)-1]
s.Index(i).Set(v)
case reflectwalk.Struct:
w.replacePointerMaybe()
// Remove the struct from the container stack
w.cs = w.cs[:len(w.cs)-1]
case reflectwalk.StructField:
// Pop off the value and the field
v := w.valPop()
f := w.valPop().Interface().(reflect.StructField)
if v.IsValid() {
s := w.cs[len(w.cs)-1]
sf := reflect.Indirect(s).FieldByName(f.Name)
sf.Set(v)
}
case reflectwalk.WalkLoc:
// Clear out the slices for GC
w.cs = nil
w.vals = nil
}
return nil
}
func (w *walker) Map(m reflect.Value) error {
if w.ignoring() {
return nil
}
// Get the type for the map
t := m.Type()
mapType := reflect.MapOf(t.Key(), t.Elem())
// Create the map. If the map itself is nil, then just make a nil map
var newMap reflect.Value
if m.IsNil() {
newMap = reflect.Indirect(reflect.New(mapType))
} else {
newMap = reflect.MakeMap(reflect.MapOf(t.Key(), t.Elem()))
}
w.cs = append(w.cs, newMap)
w.valPush(newMap)
return nil
}
func (w *walker) MapElem(m, k, v reflect.Value) error {
return nil
}
func (w *walker) PointerEnter(v bool) error {
if w.ignoring() {
return nil
}
w.ps = append(w.ps, v)
return nil
}
func (w *walker) PointerExit(bool) error {
if w.ignoring() {
return nil
}
w.ps = w.ps[:len(w.ps)-1]
return nil
}
func (w *walker) Primitive(v reflect.Value) error {
if w.ignoring() {
return nil
}
var newV reflect.Value
if v.IsValid() {
newV = reflect.New(v.Type())
reflect.Indirect(newV).Set(v)
}
w.valPush(newV)
w.replacePointerMaybe()
return nil
}
func (w *walker) Slice(s reflect.Value) error {
if w.ignoring() {
return nil
}
var newS reflect.Value
if s.IsNil() {
newS = reflect.Indirect(reflect.New(s.Type()))
} else {
newS = reflect.MakeSlice(s.Type(), s.Len(), s.Cap())
}
w.cs = append(w.cs, newS)
w.valPush(newS)
return nil
}
func (w *walker) SliceElem(i int, elem reflect.Value) error {
if w.ignoring() {
return nil
}
// We don't write the slice here because elem might still be
// arbitrarily complex. Just record the index and continue on.
w.valPush(reflect.ValueOf(i))
return nil
}
func (w *walker) Struct(s reflect.Value) error {
if w.ignoring() {
return nil
}
var v reflect.Value
if c, ok := Copiers[s.Type()]; ok {
// We have a Copier for this struct, so we use that copier to
// get the copy, and we ignore anything deeper than this.
w.ignoreDepth = w.depth
dup, err := c(s.Interface())
if err != nil {
return err
}
v = reflect.ValueOf(dup)
} else {
// No copier, we copy ourselves and allow reflectwalk to guide
// us deeper into the structure for copying.
v = reflect.New(s.Type())
}
// Push the value onto the value stack for setting the struct field,
// and add the struct itself to the containers stack in case we walk
// deeper so that its own fields can be modified.
w.valPush(v)
w.cs = append(w.cs, v)
return nil
}
func (w *walker) StructField(f reflect.StructField, v reflect.Value) error {
if w.ignoring() {
return nil
}
// Push the field onto the stack, we'll handle it when we exit
// the struct field in Exit...
w.valPush(reflect.ValueOf(f))
return nil
}
func (w *walker) ignoring() bool {
return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth
}
func (w *walker) pointerPeek() bool {
return w.ps[len(w.ps)-1]
}
func (w *walker) valPop() reflect.Value {
result := w.vals[len(w.vals)-1]
w.vals = w.vals[:len(w.vals)-1]
// If we're out of values, that means we popped everything off. In
// this case, we reset the result so the next pushed value becomes
// the result.
if len(w.vals) == 0 {
w.Result = nil
}
return result
}
func (w *walker) valPush(v reflect.Value) {
w.vals = append(w.vals, v)
// If we haven't set the result yet, then this is the result since
// it is the first (outermost) value we're seeing.
if w.Result == nil && v.IsValid() {
w.Result = v.Interface()
}
}
func (w *walker) replacePointerMaybe() {
// Determine the last pointer value. If it is NOT a pointer, then
// we need to push that onto the stack.
if !w.pointerPeek() {
w.valPush(reflect.Indirect(w.valPop()))
}
}
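
For orientation: the walker above is the engine behind copystructure's deep copy. reflectwalk drives the Enter/Exit, Map, Slice, Struct, and Primitive callbacks, and the duplicate accumulates in w.Result. A minimal usage sketch (not part of this diff; it assumes the package's exported Copy helper, which wraps this walker):

package main

import (
    "fmt"

    "github.com/mitchellh/copystructure"
)

func main() {
    original := map[string]interface{}{
        "name":  "web",
        "ports": []interface{}{80, 443},
    }

    // Copy walks the value with the walker above and returns a deep copy.
    dup, err := copystructure.Copy(original)
    if err != nil {
        panic(err)
    }

    // Mutating the copy leaves the original untouched.
    dup.(map[string]interface{})["name"] = "api"
    fmt.Println(original["name"]) // web
}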

21
vendor/github.com/mitchellh/reflectwalk/LICENSE generated vendored Normal file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2013 Mitchell Hashimoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

6
vendor/github.com/mitchellh/reflectwalk/README.md generated vendored Normal file

@@ -0,0 +1,6 @@
# reflectwalk
reflectwalk is a Go library for "walking" a value in Go using reflection,
in the same way a directory tree can be "walked" on the filesystem. Walking
a complex structure can allow you to do manipulations on unknown structures
such as those decoded from JSON.
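
As a hedged illustration of that claim (this sketch is not part of the vendored README; the stringCollector type is invented for the example), a type that implements only the PrimitiveWalker interface defined later in this diff can collect every string inside a value decoded from JSON:

package main

import (
    "encoding/json"
    "fmt"
    "reflect"

    "github.com/mitchellh/reflectwalk"
)

// stringCollector implements reflectwalk.PrimitiveWalker.
type stringCollector struct {
    strings []string
}

func (c *stringCollector) Primitive(v reflect.Value) error {
    // Map and slice elements arrive wrapped in an interface value.
    if v.Kind() == reflect.Interface {
        v = v.Elem()
    }
    if v.Kind() == reflect.String {
        c.strings = append(c.strings, v.String())
    }
    return nil
}

func main() {
    var data interface{}
    if err := json.Unmarshal([]byte(`{"a": "x", "b": ["y", 1]}`), &data); err != nil {
        panic(err)
    }

    c := &stringCollector{}
    if err := reflectwalk.Walk(data, c); err != nil {
        panic(err)
    }
    fmt.Println(c.strings) // map keys and string values, e.g. [a x b y]
}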

17
vendor/github.com/mitchellh/reflectwalk/location.go generated vendored Normal file

@@ -0,0 +1,17 @@
package reflectwalk
//go:generate stringer -type=Location location.go
type Location uint
const (
None Location = iota
Map
MapKey
MapValue
Slice
SliceElem
Struct
StructField
WalkLoc
)

16
vendor/github.com/mitchellh/reflectwalk/location_string.go generated vendored Normal file

@@ -0,0 +1,16 @@
// generated by stringer -type=Location location.go; DO NOT EDIT
package reflectwalk
import "fmt"
const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemStructStructFieldWalkLoc"
var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 41, 52, 59}
func (i Location) String() string {
if i+1 >= Location(len(_Location_index)) {
return fmt.Sprintf("Location(%d)", i)
}
return _Location_name[_Location_index[i]:_Location_index[i+1]]
}
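
A quick worked check of the generated lookup (illustration only, not part of the vendored file): MapValue has the numeric value 3, so String() returns _Location_name[13:21], which is "MapValue"; values past the end of the table fall through to the formatted fallback.

package main

import (
    "fmt"

    "github.com/mitchellh/reflectwalk"
)

func main() {
    fmt.Println(reflectwalk.MapValue)     // MapValue
    fmt.Println(reflectwalk.Location(42)) // Location(42)
}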

279
vendor/github.com/mitchellh/reflectwalk/reflectwalk.go generated vendored Normal file

@@ -0,0 +1,279 @@
// reflectwalk is a package that allows you to "walk" complex structures
// similar to how you may "walk" a filesystem: visiting every element one
// by one and calling callback functions allowing you to handle and manipulate
// those elements.
package reflectwalk
import (
"reflect"
)
// PrimitiveWalker implementations are able to handle primitive values
// within complex structures. Primitive values are numbers, strings,
// booleans, funcs, chans.
//
// These primitive values are often members of more complex
// structures (slices, maps, etc.) that are walkable by other interfaces.
type PrimitiveWalker interface {
Primitive(reflect.Value) error
}
// MapWalker implementations are able to handle individual elements
// found within a map structure.
type MapWalker interface {
Map(m reflect.Value) error
MapElem(m, k, v reflect.Value) error
}
// SliceWalker implementations are able to handle slice elements found
// within complex structures.
type SliceWalker interface {
Slice(reflect.Value) error
SliceElem(int, reflect.Value) error
}
// StructWalker is an interface that has methods that are called for
// structs when a Walk is done.
type StructWalker interface {
Struct(reflect.Value) error
StructField(reflect.StructField, reflect.Value) error
}
// EnterExitWalker implementations are notified before and after
// they walk deeper into complex structures (into struct fields,
// into slice elements, etc.)
type EnterExitWalker interface {
Enter(Location) error
Exit(Location) error
}
// PointerWalker implementations are notified whether the value they're
// walking is a pointer or not. PointerEnter and PointerExit are called for
// _every_ value, whether it is a pointer or not.
type PointerWalker interface {
PointerEnter(bool) error
PointerExit(bool) error
}
// Walk takes an arbitrary value and an interface and traverses the
// value, calling callbacks on the interface if they are supported.
// The interface should implement one or more of the walker interfaces
// in this package, such as PrimitiveWalker, StructWalker, etc.
func Walk(data, walker interface{}) (err error) {
v := reflect.ValueOf(data)
ew, ok := walker.(EnterExitWalker)
if ok {
err = ew.Enter(WalkLoc)
}
if err == nil {
err = walk(v, walker)
}
if ok && err == nil {
err = ew.Exit(WalkLoc)
}
return
}
func walk(v reflect.Value, w interface{}) (err error) {
// Determine if we're receiving a pointer and if so notify the walker.
pointer := false
if v.Kind() == reflect.Ptr {
pointer = true
v = reflect.Indirect(v)
}
if pw, ok := w.(PointerWalker); ok {
if err = pw.PointerEnter(pointer); err != nil {
return
}
defer func() {
if err != nil {
return
}
err = pw.PointerExit(pointer)
}()
}
// We preserve the original value here because if it is an interface
// type, we want to pass that directly into the walkPrimitive, so that
// we can set it.
originalV := v
if v.Kind() == reflect.Interface {
v = v.Elem()
}
k := v.Kind()
if k >= reflect.Int && k <= reflect.Complex128 {
k = reflect.Int
}
switch k {
// Primitives
case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid:
err = walkPrimitive(originalV, w)
return
case reflect.Map:
err = walkMap(v, w)
return
case reflect.Slice:
err = walkSlice(v, w)
return
case reflect.Struct:
err = walkStruct(v, w)
return
default:
panic("unsupported type: " + k.String())
}
}
func walkMap(v reflect.Value, w interface{}) error {
ew, ewok := w.(EnterExitWalker)
if ewok {
ew.Enter(Map)
}
if mw, ok := w.(MapWalker); ok {
if err := mw.Map(v); err != nil {
return err
}
}
for _, k := range v.MapKeys() {
kv := v.MapIndex(k)
if mw, ok := w.(MapWalker); ok {
if err := mw.MapElem(v, k, kv); err != nil {
return err
}
}
ew, ok := w.(EnterExitWalker)
if ok {
ew.Enter(MapKey)
}
if err := walk(k, w); err != nil {
return err
}
if ok {
ew.Exit(MapKey)
ew.Enter(MapValue)
}
if err := walk(kv, w); err != nil {
return err
}
if ok {
ew.Exit(MapValue)
}
}
if ewok {
ew.Exit(Map)
}
return nil
}
func walkPrimitive(v reflect.Value, w interface{}) error {
if pw, ok := w.(PrimitiveWalker); ok {
return pw.Primitive(v)
}
return nil
}
func walkSlice(v reflect.Value, w interface{}) (err error) {
ew, ok := w.(EnterExitWalker)
if ok {
ew.Enter(Slice)
}
if sw, ok := w.(SliceWalker); ok {
if err := sw.Slice(v); err != nil {
return err
}
}
for i := 0; i < v.Len(); i++ {
elem := v.Index(i)
if sw, ok := w.(SliceWalker); ok {
if err := sw.SliceElem(i, elem); err != nil {
return err
}
}
ew, ok := w.(EnterExitWalker)
if ok {
ew.Enter(SliceElem)
}
if err := walk(elem, w); err != nil {
return err
}
if ok {
ew.Exit(SliceElem)
}
}
ew, ok = w.(EnterExitWalker)
if ok {
ew.Exit(Slice)
}
return nil
}
func walkStruct(v reflect.Value, w interface{}) (err error) {
ew, ewok := w.(EnterExitWalker)
if ewok {
ew.Enter(Struct)
}
if sw, ok := w.(StructWalker); ok {
if err = sw.Struct(v); err != nil {
return
}
}
vt := v.Type()
for i := 0; i < vt.NumField(); i++ {
sf := vt.Field(i)
f := v.FieldByIndex([]int{i})
if sw, ok := w.(StructWalker); ok {
err = sw.StructField(sf, f)
if err != nil {
return
}
}
ew, ok := w.(EnterExitWalker)
if ok {
ew.Enter(StructField)
}
err = walk(f, w)
if err != nil {
return
}
if ok {
ew.Exit(StructField)
}
}
if ewok {
ew.Exit(Struct)
}
return nil
}
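
To make the callback order above concrete, here is a sketch (not part of the vendored file; the printWalker and Service types are invented for illustration) that implements StructWalker and EnterExitWalker and prints what Walk visits:

package main

import (
    "fmt"
    "reflect"

    "github.com/mitchellh/reflectwalk"
)

// printWalker implements StructWalker and EnterExitWalker, so walkStruct
// above reports Enter/Exit locations alongside Struct/StructField calls.
type printWalker struct{}

func (printWalker) Enter(loc reflectwalk.Location) error { fmt.Println("enter", loc); return nil }
func (printWalker) Exit(loc reflectwalk.Location) error  { fmt.Println("exit", loc); return nil }

func (printWalker) Struct(v reflect.Value) error {
    fmt.Println("struct", v.Type())
    return nil
}

func (printWalker) StructField(f reflect.StructField, v reflect.Value) error {
    fmt.Printf("field %s = %v\n", f.Name, v.Interface())
    return nil
}

type Service struct {
    Name string
    Port int
}

func main() {
    // Prints, in order: enter WalkLoc, enter Struct, struct main.Service,
    // field Name = web, enter StructField, exit StructField, field Port = 80,
    // enter StructField, exit StructField, exit Struct, exit WalkLoc.
    if err := reflectwalk.Walk(Service{Name: "web", Port: 80}, printWalker{}); err != nil {
        panic(err)
    }
}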


@@ -1 +0,0 @@
command-line-arguments.test

Some files were not shown because too many files have changed in this diff.