diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 7f0c41785..9f6fab419 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -15,63 +15,63 @@
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws",
- "Comment": "v0.9.16-1-g66c840e",
- "Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
+ "Comment": "v0.10.0-6-g83bae04",
+ "Rev": "83bae04b770b2b9aae4c946f795149d294e147d3"
},
{
- "ImportPath": "github.com/aws/aws-sdk-go/internal/endpoints",
- "Comment": "v0.9.16-1-g66c840e",
- "Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
+ "ImportPath": "github.com/aws/aws-sdk-go/private/endpoints",
+ "Comment": "v0.10.0-6-g83bae04",
+ "Rev": "83bae04b770b2b9aae4c946f795149d294e147d3"
},
{
- "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/ec2query",
- "Comment": "v0.9.16-1-g66c840e",
- "Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
+ "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/ec2query",
+ "Comment": "v0.10.0-6-g83bae04",
+ "Rev": "83bae04b770b2b9aae4c946f795149d294e147d3"
},
{
- "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/query",
- "Comment": "v0.9.16-1-g66c840e",
- "Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
+ "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query",
+ "Comment": "v0.10.0-6-g83bae04",
+ "Rev": "83bae04b770b2b9aae4c946f795149d294e147d3"
},
{
- "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/rest",
- "Comment": "v0.9.16-1-g66c840e",
- "Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
+ "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest",
+ "Comment": "v0.10.0-6-g83bae04",
+ "Rev": "83bae04b770b2b9aae4c946f795149d294e147d3"
},
{
- "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/restxml",
- "Comment": "v0.9.16-1-g66c840e",
- "Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
+ "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml",
+ "Comment": "v0.10.0-6-g83bae04",
+ "Rev": "83bae04b770b2b9aae4c946f795149d294e147d3"
},
{
- "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil",
- "Comment": "v0.9.16-1-g66c840e",
- "Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
+ "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
+ "Comment": "v0.10.0-6-g83bae04",
+ "Rev": "83bae04b770b2b9aae4c946f795149d294e147d3"
},
{
- "ImportPath": "github.com/aws/aws-sdk-go/internal/signer/v4",
- "Comment": "v0.9.16-1-g66c840e",
- "Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
+ "ImportPath": "github.com/aws/aws-sdk-go/private/signer/v4",
+ "Comment": "v0.10.0-6-g83bae04",
+ "Rev": "83bae04b770b2b9aae4c946f795149d294e147d3"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/ec2",
- "Comment": "v0.9.16-1-g66c840e",
- "Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
+ "Comment": "v0.10.0-6-g83bae04",
+ "Rev": "83bae04b770b2b9aae4c946f795149d294e147d3"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/iam",
- "Comment": "v0.9.16-1-g66c840e",
- "Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
+ "Comment": "v0.10.0-6-g83bae04",
+ "Rev": "83bae04b770b2b9aae4c946f795149d294e147d3"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/s3",
- "Comment": "v0.9.16-1-g66c840e",
- "Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
+ "Comment": "v0.10.0-6-g83bae04",
+ "Rev": "83bae04b770b2b9aae4c946f795149d294e147d3"
},
{
"ImportPath": "github.com/coreos/go-etcd/etcd",
- "Comment": "v2.0.0-36-g2038b59",
- "Rev": "2038b5942e8e7f4f244729ff9353afab8ba11afc"
+ "Comment": "v2.0.0-38-g003851b",
+ "Rev": "003851be7bb0694fe3cc457a49529a19388ee7cf"
},
{
"ImportPath": "github.com/duosecurity/duo_api_golang",
@@ -83,18 +83,18 @@
},
{
"ImportPath": "github.com/go-ldap/ldap",
- "Comment": "v2.1",
- "Rev": "90b1711ae8a4d6aef7576dfe0bc48def3f3ffcf4"
+ "Comment": "v2.1-2-gd57f702",
+ "Rev": "d57f702d9f8a22278428abed6c025621ab657fd6"
},
{
"ImportPath": "github.com/go-sql-driver/mysql",
- "Comment": "v1.2-119-g527bcd5",
- "Rev": "527bcd55aab2e53314f1a150922560174b493034"
+ "Comment": "v1.2-121-g69e3ed7",
+ "Rev": "69e3ed7607d7c139386480824801584c947c67cf"
},
{
"ImportPath": "github.com/gocql/gocql",
- "Comment": "1st_gen_framing-323-g8041a37",
- "Rev": "8041a37b40f2ca115d6c2902b279250eb627d7af"
+ "Comment": "1st_gen_framing-339-ga13e827",
+ "Rev": "a13e827ba9f379ea13199708c1b262a5d30b95a9"
},
{
"ImportPath": "github.com/golang/snappy",
@@ -102,16 +102,20 @@
},
{
"ImportPath": "github.com/google/go-github/github",
- "Rev": "84fc80440d3bb3a82d297b827308ce11dcf047eb"
+ "Rev": "81d0490d8aa8400f6760a077f4a2039eb0296e86"
},
{
"ImportPath": "github.com/google/go-querystring/query",
- "Rev": "547ef5ac979778feb2f760cdb5f4eae1a2207b86"
+ "Rev": "2a60fc2ba6c19de80291203597d752e9ba58e4c0"
+ },
+ {
+ "ImportPath": "github.com/hailocab/go-hostpool",
+ "Rev": "be8d763da234fdc1ffbf8c3149c308db31fbf894"
},
{
"ImportPath": "github.com/hashicorp/consul/api",
- "Comment": "v0.5.2-469-g6a350d5",
- "Rev": "6a350d5d19a41f94e0c99a933410e8545c4e7a51"
+ "Comment": "v0.6.0-rc1-3-g921602b",
+ "Rev": "921602b565b04d59ac634d53542d05dfd93a6ca3"
},
{
"ImportPath": "github.com/hashicorp/errwrap",
@@ -131,7 +135,7 @@
},
{
"ImportPath": "github.com/hashicorp/golang-lru",
- "Rev": "17e3543cc4e0b72d6c71d2e59e27a10821ea353b"
+ "Rev": "a6091bb5d00e2e9c4a16a0e739e306f8a3071a3c"
},
{
"ImportPath": "github.com/hashicorp/hcl",
@@ -141,6 +145,11 @@
"ImportPath": "github.com/hashicorp/logutils",
"Rev": "0dc08b1671f34c4250ce212759ebd880f743d883"
},
+ {
+ "ImportPath": "github.com/hashicorp/serf/coordinate",
+ "Comment": "v0.6.4-141-gc952108",
+ "Rev": "c9521088072459c5113a5e78e353b0b5b5ceec58"
+ },
{
"ImportPath": "github.com/hashicorp/uuid",
"Rev": "2951e8b9707a040acdb49145ed9f36a088f3532e"
@@ -164,7 +173,7 @@
},
{
"ImportPath": "github.com/mitchellh/go-homedir",
- "Rev": "df55a15e5ce646808815381b3db47a8c66ea62f4"
+ "Rev": "d682a8f0cf139663a984ff12528da460ca963de9"
},
{
"ImportPath": "github.com/mitchellh/mapstructure",
@@ -185,7 +194,7 @@
},
{
"ImportPath": "github.com/ugorji/go/codec",
- "Rev": "8a2a3a8c488c3ebd98f422a965260278267a0551"
+ "Rev": "f1f1a805ed361a0e078bb537e4ea78cd37dcf065"
},
{
"ImportPath": "github.com/vaughan0/go-ini",
@@ -217,7 +226,7 @@
},
{
"ImportPath": "golang.org/x/net/context",
- "Rev": "2cba614e8ff920c60240d2677bc019af32ee04e5"
+ "Rev": "c95266fa704a83c2716406ae957772a273e1380d"
},
{
"ImportPath": "golang.org/x/oauth2",
diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/datadog/dogstatsd_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/datadog/dogstatsd_test.go
new file mode 100644
index 000000000..e7dc51152
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/armon/go-metrics/datadog/dogstatsd_test.go
@@ -0,0 +1,121 @@
+package datadog
+
+import (
+ "fmt"
+ "net"
+ "reflect"
+ "testing"
+)
+
+var EmptyTags []string
+
+const (
+ DogStatsdAddr = "127.0.0.1:7254"
+ HostnameEnabled = true
+ HostnameDisabled = false
+ TestHostname = "test_hostname"
+)
+
+func MockGetHostname() string {
+ return TestHostname
+}
+
+var ParseKeyTests = []struct {
+ KeyToParse []string
+ Tags []string
+ PropagateHostname bool
+ ExpectedKey []string
+ ExpectedTags []string
+}{
+ {[]string{"a", MockGetHostname(), "b", "c"}, EmptyTags, HostnameDisabled, []string{"a", "b", "c"}, EmptyTags},
+ {[]string{"a", "b", "c"}, EmptyTags, HostnameDisabled, []string{"a", "b", "c"}, EmptyTags},
+ {[]string{"a", "b", "c"}, EmptyTags, HostnameEnabled, []string{"a", "b", "c"}, []string{fmt.Sprintf("host:%s", MockGetHostname())}},
+}
+
+var FlattenKeyTests = []struct {
+ KeyToFlatten []string
+ Expected string
+}{
+ {[]string{"a", "b", "c"}, "a.b.c"},
+ {[]string{"spaces must", "flatten", "to", "underscores"}, "spaces_must.flatten.to.underscores"},
+}
+
+var MetricSinkTests = []struct {
+ Method string
+ Metric []string
+ Value interface{}
+ Tags []string
+ PropagateHostname bool
+ Expected string
+}{
+ {"SetGauge", []string{"foo", "bar"}, float32(42), EmptyTags, HostnameDisabled, "foo.bar:42.000000|g"},
+ {"SetGauge", []string{"foo", "bar", "baz"}, float32(42), EmptyTags, HostnameDisabled, "foo.bar.baz:42.000000|g"},
+ {"AddSample", []string{"sample", "thing"}, float32(4), EmptyTags, HostnameDisabled, "sample.thing:4.000000|ms"},
+ {"IncrCounter", []string{"count", "me"}, float32(3), EmptyTags, HostnameDisabled, "count.me:3|c"},
+
+ {"SetGauge", []string{"foo", "baz"}, float32(42), []string{"my_tag:my_value"}, HostnameDisabled, "foo.baz:42.000000|g|#my_tag:my_value"},
+ {"SetGauge", []string{"foo", "bar"}, float32(42), []string{"my_tag:my_value", "other_tag:other_value"}, HostnameDisabled, "foo.bar:42.000000|g|#my_tag:my_value,other_tag:other_value"},
+ {"SetGauge", []string{"foo", "bar"}, float32(42), []string{"my_tag:my_value", "other_tag:other_value"}, HostnameEnabled, "foo.bar:42.000000|g|#my_tag:my_value,other_tag:other_value,host:test_hostname"},
+}
+
+func MockNewDogStatsdSink(addr string, tags []string, tagWithHostname bool) *DogStatsdSink {
+ dog, _ := NewDogStatsdSink(addr, MockGetHostname())
+ dog.SetTags(tags)
+ if tagWithHostname {
+ dog.EnableHostNamePropagation()
+ }
+
+ return dog
+}
+
+func TestParseKey(t *testing.T) {
+ for _, tt := range ParseKeyTests {
+ dog := MockNewDogStatsdSink(DogStatsdAddr, tt.Tags, tt.PropagateHostname)
+ key, tags := dog.parseKey(tt.KeyToParse)
+
+ if !reflect.DeepEqual(key, tt.ExpectedKey) {
+ t.Fatalf("Key Parsing failed for %v", tt.KeyToParse)
+ }
+
+ if !reflect.DeepEqual(tags, tt.ExpectedTags) {
+ t.Fatalf("Tag Parsing Failed for %v", tt.KeyToParse)
+ }
+ }
+}
+
+func TestFlattenKey(t *testing.T) {
+ dog := MockNewDogStatsdSink(DogStatsdAddr, EmptyTags, HostnameDisabled)
+ for _, tt := range FlattenKeyTests {
+ if !reflect.DeepEqual(dog.flattenKey(tt.KeyToFlatten), tt.Expected) {
+ t.Fatalf("Flattening %v failed", tt.KeyToFlatten)
+ }
+ }
+}
+
+func TestMetricSink(t *testing.T) {
+ udpAddr, err := net.ResolveUDPAddr("udp", DogStatsdAddr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ server, err := net.ListenUDP("udp", udpAddr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer server.Close()
+
+ buf := make([]byte, 1024)
+
+ for _, tt := range MetricSinkTests {
+ dog := MockNewDogStatsdSink(DogStatsdAddr, tt.Tags, tt.PropagateHostname)
+ method := reflect.ValueOf(dog).MethodByName(tt.Method)
+ method.Call([]reflect.Value{
+ reflect.ValueOf(tt.Metric),
+ reflect.ValueOf(tt.Value)})
+
+ n, _ := server.Read(buf)
+ msg := buf[:n]
+ if string(msg) != tt.Expected {
+ t.Fatalf("Line %s does not match expected: %s", string(msg), tt.Expected)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal_test.go
new file mode 100644
index 000000000..9bbca5f25
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal_test.go
@@ -0,0 +1,46 @@
+package metrics
+
+import (
+ "bytes"
+ "os"
+ "strings"
+ "syscall"
+ "testing"
+ "time"
+)
+
+func TestInmemSignal(t *testing.T) {
+ buf := bytes.NewBuffer(nil)
+ inm := NewInmemSink(10*time.Millisecond, 50*time.Millisecond)
+ sig := NewInmemSignal(inm, syscall.SIGUSR1, buf)
+ defer sig.Stop()
+
+ inm.SetGauge([]string{"foo"}, 42)
+ inm.EmitKey([]string{"bar"}, 42)
+ inm.IncrCounter([]string{"baz"}, 42)
+ inm.AddSample([]string{"wow"}, 42)
+
+ // Wait for period to end
+ time.Sleep(15 * time.Millisecond)
+
+ // Send signal!
+ syscall.Kill(os.Getpid(), syscall.SIGUSR1)
+
+ // Wait for flush
+ time.Sleep(10 * time.Millisecond)
+
+ // Check the output
+ out := string(buf.Bytes())
+ if !strings.Contains(out, "[G] 'foo': 42") {
+ t.Fatalf("bad: %v", out)
+ }
+ if !strings.Contains(out, "[P] 'bar': 42") {
+ t.Fatalf("bad: %v", out)
+ }
+ if !strings.Contains(out, "[C] 'baz': Count: 1 Sum: 42") {
+ t.Fatalf("bad: %v", out)
+ }
+ if !strings.Contains(out, "[S] 'wow': Count: 1 Sum: 42") {
+ t.Fatalf("bad: %v", out)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_test.go
new file mode 100644
index 000000000..228a2fc1a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_test.go
@@ -0,0 +1,104 @@
+package metrics
+
+import (
+ "math"
+ "testing"
+ "time"
+)
+
+func TestInmemSink(t *testing.T) {
+ inm := NewInmemSink(10*time.Millisecond, 50*time.Millisecond)
+
+ data := inm.Data()
+ if len(data) != 1 {
+ t.Fatalf("bad: %v", data)
+ }
+
+ // Add data points
+ inm.SetGauge([]string{"foo", "bar"}, 42)
+ inm.EmitKey([]string{"foo", "bar"}, 42)
+ inm.IncrCounter([]string{"foo", "bar"}, 20)
+ inm.IncrCounter([]string{"foo", "bar"}, 22)
+ inm.AddSample([]string{"foo", "bar"}, 20)
+ inm.AddSample([]string{"foo", "bar"}, 22)
+
+ data = inm.Data()
+ if len(data) != 1 {
+ t.Fatalf("bad: %v", data)
+ }
+
+ intvM := data[0]
+ intvM.RLock()
+
+ if time.Now().Sub(intvM.Interval) > 10*time.Millisecond {
+ t.Fatalf("interval too old")
+ }
+ if intvM.Gauges["foo.bar"] != 42 {
+ t.Fatalf("bad val: %v", intvM.Gauges)
+ }
+ if intvM.Points["foo.bar"][0] != 42 {
+ t.Fatalf("bad val: %v", intvM.Points)
+ }
+
+ agg := intvM.Counters["foo.bar"]
+ if agg.Count != 2 {
+ t.Fatalf("bad val: %v", agg)
+ }
+ if agg.Sum != 42 {
+ t.Fatalf("bad val: %v", agg)
+ }
+ if agg.SumSq != 884 {
+ t.Fatalf("bad val: %v", agg)
+ }
+ if agg.Min != 20 {
+ t.Fatalf("bad val: %v", agg)
+ }
+ if agg.Max != 22 {
+ t.Fatalf("bad val: %v", agg)
+ }
+ if agg.Mean() != 21 {
+ t.Fatalf("bad val: %v", agg)
+ }
+ if agg.Stddev() != math.Sqrt(2) {
+ t.Fatalf("bad val: %v", agg)
+ }
+
+ if agg.LastUpdated.IsZero() {
+ t.Fatalf("agg.LastUpdated is not set: %v", agg)
+ }
+
+ diff := time.Now().Sub(agg.LastUpdated).Seconds()
+ if diff > 1 {
+ t.Fatalf("time diff too great: %f", diff)
+ }
+
+ if agg = intvM.Samples["foo.bar"]; agg == nil {
+ t.Fatalf("missing sample")
+ }
+
+ intvM.RUnlock()
+
+ for i := 1; i < 10; i++ {
+ time.Sleep(10 * time.Millisecond)
+ inm.SetGauge([]string{"foo", "bar"}, 42)
+ data = inm.Data()
+ if len(data) != min(i+1, 5) {
+ t.Fatalf("bad: %v", data)
+ }
+ }
+
+ // Should not exceed 5 intervals!
+ time.Sleep(10 * time.Millisecond)
+ inm.SetGauge([]string{"foo", "bar"}, 42)
+ data = inm.Data()
+ if len(data) != 5 {
+ t.Fatalf("bad: %v", data)
+ }
+}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/metrics_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/metrics_test.go
new file mode 100644
index 000000000..c7baf22bf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/armon/go-metrics/metrics_test.go
@@ -0,0 +1,262 @@
+package metrics
+
+import (
+ "reflect"
+ "runtime"
+ "testing"
+ "time"
+)
+
+func mockMetric() (*MockSink, *Metrics) {
+ m := &MockSink{}
+ met := &Metrics{sink: m}
+ return m, met
+}
+
+func TestMetrics_SetGauge(t *testing.T) {
+ m, met := mockMetric()
+ met.SetGauge([]string{"key"}, float32(1))
+ if m.keys[0][0] != "key" {
+ t.Fatalf("")
+ }
+ if m.vals[0] != 1 {
+ t.Fatalf("")
+ }
+
+ m, met = mockMetric()
+ met.HostName = "test"
+ met.EnableHostname = true
+ met.SetGauge([]string{"key"}, float32(1))
+ if m.keys[0][0] != "test" || m.keys[0][1] != "key" {
+ t.Fatalf("")
+ }
+ if m.vals[0] != 1 {
+ t.Fatalf("")
+ }
+
+ m, met = mockMetric()
+ met.EnableTypePrefix = true
+ met.SetGauge([]string{"key"}, float32(1))
+ if m.keys[0][0] != "gauge" || m.keys[0][1] != "key" {
+ t.Fatalf("")
+ }
+ if m.vals[0] != 1 {
+ t.Fatalf("")
+ }
+
+ m, met = mockMetric()
+ met.ServiceName = "service"
+ met.SetGauge([]string{"key"}, float32(1))
+ if m.keys[0][0] != "service" || m.keys[0][1] != "key" {
+ t.Fatalf("")
+ }
+ if m.vals[0] != 1 {
+ t.Fatalf("")
+ }
+}
+
+func TestMetrics_EmitKey(t *testing.T) {
+ m, met := mockMetric()
+ met.EmitKey([]string{"key"}, float32(1))
+ if m.keys[0][0] != "key" {
+ t.Fatalf("")
+ }
+ if m.vals[0] != 1 {
+ t.Fatalf("")
+ }
+
+ m, met = mockMetric()
+ met.EnableTypePrefix = true
+ met.EmitKey([]string{"key"}, float32(1))
+ if m.keys[0][0] != "kv" || m.keys[0][1] != "key" {
+ t.Fatalf("")
+ }
+ if m.vals[0] != 1 {
+ t.Fatalf("")
+ }
+
+ m, met = mockMetric()
+ met.ServiceName = "service"
+ met.EmitKey([]string{"key"}, float32(1))
+ if m.keys[0][0] != "service" || m.keys[0][1] != "key" {
+ t.Fatalf("")
+ }
+ if m.vals[0] != 1 {
+ t.Fatalf("")
+ }
+}
+
+func TestMetrics_IncrCounter(t *testing.T) {
+ m, met := mockMetric()
+ met.IncrCounter([]string{"key"}, float32(1))
+ if m.keys[0][0] != "key" {
+ t.Fatalf("")
+ }
+ if m.vals[0] != 1 {
+ t.Fatalf("")
+ }
+
+ m, met = mockMetric()
+ met.EnableTypePrefix = true
+ met.IncrCounter([]string{"key"}, float32(1))
+ if m.keys[0][0] != "counter" || m.keys[0][1] != "key" {
+ t.Fatalf("")
+ }
+ if m.vals[0] != 1 {
+ t.Fatalf("")
+ }
+
+ m, met = mockMetric()
+ met.ServiceName = "service"
+ met.IncrCounter([]string{"key"}, float32(1))
+ if m.keys[0][0] != "service" || m.keys[0][1] != "key" {
+ t.Fatalf("")
+ }
+ if m.vals[0] != 1 {
+ t.Fatalf("")
+ }
+}
+
+func TestMetrics_AddSample(t *testing.T) {
+ m, met := mockMetric()
+ met.AddSample([]string{"key"}, float32(1))
+ if m.keys[0][0] != "key" {
+ t.Fatalf("")
+ }
+ if m.vals[0] != 1 {
+ t.Fatalf("")
+ }
+
+ m, met = mockMetric()
+ met.EnableTypePrefix = true
+ met.AddSample([]string{"key"}, float32(1))
+ if m.keys[0][0] != "sample" || m.keys[0][1] != "key" {
+ t.Fatalf("")
+ }
+ if m.vals[0] != 1 {
+ t.Fatalf("")
+ }
+
+ m, met = mockMetric()
+ met.ServiceName = "service"
+ met.AddSample([]string{"key"}, float32(1))
+ if m.keys[0][0] != "service" || m.keys[0][1] != "key" {
+ t.Fatalf("")
+ }
+ if m.vals[0] != 1 {
+ t.Fatalf("")
+ }
+}
+
+func TestMetrics_MeasureSince(t *testing.T) {
+ m, met := mockMetric()
+ met.TimerGranularity = time.Millisecond
+ n := time.Now()
+ met.MeasureSince([]string{"key"}, n)
+ if m.keys[0][0] != "key" {
+ t.Fatalf("")
+ }
+ if m.vals[0] > 0.1 {
+ t.Fatalf("")
+ }
+
+ m, met = mockMetric()
+ met.TimerGranularity = time.Millisecond
+ met.EnableTypePrefix = true
+ met.MeasureSince([]string{"key"}, n)
+ if m.keys[0][0] != "timer" || m.keys[0][1] != "key" {
+ t.Fatalf("")
+ }
+ if m.vals[0] > 0.1 {
+ t.Fatalf("")
+ }
+
+ m, met = mockMetric()
+ met.TimerGranularity = time.Millisecond
+ met.ServiceName = "service"
+ met.MeasureSince([]string{"key"}, n)
+ if m.keys[0][0] != "service" || m.keys[0][1] != "key" {
+ t.Fatalf("")
+ }
+ if m.vals[0] > 0.1 {
+ t.Fatalf("")
+ }
+}
+
+func TestMetrics_EmitRuntimeStats(t *testing.T) {
+ runtime.GC()
+ m, met := mockMetric()
+ met.emitRuntimeStats()
+
+ if m.keys[0][0] != "runtime" || m.keys[0][1] != "num_goroutines" {
+ t.Fatalf("bad key %v", m.keys)
+ }
+ if m.vals[0] <= 1 {
+ t.Fatalf("bad val: %v", m.vals)
+ }
+
+ if m.keys[1][0] != "runtime" || m.keys[1][1] != "alloc_bytes" {
+ t.Fatalf("bad key %v", m.keys)
+ }
+ if m.vals[1] <= 40000 {
+ t.Fatalf("bad val: %v", m.vals)
+ }
+
+ if m.keys[2][0] != "runtime" || m.keys[2][1] != "sys_bytes" {
+ t.Fatalf("bad key %v", m.keys)
+ }
+ if m.vals[2] <= 100000 {
+ t.Fatalf("bad val: %v", m.vals)
+ }
+
+ if m.keys[3][0] != "runtime" || m.keys[3][1] != "malloc_count" {
+ t.Fatalf("bad key %v", m.keys)
+ }
+ if m.vals[3] <= 100 {
+ t.Fatalf("bad val: %v", m.vals)
+ }
+
+ if m.keys[4][0] != "runtime" || m.keys[4][1] != "free_count" {
+ t.Fatalf("bad key %v", m.keys)
+ }
+ if m.vals[4] <= 100 {
+ t.Fatalf("bad val: %v", m.vals)
+ }
+
+ if m.keys[5][0] != "runtime" || m.keys[5][1] != "heap_objects" {
+ t.Fatalf("bad key %v", m.keys)
+ }
+ if m.vals[5] <= 100 {
+ t.Fatalf("bad val: %v", m.vals)
+ }
+
+ if m.keys[6][0] != "runtime" || m.keys[6][1] != "total_gc_pause_ns" {
+ t.Fatalf("bad key %v", m.keys)
+ }
+ if m.vals[6] <= 100000 {
+ t.Fatalf("bad val: %v", m.vals)
+ }
+
+ if m.keys[7][0] != "runtime" || m.keys[7][1] != "total_gc_runs" {
+ t.Fatalf("bad key %v", m.keys)
+ }
+ if m.vals[7] <= 1 {
+ t.Fatalf("bad val: %v", m.vals)
+ }
+
+ if m.keys[8][0] != "runtime" || m.keys[8][1] != "gc_pause_ns" {
+ t.Fatalf("bad key %v", m.keys)
+ }
+ if m.vals[8] <= 1000 {
+ t.Fatalf("bad val: %v", m.vals)
+ }
+}
+
+func TestInsert(t *testing.T) {
+ k := []string{"hi", "bob"}
+ exp := []string{"hi", "there", "bob"}
+ out := insert(1, "there", k)
+ if !reflect.DeepEqual(exp, out) {
+ t.Fatalf("bad insert %v %v", exp, out)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/sink_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/sink_test.go
new file mode 100644
index 000000000..15c5d771a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/armon/go-metrics/sink_test.go
@@ -0,0 +1,120 @@
+package metrics
+
+import (
+ "reflect"
+ "testing"
+)
+
+type MockSink struct {
+ keys [][]string
+ vals []float32
+}
+
+func (m *MockSink) SetGauge(key []string, val float32) {
+ m.keys = append(m.keys, key)
+ m.vals = append(m.vals, val)
+}
+func (m *MockSink) EmitKey(key []string, val float32) {
+ m.keys = append(m.keys, key)
+ m.vals = append(m.vals, val)
+}
+func (m *MockSink) IncrCounter(key []string, val float32) {
+ m.keys = append(m.keys, key)
+ m.vals = append(m.vals, val)
+}
+func (m *MockSink) AddSample(key []string, val float32) {
+ m.keys = append(m.keys, key)
+ m.vals = append(m.vals, val)
+}
+
+func TestFanoutSink_Gauge(t *testing.T) {
+ m1 := &MockSink{}
+ m2 := &MockSink{}
+ fh := &FanoutSink{m1, m2}
+
+ k := []string{"test"}
+ v := float32(42.0)
+ fh.SetGauge(k, v)
+
+ if !reflect.DeepEqual(m1.keys[0], k) {
+ t.Fatalf("key not equal")
+ }
+ if !reflect.DeepEqual(m2.keys[0], k) {
+ t.Fatalf("key not equal")
+ }
+ if !reflect.DeepEqual(m1.vals[0], v) {
+ t.Fatalf("val not equal")
+ }
+ if !reflect.DeepEqual(m2.vals[0], v) {
+ t.Fatalf("val not equal")
+ }
+}
+
+func TestFanoutSink_Key(t *testing.T) {
+ m1 := &MockSink{}
+ m2 := &MockSink{}
+ fh := &FanoutSink{m1, m2}
+
+ k := []string{"test"}
+ v := float32(42.0)
+ fh.EmitKey(k, v)
+
+ if !reflect.DeepEqual(m1.keys[0], k) {
+ t.Fatalf("key not equal")
+ }
+ if !reflect.DeepEqual(m2.keys[0], k) {
+ t.Fatalf("key not equal")
+ }
+ if !reflect.DeepEqual(m1.vals[0], v) {
+ t.Fatalf("val not equal")
+ }
+ if !reflect.DeepEqual(m2.vals[0], v) {
+ t.Fatalf("val not equal")
+ }
+}
+
+func TestFanoutSink_Counter(t *testing.T) {
+ m1 := &MockSink{}
+ m2 := &MockSink{}
+ fh := &FanoutSink{m1, m2}
+
+ k := []string{"test"}
+ v := float32(42.0)
+ fh.IncrCounter(k, v)
+
+ if !reflect.DeepEqual(m1.keys[0], k) {
+ t.Fatalf("key not equal")
+ }
+ if !reflect.DeepEqual(m2.keys[0], k) {
+ t.Fatalf("key not equal")
+ }
+ if !reflect.DeepEqual(m1.vals[0], v) {
+ t.Fatalf("val not equal")
+ }
+ if !reflect.DeepEqual(m2.vals[0], v) {
+ t.Fatalf("val not equal")
+ }
+}
+
+func TestFanoutSink_Sample(t *testing.T) {
+ m1 := &MockSink{}
+ m2 := &MockSink{}
+ fh := &FanoutSink{m1, m2}
+
+ k := []string{"test"}
+ v := float32(42.0)
+ fh.AddSample(k, v)
+
+ if !reflect.DeepEqual(m1.keys[0], k) {
+ t.Fatalf("key not equal")
+ }
+ if !reflect.DeepEqual(m2.keys[0], k) {
+ t.Fatalf("key not equal")
+ }
+ if !reflect.DeepEqual(m1.vals[0], v) {
+ t.Fatalf("val not equal")
+ }
+ if !reflect.DeepEqual(m2.vals[0], v) {
+ t.Fatalf("val not equal")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/start_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/start_test.go
new file mode 100644
index 000000000..8b3210c15
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/armon/go-metrics/start_test.go
@@ -0,0 +1,110 @@
+package metrics
+
+import (
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestDefaultConfig(t *testing.T) {
+ conf := DefaultConfig("service")
+ if conf.ServiceName != "service" {
+ t.Fatalf("Bad name")
+ }
+ if conf.HostName == "" {
+ t.Fatalf("missing hostname")
+ }
+ if !conf.EnableHostname || !conf.EnableRuntimeMetrics {
+ t.Fatalf("expect true")
+ }
+ if conf.EnableTypePrefix {
+ t.Fatalf("expect false")
+ }
+ if conf.TimerGranularity != time.Millisecond {
+ t.Fatalf("bad granularity")
+ }
+ if conf.ProfileInterval != time.Second {
+ t.Fatalf("bad interval")
+ }
+}
+
+func Test_GlobalMetrics_SetGauge(t *testing.T) {
+ m := &MockSink{}
+ globalMetrics = &Metrics{sink: m}
+
+ k := []string{"test"}
+ v := float32(42.0)
+ SetGauge(k, v)
+
+ if !reflect.DeepEqual(m.keys[0], k) {
+ t.Fatalf("key not equal")
+ }
+ if !reflect.DeepEqual(m.vals[0], v) {
+ t.Fatalf("val not equal")
+ }
+}
+
+func Test_GlobalMetrics_EmitKey(t *testing.T) {
+ m := &MockSink{}
+ globalMetrics = &Metrics{sink: m}
+
+ k := []string{"test"}
+ v := float32(42.0)
+ EmitKey(k, v)
+
+ if !reflect.DeepEqual(m.keys[0], k) {
+ t.Fatalf("key not equal")
+ }
+ if !reflect.DeepEqual(m.vals[0], v) {
+ t.Fatalf("val not equal")
+ }
+}
+
+func Test_GlobalMetrics_IncrCounter(t *testing.T) {
+ m := &MockSink{}
+ globalMetrics = &Metrics{sink: m}
+
+ k := []string{"test"}
+ v := float32(42.0)
+ IncrCounter(k, v)
+
+ if !reflect.DeepEqual(m.keys[0], k) {
+ t.Fatalf("key not equal")
+ }
+ if !reflect.DeepEqual(m.vals[0], v) {
+ t.Fatalf("val not equal")
+ }
+}
+
+func Test_GlobalMetrics_AddSample(t *testing.T) {
+ m := &MockSink{}
+ globalMetrics = &Metrics{sink: m}
+
+ k := []string{"test"}
+ v := float32(42.0)
+ AddSample(k, v)
+
+ if !reflect.DeepEqual(m.keys[0], k) {
+ t.Fatalf("key not equal")
+ }
+ if !reflect.DeepEqual(m.vals[0], v) {
+ t.Fatalf("val not equal")
+ }
+}
+
+func Test_GlobalMetrics_MeasureSince(t *testing.T) {
+ m := &MockSink{}
+ globalMetrics = &Metrics{sink: m}
+ globalMetrics.TimerGranularity = time.Millisecond
+
+ k := []string{"test"}
+ now := time.Now()
+ MeasureSince(k, now)
+
+ if !reflect.DeepEqual(m.keys[0], k) {
+ t.Fatalf("key not equal")
+ }
+ if m.vals[0] > 0.1 {
+ t.Fatalf("val too large %v", m.vals[0])
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/statsd_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/statsd_test.go
new file mode 100644
index 000000000..622eb5d3a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/armon/go-metrics/statsd_test.go
@@ -0,0 +1,105 @@
+package metrics
+
+import (
+ "bufio"
+ "bytes"
+ "net"
+ "testing"
+ "time"
+)
+
+func TestStatsd_Flatten(t *testing.T) {
+ s := &StatsdSink{}
+ flat := s.flattenKey([]string{"a", "b", "c", "d"})
+ if flat != "a.b.c.d" {
+ t.Fatalf("Bad flat")
+ }
+}
+
+func TestStatsd_PushFullQueue(t *testing.T) {
+ q := make(chan string, 1)
+ q <- "full"
+
+ s := &StatsdSink{metricQueue: q}
+ s.pushMetric("omit")
+
+ out := <-q
+ if out != "full" {
+ t.Fatalf("bad val %v", out)
+ }
+
+ select {
+ case v := <-q:
+ t.Fatalf("bad val %v", v)
+ default:
+ }
+}
+
+func TestStatsd_Conn(t *testing.T) {
+ addr := "127.0.0.1:7524"
+ done := make(chan bool)
+ go func() {
+ list, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 7524})
+ if err != nil {
+ panic(err)
+ }
+ defer list.Close()
+ buf := make([]byte, 1500)
+ n, err := list.Read(buf)
+ if err != nil {
+ panic(err)
+ }
+ buf = buf[:n]
+ reader := bufio.NewReader(bytes.NewReader(buf))
+
+ line, err := reader.ReadString('\n')
+ if err != nil {
+ t.Fatalf("unexpected err %s", err)
+ }
+ if line != "gauge.val:1.000000|g\n" {
+ t.Fatalf("bad line %s", line)
+ }
+
+ line, err = reader.ReadString('\n')
+ if err != nil {
+ t.Fatalf("unexpected err %s", err)
+ }
+ if line != "key.other:2.000000|kv\n" {
+ t.Fatalf("bad line %s", line)
+ }
+
+ line, err = reader.ReadString('\n')
+ if err != nil {
+ t.Fatalf("unexpected err %s", err)
+ }
+ if line != "counter.me:3.000000|c\n" {
+ t.Fatalf("bad line %s", line)
+ }
+
+ line, err = reader.ReadString('\n')
+ if err != nil {
+ t.Fatalf("unexpected err %s", err)
+ }
+ if line != "sample.slow_thingy:4.000000|ms\n" {
+ t.Fatalf("bad line %s", line)
+ }
+
+ done <- true
+ }()
+ s, err := NewStatsdSink(addr)
+ if err != nil {
+ t.Fatalf("bad error")
+ }
+
+ s.SetGauge([]string{"gauge", "val"}, float32(1))
+ s.EmitKey([]string{"key", "other"}, float32(2))
+ s.IncrCounter([]string{"counter", "me"}, float32(3))
+ s.AddSample([]string{"sample", "slow thingy"}, float32(4))
+
+ select {
+ case <-done:
+ s.Shutdown()
+ case <-time.After(3 * time.Second):
+ t.Fatalf("timeout")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/statsite_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/statsite_test.go
new file mode 100644
index 000000000..d9c744f41
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/armon/go-metrics/statsite_test.go
@@ -0,0 +1,101 @@
+package metrics
+
+import (
+ "bufio"
+ "net"
+ "testing"
+ "time"
+)
+
+func acceptConn(addr string) net.Conn {
+ ln, _ := net.Listen("tcp", addr)
+ conn, _ := ln.Accept()
+ return conn
+}
+
+func TestStatsite_Flatten(t *testing.T) {
+ s := &StatsiteSink{}
+ flat := s.flattenKey([]string{"a", "b", "c", "d"})
+ if flat != "a.b.c.d" {
+ t.Fatalf("Bad flat")
+ }
+}
+
+func TestStatsite_PushFullQueue(t *testing.T) {
+ q := make(chan string, 1)
+ q <- "full"
+
+ s := &StatsiteSink{metricQueue: q}
+ s.pushMetric("omit")
+
+ out := <-q
+ if out != "full" {
+ t.Fatalf("bad val %v", out)
+ }
+
+ select {
+ case v := <-q:
+ t.Fatalf("bad val %v", v)
+ default:
+ }
+}
+
+func TestStatsite_Conn(t *testing.T) {
+ addr := "localhost:7523"
+ done := make(chan bool)
+ go func() {
+ conn := acceptConn(addr)
+ reader := bufio.NewReader(conn)
+
+ line, err := reader.ReadString('\n')
+ if err != nil {
+ t.Fatalf("unexpected err %s", err)
+ }
+ if line != "gauge.val:1.000000|g\n" {
+ t.Fatalf("bad line %s", line)
+ }
+
+ line, err = reader.ReadString('\n')
+ if err != nil {
+ t.Fatalf("unexpected err %s", err)
+ }
+ if line != "key.other:2.000000|kv\n" {
+ t.Fatalf("bad line %s", line)
+ }
+
+ line, err = reader.ReadString('\n')
+ if err != nil {
+ t.Fatalf("unexpected err %s", err)
+ }
+ if line != "counter.me:3.000000|c\n" {
+ t.Fatalf("bad line %s", line)
+ }
+
+ line, err = reader.ReadString('\n')
+ if err != nil {
+ t.Fatalf("unexpected err %s", err)
+ }
+ if line != "sample.slow_thingy:4.000000|ms\n" {
+ t.Fatalf("bad line %s", line)
+ }
+
+ conn.Close()
+ done <- true
+ }()
+ s, err := NewStatsiteSink(addr)
+ if err != nil {
+ t.Fatalf("bad error")
+ }
+
+ s.SetGauge([]string{"gauge", "val"}, float32(1))
+ s.EmitKey([]string{"key", "other"}, float32(2))
+ s.IncrCounter([]string{"counter", "me"}, float32(3))
+ s.AddSample([]string{"sample", "slow thingy"}, float32(4))
+
+ select {
+ case <-done:
+ s.Shutdown()
+ case <-time.After(3 * time.Second):
+ t.Fatalf("timeout")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/armon/go-radix/radix_test.go b/Godeps/_workspace/src/github.com/armon/go-radix/radix_test.go
new file mode 100644
index 000000000..23b415566
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/armon/go-radix/radix_test.go
@@ -0,0 +1,319 @@
+package radix
+
+import (
+ crand "crypto/rand"
+ "fmt"
+ "reflect"
+ "sort"
+ "testing"
+)
+
+func TestRadix(t *testing.T) {
+ var min, max string
+ inp := make(map[string]interface{})
+ for i := 0; i < 1000; i++ {
+ gen := generateUUID()
+ inp[gen] = i
+ if gen < min || i == 0 {
+ min = gen
+ }
+ if gen > max || i == 0 {
+ max = gen
+ }
+ }
+
+ r := NewFromMap(inp)
+ if r.Len() != len(inp) {
+ t.Fatalf("bad length: %v %v", r.Len(), len(inp))
+ }
+
+ r.Walk(func(k string, v interface{}) bool {
+ println(k)
+ return false
+ })
+
+ for k, v := range inp {
+ out, ok := r.Get(k)
+ if !ok {
+ t.Fatalf("missing key: %v", k)
+ }
+ if out != v {
+ t.Fatalf("value mis-match: %v %v", out, v)
+ }
+ }
+
+ // Check min and max
+ outMin, _, _ := r.Minimum()
+ if outMin != min {
+ t.Fatalf("bad minimum: %v %v", outMin, min)
+ }
+ outMax, _, _ := r.Maximum()
+ if outMax != max {
+ t.Fatalf("bad maximum: %v %v", outMax, max)
+ }
+
+ for k, v := range inp {
+ out, ok := r.Delete(k)
+ if !ok {
+ t.Fatalf("missing key: %v", k)
+ }
+ if out != v {
+ t.Fatalf("value mis-match: %v %v", out, v)
+ }
+ }
+ if r.Len() != 0 {
+ t.Fatalf("bad length: %v", r.Len())
+ }
+}
+
+func TestRoot(t *testing.T) {
+ r := New()
+ _, ok := r.Delete("")
+ if ok {
+ t.Fatalf("bad")
+ }
+ _, ok = r.Insert("", true)
+ if ok {
+ t.Fatalf("bad")
+ }
+ val, ok := r.Get("")
+ if !ok || val != true {
+ t.Fatalf("bad: %v", val)
+ }
+ val, ok = r.Delete("")
+ if !ok || val != true {
+ t.Fatalf("bad: %v", val)
+ }
+}
+
+func TestDelete(t *testing.T) {
+
+ r := New()
+
+ s := []string{"", "A", "AB"}
+
+ for _, ss := range s {
+ r.Insert(ss, true)
+ }
+
+ for _, ss := range s {
+ _, ok := r.Delete(ss)
+ if !ok {
+ t.Fatalf("bad %q", ss)
+ }
+ }
+}
+
+func TestLongestPrefix(t *testing.T) {
+ r := New()
+
+ keys := []string{
+ "",
+ "foo",
+ "foobar",
+ "foobarbaz",
+ "foobarbazzip",
+ "foozip",
+ }
+ for _, k := range keys {
+ r.Insert(k, nil)
+ }
+ if r.Len() != len(keys) {
+ t.Fatalf("bad len: %v %v", r.Len(), len(keys))
+ }
+
+ type exp struct {
+ inp string
+ out string
+ }
+ cases := []exp{
+ {"a", ""},
+ {"abc", ""},
+ {"fo", ""},
+ {"foo", "foo"},
+ {"foob", "foo"},
+ {"foobar", "foobar"},
+ {"foobarba", "foobar"},
+ {"foobarbaz", "foobarbaz"},
+ {"foobarbazzi", "foobarbaz"},
+ {"foobarbazzip", "foobarbazzip"},
+ {"foozi", "foo"},
+ {"foozip", "foozip"},
+ {"foozipzap", "foozip"},
+ }
+ for _, test := range cases {
+ m, _, ok := r.LongestPrefix(test.inp)
+ if !ok {
+ t.Fatalf("no match: %v", test)
+ }
+ if m != test.out {
+ t.Fatalf("mis-match: %v %v", m, test)
+ }
+ }
+}
+
+func TestWalkPrefix(t *testing.T) {
+ r := New()
+
+ keys := []string{
+ "foobar",
+ "foo/bar/baz",
+ "foo/baz/bar",
+ "foo/zip/zap",
+ "zipzap",
+ }
+ for _, k := range keys {
+ r.Insert(k, nil)
+ }
+ if r.Len() != len(keys) {
+ t.Fatalf("bad len: %v %v", r.Len(), len(keys))
+ }
+
+ type exp struct {
+ inp string
+ out []string
+ }
+ cases := []exp{
+ exp{
+ "f",
+ []string{"foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap"},
+ },
+ exp{
+ "foo",
+ []string{"foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap"},
+ },
+ exp{
+ "foob",
+ []string{"foobar"},
+ },
+ exp{
+ "foo/",
+ []string{"foo/bar/baz", "foo/baz/bar", "foo/zip/zap"},
+ },
+ exp{
+ "foo/b",
+ []string{"foo/bar/baz", "foo/baz/bar"},
+ },
+ exp{
+ "foo/ba",
+ []string{"foo/bar/baz", "foo/baz/bar"},
+ },
+ exp{
+ "foo/bar",
+ []string{"foo/bar/baz"},
+ },
+ exp{
+ "foo/bar/baz",
+ []string{"foo/bar/baz"},
+ },
+ exp{
+ "foo/bar/bazoo",
+ []string{},
+ },
+ exp{
+ "z",
+ []string{"zipzap"},
+ },
+ }
+
+ for _, test := range cases {
+ out := []string{}
+ fn := func(s string, v interface{}) bool {
+ out = append(out, s)
+ return false
+ }
+ r.WalkPrefix(test.inp, fn)
+ sort.Strings(out)
+ sort.Strings(test.out)
+ if !reflect.DeepEqual(out, test.out) {
+ t.Fatalf("mis-match: %v %v", out, test.out)
+ }
+ }
+}
+
+func TestWalkPath(t *testing.T) {
+ r := New()
+
+ keys := []string{
+ "foo",
+ "foo/bar",
+ "foo/bar/baz",
+ "foo/baz/bar",
+ "foo/zip/zap",
+ "zipzap",
+ }
+ for _, k := range keys {
+ r.Insert(k, nil)
+ }
+ if r.Len() != len(keys) {
+ t.Fatalf("bad len: %v %v", r.Len(), len(keys))
+ }
+
+ type exp struct {
+ inp string
+ out []string
+ }
+ cases := []exp{
+ exp{
+ "f",
+ []string{},
+ },
+ exp{
+ "foo",
+ []string{"foo"},
+ },
+ exp{
+ "foo/",
+ []string{"foo"},
+ },
+ exp{
+ "foo/ba",
+ []string{"foo"},
+ },
+ exp{
+ "foo/bar",
+ []string{"foo", "foo/bar"},
+ },
+ exp{
+ "foo/bar/baz",
+ []string{"foo", "foo/bar", "foo/bar/baz"},
+ },
+ exp{
+ "foo/bar/bazoo",
+ []string{"foo", "foo/bar", "foo/bar/baz"},
+ },
+ exp{
+ "z",
+ []string{},
+ },
+ }
+
+ for _, test := range cases {
+ out := []string{}
+ fn := func(s string, v interface{}) bool {
+ out = append(out, s)
+ return false
+ }
+ r.WalkPath(test.inp, fn)
+ sort.Strings(out)
+ sort.Strings(test.out)
+ if !reflect.DeepEqual(out, test.out) {
+ t.Fatalf("mis-match: %v %v", out, test.out)
+ }
+ }
+}
+
+// generateUUID is used to generate a random UUID
+func generateUUID() string {
+ buf := make([]byte, 16)
+ if _, err := crand.Read(buf); err != nil {
+ panic(fmt.Errorf("failed to read random bytes: %v", err))
+ }
+
+ return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
+ buf[0:4],
+ buf[4:6],
+ buf[6:8],
+ buf[8:10],
+ buf[10:16])
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awstesting/client.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awstesting/client.go
new file mode 100644
index 000000000..ca64a4478
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awstesting/client.go
@@ -0,0 +1,20 @@
+package awstesting
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/defaults"
+)
+
+// NewClient creates and initializes a generic service client for testing.
+func NewClient(cfgs ...*aws.Config) *client.Client {
+ info := metadata.ClientInfo{
+ Endpoint: "http://endpoint",
+ SigningName: "",
+ }
+ def := defaults.Get()
+ def.Config.MergeIn(cfgs...)
+
+ return client.New(*def.Config, info, def.Handlers)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
index f91743c6e..8429470b9 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
@@ -57,16 +57,13 @@ func rcopy(dst, src reflect.Value, root bool) {
}
}
case reflect.Struct:
- if !root {
- dst.Set(reflect.New(src.Type()).Elem())
- }
-
t := dst.Type()
for i := 0; i < t.NumField(); i++ {
name := t.Field(i).Name
- srcval := src.FieldByName(name)
- if srcval.IsValid() {
- rcopy(dst.FieldByName(name), srcval, false)
+ srcVal := src.FieldByName(name)
+ dstVal := dst.FieldByName(name)
+ if srcVal.IsValid() && dstVal.CanSet() {
+ rcopy(dstVal, srcVal, false)
}
}
case reflect.Slice:
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go
new file mode 100644
index 000000000..84b7e3f34
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go
@@ -0,0 +1,233 @@
+package awsutil_test
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/stretchr/testify/assert"
+)
+
+func ExampleCopy() {
+ type Foo struct {
+ A int
+ B []*string
+ }
+
+ // Create the initial value
+ str1 := "hello"
+ str2 := "bye bye"
+ f1 := &Foo{A: 1, B: []*string{&str1, &str2}}
+
+ // Do the copy
+ var f2 Foo
+ awsutil.Copy(&f2, f1)
+
+ // Print the result
+ fmt.Println(awsutil.Prettify(f2))
+
+ // Output:
+ // {
+ // A: 1,
+ // B: ["hello","bye bye"]
+ // }
+}
+
+func TestCopy(t *testing.T) {
+ type Foo struct {
+ A int
+ B []*string
+ C map[string]*int
+ }
+
+ // Create the initial value
+ str1 := "hello"
+ str2 := "bye bye"
+ int1 := 1
+ int2 := 2
+ f1 := &Foo{
+ A: 1,
+ B: []*string{&str1, &str2},
+ C: map[string]*int{
+ "A": &int1,
+ "B": &int2,
+ },
+ }
+
+ // Do the copy
+ var f2 Foo
+ awsutil.Copy(&f2, f1)
+
+ // Values are equal
+ assert.Equal(t, f2.A, f1.A)
+ assert.Equal(t, f2.B, f1.B)
+ assert.Equal(t, f2.C, f1.C)
+
+ // But pointers are not!
+ str3 := "nothello"
+ int3 := 57
+ f2.A = 100
+ f2.B[0] = &str3
+ f2.C["B"] = &int3
+ assert.NotEqual(t, f2.A, f1.A)
+ assert.NotEqual(t, f2.B, f1.B)
+ assert.NotEqual(t, f2.C, f1.C)
+}
+
+func TestCopyNestedWithUnexported(t *testing.T) {
+ type Bar struct {
+ a int
+ B int
+ }
+ type Foo struct {
+ A string
+ B Bar
+ }
+
+ f1 := &Foo{A: "string", B: Bar{a: 1, B: 2}}
+
+ var f2 Foo
+ awsutil.Copy(&f2, f1)
+
+ // Values match
+ assert.Equal(t, f2.A, f1.A)
+ assert.NotEqual(t, f2.B, f1.B)
+ assert.NotEqual(t, f2.B.a, f1.B.a)
+ assert.Equal(t, f2.B.B, f1.B.B)
+}
+
+func TestCopyIgnoreNilMembers(t *testing.T) {
+ type Foo struct {
+ A *string
+ B []string
+ C map[string]string
+ }
+
+ f := &Foo{}
+ assert.Nil(t, f.A)
+ assert.Nil(t, f.B)
+ assert.Nil(t, f.C)
+
+ var f2 Foo
+ awsutil.Copy(&f2, f)
+ assert.Nil(t, f2.A)
+ assert.Nil(t, f2.B)
+ assert.Nil(t, f2.C)
+
+ fcopy := awsutil.CopyOf(f)
+ f3 := fcopy.(*Foo)
+ assert.Nil(t, f3.A)
+ assert.Nil(t, f3.B)
+ assert.Nil(t, f3.C)
+}
+
+func TestCopyPrimitive(t *testing.T) {
+ str := "hello"
+ var s string
+ awsutil.Copy(&s, &str)
+ assert.Equal(t, "hello", s)
+}
+
+func TestCopyNil(t *testing.T) {
+ var s string
+ awsutil.Copy(&s, nil)
+ assert.Equal(t, "", s)
+}
+
+func TestCopyReader(t *testing.T) {
+ var buf io.Reader = bytes.NewReader([]byte("hello world"))
+ var r io.Reader
+ awsutil.Copy(&r, buf)
+ b, err := ioutil.ReadAll(r)
+ assert.NoError(t, err)
+ assert.Equal(t, []byte("hello world"), b)
+
+ // empty bytes because this is not a deep copy
+ b, err = ioutil.ReadAll(buf)
+ assert.NoError(t, err)
+ assert.Equal(t, []byte(""), b)
+}
+
+func TestCopyDifferentStructs(t *testing.T) {
+ type SrcFoo struct {
+ A int
+ B []*string
+ C map[string]*int
+ SrcUnique string
+ SameNameDiffType int
+ unexportedPtr *int
+ ExportedPtr *int
+ }
+ type DstFoo struct {
+ A int
+ B []*string
+ C map[string]*int
+ DstUnique int
+ SameNameDiffType string
+ unexportedPtr *int
+ ExportedPtr *int
+ }
+
+ // Create the initial value
+ str1 := "hello"
+ str2 := "bye bye"
+ int1 := 1
+ int2 := 2
+ f1 := &SrcFoo{
+ A: 1,
+ B: []*string{&str1, &str2},
+ C: map[string]*int{
+ "A": &int1,
+ "B": &int2,
+ },
+ SrcUnique: "unique",
+ SameNameDiffType: 1,
+ unexportedPtr: &int1,
+ ExportedPtr: &int2,
+ }
+
+ // Do the copy
+ var f2 DstFoo
+ awsutil.Copy(&f2, f1)
+
+ // Values are equal
+ assert.Equal(t, f2.A, f1.A)
+ assert.Equal(t, f2.B, f1.B)
+ assert.Equal(t, f2.C, f1.C)
+ assert.Equal(t, "unique", f1.SrcUnique)
+ assert.Equal(t, 1, f1.SameNameDiffType)
+ assert.Equal(t, 0, f2.DstUnique)
+ assert.Equal(t, "", f2.SameNameDiffType)
+ assert.Equal(t, int1, *f1.unexportedPtr)
+ assert.Nil(t, f2.unexportedPtr)
+ assert.Equal(t, int2, *f1.ExportedPtr)
+ assert.Equal(t, int2, *f2.ExportedPtr)
+}
+
+func ExampleCopyOf() {
+ type Foo struct {
+ A int
+ B []*string
+ }
+
+ // Create the initial value
+ str1 := "hello"
+ str2 := "bye bye"
+ f1 := &Foo{A: 1, B: []*string{&str1, &str2}}
+
+ // Do the copy
+ v := awsutil.CopyOf(f1)
+ var f2 *Foo = v.(*Foo)
+
+ // Print the result
+ fmt.Println(awsutil.Prettify(f2))
+
+ // Output:
+ // {
+ // A: 1,
+ // B: ["hello","bye bye"]
+ // }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go
new file mode 100644
index 000000000..0da6b06fd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go
@@ -0,0 +1,68 @@
+package awsutil_test
+
+import (
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/stretchr/testify/assert"
+)
+
+type Struct struct {
+ A []Struct
+ z []Struct
+ B *Struct
+ D *Struct
+ C string
+}
+
+var data = Struct{
+ A: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}},
+ z: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}},
+ B: &Struct{B: &Struct{C: "terminal"}, D: &Struct{C: "terminal2"}},
+ C: "initial",
+}
+
+func TestValueAtPathSuccess(t *testing.T) {
+ assert.Equal(t, []interface{}{"initial"}, awsutil.ValuesAtPath(data, "C"))
+ assert.Equal(t, []interface{}{"value1"}, awsutil.ValuesAtPath(data, "A[0].C"))
+ assert.Equal(t, []interface{}{"value2"}, awsutil.ValuesAtPath(data, "A[1].C"))
+ assert.Equal(t, []interface{}{"value3"}, awsutil.ValuesAtPath(data, "A[2].C"))
+ assert.Equal(t, []interface{}{"value3"}, awsutil.ValuesAtAnyPath(data, "a[2].c"))
+ assert.Equal(t, []interface{}{"value3"}, awsutil.ValuesAtPath(data, "A[-1].C"))
+ assert.Equal(t, []interface{}{"value1", "value2", "value3"}, awsutil.ValuesAtPath(data, "A[].C"))
+ assert.Equal(t, []interface{}{"terminal"}, awsutil.ValuesAtPath(data, "B . B . C"))
+ assert.Equal(t, []interface{}{"terminal", "terminal2"}, awsutil.ValuesAtPath(data, "B.*.C"))
+ assert.Equal(t, []interface{}{"initial"}, awsutil.ValuesAtPath(data, "A.D.X || C"))
+}
+
+func TestValueAtPathFailure(t *testing.T) {
+ assert.Equal(t, []interface{}(nil), awsutil.ValuesAtPath(data, "C.x"))
+ assert.Equal(t, []interface{}(nil), awsutil.ValuesAtPath(data, ".x"))
+ assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "X.Y.Z"))
+ assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "A[100].C"))
+ assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "A[3].C"))
+ assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "B.B.C.Z"))
+ assert.Equal(t, []interface{}(nil), awsutil.ValuesAtPath(data, "z[-1].C"))
+ assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(nil, "A.B.C"))
+ assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(Struct{}, "A"))
+}
+
+func TestSetValueAtPathSuccess(t *testing.T) {
+ var s Struct
+ awsutil.SetValueAtPath(&s, "C", "test1")
+ awsutil.SetValueAtPath(&s, "B.B.C", "test2")
+ awsutil.SetValueAtPath(&s, "B.D.C", "test3")
+ assert.Equal(t, "test1", s.C)
+ assert.Equal(t, "test2", s.B.B.C)
+ assert.Equal(t, "test3", s.B.D.C)
+
+ awsutil.SetValueAtPath(&s, "B.*.C", "test0")
+ assert.Equal(t, "test0", s.B.B.C)
+ assert.Equal(t, "test0", s.B.D.C)
+
+ var s2 Struct
+ awsutil.SetValueAtAnyPath(&s2, "b.b.c", "test0")
+ assert.Equal(t, "test0", s2.B.B.C)
+ awsutil.SetValueAtAnyPath(&s2, "A", []Struct{{}})
+ assert.Equal(t, []Struct{{}}, s2.A)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go
new file mode 100644
index 000000000..63ab805b9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -0,0 +1,111 @@
+package client
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http/httputil"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+ Config *aws.Config
+ Handlers request.Handlers
+ Endpoint, SigningRegion string
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+ ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
+type Client struct {
+ request.Retryer
+ metadata.ClientInfo
+
+ Config aws.Config
+ Handlers request.Handlers
+}
+
+// New will return a pointer to a new initialized service client.
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
+ svc := &Client{
+ Config: cfg,
+ ClientInfo: info,
+ Handlers: handlers,
+ }
+
+ maxRetries := aws.IntValue(cfg.MaxRetries)
+ if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+ maxRetries = 3
+ }
+ svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
+
+ svc.AddDebugHandlers()
+
+ for _, option := range options {
+ option(svc)
+ }
+
+ return svc
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
+ return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (c *Client) AddDebugHandlers() {
+ if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
+ return
+ }
+
+ c.Handlers.Send.PushFront(logRequest)
+ c.Handlers.Send.PushBack(logResponse)
+}
+
+const logReqMsg = `DEBUG: Request %s/%s Details:
+---[ REQUEST POST-SIGN ]-----------------------------
+%s
+-----------------------------------------------------`
+
+func logRequest(r *request.Request) {
+ logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+ dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody)
+
+ if logBody {
+ // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
+ // Body as a NoOpCloser and will not be reset after read by the HTTP
+ // client reader.
+ r.Body.Seek(r.BodyStart, 0)
+ r.HTTPRequest.Body = ioutil.NopCloser(r.Body)
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
+}
+
+const logRespMsg = `DEBUG: Response %s/%s Details:
+---[ RESPONSE ]--------------------------------------
+%s
+-----------------------------------------------------`
+
+func logResponse(r *request.Request) {
+ var msg = "no response data"
+ if r.HTTPResponse != nil {
+ logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+ dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody)
+ msg = string(dumpedBody)
+ } else if r.Error != nil {
+ msg = r.Error.Error()
+ }
+ r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg))
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/service/default_retryer.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
similarity index 83%
rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/service/default_retryer.go
rename to Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
index c3dd0fa35..24d39ce56 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/service/default_retryer.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -1,11 +1,10 @@
-package service
+package client
import (
"math"
"math/rand"
"time"
- "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
)
@@ -22,16 +21,13 @@ import (
// // This implementation always has 100 max retries
// func (d retryer) MaxRetries() uint { return 100 }
type DefaultRetryer struct {
- *Service
+ NumMaxRetries int
}
// MaxRetries returns the number of maximum returns the service will use to make
// an individual API request.
-func (d DefaultRetryer) MaxRetries() uint {
- if aws.IntValue(d.Service.Config.MaxRetries) < 0 {
- return d.DefaultMaxRetries
- }
- return uint(aws.IntValue(d.Service.Config.MaxRetries))
+func (d DefaultRetryer) MaxRetries() int {
+ return d.NumMaxRetries
}
// RetryRules returns the delay duration before retrying this request again
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
new file mode 100644
index 000000000..4778056dd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
@@ -0,0 +1,12 @@
+package metadata
+
+// ClientInfo wraps immutable data from the client.Client structure.
+type ClientInfo struct {
+ ServiceName string
+ APIVersion string
+ Endpoint string
+ SigningName string
+ SigningRegion string
+ JSONVersion string
+ TargetPrefix string
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go
index cd2aade28..f157f8b3b 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go
@@ -7,15 +7,17 @@ import (
"github.com/aws/aws-sdk-go/aws/credentials"
)
-// DefaultRetries is the default number of retries for a service. The value of
-// -1 indicates that the service specific retry default will be used.
-const DefaultRetries = -1
+// UseServiceDefaultRetries instructs the config to use the service's own default
+// number of retries. This will be the default action if Config.MaxRetries
+// is nil also.
+const UseServiceDefaultRetries = -1
// A Config provides service configuration for service clients. By default,
// all clients will use the {defaults.DefaultConfig} structure.
type Config struct {
// The credentials object to use when signing requests. Defaults to
- // {defaults.DefaultChainCredentials}.
+ // a chain of credential providers to search for credentials in environment
+ // variables, shared credential file, and EC2 Instance Roles.
Credentials *credentials.Credentials
// An optional endpoint URL (hostname only or fully qualified URI)
@@ -171,15 +173,17 @@ func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
return c
}
-// Merge returns a new Config with the other Config's attribute values merged into
-// this Config. If the other Config's attribute is nil it will not be merged into
-// the new Config to be returned.
-func (c Config) Merge(other *Config) *Config {
- if other == nil {
- return &c
+// MergeIn merges the passed in configs into the existing config object.
+func (c *Config) MergeIn(cfgs ...*Config) {
+ for _, other := range cfgs {
+ mergeInConfig(c, other)
}
+}
- dst := c
+func mergeInConfig(dst *Config, other *Config) {
+ if other == nil {
+ return
+ }
if other.Credentials != nil {
dst.Credentials = other.Credentials
@@ -228,12 +232,17 @@ func (c Config) Merge(other *Config) *Config {
if other.SleepDelay != nil {
dst.SleepDelay = other.SleepDelay
}
-
- return &dst
}
-// Copy will return a shallow copy of the Config object.
-func (c Config) Copy() *Config {
- dst := c
- return &dst
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c *Config) Copy(cfgs ...*Config) *Config {
+ dst := &Config{}
+ dst.MergeIn(c)
+
+ for _, cfg := range cfgs {
+ dst.MergeIn(cfg)
+ }
+
+ return dst
}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config_test.go
new file mode 100644
index 000000000..fe97a31fc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config_test.go
@@ -0,0 +1,86 @@
+package aws
+
+import (
+ "net/http"
+ "reflect"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+var testCredentials = credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")
+
+var copyTestConfig = Config{
+ Credentials: testCredentials,
+ Endpoint: String("CopyTestEndpoint"),
+ Region: String("COPY_TEST_AWS_REGION"),
+ DisableSSL: Bool(true),
+ HTTPClient: http.DefaultClient,
+ LogLevel: LogLevel(LogDebug),
+ Logger: NewDefaultLogger(),
+ MaxRetries: Int(3),
+ DisableParamValidation: Bool(true),
+ DisableComputeChecksums: Bool(true),
+ S3ForcePathStyle: Bool(true),
+}
+
+func TestCopy(t *testing.T) {
+ want := copyTestConfig
+ got := copyTestConfig.Copy()
+ if !reflect.DeepEqual(*got, want) {
+ t.Errorf("Copy() = %+v", got)
+ t.Errorf(" want %+v", want)
+ }
+
+ got.Region = String("other")
+ if got.Region == want.Region {
+ t.Errorf("Expect setting copy values not not reflect in source")
+ }
+}
+
+func TestCopyReturnsNewInstance(t *testing.T) {
+ want := copyTestConfig
+ got := copyTestConfig.Copy()
+ if got == &want {
+ t.Errorf("Copy() = %p; want different instance as source %p", got, &want)
+ }
+}
+
+var mergeTestZeroValueConfig = Config{}
+
+var mergeTestConfig = Config{
+ Credentials: testCredentials,
+ Endpoint: String("MergeTestEndpoint"),
+ Region: String("MERGE_TEST_AWS_REGION"),
+ DisableSSL: Bool(true),
+ HTTPClient: http.DefaultClient,
+ LogLevel: LogLevel(LogDebug),
+ Logger: NewDefaultLogger(),
+ MaxRetries: Int(10),
+ DisableParamValidation: Bool(true),
+ DisableComputeChecksums: Bool(true),
+ S3ForcePathStyle: Bool(true),
+}
+
+var mergeTests = []struct {
+ cfg *Config
+ in *Config
+ want *Config
+}{
+ {&Config{}, nil, &Config{}},
+ {&Config{}, &mergeTestZeroValueConfig, &Config{}},
+ {&Config{}, &mergeTestConfig, &mergeTestConfig},
+}
+
+func TestMerge(t *testing.T) {
+ for i, tt := range mergeTests {
+ got := tt.cfg.Copy()
+ got.MergeIn(tt.in)
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("Config %d %+v", i, tt.cfg)
+ t.Errorf(" Merge(%+v)", tt.in)
+ t.Errorf(" got %+v", got)
+ t.Errorf(" want %+v", tt.want)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types_test.go
new file mode 100644
index 000000000..df7a3e5d2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types_test.go
@@ -0,0 +1,437 @@
+package aws
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var testCasesStringSlice = [][]string{
+ {"a", "b", "c", "d", "e"},
+ {"a", "b", "", "", "e"},
+}
+
+func TestStringSlice(t *testing.T) {
+ for idx, in := range testCasesStringSlice {
+ if in == nil {
+ continue
+ }
+ out := StringSlice(in)
+ assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+ for i := range out {
+ assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
+ }
+
+ out2 := StringValueSlice(out)
+ assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+ assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
+ }
+}
+
+var testCasesStringValueSlice = [][]*string{
+ {String("a"), String("b"), nil, String("c")},
+}
+
+func TestStringValueSlice(t *testing.T) {
+ for idx, in := range testCasesStringValueSlice {
+ if in == nil {
+ continue
+ }
+ out := StringValueSlice(in)
+ assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+ for i := range out {
+ if in[i] == nil {
+ assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
+ } else {
+ assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
+ }
+ }
+
+ out2 := StringSlice(out)
+ assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+ for i := range out2 {
+ if in[i] == nil {
+ assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
+ } else {
+ assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
+ }
+ }
+ }
+}
+
+var testCasesStringMap = []map[string]string{
+ {"a": "1", "b": "2", "c": "3"},
+}
+
+func TestStringMap(t *testing.T) {
+ for idx, in := range testCasesStringMap {
+ if in == nil {
+ continue
+ }
+ out := StringMap(in)
+ assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+ for i := range out {
+ assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
+ }
+
+ out2 := StringValueMap(out)
+ assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+ assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
+ }
+}
+
+var testCasesBoolSlice = [][]bool{
+ {true, true, false, false},
+}
+
+func TestBoolSlice(t *testing.T) {
+ for idx, in := range testCasesBoolSlice {
+ if in == nil {
+ continue
+ }
+ out := BoolSlice(in)
+ assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+ for i := range out {
+ assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
+ }
+
+ out2 := BoolValueSlice(out)
+ assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+ assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
+ }
+}
+
+var testCasesBoolValueSlice = [][]*bool{}
+
+func TestBoolValueSlice(t *testing.T) {
+ for idx, in := range testCasesBoolValueSlice {
+ if in == nil {
+ continue
+ }
+ out := BoolValueSlice(in)
+ assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+ for i := range out {
+ if in[i] == nil {
+ assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
+ } else {
+ assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
+ }
+ }
+
+ out2 := BoolSlice(out)
+ assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+ for i := range out2 {
+ if in[i] == nil {
+ assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
+ } else {
+ assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
+ }
+ }
+ }
+}
+
+var testCasesBoolMap = []map[string]bool{
+ {"a": true, "b": false, "c": true},
+}
+
+func TestBoolMap(t *testing.T) {
+ for idx, in := range testCasesBoolMap {
+ if in == nil {
+ continue
+ }
+ out := BoolMap(in)
+ assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+ for i := range out {
+ assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
+ }
+
+ out2 := BoolValueMap(out)
+ assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+ assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
+ }
+}
+
+var testCasesIntSlice = [][]int{
+ {1, 2, 3, 4},
+}
+
+func TestIntSlice(t *testing.T) {
+ for idx, in := range testCasesIntSlice {
+ if in == nil {
+ continue
+ }
+ out := IntSlice(in)
+ assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+ for i := range out {
+ assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
+ }
+
+ out2 := IntValueSlice(out)
+ assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+ assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
+ }
+}
+
+var testCasesIntValueSlice = [][]*int{}
+
+func TestIntValueSlice(t *testing.T) {
+ for idx, in := range testCasesIntValueSlice {
+ if in == nil {
+ continue
+ }
+ out := IntValueSlice(in)
+ assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+ for i := range out {
+ if in[i] == nil {
+ assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
+ } else {
+ assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
+ }
+ }
+
+ out2 := IntSlice(out)
+ assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+ for i := range out2 {
+ if in[i] == nil {
+ assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
+ } else {
+ assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
+ }
+ }
+ }
+}
+
+var testCasesIntMap = []map[string]int{
+ {"a": 3, "b": 2, "c": 1},
+}
+
+func TestIntMap(t *testing.T) {
+ for idx, in := range testCasesIntMap {
+ if in == nil {
+ continue
+ }
+ out := IntMap(in)
+ assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+ for i := range out {
+ assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
+ }
+
+ out2 := IntValueMap(out)
+ assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+ assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
+ }
+}
+
+var testCasesInt64Slice = [][]int64{
+ {1, 2, 3, 4},
+}
+
+func TestInt64Slice(t *testing.T) {
+ for idx, in := range testCasesInt64Slice {
+ if in == nil {
+ continue
+ }
+ out := Int64Slice(in)
+ assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+ for i := range out {
+ assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
+ }
+
+ out2 := Int64ValueSlice(out)
+ assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+ assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
+ }
+}
+
+var testCasesInt64ValueSlice = [][]*int64{}
+
+func TestInt64ValueSlice(t *testing.T) {
+ for idx, in := range testCasesInt64ValueSlice {
+ if in == nil {
+ continue
+ }
+ out := Int64ValueSlice(in)
+ assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+ for i := range out {
+ if in[i] == nil {
+ assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
+ } else {
+ assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
+ }
+ }
+
+ out2 := Int64Slice(out)
+ assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+ for i := range out2 {
+ if in[i] == nil {
+ assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
+ } else {
+ assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
+ }
+ }
+ }
+}
+
+var testCasesInt64Map = []map[string]int64{
+ {"a": 3, "b": 2, "c": 1},
+}
+
+func TestInt64Map(t *testing.T) {
+ for idx, in := range testCasesInt64Map {
+ if in == nil {
+ continue
+ }
+ out := Int64Map(in)
+ assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+ for i := range out {
+ assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
+ }
+
+ out2 := Int64ValueMap(out)
+ assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+ assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
+ }
+}
+
+var testCasesFloat64Slice = [][]float64{
+ {1, 2, 3, 4},
+}
+
+func TestFloat64Slice(t *testing.T) {
+ for idx, in := range testCasesFloat64Slice {
+ if in == nil {
+ continue
+ }
+ out := Float64Slice(in)
+ assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+ for i := range out {
+ assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
+ }
+
+ out2 := Float64ValueSlice(out)
+ assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+ assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
+ }
+}
+
+var testCasesFloat64ValueSlice = [][]*float64{}
+
+func TestFloat64ValueSlice(t *testing.T) {
+ for idx, in := range testCasesFloat64ValueSlice {
+ if in == nil {
+ continue
+ }
+ out := Float64ValueSlice(in)
+ assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+ for i := range out {
+ if in[i] == nil {
+ assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
+ } else {
+ assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
+ }
+ }
+
+ out2 := Float64Slice(out)
+ assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+ for i := range out2 {
+ if in[i] == nil {
+ assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
+ } else {
+ assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
+ }
+ }
+ }
+}
+
+var testCasesFloat64Map = []map[string]float64{
+ {"a": 3, "b": 2, "c": 1},
+}
+
+func TestFloat64Map(t *testing.T) {
+ for idx, in := range testCasesFloat64Map {
+ if in == nil {
+ continue
+ }
+ out := Float64Map(in)
+ assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+ for i := range out {
+ assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
+ }
+
+ out2 := Float64ValueMap(out)
+ assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+ assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
+ }
+}
+
+var testCasesTimeSlice = [][]time.Time{
+ {time.Now(), time.Now().AddDate(100, 0, 0)},
+}
+
+func TestTimeSlice(t *testing.T) {
+ for idx, in := range testCasesTimeSlice {
+ if in == nil {
+ continue
+ }
+ out := TimeSlice(in)
+ assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+ for i := range out {
+ assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
+ }
+
+ out2 := TimeValueSlice(out)
+ assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+ assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
+ }
+}
+
+var testCasesTimeValueSlice = [][]*time.Time{}
+
+func TestTimeValueSlice(t *testing.T) {
+ for idx, in := range testCasesTimeValueSlice {
+ if in == nil {
+ continue
+ }
+ out := TimeValueSlice(in)
+ assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+ for i := range out {
+ if in[i] == nil {
+ assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
+ } else {
+ assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
+ }
+ }
+
+ out2 := TimeSlice(out)
+ assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+ for i := range out2 {
+ if in[i] == nil {
+ assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
+ } else {
+ assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
+ }
+ }
+ }
+}
+
+var testCasesTimeMap = []map[string]time.Time{
+ {"a": time.Now().AddDate(-100, 0, 0), "b": time.Now()},
+}
+
+func TestTimeMap(t *testing.T) {
+ for idx, in := range testCasesTimeMap {
+ if in == nil {
+ continue
+ }
+ out := TimeMap(in)
+ assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
+ for i := range out {
+ assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
+ }
+
+ out2 := TimeValueMap(out)
+ assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
+ assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
+ }
+}
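
As a hedged aside, the helpers exercised above exist because generated API shapes model optional members as pointer fields. This sketch shows the typical round trip; the fakeInput struct is hypothetical, not an SDK type.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

// fakeInput stands in for a generated API input struct, which models
// optional members as pointers.
type fakeInput struct {
	Name    *string
	Count   *int64
	Enabled *bool
}

func main() {
	in := fakeInput{
		Name:    aws.String("example"),
		Count:   aws.Int64(3),
		Enabled: aws.Bool(true),
	}

	// The *Value helpers dereference safely, returning zero values for nil.
	fmt.Println(aws.StringValue(in.Name), aws.Int64Value(in.Count), aws.BoolValue(in.Enabled))

	// Slice helpers convert in bulk, mirroring TestStringSlice above.
	ptrs := aws.StringSlice([]string{"a", "b", "c"})
	fmt.Println(aws.StringValueSlice(ptrs))
}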
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
index 597feb4fa..35c7cb984 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -59,7 +59,7 @@ var reStatusCode = regexp.MustCompile(`^(\d{3})`)
// SendHandler is a request handler to send service request using HTTP client.
var SendHandler = request.NamedHandler{"core.SendHandler", func(r *request.Request) {
var err error
- r.HTTPResponse, err = r.Service.Config.HTTPClient.Do(r.HTTPRequest)
+ r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest)
if err != nil {
// Capture the case where url.Error is returned for error processing
// response. e.g. 301 without location header comes back as string
@@ -110,13 +110,13 @@ var AfterRetryHandler = request.NamedHandler{"core.AfterRetryHandler", func(r *r
if r.WillRetry() {
r.RetryDelay = r.RetryRules(r)
- r.Service.Config.SleepDelay(r.RetryDelay)
+ r.Config.SleepDelay(r.RetryDelay)
// when the expired token exception occurs the credentials
// need to be expired locally so that the next request to
// get credentials will trigger a credentials refresh.
if r.IsErrorExpired() {
- r.Service.Config.Credentials.Expire()
+ r.Config.Credentials.Expire()
}
r.RetryCount++
@@ -128,9 +128,9 @@ var AfterRetryHandler = request.NamedHandler{"core.AfterRetryHandler", func(r *r
// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
// region is not valid.
var ValidateEndpointHandler = request.NamedHandler{"core.ValidateEndpointHandler", func(r *request.Request) {
- if r.Service.SigningRegion == "" && aws.StringValue(r.Service.Config.Region) == "" {
+ if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
r.Error = aws.ErrMissingRegion
- } else if r.Service.Endpoint == "" {
+ } else if r.ClientInfo.Endpoint == "" {
r.Error = aws.ErrMissingEndpoint
}
}}
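
A hedged sketch of what the handler change above implies for callers: handlers now read configuration from r.Config directly rather than r.Service.Config. The ec2metadata client is used only because it is the concrete client present in this diff, and the handler name is made up.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	svc := ec2metadata.New(session.New(), aws.NewConfig().WithRegion("us-west-2"))

	// Custom handlers see the merged client config on the request itself.
	svc.Handlers.Build.PushBackNamed(request.NamedHandler{
		"example.LogRegion",
		func(r *request.Request) {
			fmt.Println("building request for region:", aws.StringValue(r.Config.Region))
		},
	})

	// Any call runs the handler chain; off EC2 this simply reports false.
	fmt.Println("metadata available:", svc.Available())
}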
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go
new file mode 100644
index 000000000..882c323d7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go
@@ -0,0 +1,113 @@
+package corehandlers_test
+
+import (
+ "fmt"
+ "net/http"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awstesting"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+func TestValidateEndpointHandler(t *testing.T) {
+ os.Clearenv()
+
+ svc := awstesting.NewClient(aws.NewConfig().WithRegion("us-west-2"))
+ svc.Handlers.Clear()
+ svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+
+ req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
+ err := req.Build()
+
+ assert.NoError(t, err)
+}
+
+func TestValidateEndpointHandlerErrorRegion(t *testing.T) {
+ os.Clearenv()
+
+ svc := awstesting.NewClient()
+ svc.Handlers.Clear()
+ svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+
+ req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
+ err := req.Build()
+
+ assert.Error(t, err)
+ assert.Equal(t, aws.ErrMissingRegion, err)
+}
+
+type mockCredsProvider struct {
+ expired bool
+ retrieveCalled bool
+}
+
+func (m *mockCredsProvider) Retrieve() (credentials.Value, error) {
+ m.retrieveCalled = true
+ return credentials.Value{}, nil
+}
+
+func (m *mockCredsProvider) IsExpired() bool {
+ return m.expired
+}
+
+func TestAfterRetryRefreshCreds(t *testing.T) {
+ os.Clearenv()
+ credProvider := &mockCredsProvider{}
+
+ svc := awstesting.NewClient(&aws.Config{
+ Credentials: credentials.NewCredentials(credProvider),
+ MaxRetries: aws.Int(1),
+ })
+
+ svc.Handlers.Clear()
+ svc.Handlers.ValidateResponse.PushBack(func(r *request.Request) {
+ r.Error = awserr.New("UnknownError", "", nil)
+ r.HTTPResponse = &http.Response{StatusCode: 400}
+ })
+ svc.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
+ r.Error = awserr.New("ExpiredTokenException", "", nil)
+ })
+ svc.Handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+
+ assert.True(t, svc.Config.Credentials.IsExpired(), "Expect to start out expired")
+ assert.False(t, credProvider.retrieveCalled)
+
+ req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
+ req.Send()
+
+ assert.True(t, svc.Config.Credentials.IsExpired())
+ assert.False(t, credProvider.retrieveCalled)
+
+ _, err := svc.Config.Credentials.Get()
+ assert.NoError(t, err)
+ assert.True(t, credProvider.retrieveCalled)
+}
+
+type testSendHandlerTransport struct{}
+
+func (t *testSendHandlerTransport) RoundTrip(r *http.Request) (*http.Response, error) {
+ return nil, fmt.Errorf("mock error")
+}
+
+func TestSendHandlerError(t *testing.T) {
+ svc := awstesting.NewClient(&aws.Config{
+ HTTPClient: &http.Client{
+ Transport: &testSendHandlerTransport{},
+ },
+ })
+ svc.Handlers.Clear()
+ svc.Handlers.Send.PushBackNamed(corehandlers.SendHandler)
+ r := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
+
+ r.Send()
+
+ assert.Error(t, r.Error)
+ assert.NotNil(t, r.HTTPResponse)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go
new file mode 100644
index 000000000..96bfc0e67
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go
@@ -0,0 +1,134 @@
+package corehandlers_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/stretchr/testify/require"
+)
+
+var testSvc = func() *client.Client {
+ s := &client.Client{
+ Config: aws.Config{},
+ ClientInfo: metadata.ClientInfo{
+ ServiceName: "mock-service",
+ APIVersion: "2015-01-01",
+ },
+ }
+ return s
+}()
+
+type StructShape struct {
+ RequiredList []*ConditionalStructShape `required:"true"`
+ RequiredMap map[string]*ConditionalStructShape `required:"true"`
+ RequiredBool *bool `required:"true"`
+ OptionalStruct *ConditionalStructShape
+
+ hiddenParameter *string
+
+ metadataStructureShape
+}
+
+type metadataStructureShape struct {
+ SDKShapeTraits bool
+}
+
+type ConditionalStructShape struct {
+ Name *string `required:"true"`
+ SDKShapeTraits bool
+}
+
+func TestNoErrors(t *testing.T) {
+ input := &StructShape{
+ RequiredList: []*ConditionalStructShape{},
+ RequiredMap: map[string]*ConditionalStructShape{
+ "key1": {Name: aws.String("Name")},
+ "key2": {Name: aws.String("Name")},
+ },
+ RequiredBool: aws.Bool(true),
+ OptionalStruct: &ConditionalStructShape{Name: aws.String("Name")},
+ }
+
+ req := testSvc.NewRequest(&request.Operation{}, input, nil)
+ corehandlers.ValidateParametersHandler.Fn(req)
+ require.NoError(t, req.Error)
+}
+
+func TestMissingRequiredParameters(t *testing.T) {
+ input := &StructShape{}
+ req := testSvc.NewRequest(&request.Operation{}, input, nil)
+ corehandlers.ValidateParametersHandler.Fn(req)
+
+ require.Error(t, req.Error)
+ assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code())
+ assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList\n- missing required parameter: RequiredMap\n- missing required parameter: RequiredBool", req.Error.(awserr.Error).Message())
+}
+
+func TestNestedMissingRequiredParameters(t *testing.T) {
+ input := &StructShape{
+ RequiredList: []*ConditionalStructShape{{}},
+ RequiredMap: map[string]*ConditionalStructShape{
+ "key1": {Name: aws.String("Name")},
+ "key2": {},
+ },
+ RequiredBool: aws.Bool(true),
+ OptionalStruct: &ConditionalStructShape{},
+ }
+
+ req := testSvc.NewRequest(&request.Operation{}, input, nil)
+ corehandlers.ValidateParametersHandler.Fn(req)
+
+ require.Error(t, req.Error)
+ assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code())
+ assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList[0].Name\n- missing required parameter: RequiredMap[\"key2\"].Name\n- missing required parameter: OptionalStruct.Name", req.Error.(awserr.Error).Message())
+}
+
+type testInput struct {
+ StringField string `min:"5"`
+ PtrStrField *string `min:"2"`
+ ListField []string `min:"3"`
+ MapField map[string]string `min:"4"`
+}
+
+var testsFieldMin = []struct {
+ err awserr.Error
+ in testInput
+}{
+ {
+ err: awserr.New("InvalidParameter", "1 validation errors:\n- field too short, minimum length 5: StringField", nil),
+ in: testInput{StringField: "abcd"},
+ },
+ {
+ err: awserr.New("InvalidParameter", "2 validation errors:\n- field too short, minimum length 5: StringField\n- field too short, minimum length 3: ListField", nil),
+ in: testInput{StringField: "abcd", ListField: []string{"a", "b"}},
+ },
+ {
+ err: awserr.New("InvalidParameter", "3 validation errors:\n- field too short, minimum length 5: StringField\n- field too short, minimum length 3: ListField\n- field too short, minimum length 4: MapField", nil),
+ in: testInput{StringField: "abcd", ListField: []string{"a", "b"}, MapField: map[string]string{"a": "a", "b": "b"}},
+ },
+ {
+ err: awserr.New("InvalidParameter", "1 validation errors:\n- field too short, minimum length 2: PtrStrField", nil),
+ in: testInput{StringField: "abcde", PtrStrField: aws.String("v")},
+ },
+ {
+ err: nil,
+ in: testInput{StringField: "abcde", PtrStrField: aws.String("value"),
+ ListField: []string{"a", "b", "c"}, MapField: map[string]string{"a": "a", "b": "b", "c": "c", "d": "d"}},
+ },
+}
+
+func TestValidateFieldMinParameter(t *testing.T) {
+ for i, c := range testsFieldMin {
+ req := testSvc.NewRequest(&request.Operation{}, &c.in, nil)
+ corehandlers.ValidateParametersHandler.Fn(req)
+
+ require.Equal(t, c.err, req.Error, "%d case failed", i)
+ }
+}
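
A hedged sketch of the tag-driven validation the tests above cover; exampleInput is a hypothetical shape, and the bare client.Client construction simply mirrors testSvc above.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/client/metadata"
	"github.com/aws/aws-sdk-go/aws/corehandlers"
	"github.com/aws/aws-sdk-go/aws/request"
)

// exampleInput is hypothetical; the `required` and `min` tags are what
// ValidateParametersHandler inspects.
type exampleInput struct {
	Name *string  `required:"true"`
	Tags []string `min:"1"`

	SDKShapeTraits bool
}

func main() {
	svc := &client.Client{
		Config:     aws.Config{},
		ClientInfo: metadata.ClientInfo{ServiceName: "example", APIVersion: "2015-01-01"},
	}

	// A missing Name is rejected before the request is ever sent.
	req := svc.NewRequest(&request.Operation{}, &exampleInput{Tags: []string{"a"}}, nil)
	corehandlers.ValidateParametersHandler.Fn(req)

	if aerr, ok := req.Error.(awserr.Error); ok {
		fmt.Println(aerr.Code())    // InvalidParameter
		fmt.Println(aerr.Message()) // missing required parameter: Name
	}
}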
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go
new file mode 100644
index 000000000..4fba22f29
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go
@@ -0,0 +1,73 @@
+package credentials
+
+import (
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestChainProviderGet(t *testing.T) {
+ p := &ChainProvider{
+ Providers: []Provider{
+ &stubProvider{err: awserr.New("FirstError", "first provider error", nil)},
+ &stubProvider{err: awserr.New("SecondError", "second provider error", nil)},
+ &stubProvider{
+ creds: Value{
+ AccessKeyID: "AKID",
+ SecretAccessKey: "SECRET",
+ SessionToken: "",
+ },
+ },
+ },
+ }
+
+ creds, err := p.Retrieve()
+ assert.Nil(t, err, "Expect no error")
+ assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match")
+ assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match")
+ assert.Empty(t, creds.SessionToken, "Expect session token to be empty")
+}
+
+func TestChainProviderIsExpired(t *testing.T) {
+ stubProvider := &stubProvider{expired: true}
+ p := &ChainProvider{
+ Providers: []Provider{
+ stubProvider,
+ },
+ }
+
+ assert.True(t, p.IsExpired(), "Expect expired to be true before any Retrieve")
+ _, err := p.Retrieve()
+ assert.Nil(t, err, "Expect no error")
+ assert.False(t, p.IsExpired(), "Expect not expired after retrieve")
+
+ stubProvider.expired = true
+ assert.True(t, p.IsExpired(), "Expect return of expired provider")
+
+ _, err = p.Retrieve()
+ assert.False(t, p.IsExpired(), "Expect not expired after retrieve")
+}
+
+func TestChainProviderWithNoProvider(t *testing.T) {
+ p := &ChainProvider{
+ Providers: []Provider{},
+ }
+
+ assert.True(t, p.IsExpired(), "Expect expired with no providers")
+ _, err := p.Retrieve()
+ assert.Equal(t, ErrNoValidProvidersFoundInChain, err, "Expect no providers error returned")
+}
+
+func TestChainProviderWithNoValidProvider(t *testing.T) {
+ p := &ChainProvider{
+ Providers: []Provider{
+ &stubProvider{err: awserr.New("FirstError", "first provider error", nil)},
+ &stubProvider{err: awserr.New("SecondError", "second provider error", nil)},
+ },
+ }
+
+ assert.True(t, p.IsExpired(), "Expect expired with no providers")
+ _, err := p.Retrieve()
+ assert.Equal(t, ErrNoValidProvidersFoundInChain, err, "Expect no providers error returned")
+}
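
A hedged sketch of assembling the same kind of chain in application code (the provider order is illustrative): the first provider that can return a Value wins, otherwise ErrNoValidProvidersFoundInChain comes back, as the tests above assert.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Providers are consulted in order until one succeeds.
	creds := credentials.NewChainCredentials([]credentials.Provider{
		&credentials.EnvProvider{},
		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
	})

	v, err := creds.Get()
	if err != nil {
		fmt.Println("no provider in the chain produced credentials:", err)
		return
	}
	fmt.Println("resolved access key:", v.AccessKeyID)
}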
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go
new file mode 100644
index 000000000..99c2b4774
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go
@@ -0,0 +1,62 @@
+package credentials
+
+import (
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/stretchr/testify/assert"
+)
+
+type stubProvider struct {
+ creds Value
+ expired bool
+ err error
+}
+
+func (s *stubProvider) Retrieve() (Value, error) {
+ s.expired = false
+ return s.creds, s.err
+}
+func (s *stubProvider) IsExpired() bool {
+ return s.expired
+}
+
+func TestCredentialsGet(t *testing.T) {
+ c := NewCredentials(&stubProvider{
+ creds: Value{
+ AccessKeyID: "AKID",
+ SecretAccessKey: "SECRET",
+ SessionToken: "",
+ },
+ expired: true,
+ })
+
+ creds, err := c.Get()
+ assert.Nil(t, err, "Expected no error")
+ assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match")
+ assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match")
+ assert.Empty(t, creds.SessionToken, "Expect session token to be empty")
+}
+
+func TestCredentialsGetWithError(t *testing.T) {
+ c := NewCredentials(&stubProvider{err: awserr.New("provider error", "", nil), expired: true})
+
+ _, err := c.Get()
+ assert.Equal(t, "provider error", err.(awserr.Error).Code(), "Expected provider error")
+}
+
+func TestCredentialsExpire(t *testing.T) {
+ stub := &stubProvider{}
+ c := NewCredentials(stub)
+
+ stub.expired = false
+ assert.True(t, c.IsExpired(), "Expected to start out expired")
+ c.Expire()
+ assert.True(t, c.IsExpired(), "Expected to be expired")
+
+ c.forceRefresh = false
+ assert.False(t, c.IsExpired(), "Expected not to be expired")
+
+ stub.expired = true
+ assert.True(t, c.IsExpired(), "Expected to be expired")
+}
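
For context, anything implementing Retrieve and IsExpired satisfies the Provider interface the stub above implements. A hedged sketch with a hypothetical provider:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

// fixedProvider is hypothetical; a real provider would fetch values
// from somewhere external and report expiry honestly.
type fixedProvider struct {
	retrieved bool
}

func (p *fixedProvider) Retrieve() (credentials.Value, error) {
	p.retrieved = true
	return credentials.Value{AccessKeyID: "AKID", SecretAccessKey: "SECRET"}, nil
}

func (p *fixedProvider) IsExpired() bool { return !p.retrieved }

func main() {
	creds := credentials.NewCredentials(&fixedProvider{})

	v, err := creds.Get() // lazily calls Retrieve on first use
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AccessKeyID, creds.IsExpired())
}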
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
index 946a11720..80702c26f 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
@@ -9,6 +9,7 @@ import (
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
)
@@ -25,9 +26,6 @@ import (
// Client: &http.Client{
// Timeout: 10 * time.Second,
// },
-// // Use default EC2 Role metadata endpoint, Alternate endpoints can be
-// // specified setting Endpoint to something else.
-// Endpoint: "",
// // Do not use early expiry of credentials. If a non zero value is
// // specified the credentials will be expired early
// ExpiryWindow: 0,
@@ -35,8 +33,8 @@ import (
type EC2RoleProvider struct {
credentials.Expiry
- // EC2Metadata client to use when connecting to EC2 metadata service
- Client *ec2metadata.Client
+ // Required EC2Metadata client to use when connecting to EC2 metadata service.
+ Client *ec2metadata.EC2Metadata
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
@@ -50,33 +48,40 @@ type EC2RoleProvider struct {
ExpiryWindow time.Duration
}
-// NewCredentials returns a pointer to a new Credentials object
-// wrapping the EC2RoleProvider.
-//
-// Takes a custom http.Client which can be configured for custom handling of
-// things such as timeout.
-//
-// Endpoint is the URL that the EC2RoleProvider will connect to when retrieving
-// role and credentials.
-//
-// Window is the expiry window that will be subtracted from the expiry returned
-// by the role credential request. This is done so that the credentials will
-// expire sooner than their actual lifespan.
-func NewCredentials(client *ec2metadata.Client, window time.Duration) *credentials.Credentials {
- return credentials.NewCredentials(&EC2RoleProvider{
- Client: client,
- ExpiryWindow: window,
- })
+// NewCredentials returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a ConfigProvider to create an EC2Metadata client.
+// The ConfigProvider is satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+ p := &EC2RoleProvider{
+ Client: ec2metadata.New(c),
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes an EC2Metadata client to use when connecting to the
+// EC2 metadata service.
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+ p := &EC2RoleProvider{
+ Client: client,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
}
// Retrieve retrieves credentials from the EC2 service.
// Error will be returned if the request fails, or unable to extract
// the desired credentials.
func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
- if m.Client == nil {
- m.Client = ec2metadata.New(nil)
- }
-
credsList, err := requestCredList(m.Client)
if err != nil {
return credentials.Value{}, err
@@ -101,7 +106,7 @@ func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
}, nil
}
-// A ec2RoleCredRespBody provides the shape for deserializing credential
+// An ec2RoleCredRespBody provides the shape for unmarshalling credential
// request responses.
type ec2RoleCredRespBody struct {
// Success State
@@ -119,7 +124,7 @@ const iamSecurityCredsPath = "/iam/security-credentials"
// requestCredList requests a list of credentials from the EC2 service.
// If there are no credentials, or there is an error making or receiving the request
-func requestCredList(client *ec2metadata.Client) ([]string, error) {
+func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
resp, err := client.GetMetadata(iamSecurityCredsPath)
if err != nil {
return nil, awserr.New("EC2RoleRequestError", "failed to list EC2 Roles", err)
@@ -142,7 +147,7 @@ func requestCredList(client *ec2metadata.Client) ([]string, error) {
//
// If the credentials cannot be found, or there is an error reading the response
// and error will be returned.
-func requestCred(client *ec2metadata.Client, credsName string) (ec2RoleCredRespBody, error) {
+func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
if err != nil {
return ec2RoleCredRespBody{},
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go
new file mode 100644
index 000000000..da3d8ed3e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go
@@ -0,0 +1,159 @@
+package ec2rolecreds_test
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/session"
+)
+
+const credsRespTmpl = `{
+ "Code": "Success",
+ "Type": "AWS-HMAC",
+ "AccessKeyId" : "accessKey",
+ "SecretAccessKey" : "secret",
+ "Token" : "token",
+ "Expiration" : "%s",
+ "LastUpdated" : "2009-11-23T0:00:00Z"
+}`
+
+const credsFailRespTmpl = `{
+ "Code": "ErrorCode",
+ "Message": "ErrorMsg",
+ "LastUpdated": "2009-11-23T0:00:00Z"
+}`
+
+func initTestServer(expireOn string, failAssume bool) *httptest.Server {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path == "/latest/meta-data/iam/security-credentials" {
+ fmt.Fprintln(w, "RoleName")
+ } else if r.URL.Path == "/latest/meta-data/iam/security-credentials/RoleName" {
+ if failAssume {
+ fmt.Fprintf(w, credsFailRespTmpl)
+ } else {
+ fmt.Fprintf(w, credsRespTmpl, expireOn)
+ }
+ } else {
+ http.Error(w, "bad request", http.StatusBadRequest)
+ }
+ }))
+
+ return server
+}
+
+func TestEC2RoleProvider(t *testing.T) {
+ server := initTestServer("2014-12-16T01:51:37Z", false)
+ defer server.Close()
+
+ p := &ec2rolecreds.EC2RoleProvider{
+ Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
+ }
+
+ creds, err := p.Retrieve()
+ assert.Nil(t, err, "Expect no error, %v", err)
+
+ assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
+ assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
+ assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
+}
+
+func TestEC2RoleProviderFailAssume(t *testing.T) {
+ server := initTestServer("2014-12-16T01:51:37Z", true)
+ defer server.Close()
+
+ p := &ec2rolecreds.EC2RoleProvider{
+ Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
+ }
+
+ creds, err := p.Retrieve()
+ assert.Error(t, err, "Expect error")
+
+ e := err.(awserr.Error)
+ assert.Equal(t, "ErrorCode", e.Code())
+ assert.Equal(t, "ErrorMsg", e.Message())
+ assert.Nil(t, e.OrigErr())
+
+ assert.Equal(t, "", creds.AccessKeyID, "Expect access key ID to match")
+ assert.Equal(t, "", creds.SecretAccessKey, "Expect secret access key to match")
+ assert.Equal(t, "", creds.SessionToken, "Expect session token to match")
+}
+
+func TestEC2RoleProviderIsExpired(t *testing.T) {
+ server := initTestServer("2014-12-16T01:51:37Z", false)
+ defer server.Close()
+
+ p := &ec2rolecreds.EC2RoleProvider{
+ Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
+ }
+ p.CurrentTime = func() time.Time {
+ return time.Date(2014, 12, 15, 21, 26, 0, 0, time.UTC)
+ }
+
+ assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.")
+
+ _, err := p.Retrieve()
+ assert.Nil(t, err, "Expect no error, %v", err)
+
+ assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.")
+
+ p.CurrentTime = func() time.Time {
+ return time.Date(3014, 12, 15, 21, 26, 0, 0, time.UTC)
+ }
+
+ assert.True(t, p.IsExpired(), "Expect creds to be expired.")
+}
+
+func TestEC2RoleProviderExpiryWindowIsExpired(t *testing.T) {
+ server := initTestServer("2014-12-16T01:51:37Z", false)
+ defer server.Close()
+
+ p := &ec2rolecreds.EC2RoleProvider{
+ Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
+ ExpiryWindow: time.Hour * 1,
+ }
+ p.CurrentTime = func() time.Time {
+ return time.Date(2014, 12, 15, 0, 51, 37, 0, time.UTC)
+ }
+
+ assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.")
+
+ _, err := p.Retrieve()
+ assert.Nil(t, err, "Expect no error, %v", err)
+
+ assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.")
+
+ p.CurrentTime = func() time.Time {
+ return time.Date(2014, 12, 16, 0, 55, 37, 0, time.UTC)
+ }
+
+ assert.True(t, p.IsExpired(), "Expect creds to be expired.")
+}
+
+func BenchmarkEC2RoleProvider(b *testing.B) {
+ server := initTestServer("2014-12-16T01:51:37Z", false)
+ defer server.Close()
+
+ p := &ec2rolecreds.EC2RoleProvider{
+ Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
+ }
+ _, err := p.Retrieve()
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if _, err := p.Retrieve(); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
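
A hedged sketch of the new constructor shape introduced in ec2_role_provider.go above: a ConfigProvider (here a session) plus optional functional options replace the old client-and-window arguments.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// The session satisfies client.ConfigProvider; the provider builds
	// its own EC2Metadata client from it.
	creds := ec2rolecreds.NewCredentials(session.New(), func(p *ec2rolecreds.EC2RoleProvider) {
		p.ExpiryWindow = 5 * time.Minute // refresh slightly before expiry
	})

	// Retrieval is lazy and only succeeds on an EC2 instance that has
	// an instance profile attached.
	if v, err := creds.Get(); err == nil {
		fmt.Println("role access key:", v.AccessKeyID)
	}
}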
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go
new file mode 100644
index 000000000..53f6ce256
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go
@@ -0,0 +1,70 @@
+package credentials
+
+import (
+ "github.com/stretchr/testify/assert"
+ "os"
+ "testing"
+)
+
+func TestEnvProviderRetrieve(t *testing.T) {
+ os.Clearenv()
+ os.Setenv("AWS_ACCESS_KEY_ID", "access")
+ os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
+ os.Setenv("AWS_SESSION_TOKEN", "token")
+
+ e := EnvProvider{}
+ creds, err := e.Retrieve()
+ assert.Nil(t, err, "Expect no error")
+
+ assert.Equal(t, "access", creds.AccessKeyID, "Expect access key ID to match")
+ assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
+ assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
+}
+
+func TestEnvProviderIsExpired(t *testing.T) {
+ os.Clearenv()
+ os.Setenv("AWS_ACCESS_KEY_ID", "access")
+ os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
+ os.Setenv("AWS_SESSION_TOKEN", "token")
+
+ e := EnvProvider{}
+
+ assert.True(t, e.IsExpired(), "Expect creds to be expired before retrieve.")
+
+ _, err := e.Retrieve()
+ assert.Nil(t, err, "Expect no error")
+
+ assert.False(t, e.IsExpired(), "Expect creds to not be expired after retrieve.")
+}
+
+func TestEnvProviderNoAccessKeyID(t *testing.T) {
+ os.Clearenv()
+ os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
+
+ e := EnvProvider{}
+ creds, err := e.Retrieve()
+ assert.Equal(t, ErrAccessKeyIDNotFound, err, "ErrAccessKeyIDNotFound expected, but was %#v error: %#v", creds, err)
+}
+
+func TestEnvProviderNoSecretAccessKey(t *testing.T) {
+ os.Clearenv()
+ os.Setenv("AWS_ACCESS_KEY_ID", "access")
+
+ e := EnvProvider{}
+ creds, err := e.Retrieve()
+ assert.Equal(t, ErrSecretAccessKeyNotFound, err, "ErrSecretAccessKeyNotFound expected, but was %#v error: %#v", creds, err)
+}
+
+func TestEnvProviderAlternateNames(t *testing.T) {
+ os.Clearenv()
+ os.Setenv("AWS_ACCESS_KEY", "access")
+ os.Setenv("AWS_SECRET_KEY", "secret")
+
+ e := EnvProvider{}
+ creds, err := e.Retrieve()
+ assert.Nil(t, err, "Expect no error")
+
+ assert.Equal(t, "access", creds.AccessKeyID, "Expected access key ID")
+ assert.Equal(t, "secret", creds.SecretAccessKey, "Expected secret access key")
+ assert.Empty(t, creds.SessionToken, "Expected no token")
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go
new file mode 100644
index 000000000..1d9983f2c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go
@@ -0,0 +1,88 @@
+package credentials
+
+import (
+ "github.com/stretchr/testify/assert"
+ "os"
+ "testing"
+)
+
+func TestSharedCredentialsProvider(t *testing.T) {
+ os.Clearenv()
+
+ p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
+ creds, err := p.Retrieve()
+ assert.Nil(t, err, "Expect no error")
+
+ assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
+ assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
+ assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
+}
+
+func TestSharedCredentialsProviderIsExpired(t *testing.T) {
+ os.Clearenv()
+
+ p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
+
+ assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve")
+
+ _, err := p.Retrieve()
+ assert.Nil(t, err, "Expect no error")
+
+ assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve")
+}
+
+func TestSharedCredentialsProviderWithAWS_SHARED_CREDENTIALS_FILE(t *testing.T) {
+ os.Clearenv()
+ os.Setenv("AWS_SHARED_CREDENTIALS_FILE", "example.ini")
+ p := SharedCredentialsProvider{}
+ creds, err := p.Retrieve()
+
+ assert.Nil(t, err, "Expect no error")
+
+ assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
+ assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
+ assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
+}
+
+func TestSharedCredentialsProviderWithAWS_PROFILE(t *testing.T) {
+ os.Clearenv()
+ os.Setenv("AWS_PROFILE", "no_token")
+
+ p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
+ creds, err := p.Retrieve()
+ assert.Nil(t, err, "Expect no error")
+
+ assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
+ assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
+ assert.Empty(t, creds.SessionToken, "Expect no token")
+}
+
+func TestSharedCredentialsProviderWithoutTokenFromProfile(t *testing.T) {
+ os.Clearenv()
+
+ p := SharedCredentialsProvider{Filename: "example.ini", Profile: "no_token"}
+ creds, err := p.Retrieve()
+ assert.Nil(t, err, "Expect no error")
+
+ assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
+ assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
+ assert.Empty(t, creds.SessionToken, "Expect no token")
+}
+
+func BenchmarkSharedCredentialsProvider(b *testing.B) {
+ os.Clearenv()
+
+ p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
+ _, err := p.Retrieve()
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := p.Retrieve()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go
new file mode 100644
index 000000000..ea0123696
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go
@@ -0,0 +1,34 @@
+package credentials
+
+import (
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestStaticProviderGet(t *testing.T) {
+ s := StaticProvider{
+ Value: Value{
+ AccessKeyID: "AKID",
+ SecretAccessKey: "SECRET",
+ SessionToken: "",
+ },
+ }
+
+ creds, err := s.Retrieve()
+ assert.Nil(t, err, "Expect no error")
+ assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match")
+ assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match")
+ assert.Empty(t, creds.SessionToken, "Expect no session token")
+}
+
+func TestStaticProviderIsExpired(t *testing.T) {
+ s := StaticProvider{
+ Value: Value{
+ AccessKeyID: "AKID",
+ SecretAccessKey: "SECRET",
+ SessionToken: "",
+ },
+ }
+
+ assert.False(t, s.IsExpired(), "Expect static credentials to never expire")
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
index 7a4459f80..0214860d4 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -9,6 +9,7 @@ import (
"time"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/service/sts"
)
@@ -18,31 +19,17 @@ type AssumeRoler interface {
AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
}
+// DefaultDuration is the default amount of time in minutes that the credentials
+// will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
// AssumeRoleProvider retrieves temporary credentials from the STS service, and
// keeps track of their expiration time. This provider must be used explicitly,
// as it is not included in the credentials chain.
-//
-// Example how to configure a service to use this provider:
-//
-// config := &aws.Config{
-// Credentials: stscreds.NewCredentials(nil, "arn-of-the-role-to-assume", 10*time.Second),
-// })
-// // Use config for creating your AWS service.
-//
-// Example how to obtain customised credentials:
-//
-// provider := &stscreds.Provider{
-// // Extend the duration to 1 hour.
-// Duration: time.Hour,
-// // Custom role name.
-// RoleSessionName: "custom-session-name",
-// }
-// creds := credentials.NewCredentials(provider)
-//
type AssumeRoleProvider struct {
credentials.Expiry
- // Custom STS client. If not set the default STS client will be used.
+ // STS client to make assume role request with.
Client AssumeRoler
// Role to be assumed.
@@ -70,37 +57,55 @@ type AssumeRoleProvider struct {
}
// NewCredentials returns a pointer to a new Credentials object wrapping the
-// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
//
-// The sts and roleARN parameters are used for building the "AssumeRole" call.
-// Pass nil as sts to use the default client.
+// Takes a ConfigProvider to create the STS client. The ConfigProvider is
+// satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+ p := &AssumeRoleProvider{
+ Client: sts.New(c),
+ RoleARN: roleARN,
+ Duration: DefaultDuration,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
//
-// Window is the expiry window that will be subtracted from the expiry returned
-// by the role credential request. This is done so that the credentials will
-// expire sooner than their actual lifespan.
-func NewCredentials(client AssumeRoler, roleARN string, window time.Duration) *credentials.Credentials {
- return credentials.NewCredentials(&AssumeRoleProvider{
- Client: client,
- RoleARN: roleARN,
- ExpiryWindow: window,
- })
+// Takes an AssumeRoler which can be satisfied by the STS client.
+func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+ p := &AssumeRoleProvider{
+ Client: svc,
+ RoleARN: roleARN,
+ Duration: DefaultDuration,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
}
// Retrieve generates a new set of temporary credentials using STS.
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
// Apply defaults where parameters are not set.
- if p.Client == nil {
- p.Client = sts.New(nil)
- }
if p.RoleSessionName == "" {
// Try to work out a role name that will hopefully end up unique.
p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
}
if p.Duration == 0 {
// Expire as often as AWS permits.
- p.Duration = 15 * time.Minute
+ p.Duration = DefaultDuration
}
roleOutput, err := p.Client.AssumeRole(&sts.AssumeRoleInput{
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go
new file mode 100644
index 000000000..6bd6e9197
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go
@@ -0,0 +1,56 @@
+package stscreds
+
+import (
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/sts"
+ "github.com/stretchr/testify/assert"
+)
+
+type stubSTS struct {
+}
+
+func (s *stubSTS) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
+ expiry := time.Now().Add(60 * time.Minute)
+ return &sts.AssumeRoleOutput{
+ Credentials: &sts.Credentials{
+ // Just reflect the role arn to the provider.
+ AccessKeyId: input.RoleArn,
+ SecretAccessKey: aws.String("assumedSecretAccessKey"),
+ SessionToken: aws.String("assumedSessionToken"),
+ Expiration: &expiry,
+ },
+ }, nil
+}
+
+func TestAssumeRoleProvider(t *testing.T) {
+ stub := &stubSTS{}
+ p := &AssumeRoleProvider{
+ Client: stub,
+ RoleARN: "roleARN",
+ }
+
+ creds, err := p.Retrieve()
+ assert.Nil(t, err, "Expect no error")
+
+ assert.Equal(t, "roleARN", creds.AccessKeyID, "Expect access key ID to be reflected role ARN")
+ assert.Equal(t, "assumedSecretAccessKey", creds.SecretAccessKey, "Expect secret access key to match")
+ assert.Equal(t, "assumedSessionToken", creds.SessionToken, "Expect session token to match")
+}
+
+func BenchmarkAssumeRoleProvider(b *testing.B) {
+ stub := &stubSTS{}
+ p := &AssumeRoleProvider{
+ Client: stub,
+ RoleARN: "roleARN",
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if _, err := p.Retrieve(); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
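
And the matching hedged sketch for the assume-role provider above; the role ARN is a placeholder and the options mirror fields the provider exposes.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Placeholder ARN; substitute the role to assume.
	roleARN := "arn:aws:iam::123456789012:role/example"

	creds := stscreds.NewCredentials(session.New(), roleARN, func(p *stscreds.AssumeRoleProvider) {
		p.Duration = time.Hour        // extend past DefaultDuration
		p.RoleSessionName = "example" // otherwise a timestamp is generated
	})

	// Hand the credentials to any service client through its config.
	cfg := aws.NewConfig().WithCredentials(creds)
	fmt.Println("credentials wired:", cfg.Credentials != nil)
}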
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
index 2f161b57f..74cdf9299 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -1,3 +1,5 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
package defaults
import (
@@ -6,34 +8,69 @@ import (
"time"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/endpoints"
)
-// DefaultChainCredentials is a Credentials which will find the first available
-// credentials Value from the list of Providers.
-//
-// This should be used in the default case. Once the type of credentials are
-// known switching to the specific Credentials will be more efficient.
-var DefaultChainCredentials = credentials.NewChainCredentials(
- []credentials.Provider{
- &credentials.EnvProvider{},
- &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
- &ec2rolecreds.EC2RoleProvider{ExpiryWindow: 5 * time.Minute},
- })
+// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct {
+ Config *aws.Config
+ Handlers request.Handlers
+}
-// DefaultConfig is the default all service configuration will be based off of.
-// By default, all clients use this structure for initialization options unless
-// a custom configuration object is passed in.
-//
-// You may modify this global structure to change all default configuration
-// in the SDK. Note that configuration options are copied by value, so any
-// modifications must happen before constructing a client.
-var DefaultConfig = aws.NewConfig().
- WithCredentials(DefaultChainCredentials).
- WithRegion(os.Getenv("AWS_REGION")).
- WithHTTPClient(http.DefaultClient).
- WithMaxRetries(aws.DefaultRetries).
- WithLogger(aws.NewDefaultLogger()).
- WithLogLevel(aws.LogOff).
- WithSleepDelay(time.Sleep)
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+ cfg := Config()
+ handlers := Handlers()
+ cfg.Credentials = CredChain(cfg, handlers)
+
+ return Defaults{
+ Config: cfg,
+ Handlers: handlers,
+ }
+}
+
+// Config returns the default configuration.
+func Config() *aws.Config {
+ return aws.NewConfig().
+ WithCredentials(credentials.AnonymousCredentials).
+ WithRegion(os.Getenv("AWS_REGION")).
+ WithHTTPClient(http.DefaultClient).
+ WithMaxRetries(aws.UseServiceDefaultRetries).
+ WithLogger(aws.NewDefaultLogger()).
+ WithLogLevel(aws.LogOff).
+ WithSleepDelay(time.Sleep)
+}
+
+// Handlers returns the default request handlers.
+func Handlers() request.Handlers {
+ var handlers request.Handlers
+
+ handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+ handlers.Build.PushBackNamed(corehandlers.UserAgentHandler)
+ handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+ handlers.Send.PushBackNamed(corehandlers.SendHandler)
+ handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+ handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+ return handlers
+}
+
+// CredChain returns the default credential chain.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+ endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true)
+
+ return credentials.NewChainCredentials(
+ []credentials.Provider{
+ &credentials.EnvProvider{},
+ &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+ &ec2rolecreds.EC2RoleProvider{
+ Client: ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion),
+ ExpiryWindow: 5 * time.Minute,
+ },
+ })
+}
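
A hedged sketch of consuming the reworked defaults package: Get returns a Config with the credential chain attached plus the default handler set, which callers can adjust before building clients.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	d := defaults.Get()

	// The returned *aws.Config already carries the default credential
	// chain; adjust it like any other config.
	d.Config.MergeIn(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(5))

	fmt.Println("region:", aws.StringValue(d.Config.Region))
	fmt.Println("credential chain attached:", d.Config.Credentials != nil)
}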
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
index 9d784b6e6..e5137ca17 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -7,7 +7,7 @@ import (
)
// GetMetadata uses the path provided to request
-func (c *Client) GetMetadata(p string) (string, error) {
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
op := &request.Operation{
Name: "GetMetadata",
HTTPMethod: "GET",
@@ -15,13 +15,13 @@ func (c *Client) GetMetadata(p string) (string, error) {
}
output := &metadataOutput{}
- req := request.New(c.Service.ServiceInfo, c.Service.Handlers, c.Service.Retryer, op, nil, output)
+ req := c.NewRequest(op, nil, output)
return output.Content, req.Send()
}
// Region returns the region the instance is running in.
-func (c *Client) Region() (string, error) {
+func (c *EC2Metadata) Region() (string, error) {
resp, err := c.GetMetadata("placement/availability-zone")
if err != nil {
return "", err
@@ -34,7 +34,7 @@ func (c *Client) Region() (string, error) {
// Available returns if the application has access to the EC2 Metadata service.
// Can be used to determine if application is running within an EC2 Instance and
// the metadata service is available.
-func (c *Client) Available() bool {
+func (c *EC2Metadata) Available() bool {
if _, err := c.GetMetadata("instance-id"); err != nil {
return false
}
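
A hedged sketch of the renamed EC2Metadata client in use; these calls only return real data on an EC2 instance or against a stub endpoint like the test server below.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	svc := ec2metadata.New(session.New())

	if !svc.Available() {
		fmt.Println("metadata service unreachable (not running on EC2?)")
		return
	}

	region, err := svc.Region()
	if err != nil {
		fmt.Println("reading region failed:", err)
		return
	}
	fmt.Println("region:", region)

	id, _ := svc.GetMetadata("instance-id")
	fmt.Println("instance-id:", id)
}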
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go
new file mode 100644
index 000000000..c3c92972b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go
@@ -0,0 +1,101 @@
+package ec2metadata_test
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "path"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/session"
+)
+
+func initTestServer(path string, resp string) *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.RequestURI != path {
+ http.Error(w, "not found", http.StatusNotFound)
+ return
+ }
+
+ w.Write([]byte(resp))
+ }))
+}
+
+func TestEndpoint(t *testing.T) {
+ c := ec2metadata.New(session.New())
+ op := &request.Operation{
+ Name: "GetMetadata",
+ HTTPMethod: "GET",
+ HTTPPath: path.Join("/", "meta-data", "testpath"),
+ }
+
+ req := c.NewRequest(op, nil, nil)
+ assert.Equal(t, "http://169.254.169.254/latest", req.ClientInfo.Endpoint)
+ assert.Equal(t, "http://169.254.169.254/latest/meta-data/testpath", req.HTTPRequest.URL.String())
+}
+
+func TestGetMetadata(t *testing.T) {
+ server := initTestServer(
+ "/latest/meta-data/some/path",
+ "success", // real response includes suffix
+ )
+ defer server.Close()
+ c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
+
+ resp, err := c.GetMetadata("some/path")
+
+ assert.NoError(t, err)
+ assert.Equal(t, "success", resp)
+}
+
+func TestGetRegion(t *testing.T) {
+ server := initTestServer(
+ "/latest/meta-data/placement/availability-zone",
+ "us-west-2a", // real response includes suffix
+ )
+ defer server.Close()
+ c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
+
+ region, err := c.Region()
+
+ assert.NoError(t, err)
+ assert.Equal(t, "us-west-2", region)
+}
+
+func TestMetadataAvailable(t *testing.T) {
+ server := initTestServer(
+ "/latest/meta-data/instance-id",
+ "instance-id",
+ )
+ defer server.Close()
+ c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
+
+ available := c.Available()
+
+ assert.True(t, available)
+}
+
+func TestMetadataNotAvailable(t *testing.T) {
+ c := ec2metadata.New(session.New())
+ c.Handlers.Send.Clear()
+ c.Handlers.Send.PushBack(func(r *request.Request) {
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(0),
+ Status: http.StatusText(int(0)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ r.Error = awserr.New("RequestError", "send request failed", nil)
+ r.Retryable = aws.Bool(true) // network errors are retryable
+ })
+
+ available := c.Available()
+
+ assert.False(t, available)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
index 73a12c912..f0dc331e0 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -1,3 +1,5 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
package ec2metadata
import (
@@ -8,89 +10,41 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/aws/service"
- "github.com/aws/aws-sdk-go/aws/service/serviceinfo"
)
-// DefaultRetries states the default number of times the service client will
-// attempt to retry a failed request before failing.
-const DefaultRetries = 3
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
-// A Config provides the configuration for the EC2 Metadata service.
-type Config struct {
- // An optional endpoint URL (hostname only or fully qualified URI)
- // that overrides the default service endpoint for a client. Set this
- // to nil, or `""` to use the default service endpoint.
- Endpoint *string
-
- // The HTTP client to use when sending requests. Defaults to
- // `http.DefaultClient`.
- HTTPClient *http.Client
-
- // An integer value representing the logging level. The default log level
- // is zero (LogOff), which represents no logging. To enable logging set
- // to a LogLevel Value.
- Logger aws.Logger
-
- // The logger writer interface to write logging messages to. Defaults to
- // standard out.
- LogLevel *aws.LogLevelType
-
- // The maximum number of times that a request will be retried for failures.
- // Defaults to DefaultRetries for the number of retries to be performed
- // per request.
- MaxRetries *int
+// An EC2Metadata is an EC2 Metadata service client.
+type EC2Metadata struct {
+ *client.Client
}
-// A Client is an EC2 Metadata service Client.
-type Client struct {
- *service.Service
-}
-
-// New creates a new instance of the EC2 Metadata service client.
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
//
-// In the general use case the configuration for this service client should not
-// be needed and `nil` can be provided. Configuration is only needed if the
-// `ec2metadata.Config` defaults need to be overridden. Eg. Setting LogLevel.
+// Example:
+// // Create an EC2Metadata client from just a session.
+// svc := ec2metadata.New(mySession)
//
-// @note This configuration will NOT be merged with the default AWS service
-// client configuration `defaults.DefaultConfig`. Due to circular dependencies
-// with the defaults package and credentials EC2 Role Provider.
-func New(config *Config) *Client {
- service := &service.Service{
- ServiceInfo: serviceinfo.ServiceInfo{
- Config: copyConfig(config),
- ServiceName: "Client",
- Endpoint: "http://169.254.169.254/latest",
- APIVersion: "latest",
- },
- }
- service.Initialize()
- service.Handlers.Unmarshal.PushBack(unmarshalHandler)
- service.Handlers.UnmarshalError.PushBack(unmarshalError)
- service.Handlers.Validate.Clear()
- service.Handlers.Validate.PushBack(validateEndpointHandler)
-
- return &Client{service}
+// // Create an EC2Metadata client with additional configuration
+// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}
-func copyConfig(config *Config) *aws.Config {
- if config == nil {
- config = &Config{}
- }
- c := &aws.Config{
- Credentials: credentials.AnonymousCredentials,
- Endpoint: config.Endpoint,
- HTTPClient: config.HTTPClient,
- Logger: config.Logger,
- LogLevel: config.LogLevel,
- MaxRetries: config.MaxRetries,
- }
-
- if c.HTTPClient == nil {
- c.HTTPClient = &http.Client{
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+ // If the default http client is provided, replace it with a custom
+ // client using default timeouts.
+ if cfg.HTTPClient == http.DefaultClient {
+ cfg.HTTPClient = &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
@@ -104,17 +58,30 @@ func copyConfig(config *Config) *aws.Config {
},
}
}
- if c.Logger == nil {
- c.Logger = aws.NewDefaultLogger()
- }
- if c.LogLevel == nil {
- c.LogLevel = aws.LogLevel(aws.LogOff)
- }
- if c.MaxRetries == nil {
- c.MaxRetries = aws.Int(DefaultRetries)
+
+ svc := &EC2Metadata{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ Endpoint: endpoint,
+ APIVersion: "latest",
+ },
+ handlers,
+ ),
}
- return c
+ svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+ svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+ svc.Handlers.Validate.Clear()
+ svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+ // Add additional options to the service config
+ for _, option := range opts {
+ option(svc.Client)
+ }
+
+ return svc
}
type metadataOutput struct {
@@ -143,7 +110,7 @@ func unmarshalError(r *request.Request) {
}
func validateEndpointHandler(r *request.Request) {
- if r.Service.Endpoint == "" {
+ if r.ClientInfo.Endpoint == "" {
r.Error = aws.ErrMissingEndpoint
}
}
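Taken together, the rework above replaces ec2metadata.Client and its bespoke Config with an EC2Metadata type built from a session. Below is a minimal sketch, against this vendored SDK revision, of how a caller would use the new constructor together with the GetMetadata, Region, and Available methods exercised in api_test.go; it assumes the code runs where the metadata endpoint is reachable, and the printed strings are illustrative only.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/ec2metadata"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    // New now takes a client.ConfigProvider (for example *session.Session)
    // instead of the removed ec2metadata.Config struct.
    svc := ec2metadata.New(session.New())

    // Available probes the instance-id path, so it doubles as an
    // "am I running on EC2?" check.
    if !svc.Available() {
        fmt.Println("EC2 metadata service not reachable")
        return
    }

    id, err := svc.GetMetadata("instance-id")
    if err != nil {
        fmt.Println("reading instance-id failed:", err)
        return
    }
    region, _ := svc.Region()
    fmt.Println("instance:", id, "region:", region)
}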
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers_test.go
new file mode 100644
index 000000000..2ff05a815
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers_test.go
@@ -0,0 +1,47 @@
+package request_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+func TestHandlerList(t *testing.T) {
+ s := ""
+ r := &request.Request{}
+ l := request.HandlerList{}
+ l.PushBack(func(r *request.Request) {
+ s += "a"
+ r.Data = s
+ })
+ l.Run(r)
+ assert.Equal(t, "a", s)
+ assert.Equal(t, "a", r.Data)
+}
+
+func TestMultipleHandlers(t *testing.T) {
+ r := &request.Request{}
+ l := request.HandlerList{}
+ l.PushBack(func(r *request.Request) { r.Data = nil })
+ l.PushFront(func(r *request.Request) { r.Data = aws.Bool(true) })
+ l.Run(r)
+ if r.Data != nil {
+ t.Error("Expected handler to execute")
+ }
+}
+
+func TestNamedHandlers(t *testing.T) {
+ l := request.HandlerList{}
+ named := request.NamedHandler{"Name", func(r *request.Request) {}}
+ named2 := request.NamedHandler{"NotName", func(r *request.Request) {}}
+ l.PushBackNamed(named)
+ l.PushBackNamed(named)
+ l.PushBackNamed(named2)
+ l.PushBack(func(r *request.Request) {})
+ assert.Equal(t, 4, l.Len())
+ l.Remove(named)
+ assert.Equal(t, 2, l.Len())
+}
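The handler list test above relies on named handlers being removable by value. A small sketch of that property outside the test harness; the handler name "example.Logger" and the "MockOp" operation are placeholders rather than names used by the SDK.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/request"
)

func main() {
    l := request.HandlerList{}

    // Named handlers can be located and removed later; anonymous handlers
    // added with PushBack cannot.
    logger := request.NamedHandler{"example.Logger", func(r *request.Request) {
        fmt.Println("running operation:", r.Operation.Name)
    }}
    l.PushBackNamed(logger)

    r := &request.Request{Operation: &request.Operation{Name: "MockOp"}}
    l.Run(r) // prints: running operation: MockOp

    l.Remove(logger)
    fmt.Println("handlers left:", l.Len()) // 0
}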
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go
index 70c28b883..3595acf97 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -12,15 +12,16 @@ import (
"time"
"github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awsutil"
- "github.com/aws/aws-sdk-go/aws/service/serviceinfo"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
)
// A Request is the service request to be made.
type Request struct {
+ Config aws.Config
+ ClientInfo metadata.ClientInfo
+ Handlers Handlers
+
Retryer
- Service serviceinfo.ServiceInfo
- Handlers Handlers
Time time.Time
ExpireTime time.Duration
Operation *Operation
@@ -32,7 +33,7 @@ type Request struct {
Error error
Data interface{}
RequestID string
- RetryCount uint
+ RetryCount int
Retryable *bool
RetryDelay time.Duration
@@ -61,7 +62,9 @@ type Paginator struct {
// Params is any value of input parameters to be the request payload.
// Data is pointer value to an object which the request's response
// payload will be deserialized to.
-func New(service serviceinfo.ServiceInfo, handlers Handlers, retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+ retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
method := operation.HTTPMethod
if method == "" {
method = "POST"
@@ -72,12 +75,14 @@ func New(service serviceinfo.ServiceInfo, handlers Handlers, retryer Retryer, op
}
httpReq, _ := http.NewRequest(method, "", nil)
- httpReq.URL, _ = url.Parse(service.Endpoint + p)
+ httpReq.URL, _ = url.Parse(clientInfo.Endpoint + p)
r := &Request{
+ Config: cfg,
+ ClientInfo: clientInfo,
+ Handlers: handlers.Copy(),
+
Retryer: retryer,
- Service: service,
- Handlers: handlers.Copy(),
Time: time.Now(),
ExpireTime: 0,
Operation: operation,
@@ -140,7 +145,7 @@ func (r *Request) Presign(expireTime time.Duration) (string, error) {
}
func debugLogReqError(r *Request, stage string, retrying bool, err error) {
- if !r.Service.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+ if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
return
}
@@ -149,8 +154,8 @@ func debugLogReqError(r *Request, stage string, retrying bool, err error) {
retryStr = "will retry"
}
- r.Service.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
- stage, r.Service.ServiceName, r.Operation.Name, retryStr, err))
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+ stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
}
// Build will build the request's object so it can be signed and sent
@@ -205,9 +210,9 @@ func (r *Request) Send() error {
}
if aws.BoolValue(r.Retryable) {
- if r.Service.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
- r.Service.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
- r.Service.ServiceName, r.Operation.Name, r.RetryCount))
+ if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
+ r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
}
// Re-seek the body back to the original point in for a retry so that
@@ -263,86 +268,3 @@ func (r *Request) Send() error {
return nil
}
-
-// HasNextPage returns true if this request has more pages of data available.
-func (r *Request) HasNextPage() bool {
- return r.nextPageTokens() != nil
-}
-
-// nextPageTokens returns the tokens to use when asking for the next page of
-// data.
-func (r *Request) nextPageTokens() []interface{} {
- if r.Operation.Paginator == nil {
- return nil
- }
-
- if r.Operation.TruncationToken != "" {
- tr := awsutil.ValuesAtAnyPath(r.Data, r.Operation.TruncationToken)
- if tr == nil || len(tr) == 0 {
- return nil
- }
- switch v := tr[0].(type) {
- case bool:
- if v == false {
- return nil
- }
- }
- }
-
- found := false
- tokens := make([]interface{}, len(r.Operation.OutputTokens))
-
- for i, outtok := range r.Operation.OutputTokens {
- v := awsutil.ValuesAtAnyPath(r.Data, outtok)
- if v != nil && len(v) > 0 {
- found = true
- tokens[i] = v[0]
- }
- }
-
- if found {
- return tokens
- }
- return nil
-}
-
-// NextPage returns a new Request that can be executed to return the next
-// page of result data. Call .Send() on this request to execute it.
-func (r *Request) NextPage() *Request {
- tokens := r.nextPageTokens()
- if tokens == nil {
- return nil
- }
-
- data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
- nr := New(r.Service, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
- for i, intok := range nr.Operation.InputTokens {
- awsutil.SetValueAtAnyPath(nr.Params, intok, tokens[i])
- }
- return nr
-}
-
-// EachPage iterates over each page of a paginated request object. The fn
-// parameter should be a function with the following sample signature:
-//
-// func(page *T, lastPage bool) bool {
-// return true // return false to stop iterating
-// }
-//
-// Where "T" is the structure type matching the output structure of the given
-// operation. For example, a request object generated by
-// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
-// as the structure "T". The lastPage value represents whether the page is
-// the last page of data or not. The return value of this function should
-// return true to keep iterating or false to stop.
-func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
- for page := r; page != nil; page = page.NextPage() {
- page.Send()
- shouldContinue := fn(page.Data, !page.HasNextPage())
- if page.Error != nil || !shouldContinue {
- return page.Error
- }
- }
-
- return nil
-}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
new file mode 100644
index 000000000..69c6a41ff
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
@@ -0,0 +1,96 @@
+package request
+
+import (
+ "reflect"
+
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+//type Paginater interface {
+// HasNextPage() bool
+// NextPage() *Request
+// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error
+//}
+
+// HasNextPage returns true if this request has more pages of data available.
+func (r *Request) HasNextPage() bool {
+ return r.nextPageTokens() != nil
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of
+// data.
+func (r *Request) nextPageTokens() []interface{} {
+ if r.Operation.Paginator == nil {
+ return nil
+ }
+
+ if r.Operation.TruncationToken != "" {
+ tr := awsutil.ValuesAtAnyPath(r.Data, r.Operation.TruncationToken)
+ if tr == nil || len(tr) == 0 {
+ return nil
+ }
+ switch v := tr[0].(type) {
+ case bool:
+ if v == false {
+ return nil
+ }
+ }
+ }
+
+ found := false
+ tokens := make([]interface{}, len(r.Operation.OutputTokens))
+
+ for i, outToken := range r.Operation.OutputTokens {
+ v := awsutil.ValuesAtAnyPath(r.Data, outToken)
+ if v != nil && len(v) > 0 {
+ found = true
+ tokens[i] = v[0]
+ }
+ }
+
+ if found {
+ return tokens
+ }
+ return nil
+}
+
+// NextPage returns a new Request that can be executed to return the next
+// page of result data. Call .Send() on this request to execute it.
+func (r *Request) NextPage() *Request {
+ tokens := r.nextPageTokens()
+ if tokens == nil {
+ return nil
+ }
+
+ data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+ nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
+ for i, intok := range nr.Operation.InputTokens {
+ awsutil.SetValueAtAnyPath(nr.Params, intok, tokens[i])
+ }
+ return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+// func(page *T, lastPage bool) bool {
+// return true // return false to stop iterating
+// }
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. Return true from the function to keep
+// iterating, or false to stop.
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+ for page := r; page != nil; page = page.NextPage() {
+ page.Send()
+ shouldContinue := fn(page.Data, !page.HasNextPage())
+ if page.Error != nil || !shouldContinue {
+ return page.Error
+ }
+ }
+
+ return nil
+}
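For callers, moving pagination into request_pagination.go does not change how it is driven. Below is a sketch of EachPage against DynamoDB, mirroring the test file that follows; the region value and the assumption that credentials are configured are illustrative.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
    db := dynamodb.New(session.New(&aws.Config{Region: aws.String("us-west-2")}))

    req, _ := db.ListTablesRequest(&dynamodb.ListTablesInput{Limit: aws.Int64(2)})
    err := req.EachPage(func(page interface{}, lastPage bool) bool {
        // page is the output type of the operation that created the request.
        for _, name := range page.(*dynamodb.ListTablesOutput).TableNames {
            fmt.Println(*name)
        }
        return true // returning false stops the iteration early
    })
    if err != nil {
        fmt.Println("pagination failed:", err)
    }
}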
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go
new file mode 100644
index 000000000..bb297899c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go
@@ -0,0 +1,305 @@
+package request_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/awstesting/unit"
+ "github.com/aws/aws-sdk-go/service/dynamodb"
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
+// Use DynamoDB methods for simplicity
+func TestPagination(t *testing.T) {
+ db := dynamodb.New(unit.Session)
+ tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false
+
+ reqNum := 0
+ resps := []*dynamodb.ListTablesOutput{
+ {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")},
+ {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")},
+ {TableNames: []*string{aws.String("Table5")}},
+ }
+
+ db.Handlers.Send.Clear() // mock sending
+ db.Handlers.Unmarshal.Clear()
+ db.Handlers.UnmarshalMeta.Clear()
+ db.Handlers.ValidateResponse.Clear()
+ db.Handlers.Build.PushBack(func(r *request.Request) {
+ in := r.Params.(*dynamodb.ListTablesInput)
+ if in == nil {
+ tokens = append(tokens, "")
+ } else if in.ExclusiveStartTableName != nil {
+ tokens = append(tokens, *in.ExclusiveStartTableName)
+ }
+ })
+ db.Handlers.Unmarshal.PushBack(func(r *request.Request) {
+ r.Data = resps[reqNum]
+ reqNum++
+ })
+
+ params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
+ err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool {
+ numPages++
+ for _, t := range p.TableNames {
+ pages = append(pages, *t)
+ }
+ if last {
+ if gotToEnd {
+ assert.Fail(t, "last=true happened twice")
+ }
+ gotToEnd = true
+ }
+ return true
+ })
+
+ assert.Equal(t, []string{"Table2", "Table4"}, tokens)
+ assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages)
+ assert.Equal(t, 3, numPages)
+ assert.True(t, gotToEnd)
+ assert.Nil(t, err)
+ assert.Nil(t, params.ExclusiveStartTableName)
+}
+
+// Use DynamoDB methods for simplicity
+func TestPaginationEachPage(t *testing.T) {
+ db := dynamodb.New(unit.Session)
+ tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false
+
+ reqNum := 0
+ resps := []*dynamodb.ListTablesOutput{
+ {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")},
+ {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")},
+ {TableNames: []*string{aws.String("Table5")}},
+ }
+
+ db.Handlers.Send.Clear() // mock sending
+ db.Handlers.Unmarshal.Clear()
+ db.Handlers.UnmarshalMeta.Clear()
+ db.Handlers.ValidateResponse.Clear()
+ db.Handlers.Build.PushBack(func(r *request.Request) {
+ in := r.Params.(*dynamodb.ListTablesInput)
+ if in == nil {
+ tokens = append(tokens, "")
+ } else if in.ExclusiveStartTableName != nil {
+ tokens = append(tokens, *in.ExclusiveStartTableName)
+ }
+ })
+ db.Handlers.Unmarshal.PushBack(func(r *request.Request) {
+ r.Data = resps[reqNum]
+ reqNum++
+ })
+
+ params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
+ req, _ := db.ListTablesRequest(params)
+ err := req.EachPage(func(p interface{}, last bool) bool {
+ numPages++
+ for _, t := range p.(*dynamodb.ListTablesOutput).TableNames {
+ pages = append(pages, *t)
+ }
+ if last {
+ if gotToEnd {
+ assert.Fail(t, "last=true happened twice")
+ }
+ gotToEnd = true
+ }
+
+ return true
+ })
+
+ assert.Equal(t, []string{"Table2", "Table4"}, tokens)
+ assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages)
+ assert.Equal(t, 3, numPages)
+ assert.True(t, gotToEnd)
+ assert.Nil(t, err)
+}
+
+// Use DynamoDB methods for simplicity
+func TestPaginationEarlyExit(t *testing.T) {
+ db := dynamodb.New(unit.Session)
+ numPages, gotToEnd := 0, false
+
+ reqNum := 0
+ resps := []*dynamodb.ListTablesOutput{
+ {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")},
+ {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")},
+ {TableNames: []*string{aws.String("Table5")}},
+ }
+
+ db.Handlers.Send.Clear() // mock sending
+ db.Handlers.Unmarshal.Clear()
+ db.Handlers.UnmarshalMeta.Clear()
+ db.Handlers.ValidateResponse.Clear()
+ db.Handlers.Unmarshal.PushBack(func(r *request.Request) {
+ r.Data = resps[reqNum]
+ reqNum++
+ })
+
+ params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
+ err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool {
+ numPages++
+ if numPages == 2 {
+ return false
+ }
+ if last {
+ if gotToEnd {
+ assert.Fail(t, "last=true happened twice")
+ }
+ gotToEnd = true
+ }
+ return true
+ })
+
+ assert.Equal(t, 2, numPages)
+ assert.False(t, gotToEnd)
+ assert.Nil(t, err)
+}
+
+func TestSkipPagination(t *testing.T) {
+ client := s3.New(unit.Session)
+ client.Handlers.Send.Clear() // mock sending
+ client.Handlers.Unmarshal.Clear()
+ client.Handlers.UnmarshalMeta.Clear()
+ client.Handlers.ValidateResponse.Clear()
+ client.Handlers.Unmarshal.PushBack(func(r *request.Request) {
+ r.Data = &s3.HeadBucketOutput{}
+ })
+
+ req, _ := client.HeadBucketRequest(&s3.HeadBucketInput{Bucket: aws.String("bucket")})
+
+ numPages, gotToEnd := 0, false
+ req.EachPage(func(p interface{}, last bool) bool {
+ numPages++
+ if last {
+ gotToEnd = true
+ }
+ return true
+ })
+ assert.Equal(t, 1, numPages)
+ assert.True(t, gotToEnd)
+}
+
+// Use S3 for simplicity
+func TestPaginationTruncation(t *testing.T) {
+ count := 0
+ client := s3.New(unit.Session)
+
+ reqNum := &count
+ resps := []*s3.ListObjectsOutput{
+ {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key1")}}},
+ {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key2")}}},
+ {IsTruncated: aws.Bool(false), Contents: []*s3.Object{{Key: aws.String("Key3")}}},
+ {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key4")}}},
+ }
+
+ client.Handlers.Send.Clear() // mock sending
+ client.Handlers.Unmarshal.Clear()
+ client.Handlers.UnmarshalMeta.Clear()
+ client.Handlers.ValidateResponse.Clear()
+ client.Handlers.Unmarshal.PushBack(func(r *request.Request) {
+ r.Data = resps[*reqNum]
+ *reqNum++
+ })
+
+ params := &s3.ListObjectsInput{Bucket: aws.String("bucket")}
+
+ results := []string{}
+ err := client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool {
+ results = append(results, *p.Contents[0].Key)
+ return true
+ })
+
+ assert.Equal(t, []string{"Key1", "Key2", "Key3"}, results)
+ assert.Nil(t, err)
+
+ // Try again without truncation token at all
+ count = 0
+ resps[1].IsTruncated = nil
+ resps[2].IsTruncated = aws.Bool(true)
+ results = []string{}
+ err = client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool {
+ results = append(results, *p.Contents[0].Key)
+ return true
+ })
+
+ assert.Equal(t, []string{"Key1", "Key2"}, results)
+ assert.Nil(t, err)
+
+}
+
+// Benchmarks
+var benchResps = []*dynamodb.ListTablesOutput{
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
+ {TableNames: []*string{aws.String("TABLE")}},
+}
+
+var benchDb = func() *dynamodb.DynamoDB {
+ db := dynamodb.New(unit.Session)
+ db.Handlers.Send.Clear() // mock sending
+ db.Handlers.Unmarshal.Clear()
+ db.Handlers.UnmarshalMeta.Clear()
+ db.Handlers.ValidateResponse.Clear()
+ return db
+}
+
+func BenchmarkCodegenIterator(b *testing.B) {
+ reqNum := 0
+ db := benchDb()
+ db.Handlers.Unmarshal.PushBack(func(r *request.Request) {
+ r.Data = benchResps[reqNum]
+ reqNum++
+ })
+
+ input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
+ iter := func(fn func(*dynamodb.ListTablesOutput, bool) bool) error {
+ page, _ := db.ListTablesRequest(input)
+ for ; page != nil; page = page.NextPage() {
+ page.Send()
+ out := page.Data.(*dynamodb.ListTablesOutput)
+ if result := fn(out, !page.HasNextPage()); page.Error != nil || !result {
+ return page.Error
+ }
+ }
+ return nil
+ }
+
+ for i := 0; i < b.N; i++ {
+ reqNum = 0
+ iter(func(p *dynamodb.ListTablesOutput, last bool) bool {
+ return true
+ })
+ }
+}
+
+func BenchmarkEachPageIterator(b *testing.B) {
+ reqNum := 0
+ db := benchDb()
+ db.Handlers.Unmarshal.PushBack(func(r *request.Request) {
+ r.Data = benchResps[reqNum]
+ reqNum++
+ })
+
+ input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
+ for i := 0; i < b.N; i++ {
+ reqNum = 0
+ req, _ := db.ListTablesRequest(input)
+ req.EachPage(func(p interface{}, last bool) bool {
+ return true
+ })
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_test.go
new file mode 100644
index 000000000..02e9cab7f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_test.go
@@ -0,0 +1,229 @@
+package request_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awstesting"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+type testData struct {
+ Data string
+}
+
+func body(str string) io.ReadCloser {
+ return ioutil.NopCloser(bytes.NewReader([]byte(str)))
+}
+
+func unmarshal(req *request.Request) {
+ defer req.HTTPResponse.Body.Close()
+ if req.Data != nil {
+ json.NewDecoder(req.HTTPResponse.Body).Decode(req.Data)
+ }
+ return
+}
+
+func unmarshalError(req *request.Request) {
+ bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body)
+ if err != nil {
+ req.Error = awserr.New("UnmarshaleError", req.HTTPResponse.Status, err)
+ return
+ }
+ if len(bodyBytes) == 0 {
+ req.Error = awserr.NewRequestFailure(
+ awserr.New("UnmarshaleError", req.HTTPResponse.Status, fmt.Errorf("empty body")),
+ req.HTTPResponse.StatusCode,
+ "",
+ )
+ return
+ }
+ var jsonErr jsonErrorResponse
+ if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil {
+ req.Error = awserr.New("UnmarshaleError", "JSON unmarshal", err)
+ return
+ }
+ req.Error = awserr.NewRequestFailure(
+ awserr.New(jsonErr.Code, jsonErr.Message, nil),
+ req.HTTPResponse.StatusCode,
+ "",
+ )
+}
+
+type jsonErrorResponse struct {
+ Code string `json:"__type"`
+ Message string `json:"message"`
+}
+
+// test that retries occur for 5xx status codes
+func TestRequestRecoverRetry5xx(t *testing.T) {
+ reqNum := 0
+ reqs := []http.Response{
+ {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
+ {StatusCode: 501, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
+ {StatusCode: 200, Body: body(`{"data":"valid"}`)},
+ }
+
+ s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10))
+ s.Handlers.Validate.Clear()
+ s.Handlers.Unmarshal.PushBack(unmarshal)
+ s.Handlers.UnmarshalError.PushBack(unmarshalError)
+ s.Handlers.Send.Clear() // mock sending
+ s.Handlers.Send.PushBack(func(r *request.Request) {
+ r.HTTPResponse = &reqs[reqNum]
+ reqNum++
+ })
+ out := &testData{}
+ r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
+ err := r.Send()
+ assert.Nil(t, err)
+ assert.Equal(t, 2, int(r.RetryCount))
+ assert.Equal(t, "valid", out.Data)
+}
+
+// test that retries occur for 4xx status codes with a response type that can be retried - see `shouldRetry`
+func TestRequestRecoverRetry4xxRetryable(t *testing.T) {
+ reqNum := 0
+ reqs := []http.Response{
+ {StatusCode: 400, Body: body(`{"__type":"Throttling","message":"Rate exceeded."}`)},
+ {StatusCode: 429, Body: body(`{"__type":"ProvisionedThroughputExceededException","message":"Rate exceeded."}`)},
+ {StatusCode: 200, Body: body(`{"data":"valid"}`)},
+ }
+
+ s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10))
+ s.Handlers.Validate.Clear()
+ s.Handlers.Unmarshal.PushBack(unmarshal)
+ s.Handlers.UnmarshalError.PushBack(unmarshalError)
+ s.Handlers.Send.Clear() // mock sending
+ s.Handlers.Send.PushBack(func(r *request.Request) {
+ r.HTTPResponse = &reqs[reqNum]
+ reqNum++
+ })
+ out := &testData{}
+ r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
+ err := r.Send()
+ assert.Nil(t, err)
+ assert.Equal(t, 2, int(r.RetryCount))
+ assert.Equal(t, "valid", out.Data)
+}
+
+// test that retries don't occur for 4xx status codes with a response type that can't be retried
+func TestRequest4xxUnretryable(t *testing.T) {
+ s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10))
+ s.Handlers.Validate.Clear()
+ s.Handlers.Unmarshal.PushBack(unmarshal)
+ s.Handlers.UnmarshalError.PushBack(unmarshalError)
+ s.Handlers.Send.Clear() // mock sending
+ s.Handlers.Send.PushBack(func(r *request.Request) {
+ r.HTTPResponse = &http.Response{StatusCode: 401, Body: body(`{"__type":"SignatureDoesNotMatch","message":"Signature does not match."}`)}
+ })
+ out := &testData{}
+ r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
+ err := r.Send()
+ assert.NotNil(t, err)
+ if e, ok := err.(awserr.RequestFailure); ok {
+ assert.Equal(t, 401, e.StatusCode())
+ } else {
+ assert.Fail(t, "Expected error to be a service failure")
+ }
+ assert.Equal(t, "SignatureDoesNotMatch", err.(awserr.Error).Code())
+ assert.Equal(t, "Signature does not match.", err.(awserr.Error).Message())
+ assert.Equal(t, 0, int(r.RetryCount))
+}
+
+func TestRequestExhaustRetries(t *testing.T) {
+ delays := []time.Duration{}
+ sleepDelay := func(delay time.Duration) {
+ delays = append(delays, delay)
+ }
+
+ reqNum := 0
+ reqs := []http.Response{
+ {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
+ {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
+ {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
+ {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
+ }
+
+ s := awstesting.NewClient(aws.NewConfig().WithSleepDelay(sleepDelay))
+ s.Handlers.Validate.Clear()
+ s.Handlers.Unmarshal.PushBack(unmarshal)
+ s.Handlers.UnmarshalError.PushBack(unmarshalError)
+ s.Handlers.Send.Clear() // mock sending
+ s.Handlers.Send.PushBack(func(r *request.Request) {
+ r.HTTPResponse = &reqs[reqNum]
+ reqNum++
+ })
+ r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
+ err := r.Send()
+ assert.NotNil(t, err)
+ if e, ok := err.(awserr.RequestFailure); ok {
+ assert.Equal(t, 500, e.StatusCode())
+ } else {
+ assert.Fail(t, "Expected error to be a service failure")
+ }
+ assert.Equal(t, "UnknownError", err.(awserr.Error).Code())
+ assert.Equal(t, "An error occurred.", err.(awserr.Error).Message())
+ assert.Equal(t, 3, int(r.RetryCount))
+
+ expectDelays := []struct{ min, max time.Duration }{{30, 59}, {60, 118}, {120, 236}}
+ for i, v := range delays {
+ min := expectDelays[i].min * time.Millisecond
+ max := expectDelays[i].max * time.Millisecond
+ assert.True(t, min <= v && v <= max,
+ "Expect delay to be within range, i:%d, v:%s, min:%s, max:%s", i, v, min, max)
+ }
+}
+
+// test that the request is retried after the credentials are expired.
+func TestRequestRecoverExpiredCreds(t *testing.T) {
+ reqNum := 0
+ reqs := []http.Response{
+ {StatusCode: 400, Body: body(`{"__type":"ExpiredTokenException","message":"expired token"}`)},
+ {StatusCode: 200, Body: body(`{"data":"valid"}`)},
+ }
+
+ s := awstesting.NewClient(&aws.Config{MaxRetries: aws.Int(10), Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "")})
+ s.Handlers.Validate.Clear()
+ s.Handlers.Unmarshal.PushBack(unmarshal)
+ s.Handlers.UnmarshalError.PushBack(unmarshalError)
+
+ credExpiredBeforeRetry := false
+ credExpiredAfterRetry := false
+
+ s.Handlers.AfterRetry.PushBack(func(r *request.Request) {
+ credExpiredAfterRetry = r.Config.Credentials.IsExpired()
+ })
+
+ s.Handlers.Sign.Clear()
+ s.Handlers.Sign.PushBack(func(r *request.Request) {
+ r.Config.Credentials.Get()
+ })
+ s.Handlers.Send.Clear() // mock sending
+ s.Handlers.Send.PushBack(func(r *request.Request) {
+ r.HTTPResponse = &reqs[reqNum]
+ reqNum++
+ })
+ out := &testData{}
+ r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
+ err := r.Send()
+ assert.Nil(t, err)
+
+ assert.False(t, credExpiredBeforeRetry, "Expect valid creds before retry check")
+ assert.True(t, credExpiredAfterRetry, "Expect expired creds after retry check")
+ assert.False(t, s.Config.Credentials.IsExpired(), "Expect valid creds after cred expired recovery")
+
+ assert.Equal(t, 1, int(r.RetryCount))
+ assert.Equal(t, "valid", out.Data)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go
index 526675102..b06143b83 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -12,13 +12,14 @@ import (
type Retryer interface {
RetryRules(*Request) time.Duration
ShouldRetry(*Request) bool
- MaxRetries() uint
+ MaxRetries() int
}
// retryableCodes is a collection of service response codes which are retry-able
// without any further action.
var retryableCodes = map[string]struct{}{
"RequestError": {},
+ "RequestTimeout": {},
"ProvisionedThroughputExceededException": {},
"Throttling": {},
"ThrottlingException": {},
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/service/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/service/service.go
deleted file mode 100644
index 7205212e1..000000000
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/service/service.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package service
-
-import (
- "fmt"
- "io/ioutil"
- "net/http"
- "net/http/httputil"
- "regexp"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/corehandlers"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/aws/service/serviceinfo"
- "github.com/aws/aws-sdk-go/internal/endpoints"
-)
-
-// A Service implements the base service request and response handling
-// used by all services.
-type Service struct {
- serviceinfo.ServiceInfo
- request.Retryer
- DefaultMaxRetries uint
- Handlers request.Handlers
-}
-
-var schemeRE = regexp.MustCompile("^([^:]+)://")
-
-// New will return a pointer to a new Server object initialized.
-func New(config *aws.Config) *Service {
- svc := &Service{ServiceInfo: serviceinfo.ServiceInfo{Config: config}}
- svc.Initialize()
- return svc
-}
-
-// Initialize initializes the service.
-func (s *Service) Initialize() {
- if s.Config == nil {
- s.Config = &aws.Config{}
- }
- if s.Config.HTTPClient == nil {
- s.Config.HTTPClient = http.DefaultClient
- }
- if s.Config.SleepDelay == nil {
- s.Config.SleepDelay = time.Sleep
- }
-
- s.Retryer = DefaultRetryer{s}
- s.DefaultMaxRetries = 3
- s.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
- s.Handlers.Build.PushBackNamed(corehandlers.UserAgentHandler)
- s.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
- s.Handlers.Send.PushBackNamed(corehandlers.SendHandler)
- s.Handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
- s.Handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
- if !aws.BoolValue(s.Config.DisableParamValidation) {
- s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
- }
- s.AddDebugHandlers()
- s.buildEndpoint()
-}
-
-// NewRequest returns a new Request pointer for the service API
-// operation and parameters.
-func (s *Service) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
- return request.New(s.ServiceInfo, s.Handlers, s.Retryer, operation, params, data)
-}
-
-// buildEndpoint builds the endpoint values the service will use to make requests with.
-func (s *Service) buildEndpoint() {
- if aws.StringValue(s.Config.Endpoint) != "" {
- s.Endpoint = *s.Config.Endpoint
- } else if s.Endpoint == "" {
- s.Endpoint, s.SigningRegion =
- endpoints.EndpointForRegion(s.ServiceName, aws.StringValue(s.Config.Region))
- }
-
- if s.Endpoint != "" && !schemeRE.MatchString(s.Endpoint) {
- scheme := "https"
- if aws.BoolValue(s.Config.DisableSSL) {
- scheme = "http"
- }
- s.Endpoint = scheme + "://" + s.Endpoint
- }
-}
-
-// AddDebugHandlers injects debug logging handlers into the service to log request
-// debug information.
-func (s *Service) AddDebugHandlers() {
- if !s.Config.LogLevel.AtLeast(aws.LogDebug) {
- return
- }
-
- s.Handlers.Send.PushFront(logRequest)
- s.Handlers.Send.PushBack(logResponse)
-}
-
-const logReqMsg = `DEBUG: Request %s/%s Details:
----[ REQUEST POST-SIGN ]-----------------------------
-%s
------------------------------------------------------`
-
-func logRequest(r *request.Request) {
- logBody := r.Service.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
- dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody)
-
- if logBody {
- // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
- // Body as a NoOpCloser and will not be reset after read by the HTTP
- // client reader.
- r.Body.Seek(r.BodyStart, 0)
- r.HTTPRequest.Body = ioutil.NopCloser(r.Body)
- }
-
- r.Service.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.Service.ServiceName, r.Operation.Name, string(dumpedBody)))
-}
-
-const logRespMsg = `DEBUG: Response %s/%s Details:
----[ RESPONSE ]--------------------------------------
-%s
------------------------------------------------------`
-
-func logResponse(r *request.Request) {
- var msg = "no reponse data"
- if r.HTTPResponse != nil {
- logBody := r.Service.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
- dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody)
- msg = string(dumpedBody)
- } else if r.Error != nil {
- msg = r.Error.Error()
- }
- r.Service.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.Service.ServiceName, r.Operation.Name, msg))
-}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/service/serviceinfo/service_info.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/service/serviceinfo/service_info.go
deleted file mode 100644
index a920e96a9..000000000
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/service/serviceinfo/service_info.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package serviceinfo
-
-import "github.com/aws/aws-sdk-go/aws"
-
-// ServiceInfo wraps immutable data from the service.Service structure.
-type ServiceInfo struct {
- Config *aws.Config
- ServiceName string
- APIVersion string
- Endpoint string
- SigningName string
- SigningRegion string
- JSONVersion string
- TargetPrefix string
-}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session.go
new file mode 100644
index 000000000..eb7fc2052
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -0,0 +1,105 @@
+// Package session provides a way to create service clients with shared configuration
+// and handlers.
+package session
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/defaults"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/endpoints"
+)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// Sessions are safe to create service clients concurrently, but it is not safe
+// to mutate the session concurrently.
+type Session struct {
+ Config *aws.Config
+ Handlers request.Handlers
+}
+
+// New creates a new Session, merging the provided Configs on top of the SDK's
+// default configuration and handlers. Once the session is created it
+// can be mutated to modify Configs or Handlers. The session is safe to be read
+// concurrently, but it should not be written to concurrently.
+//
+// Example:
+// // Create a session with the default config and request handlers.
+// sess := session.New()
+//
+// // Create a session with a custom region
+// sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
+//
+// // Create a session, and add additional handlers for all service
+// // clients created with the session to inherit. Adds logging handler.
+// sess := session.New()
+// sess.Handlers.Send.PushFront(func(r *request.Request) {
+// // Log every request made and its payload
+// logger.Printf("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params)
+// })
+//
+// // Create a S3 client instance from a session
+// sess := session.New()
+// svc := s3.New(sess)
+func New(cfgs ...*aws.Config) *Session {
+ def := defaults.Get()
+ s := &Session{
+ Config: def.Config,
+ Handlers: def.Handlers,
+ }
+ s.Config.MergeIn(cfgs...)
+
+ initHandlers(s)
+
+ return s
+}
+
+func initHandlers(s *Session) {
+ // Add the Validate parameter handler if it is not disabled.
+ s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
+ if !aws.BoolValue(s.Config.DisableParamValidation) {
+ s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
+ }
+}
+
+// Copy creates and returns a copy of the current session, copying the config
+// and handlers. If any additional configs are provided they will be merged
+// on top of the session's copied config.
+//
+// Example:
+// // Create a copy of the current session, configured for the us-west-2 region.
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
+func (s *Session) Copy(cfgs ...*aws.Config) *Session {
+ newSession := &Session{
+ Config: s.Config.Copy(cfgs...),
+ Handlers: s.Handlers.Copy(),
+ }
+
+ initHandlers(newSession)
+
+ return newSession
+}
+
+// ClientConfig satisfies the client.ConfigProvider interface and is used to
+// configure the service client instances. Passing the Session to the service
+// client's constructor (New) will use this method to configure the client.
+//
+// Example:
+// sess := session.New()
+// s3.New(sess)
+func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
+ s = s.Copy(cfgs...)
+ endpoint, signingRegion := endpoints.NormalizeEndpoint(
+ aws.StringValue(s.Config.Endpoint), serviceName,
+ aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL))
+
+ return client.Config{
+ Config: s.Config,
+ Handlers: s.Handlers,
+ Endpoint: endpoint,
+ SigningRegion: signingRegion,
+ }
+}
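The new session package is the entry point for shared config and handlers. A sketch tying New, Handlers, Copy, and the implicit ClientConfig call together; the region values and the log line are placeholders, and valid credentials are assumed.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    // Base session with the default config plus an explicit region.
    sess := session.New(&aws.Config{Region: aws.String("us-east-1")})

    // Handlers added here are inherited by every client built from sess.
    sess.Handlers.Send.PushFront(func(r *request.Request) {
        fmt.Printf("sending %s/%s\n", r.ClientInfo.ServiceName, r.Operation.Name)
    })

    // Service constructors call sess.ClientConfig via the ConfigProvider interface.
    east := s3.New(sess)

    // Copy produces an independent session; the original is left untouched.
    west := s3.New(sess.Copy(&aws.Config{Region: aws.String("us-west-2")}))

    _, _ = east, west
}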
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session_test.go
new file mode 100644
index 000000000..e56c02fc6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session_test.go
@@ -0,0 +1,20 @@
+package session_test
+
+import (
+ "net/http"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/session"
+)
+
+func TestNewDefaultSession(t *testing.T) {
+ s := session.New(&aws.Config{Region: aws.String("region")})
+
+ assert.Equal(t, "region", *s.Config.Region)
+ assert.Equal(t, http.DefaultClient, s.Config.HTTPClient)
+ assert.NotNil(t, s.Config.Logger)
+ assert.Equal(t, aws.LogOff, *s.Config.LogLevel)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go
index 846b732dd..0f067c57f 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go
@@ -5,7 +5,7 @@ import (
"sync"
)
-// ReadSeekCloser wraps a io.Reader returning a ReaderSeakerCloser
+// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser
func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
return ReaderSeekerCloser{r}
}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types_test.go
new file mode 100644
index 000000000..a4ed20e7d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types_test.go
@@ -0,0 +1,56 @@
+package aws
+
+import (
+ "math/rand"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestWriteAtBuffer(t *testing.T) {
+ b := &WriteAtBuffer{}
+
+ n, err := b.WriteAt([]byte{1}, 0)
+ assert.NoError(t, err)
+ assert.Equal(t, 1, n)
+
+ n, err = b.WriteAt([]byte{1, 1, 1}, 5)
+ assert.NoError(t, err)
+ assert.Equal(t, 3, n)
+
+ n, err = b.WriteAt([]byte{2}, 1)
+ assert.NoError(t, err)
+ assert.Equal(t, 1, n)
+
+ n, err = b.WriteAt([]byte{3}, 2)
+ assert.NoError(t, err)
+ assert.Equal(t, 1, n)
+
+ assert.Equal(t, []byte{1, 2, 3, 0, 0, 1, 1, 1}, b.Bytes())
+}
+
+func BenchmarkWriteAtBuffer(b *testing.B) {
+ buf := &WriteAtBuffer{}
+ r := rand.New(rand.NewSource(1))
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ to := r.Intn(10) * 4096
+ bs := make([]byte, to)
+ buf.WriteAt(bs, r.Int63n(10)*4096)
+ }
+}
+
+func BenchmarkWriteAtBufferParallel(b *testing.B) {
+ buf := &WriteAtBuffer{}
+ r := rand.New(rand.NewSource(1))
+
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ to := r.Intn(10) * 4096
+ bs := make([]byte, to)
+ buf.WriteAt(bs, r.Int63n(10)*4096)
+ }
+ })
+}
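The test above pins down WriteAtBuffer's sparse-write behaviour: offsets may arrive out of order and any gap is zero-filled. A sketch of the same buffer used through the io.WriterAt interface; the byte values are arbitrary.

package main

import (
    "fmt"
    "io"

    "github.com/aws/aws-sdk-go/aws"
)

func main() {
    buf := &aws.WriteAtBuffer{}

    // Write out of order; the buffer grows to fit the furthest offset.
    var w io.WriterAt = buf
    w.WriteAt([]byte{0xBE, 0xEF}, 4)
    w.WriteAt([]byte{0xDE, 0xAD}, 0)

    // Unwritten bytes between the two chunks stay zero.
    fmt.Printf("% x\n", buf.Bytes()) // de ad 00 00 be ef
}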
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go
index b7de9e351..39e7b4d5f 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
-const SDKVersion = "0.9.16"
+const SDKVersion = "release-v0.10.0"
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go
deleted file mode 100644
index d040cccd5..000000000
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Package endpoints validates regional endpoints for services.
-package endpoints
-
-//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
-//go:generate gofmt -s -w endpoints_map.go
-
-import "strings"
-
-// EndpointForRegion returns an endpoint and its signing region for a service and region.
-// if the service and region pair are not found endpoint and signingRegion will be empty.
-func EndpointForRegion(svcName, region string) (endpoint, signingRegion string) {
- derivedKeys := []string{
- region + "/" + svcName,
- region + "/*",
- "*/" + svcName,
- "*/*",
- }
-
- for _, key := range derivedKeys {
- if val, ok := endpointsMap.Endpoints[key]; ok {
- ep := val.Endpoint
- ep = strings.Replace(ep, "{region}", region, -1)
- ep = strings.Replace(ep, "{service}", svcName, -1)
-
- endpoint = ep
- signingRegion = val.SigningRegion
- return
- }
- }
- return
-}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
new file mode 100644
index 000000000..2b279e659
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
@@ -0,0 +1,65 @@
+// Package endpoints validates regional endpoints for services.
+package endpoints
+
+//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
+//go:generate gofmt -s -w endpoints_map.go
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// NormalizeEndpoint takes an endpoint and service API information and returns a
+// normalized endpoint and signing region. If the endpoint is an empty string,
+// the service name and region are used to look up the service's API endpoint.
+// If an endpoint is provided, only its scheme is added when not already present.
+func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) {
+ if endpoint == "" {
+ return EndpointForRegion(serviceName, region, disableSSL)
+ }
+
+ return AddScheme(endpoint, disableSSL), ""
+}
+
+// EndpointForRegion returns an endpoint and its signing region for a service and region.
+// If the service and region pair is not found, endpoint and signingRegion will be empty.
+func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) {
+ derivedKeys := []string{
+ region + "/" + svcName,
+ region + "/*",
+ "*/" + svcName,
+ "*/*",
+ }
+
+ for _, key := range derivedKeys {
+ if val, ok := endpointsMap.Endpoints[key]; ok {
+ ep := val.Endpoint
+ ep = strings.Replace(ep, "{region}", region, -1)
+ ep = strings.Replace(ep, "{service}", svcName, -1)
+
+ endpoint = ep
+ signingRegion = val.SigningRegion
+ break
+ }
+ }
+
+ return AddScheme(endpoint, disableSSL), signingRegion
+}
+
+// Regular expression to determine if the endpoint string is prefixed with a scheme.
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no
+// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS.
+func AddScheme(endpoint string, disableSSL bool) string {
+ if endpoint != "" && !schemeRE.MatchString(endpoint) {
+ scheme := "https"
+ if disableSSL {
+ scheme = "http"
+ }
+ endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+ }
+
+ return endpoint
+}
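The relocated endpoints package now folds scheme handling into endpoint resolution. A sketch of the two NormalizeEndpoint paths, matching the map entries and tests elsewhere in this diff; note this is a private SDK package, so application code would normally reach it only through session.ClientConfig, and localhost:8000 is a placeholder endpoint.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/private/endpoints"
)

func main() {
    // Empty endpoint: derive it from the service name and region.
    ep, signingRegion := endpoints.NormalizeEndpoint("", "iam", "us-west-2", false)
    fmt.Println(ep, signingRegion) // https://iam.amazonaws.com us-east-1

    // Explicit endpoint: only the scheme is added (http when SSL is disabled).
    ep, _ = endpoints.NormalizeEndpoint("localhost:8000", "dynamodb", "us-west-2", true)
    fmt.Println(ep) // http://localhost:8000
}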
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.json b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
similarity index 90%
rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.json
rename to Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
index 62b8231d4..ea819b1ec 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.json
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
@@ -29,6 +29,10 @@
"endpoint": "",
"signingRegion": "us-east-1"
},
+ "*/ec2metadata": {
+ "endpoint": "http://169.254.169.254/latest",
+ "signingRegion": "us-east-1"
+ },
"*/iam": {
"endpoint": "iam.amazonaws.com",
"signingRegion": "us-east-1"
@@ -45,6 +49,10 @@
"endpoint": "sts.amazonaws.com",
"signingRegion": "us-east-1"
},
+ "*/waf": {
+ "endpoint": "waf.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
"us-east-1/sdb": {
"endpoint": "sdb.amazonaws.com",
"signingRegion": "us-east-1"
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_map.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
similarity index 91%
rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_map.go
rename to Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
index 62fdc16bd..3fab91c7f 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_map.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
@@ -30,6 +30,10 @@ var endpointsMap = endpointStruct{
Endpoint: "",
SigningRegion: "us-east-1",
},
+ "*/ec2metadata": {
+ Endpoint: "http://169.254.169.254/latest",
+ SigningRegion: "us-east-1",
+ },
"*/iam": {
Endpoint: "iam.amazonaws.com",
SigningRegion: "us-east-1",
@@ -46,6 +50,10 @@ var endpointsMap = endpointStruct{
Endpoint: "sts.amazonaws.com",
SigningRegion: "us-east-1",
},
+ "*/waf": {
+ Endpoint: "waf.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
"ap-northeast-1/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_test.go
new file mode 100644
index 000000000..2add48890
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_test.go
@@ -0,0 +1,41 @@
+package endpoints_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/aws/aws-sdk-go/private/endpoints"
+)
+
+func TestGenericEndpoint(t *testing.T) {
+ name := "service"
+ region := "mock-region-1"
+
+ ep, sr := endpoints.EndpointForRegion(name, region, false)
+ assert.Equal(t, fmt.Sprintf("https://%s.%s.amazonaws.com", name, region), ep)
+ assert.Empty(t, sr)
+}
+
+func TestGlobalEndpoints(t *testing.T) {
+ region := "mock-region-1"
+ svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts", "waf"}
+
+ for _, name := range svcs {
+ ep, sr := endpoints.EndpointForRegion(name, region, false)
+ assert.Equal(t, fmt.Sprintf("https://%s.amazonaws.com", name), ep)
+ assert.Equal(t, "us-east-1", sr)
+ }
+}
+
+func TestServicesInCN(t *testing.T) {
+ region := "cn-north-1"
+ svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts", "s3", "waf"}
+
+ for _, name := range svcs {
+ ep, sr := endpoints.EndpointForRegion(name, region, false)
+ assert.Equal(t, fmt.Sprintf("https://%s.%s.amazonaws.com.cn", name, region), ep)
+ assert.Empty(t, sr)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/ec2query/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go
similarity index 78%
rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/ec2query/build.go
rename to Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go
index fabe9b3c3..0ead0126e 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/ec2query/build.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go
@@ -1,21 +1,21 @@
// Package ec2query provides serialisation of AWS EC2 requests and responses.
package ec2query
-//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/ec2.json build_test.go
+//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/ec2.json build_test.go
import (
"net/url"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/internal/protocol/query/queryutil"
+ "github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
)
// Build builds a request for the EC2 protocol.
func Build(r *request.Request) {
body := url.Values{
"Action": {r.Operation.Name},
- "Version": {r.Service.APIVersion},
+ "Version": {r.ClientInfo.APIVersion},
}
if err := queryutil.Parse(body, r.Params, true); err != nil {
r.Error = awserr.New("SerializationError", "failed encoding EC2 Query request", err)
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_bench_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_bench_test.go
new file mode 100644
index 000000000..5b8d04bc1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_bench_test.go
@@ -0,0 +1,85 @@
+// +build bench
+
+package ec2query_test
+
+import (
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awstesting"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/ec2query"
+ "github.com/aws/aws-sdk-go/service/ec2"
+)
+
+func BenchmarkEC2QueryBuild_Complex_ec2AuthorizeSecurityGroupEgress(b *testing.B) {
+ params := &ec2.AuthorizeSecurityGroupEgressInput{
+ GroupId: aws.String("String"), // Required
+ CidrIp: aws.String("String"),
+ DryRun: aws.Bool(true),
+ FromPort: aws.Int64(1),
+ IpPermissions: []*ec2.IpPermission{
+ { // Required
+ FromPort: aws.Int64(1),
+ IpProtocol: aws.String("String"),
+ IpRanges: []*ec2.IpRange{
+ { // Required
+ CidrIp: aws.String("String"),
+ },
+ // More values...
+ },
+ PrefixListIds: []*ec2.PrefixListId{
+ { // Required
+ PrefixListId: aws.String("String"),
+ },
+ // More values...
+ },
+ ToPort: aws.Int64(1),
+ UserIdGroupPairs: []*ec2.UserIdGroupPair{
+ { // Required
+ GroupId: aws.String("String"),
+ GroupName: aws.String("String"),
+ UserId: aws.String("String"),
+ },
+ // More values...
+ },
+ },
+ // More values...
+ },
+ IpProtocol: aws.String("String"),
+ SourceSecurityGroupName: aws.String("String"),
+ SourceSecurityGroupOwnerId: aws.String("String"),
+ ToPort: aws.Int64(1),
+ }
+
+ benchEC2QueryBuild(b, "AuthorizeSecurityGroupEgress", params)
+}
+
+func BenchmarkEC2QueryBuild_Simple_ec2AttachNetworkInterface(b *testing.B) {
+ params := &ec2.AttachNetworkInterfaceInput{
+ DeviceIndex: aws.Int64(1), // Required
+ InstanceId: aws.String("String"), // Required
+ NetworkInterfaceId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+
+ benchEC2QueryBuild(b, "AttachNetworkInterface", params)
+}
+
+func benchEC2QueryBuild(b *testing.B, opName string, params interface{}) {
+ svc := awstesting.NewClient()
+ svc.ServiceName = "ec2"
+ svc.APIVersion = "2015-04-15"
+
+ for i := 0; i < b.N; i++ {
+ r := svc.NewRequest(&request.Operation{
+ Name: opName,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }, params, nil)
+ ec2query.Build(r)
+ if r.Error != nil {
+ b.Fatal("Unexpected error", r.Error)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go
new file mode 100644
index 000000000..887feeb1e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go
@@ -0,0 +1,1051 @@
+package ec2query_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "encoding/xml"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/awstesting"
+ "github.com/aws/aws-sdk-go/private/protocol/ec2query"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+ "github.com/aws/aws-sdk-go/private/signer/v4"
+ "github.com/aws/aws-sdk-go/private/util"
+ "github.com/stretchr/testify/assert"
+)
+
+var _ bytes.Buffer // always import bytes
+var _ http.Request
+var _ json.Marshaler
+var _ time.Time
+var _ xmlutil.XMLNode
+var _ xml.Attr
+var _ = awstesting.GenerateAssertions
+var _ = ioutil.Discard
+var _ = util.Trim("")
+var _ = url.Values{}
+var _ = io.EOF
+var _ = aws.String
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService1ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService1ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService1ProtocolTest client from just a session.
+// svc := inputservice1protocoltest.New(mySession)
+//
+// // Create a InputService1ProtocolTest client with additional configuration
+// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest {
+ c := p.ClientConfig("inputservice1protocoltest", cfgs...)
+ return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest {
+ svc := &InputService1ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice1protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(ec2query.Build)
+ svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService1ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService1TestCaseOperation1 = "OperationName"
+
+// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation.
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputService1TestCaseOperation1Input) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService1TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService1TestShapeInputService1TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService1TestShapeInputService1TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputService1TestCaseOperation1Input) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) {
+ req, out := c.InputService1TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService1TestShapeInputService1TestCaseOperation1Input struct {
+ Bar *string `type:"string"`
+
+ Foo *string `type:"string"`
+
+ metadataInputService1TestShapeInputService1TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService1TestShapeInputService1TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService1TestShapeInputService1TestCaseOperation1Output struct {
+ metadataInputService1TestShapeInputService1TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService1TestShapeInputService1TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService2ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService2ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService2ProtocolTest client from just a session.
+// svc := inputservice2protocoltest.New(mySession)
+//
+// // Create a InputService2ProtocolTest client with additional configuration
+// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest {
+ c := p.ClientConfig("inputservice2protocoltest", cfgs...)
+ return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest {
+ svc := &InputService2ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice2protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(ec2query.Build)
+ svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService2ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService2TestCaseOperation1 = "OperationName"
+
+// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation.
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService2TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService2TestShapeInputService2TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService2TestShapeInputService2TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) {
+ req, out := c.InputService2TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService2TestShapeInputService2TestCaseOperation1Input struct {
+ Bar *string `locationName:"barLocationName" type:"string"`
+
+ Foo *string `type:"string"`
+
+ Yuck *string `locationName:"yuckLocationName" queryName:"yuckQueryName" type:"string"`
+
+ metadataInputService2TestShapeInputService2TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService2TestShapeInputService2TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService2TestShapeInputService2TestCaseOperation1Output struct {
+ metadataInputService2TestShapeInputService2TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService2TestShapeInputService2TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService3ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService3ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService3ProtocolTest client from just a session.
+// svc := inputservice3protocoltest.New(mySession)
+//
+// // Create a InputService3ProtocolTest client with additional configuration
+// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest {
+ c := p.ClientConfig("inputservice3protocoltest", cfgs...)
+ return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest {
+ svc := &InputService3ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice3protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(ec2query.Build)
+ svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService3ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService3TestCaseOperation1 = "OperationName"
+
+// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation.
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputService3TestCaseOperation1Input) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService3TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService3TestShapeInputService3TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService3TestShapeInputService3TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputService3TestCaseOperation1Input) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) {
+ req, out := c.InputService3TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService3TestShapeInputService3TestCaseOperation1Input struct {
+ StructArg *InputService3TestShapeStructType `locationName:"Struct" type:"structure"`
+
+ metadataInputService3TestShapeInputService3TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService3TestShapeInputService3TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService3TestShapeInputService3TestCaseOperation1Output struct {
+ metadataInputService3TestShapeInputService3TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService3TestShapeInputService3TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService3TestShapeStructType struct {
+ ScalarArg *string `locationName:"Scalar" type:"string"`
+
+ metadataInputService3TestShapeStructType `json:"-" xml:"-"`
+}
+
+type metadataInputService3TestShapeStructType struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService4ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService4ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService4ProtocolTest client from just a session.
+// svc := inputservice4protocoltest.New(mySession)
+//
+// // Create a InputService4ProtocolTest client with additional configuration
+// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest {
+ c := p.ClientConfig("inputservice4protocoltest", cfgs...)
+ return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest {
+ svc := &InputService4ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice4protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(ec2query.Build)
+ svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService4ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService4TestCaseOperation1 = "OperationName"
+
+// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation.
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputService4TestCaseOperation1Input) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService4TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService4TestShapeInputService4TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService4TestShapeInputService4TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputService4TestCaseOperation1Input) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) {
+ req, out := c.InputService4TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService4TestShapeInputService4TestCaseOperation1Input struct {
+ ListArg []*string `type:"list"`
+
+ metadataInputService4TestShapeInputService4TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService4TestShapeInputService4TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService4TestShapeInputService4TestCaseOperation1Output struct {
+ metadataInputService4TestShapeInputService4TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService4TestShapeInputService4TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService5ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService5ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService5ProtocolTest client from just a session.
+// svc := inputservice5protocoltest.New(mySession)
+//
+// // Create a InputService5ProtocolTest client with additional configuration
+// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest {
+ c := p.ClientConfig("inputservice5protocoltest", cfgs...)
+ return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest {
+ svc := &InputService5ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice5protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(ec2query.Build)
+ svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService5ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService5TestCaseOperation1 = "OperationName"
+
+// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation.
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputService5TestCaseOperation1Input) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService5TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService5TestShapeInputService5TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService5TestShapeInputService5TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputService5TestCaseOperation1Input) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) {
+ req, out := c.InputService5TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService5TestShapeInputService5TestCaseOperation1Input struct {
+ ListArg []*string `locationName:"ListMemberName" locationNameList:"item" type:"list"`
+
+ metadataInputService5TestShapeInputService5TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService5TestShapeInputService5TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService5TestShapeInputService5TestCaseOperation1Output struct {
+ metadataInputService5TestShapeInputService5TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService5TestShapeInputService5TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService6ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService6ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService6ProtocolTest client from just a session.
+// svc := inputservice6protocoltest.New(mySession)
+//
+// // Create a InputService6ProtocolTest client with additional configuration
+// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest {
+ c := p.ClientConfig("inputservice6protocoltest", cfgs...)
+ return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest {
+ svc := &InputService6ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice6protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(ec2query.Build)
+ svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService6ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService6TestCaseOperation1 = "OperationName"
+
+// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation.
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService6TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService6TestShapeInputService6TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService6TestShapeInputService6TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) {
+ req, out := c.InputService6TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService6TestShapeInputService6TestCaseOperation1Input struct {
+ ListArg []*string `locationName:"ListMemberName" queryName:"ListQueryName" locationNameList:"item" type:"list"`
+
+ metadataInputService6TestShapeInputService6TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService6TestShapeInputService6TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService6TestShapeInputService6TestCaseOperation1Output struct {
+ metadataInputService6TestShapeInputService6TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService6TestShapeInputService6TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService7ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService7ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService7ProtocolTest client from just a session.
+// svc := inputservice7protocoltest.New(mySession)
+//
+// // Create a InputService7ProtocolTest client with additional configuration
+// svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest {
+ c := p.ClientConfig("inputservice7protocoltest", cfgs...)
+ return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest {
+ svc := &InputService7ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice7protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(ec2query.Build)
+ svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService7ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService7TestCaseOperation1 = "OperationName"
+
+// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation.
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputService7TestCaseOperation1Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService7TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService7TestShapeInputService7TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService7TestShapeInputService7TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputService7TestCaseOperation1Input) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) {
+ req, out := c.InputService7TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService7TestShapeInputService7TestCaseOperation1Input struct {
+ BlobArg []byte `type:"blob"`
+
+ metadataInputService7TestShapeInputService7TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService7TestShapeInputService7TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService7TestShapeInputService7TestCaseOperation1Output struct {
+ metadataInputService7TestShapeInputService7TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService7TestShapeInputService7TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService8ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService8ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService8ProtocolTest client from just a session.
+// svc := inputservice8protocoltest.New(mySession)
+//
+// // Create a InputService8ProtocolTest client with additional configuration
+// svc := inputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService8ProtocolTest {
+ c := p.ClientConfig("inputservice8protocoltest", cfgs...)
+ return newInputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService8ProtocolTest {
+ svc := &InputService8ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice8protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(ec2query.Build)
+ svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService8ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService8TestCaseOperation1 = "OperationName"
+
+// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation.
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputService8TestCaseOperation1Input) (req *request.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService8TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService8TestShapeInputService8TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService8TestShapeInputService8TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputService8TestCaseOperation1Input) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) {
+ req, out := c.InputService8TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService8TestShapeInputService8TestCaseOperation1Input struct {
+ TimeArg *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ metadataInputService8TestShapeInputService8TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService8TestShapeInputService8TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService8TestShapeInputService8TestCaseOperation1Output struct {
+ metadataInputService8TestShapeInputService8TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService8TestShapeInputService8TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//
+// Tests begin here
+//
+
+func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService1TestShapeInputService1TestCaseOperation1Input{
+ Bar: aws.String("val2"),
+ Foo: aws.String("val1"),
+ }
+ req, _ := svc.InputService1TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ ec2query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService2ProtocolTestStructureWithLocationNameAndQueryNameAppliedToMembersCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService2TestShapeInputService2TestCaseOperation1Input{
+ Bar: aws.String("val2"),
+ Foo: aws.String("val1"),
+ Yuck: aws.String("val3"),
+ }
+ req, _ := svc.InputService2TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ ec2query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&BarLocationName=val2&Foo=val1&Version=2014-01-01&yuckQueryName=val3`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService3ProtocolTestNestedStructureMembersCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService3TestShapeInputService3TestCaseOperation1Input{
+ StructArg: &InputService3TestShapeStructType{
+ ScalarArg: aws.String("foo"),
+ },
+ }
+ req, _ := svc.InputService3TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ ec2query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&Struct.Scalar=foo&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService4ProtocolTestListTypesCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService4TestShapeInputService4TestCaseOperation1Input{
+ ListArg: []*string{
+ aws.String("foo"),
+ aws.String("bar"),
+ aws.String("baz"),
+ },
+ }
+ req, _ := svc.InputService4TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ ec2query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&ListArg.1=foo&ListArg.2=bar&ListArg.3=baz&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService5ProtocolTestListWithLocationNameAppliedToMemberCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService5TestShapeInputService5TestCaseOperation1Input{
+ ListArg: []*string{
+ aws.String("a"),
+ aws.String("b"),
+ aws.String("c"),
+ },
+ }
+ req, _ := svc.InputService5TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ ec2query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&ListMemberName.1=a&ListMemberName.2=b&ListMemberName.3=c&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService6ProtocolTestListWithLocationNameAndQueryNameCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService6TestShapeInputService6TestCaseOperation1Input{
+ ListArg: []*string{
+ aws.String("a"),
+ aws.String("b"),
+ aws.String("c"),
+ },
+ }
+ req, _ := svc.InputService6TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ ec2query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&ListQueryName.1=a&ListQueryName.2=b&ListQueryName.3=c&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService7ProtocolTestBase64EncodedBlobsCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService7TestShapeInputService7TestCaseOperation1Input{
+ BlobArg: []byte("foo"),
+ }
+ req, _ := svc.InputService7TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ ec2query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService8ProtocolTestTimestampValuesCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService8TestShapeInputService8TestCaseOperation1Input{
+ TimeArg: aws.Time(time.Unix(1422172800, 0)),
+ }
+ req, _ := svc.InputService8TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ ec2query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/ec2query/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go
similarity index 88%
rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/ec2query/unmarshal.go
rename to Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go
index bb0f01588..658190f70 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/ec2query/unmarshal.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go
@@ -1,6 +1,6 @@
package ec2query

-//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/ec2.json unmarshal_test.go
+//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/ec2.json unmarshal_test.go

import (
"encoding/xml"
@@ -8,7 +8,7 @@ import (

"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

// Unmarshal unmarshals a response body for the EC2 protocol.
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go
new file mode 100644
index 000000000..c347c371c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go
@@ -0,0 +1,1132 @@
+package ec2query_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "encoding/xml"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/awstesting"
+ "github.com/aws/aws-sdk-go/private/protocol/ec2query"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+ "github.com/aws/aws-sdk-go/private/signer/v4"
+ "github.com/aws/aws-sdk-go/private/util"
+ "github.com/stretchr/testify/assert"
+)
+
+var _ bytes.Buffer // always import bytes
+var _ http.Request
+var _ json.Marshaler
+var _ time.Time
+var _ xmlutil.XMLNode
+var _ xml.Attr
+var _ = awstesting.GenerateAssertions
+var _ = ioutil.Discard
+var _ = util.Trim("")
+var _ = url.Values{}
+var _ = io.EOF
+var _ = aws.String
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type OutputService1ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService1ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a OutputService1ProtocolTest client from just a session.
+// svc := outputservice1protocoltest.New(mySession)
+//
+// // Create a OutputService1ProtocolTest client with additional configuration
+// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest {
+ c := p.ClientConfig("outputservice1protocoltest", cfgs...)
+ return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest {
+ svc := &OutputService1ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice1protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(ec2query.Build)
+ svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService1TestCaseOperation1 = "OperationName"
+
+// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation.
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputService1TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService1TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService1TestShapeOutputService1TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputService1TestCaseOperation1Output, error) {
+ req, out := c.OutputService1TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService1TestShapeOutputService1TestCaseOperation1Input struct {
+ metadataOutputService1TestShapeOutputService1TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService1TestShapeOutputService1TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService1TestShapeOutputService1TestCaseOperation1Output struct {
+ Char *string `type:"character"`
+
+ Double *float64 `type:"double"`
+
+ FalseBool *bool `type:"boolean"`
+
+ Float *float64 `type:"float"`
+
+ Long *int64 `type:"long"`
+
+ Num *int64 `locationName:"FooNum" type:"integer"`
+
+ Str *string `type:"string"`
+
+ TrueBool *bool `type:"boolean"`
+
+ metadataOutputService1TestShapeOutputService1TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService1TestShapeOutputService1TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type OutputService2ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService2ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a OutputService2ProtocolTest client from just a session.
+// svc := outputservice2protocoltest.New(mySession)
+//
+// // Create a OutputService2ProtocolTest client with additional configuration
+// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest {
+ c := p.ClientConfig("outputservice2protocoltest", cfgs...)
+ return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest {
+ svc := &OutputService2ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice2protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(ec2query.Build)
+ svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService2TestCaseOperation1 = "OperationName"
+
+// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation.
+func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService2TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) {
+ req, out := c.OutputService2TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService2TestShapeOutputService2TestCaseOperation1Input struct {
+ metadataOutputService2TestShapeOutputService2TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService2TestShapeOutputService2TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService2TestShapeOutputService2TestCaseOperation1Output struct {
+ Blob []byte `type:"blob"`
+
+ metadataOutputService2TestShapeOutputService2TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService2TestShapeOutputService2TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type OutputService3ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService3ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a OutputService3ProtocolTest client from just a session.
+// svc := outputservice3protocoltest.New(mySession)
+//
+// // Create a OutputService3ProtocolTest client with additional configuration
+// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest {
+ c := p.ClientConfig("outputservice3protocoltest", cfgs...)
+ return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest {
+ svc := &OutputService3ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice3protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(ec2query.Build)
+ svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService3TestCaseOperation1 = "OperationName"
+
+// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation.
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService3TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) {
+ req, out := c.OutputService3TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService3TestShapeOutputService3TestCaseOperation1Input struct {
+ metadataOutputService3TestShapeOutputService3TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService3TestShapeOutputService3TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService3TestShapeOutputService3TestCaseOperation1Output struct {
+ ListMember []*string `type:"list"`
+
+ metadataOutputService3TestShapeOutputService3TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService3TestShapeOutputService3TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type OutputService4ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService4ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a OutputService4ProtocolTest client from just a session.
+// svc := outputservice4protocoltest.New(mySession)
+//
+// // Create a OutputService4ProtocolTest client with additional configuration
+// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest {
+ c := p.ClientConfig("outputservice4protocoltest", cfgs...)
+ return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest {
+ svc := &OutputService4ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice4protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(ec2query.Build)
+ svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService4TestCaseOperation1 = "OperationName"
+
+// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation.
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputService4TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService4TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService4TestShapeOutputService4TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputService4TestCaseOperation1Output, error) {
+ req, out := c.OutputService4TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService4TestShapeOutputService4TestCaseOperation1Input struct {
+ metadataOutputService4TestShapeOutputService4TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService4TestShapeOutputService4TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService4TestShapeOutputService4TestCaseOperation1Output struct {
+ ListMember []*string `locationNameList:"item" type:"list"`
+
+ metadataOutputService4TestShapeOutputService4TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService4TestShapeOutputService4TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type OutputService5ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService5ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService5ProtocolTest client from just a session.
+// svc := outputservice5protocoltest.New(mySession)
+//
+// // Create an OutputService5ProtocolTest client with additional configuration
+// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest {
+ c := p.ClientConfig("outputservice5protocoltest", cfgs...)
+ return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest {
+ svc := &OutputService5ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice5protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(ec2query.Build)
+ svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService5ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService5TestCaseOperation1 = "OperationName"
+
+// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation.
+func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService5TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) {
+ req, out := c.OutputService5TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService5TestShapeOutputService5TestCaseOperation1Input struct {
+ metadataOutputService5TestShapeOutputService5TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService5TestShapeOutputService5TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService5TestShapeOutputService5TestCaseOperation1Output struct {
+ ListMember []*string `type:"list" flattened:"true"`
+
+ metadataOutputService5TestShapeOutputService5TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService5TestShapeOutputService5TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type OutputService6ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService6ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService6ProtocolTest client from just a session.
+// svc := outputservice6protocoltest.New(mySession)
+//
+// // Create an OutputService6ProtocolTest client with additional configuration
+// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest {
+ c := p.ClientConfig("outputservice6protocoltest", cfgs...)
+ return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest {
+ svc := &OutputService6ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice6protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(ec2query.Build)
+ svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService6ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService6TestCaseOperation1 = "OperationName"
+
+// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation.
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService6TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) {
+ req, out := c.OutputService6TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService6TestShapeOutputService6TestCaseOperation1Input struct {
+ metadataOutputService6TestShapeOutputService6TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService6TestShapeOutputService6TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService6TestShapeOutputService6TestCaseOperation1Output struct {
+ Map map[string]*OutputService6TestShapeStructureType `type:"map"`
+
+ metadataOutputService6TestShapeOutputService6TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService6TestShapeOutputService6TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService6TestShapeStructureType struct {
+ Foo *string `locationName:"foo" type:"string"`
+
+ metadataOutputService6TestShapeStructureType `json:"-" xml:"-"`
+}
+
+type metadataOutputService6TestShapeStructureType struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type OutputService7ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService7ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService7ProtocolTest client from just a session.
+// svc := outputservice7protocoltest.New(mySession)
+//
+// // Create an OutputService7ProtocolTest client with additional configuration
+// svc := outputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService7ProtocolTest {
+ c := p.ClientConfig("outputservice7protocoltest", cfgs...)
+ return newOutputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService7ProtocolTest {
+ svc := &OutputService7ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice7protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(ec2query.Build)
+ svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService7ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService7TestCaseOperation1 = "OperationName"
+
+// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation.
+func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *request.Request, output *OutputService7TestShapeOutputService7TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService7TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService7TestShapeOutputService7TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputService7TestCaseOperation1Output, error) {
+ req, out := c.OutputService7TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService7TestShapeOutputService7TestCaseOperation1Input struct {
+ metadataOutputService7TestShapeOutputService7TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService7TestShapeOutputService7TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService7TestShapeOutputService7TestCaseOperation1Output struct {
+ Map map[string]*string `type:"map" flattened:"true"`
+
+ metadataOutputService7TestShapeOutputService7TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService7TestShapeOutputService7TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type OutputService8ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService8ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService8ProtocolTest client from just a session.
+// svc := outputservice8protocoltest.New(mySession)
+//
+// // Create an OutputService8ProtocolTest client with additional configuration
+// svc := outputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService8ProtocolTest {
+ c := p.ClientConfig("outputservice8protocoltest", cfgs...)
+ return newOutputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService8ProtocolTest {
+ svc := &OutputService8ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice8protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(ec2query.Build)
+ svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService8ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService8TestCaseOperation1 = "OperationName"
+
+// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation.
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *request.Request, output *OutputService8TestShapeOutputService8TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService8TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService8TestShapeOutputService8TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputService8TestCaseOperation1Output, error) {
+ req, out := c.OutputService8TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService8TestShapeOutputService8TestCaseOperation1Input struct {
+ metadataOutputService8TestShapeOutputService8TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService8TestShapeOutputService8TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService8TestShapeOutputService8TestCaseOperation1Output struct {
+ Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map" flattened:"true"`
+
+ metadataOutputService8TestShapeOutputService8TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService8TestShapeOutputService8TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type OutputService9ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService9ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService9ProtocolTest client from just a session.
+// svc := outputservice9protocoltest.New(mySession)
+//
+// // Create an OutputService9ProtocolTest client with additional configuration
+// svc := outputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService9ProtocolTest {
+ c := p.ClientConfig("outputservice9protocoltest", cfgs...)
+ return newOutputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService9ProtocolTest {
+ svc := &OutputService9ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice9protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(ec2query.Build)
+ svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService9ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService9TestCaseOperation1 = "OperationName"
+
+// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation.
+func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *request.Request, output *OutputService9TestShapeOutputService9TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService9TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService9TestShapeOutputService9TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputService9TestCaseOperation1Output, error) {
+ req, out := c.OutputService9TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService9TestShapeOutputService9TestCaseOperation1Input struct {
+ metadataOutputService9TestShapeOutputService9TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService9TestShapeOutputService9TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService9TestShapeOutputService9TestCaseOperation1Output struct {
+ Foo *string `type:"string"`
+
+ metadataOutputService9TestShapeOutputService9TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService9TestShapeOutputService9TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//
+// Tests begin here
+//
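+//
+// Each test below constructs its protocol-test client against a stub endpoint,
+// injects a canned *http.Response as the request's HTTPResponse, runs the
+// ec2query unmarshal handlers directly, and asserts on the decoded output fields.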
+
+func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("myname123falsetrue1.21.3200arequest-id"))
+ req, out := svc.OutputService1TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ ec2query.UnmarshalMeta(req)
+ ec2query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "a", *out.Char)
+ assert.Equal(t, 1.3, *out.Double)
+ assert.Equal(t, false, *out.FalseBool)
+ assert.Equal(t, 1.2, *out.Float)
+ assert.Equal(t, int64(200), *out.Long)
+ assert.Equal(t, int64(123), *out.Num)
+ assert.Equal(t, "myname", *out.Str)
+ assert.Equal(t, true, *out.TrueBool)
+
+}
+
+func TestOutputService2ProtocolTestBlobCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("dmFsdWU=requestid"))
+ req, out := svc.OutputService2TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ ec2query.UnmarshalMeta(req)
+ ec2query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "value", string(out.Blob))
+
+}
+
+func TestOutputService3ProtocolTestListsCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("abc123requestid"))
+ req, out := svc.OutputService3TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ ec2query.UnmarshalMeta(req)
+ ec2query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.ListMember[0])
+ assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService4ProtocolTestListWithCustomMemberNameCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("- abc\n- 123\nrequestid"))
+ req, out := svc.OutputService4TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ ec2query.UnmarshalMeta(req)
+ ec2query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.ListMember[0])
+ assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService5ProtocolTestFlattenedListCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("abc123requestid"))
+ req, out := svc.OutputService5TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ ec2query.UnmarshalMeta(req)
+ ec2query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.ListMember[0])
+ assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService6ProtocolTestNormalMapCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("requestid"))
+ req, out := svc.OutputService6TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ ec2query.UnmarshalMeta(req)
+ ec2query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "bam", *out.Map["baz"].Foo)
+ assert.Equal(t, "bar", *out.Map["qux"].Foo)
+
+}
+
+func TestOutputService7ProtocolTestFlattenedMapCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("requestid"))
+ req, out := svc.OutputService7TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ ec2query.UnmarshalMeta(req)
+ ec2query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "bam", *out.Map["baz"])
+ assert.Equal(t, "bar", *out.Map["qux"])
+
+}
+
+func TestOutputService8ProtocolTestNamedMapCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("requestid"))
+ req, out := svc.OutputService8TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ ec2query.UnmarshalMeta(req)
+ ec2query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "bam", *out.Map["baz"])
+ assert.Equal(t, "bar", *out.Map["qux"])
+
+}
+
+func TestOutputService9ProtocolTestEmptyStringCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("requestid"))
+ req, out := svc.OutputService9TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ ec2query.UnmarshalMeta(req)
+ ec2query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "", *out.Foo)
+
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go
similarity index 78%
rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build.go
rename to Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go
index 83ed8758e..2d78c35c2 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -1,21 +1,21 @@
// Package query provides serialisation of AWS query requests and responses.
package query
-//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/query.json build_test.go
+//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
import (
"net/url"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/internal/protocol/query/queryutil"
+ "github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
)
// Build builds a request for an AWS Query service.
func Build(r *request.Request) {
body := url.Values{
"Action": {r.Operation.Name},
- "Version": {r.Service.APIVersion},
+ "Version": {r.ClientInfo.APIVersion},
}
if err := queryutil.Parse(body, r.Params, false); err != nil {
r.Error = awserr.New("SerializationError", "failed encoding Query request", err)
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go
new file mode 100644
index 000000000..535821b2f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go
@@ -0,0 +1,2139 @@
+package query_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "encoding/xml"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/awstesting"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+ "github.com/aws/aws-sdk-go/private/signer/v4"
+ "github.com/aws/aws-sdk-go/private/util"
+ "github.com/stretchr/testify/assert"
+)
+
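+// The blank identifiers below reference every import so this generated file
+// compiles even when a particular test fixture does not use a given package.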
+var _ bytes.Buffer // always import bytes
+var _ http.Request
+var _ json.Marshaler
+var _ time.Time
+var _ xmlutil.XMLNode
+var _ xml.Attr
+var _ = awstesting.GenerateAssertions
+var _ = ioutil.Discard
+var _ = util.Trim("")
+var _ = url.Values{}
+var _ = io.EOF
+var _ = aws.String
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService1ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService1ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService1ProtocolTest client from just a session.
+// svc := inputservice1protocoltest.New(mySession)
+//
+// // Create an InputService1ProtocolTest client with additional configuration
+// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest {
+ c := p.ClientConfig("inputservice1protocoltest", cfgs...)
+ return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest {
+ svc := &InputService1ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice1protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService1ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService1TestCaseOperation1 = "OperationName"
+
+// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation.
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService1TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService1TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService1TestShapeInputService1TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) {
+ req, out := c.InputService1TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService1TestCaseOperation2 = "OperationName"
+
+// InputService1TestCaseOperation2Request generates a request for the InputService1TestCaseOperation2 operation.
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation2Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation2Output) {
+ op := &request.Operation{
+ Name: opInputService1TestCaseOperation2,
+ }
+
+ if input == nil {
+ input = &InputService1TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService1TestShapeInputService1TestCaseOperation2Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation2(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation2Output, error) {
+ req, out := c.InputService1TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService1TestCaseOperation3 = "OperationName"
+
+// InputService1TestCaseOperation3Request generates a request for the InputService1TestCaseOperation3 operation.
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation3Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation3Output) {
+ op := &request.Operation{
+ Name: opInputService1TestCaseOperation3,
+ }
+
+ if input == nil {
+ input = &InputService1TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService1TestShapeInputService1TestCaseOperation3Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation3(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation3Output, error) {
+ req, out := c.InputService1TestCaseOperation3Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService1TestShapeInputService1TestCaseOperation1Output struct {
+ metadataInputService1TestShapeInputService1TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService1TestShapeInputService1TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService1TestShapeInputService1TestCaseOperation2Output struct {
+ metadataInputService1TestShapeInputService1TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+type metadataInputService1TestShapeInputService1TestCaseOperation2Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService1TestShapeInputService1TestCaseOperation3Output struct {
+ metadataInputService1TestShapeInputService1TestCaseOperation3Output `json:"-" xml:"-"`
+}
+
+type metadataInputService1TestShapeInputService1TestCaseOperation3Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService1TestShapeInputShape struct {
+ Bar *string `type:"string"`
+
+ Baz *bool `type:"boolean"`
+
+ Foo *string `type:"string"`
+
+ metadataInputService1TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService1TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService2ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService2ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService2ProtocolTest client from just a session.
+// svc := inputservice2protocoltest.New(mySession)
+//
+// // Create an InputService2ProtocolTest client with additional configuration
+// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest {
+ c := p.ClientConfig("inputservice2protocoltest", cfgs...)
+ return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest {
+ svc := &InputService2ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice2protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService2ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService2TestCaseOperation1 = "OperationName"
+
+// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation.
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService2TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService2TestShapeInputService2TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService2TestShapeInputService2TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) {
+ req, out := c.InputService2TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService2TestShapeInputService2TestCaseOperation1Input struct {
+ StructArg *InputService2TestShapeStructType `type:"structure"`
+
+ metadataInputService2TestShapeInputService2TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService2TestShapeInputService2TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService2TestShapeInputService2TestCaseOperation1Output struct {
+ metadataInputService2TestShapeInputService2TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService2TestShapeInputService2TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService2TestShapeStructType struct {
+ ScalarArg *string `type:"string"`
+
+ metadataInputService2TestShapeStructType `json:"-" xml:"-"`
+}
+
+type metadataInputService2TestShapeStructType struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService3ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService3ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService3ProtocolTest client from just a session.
+// svc := inputservice3protocoltest.New(mySession)
+//
+// // Create an InputService3ProtocolTest client with additional configuration
+// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest {
+ c := p.ClientConfig("inputservice3protocoltest", cfgs...)
+ return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest {
+ svc := &InputService3ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice3protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService3ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService3TestCaseOperation1 = "OperationName"
+
+// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation.
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService3TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService3TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService3TestShapeInputService3TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) {
+ req, out := c.InputService3TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService3TestCaseOperation2 = "OperationName"
+
+// InputService3TestCaseOperation2Request generates a request for the InputService3TestCaseOperation2 operation.
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation2Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation2Output) {
+ op := &request.Operation{
+ Name: opInputService3TestCaseOperation2,
+ }
+
+ if input == nil {
+ input = &InputService3TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService3TestShapeInputService3TestCaseOperation2Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation2(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation2Output, error) {
+ req, out := c.InputService3TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService3TestShapeInputService3TestCaseOperation1Output struct {
+ metadataInputService3TestShapeInputService3TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService3TestShapeInputService3TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService3TestShapeInputService3TestCaseOperation2Output struct {
+ metadataInputService3TestShapeInputService3TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+type metadataInputService3TestShapeInputService3TestCaseOperation2Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService3TestShapeInputShape struct {
+ ListArg []*string `type:"list"`
+
+ metadataInputService3TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService3TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService4ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService4ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService4ProtocolTest client from just a session.
+// svc := inputservice4protocoltest.New(mySession)
+//
+// // Create an InputService4ProtocolTest client with additional configuration
+// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest {
+ c := p.ClientConfig("inputservice4protocoltest", cfgs...)
+ return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest {
+ svc := &InputService4ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice4protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService4ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService4TestCaseOperation1 = "OperationName"
+
+// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation.
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputShape) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService4TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService4TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService4TestShapeInputService4TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) {
+ req, out := c.InputService4TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService4TestCaseOperation2 = "OperationName"
+
+// InputService4TestCaseOperation2Request generates a request for the InputService4TestCaseOperation2 operation.
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation2Request(input *InputService4TestShapeInputShape) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation2Output) {
+ op := &request.Operation{
+ Name: opInputService4TestCaseOperation2,
+ }
+
+ if input == nil {
+ input = &InputService4TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService4TestShapeInputService4TestCaseOperation2Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation2(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation2Output, error) {
+ req, out := c.InputService4TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService4TestShapeInputService4TestCaseOperation1Output struct {
+ metadataInputService4TestShapeInputService4TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService4TestShapeInputService4TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService4TestShapeInputService4TestCaseOperation2Output struct {
+ metadataInputService4TestShapeInputService4TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+type metadataInputService4TestShapeInputService4TestCaseOperation2Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService4TestShapeInputShape struct {
+ ListArg []*string `type:"list" flattened:"true"`
+
+ NamedListArg []*string `locationNameList:"Foo" type:"list" flattened:"true"`
+
+ ScalarArg *string `type:"string"`
+
+ metadataInputService4TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService4TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService5ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService5ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService5ProtocolTest client from just a session.
+// svc := inputservice5protocoltest.New(mySession)
+//
+// // Create an InputService5ProtocolTest client with additional configuration
+// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest {
+ c := p.ClientConfig("inputservice5protocoltest", cfgs...)
+ return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest {
+ svc := &InputService5ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice5protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService5ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService5TestCaseOperation1 = "OperationName"
+
+// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation.
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputService5TestCaseOperation1Input) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService5TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService5TestShapeInputService5TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService5TestShapeInputService5TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputService5TestCaseOperation1Input) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) {
+ req, out := c.InputService5TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService5TestShapeInputService5TestCaseOperation1Input struct {
+ MapArg map[string]*string `type:"map" flattened:"true"`
+
+ metadataInputService5TestShapeInputService5TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService5TestShapeInputService5TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService5TestShapeInputService5TestCaseOperation1Output struct {
+ metadataInputService5TestShapeInputService5TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService5TestShapeInputService5TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService6ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService6ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService6ProtocolTest client from just a session.
+// svc := inputservice6protocoltest.New(mySession)
+//
+// // Create an InputService6ProtocolTest client with additional configuration
+// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest {
+ c := p.ClientConfig("inputservice6protocoltest", cfgs...)
+ return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest {
+ svc := &InputService6ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice6protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService6ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService6TestCaseOperation1 = "OperationName"
+
+// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation.
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService6TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService6TestShapeInputService6TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService6TestShapeInputService6TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) {
+ req, out := c.InputService6TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService6TestShapeInputService6TestCaseOperation1Input struct {
+ ListArg []*string `locationNameList:"item" type:"list"`
+
+ metadataInputService6TestShapeInputService6TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService6TestShapeInputService6TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService6TestShapeInputService6TestCaseOperation1Output struct {
+ metadataInputService6TestShapeInputService6TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService6TestShapeInputService6TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService7ProtocolTest struct {
+ *client.Client
+}
+
+// NewInputService7ProtocolTest creates a new instance of the InputService7ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService7ProtocolTest client from just a session.
+// svc := inputservice7protocoltest.New(mySession)
+//
+// // Create an InputService7ProtocolTest client with additional configuration
+// svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest {
+ c := p.ClientConfig("inputservice7protocoltest", cfgs...)
+ return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest {
+ svc := &InputService7ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice7protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService7ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService7TestCaseOperation1 = "OperationName"
+
+// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation.
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputService7TestCaseOperation1Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService7TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService7TestShapeInputService7TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService7TestShapeInputService7TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputService7TestCaseOperation1Input) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) {
+ req, out := c.InputService7TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService7TestShapeInputService7TestCaseOperation1Input struct {
+ ListArg []*string `locationNameList:"ListArgLocation" type:"list" flattened:"true"`
+
+ ScalarArg *string `type:"string"`
+
+ metadataInputService7TestShapeInputService7TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService7TestShapeInputService7TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService7TestShapeInputService7TestCaseOperation1Output struct {
+ metadataInputService7TestShapeInputService7TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService7TestShapeInputService7TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService8ProtocolTest struct {
+ *client.Client
+}
+
+// NewInputService8ProtocolTest creates a new instance of the InputService8ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService8ProtocolTest client from just a session.
+// svc := inputservice8protocoltest.New(mySession)
+//
+// // Create an InputService8ProtocolTest client with additional configuration
+// svc := inputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService8ProtocolTest {
+ c := p.ClientConfig("inputservice8protocoltest", cfgs...)
+ return newInputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService8ProtocolTest {
+ svc := &InputService8ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice8protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService8ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService8TestCaseOperation1 = "OperationName"
+
+// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation.
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputService8TestCaseOperation1Input) (req *request.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService8TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService8TestShapeInputService8TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService8TestShapeInputService8TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputService8TestCaseOperation1Input) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) {
+ req, out := c.InputService8TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService8TestShapeInputService8TestCaseOperation1Input struct {
+ MapArg map[string]*string `type:"map"`
+
+ metadataInputService8TestShapeInputService8TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService8TestShapeInputService8TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService8TestShapeInputService8TestCaseOperation1Output struct {
+ metadataInputService8TestShapeInputService8TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService8TestShapeInputService8TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService9ProtocolTest struct {
+ *client.Client
+}
+
+// NewInputService9ProtocolTest creates a new instance of the InputService9ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService9ProtocolTest client from just a session.
+// svc := inputservice9protocoltest.New(mySession)
+//
+// // Create an InputService9ProtocolTest client with additional configuration
+// svc := inputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService9ProtocolTest {
+ c := p.ClientConfig("inputservice9protocoltest", cfgs...)
+ return newInputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService9ProtocolTest {
+ svc := &InputService9ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice9protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService9ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService9TestCaseOperation1 = "OperationName"
+
+// InputService9TestCaseOperation1Request generates a request for the InputService9TestCaseOperation1 operation.
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputService9TestCaseOperation1Input) (req *request.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService9TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService9TestShapeInputService9TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService9TestShapeInputService9TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputService9TestCaseOperation1Input) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) {
+ req, out := c.InputService9TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService9TestShapeInputService9TestCaseOperation1Input struct {
+ MapArg map[string]*string `locationNameKey:"TheKey" locationNameValue:"TheValue" type:"map"`
+
+ metadataInputService9TestShapeInputService9TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService9TestShapeInputService9TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService9TestShapeInputService9TestCaseOperation1Output struct {
+ metadataInputService9TestShapeInputService9TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService9TestShapeInputService9TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService10ProtocolTest struct {
+ *client.Client
+}
+
+// NewInputService10ProtocolTest creates a new instance of the InputService10ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService10ProtocolTest client from just a session.
+// svc := inputservice10protocoltest.New(mySession)
+//
+// // Create an InputService10ProtocolTest client with additional configuration
+// svc := inputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService10ProtocolTest {
+ c := p.ClientConfig("inputservice10protocoltest", cfgs...)
+ return newInputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService10ProtocolTest {
+ svc := &InputService10ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice10protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService10ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService10TestCaseOperation1 = "OperationName"
+
+// InputService10TestCaseOperation1Request generates a request for the InputService10TestCaseOperation1 operation.
+func (c *InputService10ProtocolTest) InputService10TestCaseOperation1Request(input *InputService10TestShapeInputService10TestCaseOperation1Input) (req *request.Request, output *InputService10TestShapeInputService10TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService10TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService10TestShapeInputService10TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService10TestShapeInputService10TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService10ProtocolTest) InputService10TestCaseOperation1(input *InputService10TestShapeInputService10TestCaseOperation1Input) (*InputService10TestShapeInputService10TestCaseOperation1Output, error) {
+ req, out := c.InputService10TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService10TestShapeInputService10TestCaseOperation1Input struct {
+ BlobArg []byte `type:"blob"`
+
+ metadataInputService10TestShapeInputService10TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService10TestShapeInputService10TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService10TestShapeInputService10TestCaseOperation1Output struct {
+ metadataInputService10TestShapeInputService10TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService10TestShapeInputService10TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService11ProtocolTest struct {
+ *client.Client
+}
+
+// NewInputService11ProtocolTest creates a new instance of the InputService11ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService11ProtocolTest client from just a session.
+// svc := inputservice11protocoltest.New(mySession)
+//
+// // Create an InputService11ProtocolTest client with additional configuration
+// svc := inputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService11ProtocolTest {
+ c := p.ClientConfig("inputservice11protocoltest", cfgs...)
+ return newInputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService11ProtocolTest {
+ svc := &InputService11ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice11protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService11ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService11TestCaseOperation1 = "OperationName"
+
+// InputService11TestCaseOperation1Request generates a request for the InputService11TestCaseOperation1 operation.
+func (c *InputService11ProtocolTest) InputService11TestCaseOperation1Request(input *InputService11TestShapeInputService11TestCaseOperation1Input) (req *request.Request, output *InputService11TestShapeInputService11TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService11TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService11TestShapeInputService11TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService11TestShapeInputService11TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService11ProtocolTest) InputService11TestCaseOperation1(input *InputService11TestShapeInputService11TestCaseOperation1Input) (*InputService11TestShapeInputService11TestCaseOperation1Output, error) {
+ req, out := c.InputService11TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService11TestShapeInputService11TestCaseOperation1Input struct {
+ TimeArg *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ metadataInputService11TestShapeInputService11TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService11TestShapeInputService11TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService11TestShapeInputService11TestCaseOperation1Output struct {
+ metadataInputService11TestShapeInputService11TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService11TestShapeInputService11TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService12ProtocolTest struct {
+ *client.Client
+}
+
+// NewInputService12ProtocolTest creates a new instance of the InputService12ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService12ProtocolTest client from just a session.
+// svc := inputservice12protocoltest.New(mySession)
+//
+// // Create an InputService12ProtocolTest client with additional configuration
+// svc := inputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService12ProtocolTest {
+ c := p.ClientConfig("inputservice12protocoltest", cfgs...)
+ return newInputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService12ProtocolTest {
+ svc := &InputService12ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice12protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService12ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService12TestCaseOperation1 = "OperationName"
+
+// InputService12TestCaseOperation1Request generates a request for the InputService12TestCaseOperation1 operation.
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService12TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &InputService12TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService12TestShapeInputService12TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) {
+ req, out := c.InputService12TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService12TestCaseOperation2 = "OperationName"
+
+// InputService12TestCaseOperation2Request generates a request for the InputService12TestCaseOperation2 operation.
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation2Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation2Output) {
+ op := &request.Operation{
+ Name: opInputService12TestCaseOperation2,
+ }
+
+ if input == nil {
+ input = &InputService12TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService12TestShapeInputService12TestCaseOperation2Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation2(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation2Output, error) {
+ req, out := c.InputService12TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService12TestCaseOperation3 = "OperationName"
+
+// InputService12TestCaseOperation3Request generates a request for the InputService12TestCaseOperation3 operation.
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation3Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation3Output) {
+ op := &request.Operation{
+ Name: opInputService12TestCaseOperation3,
+ }
+
+ if input == nil {
+ input = &InputService12TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService12TestShapeInputService12TestCaseOperation3Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation3(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation3Output, error) {
+ req, out := c.InputService12TestCaseOperation3Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService12TestCaseOperation4 = "OperationName"
+
+// InputService12TestCaseOperation4Request generates a request for the InputService12TestCaseOperation4 operation.
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation4Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation4Output) {
+ op := &request.Operation{
+ Name: opInputService12TestCaseOperation4,
+ }
+
+ if input == nil {
+ input = &InputService12TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService12TestShapeInputService12TestCaseOperation4Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation4(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation4Output, error) {
+ req, out := c.InputService12TestCaseOperation4Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService12TestCaseOperation5 = "OperationName"
+
+// InputService12TestCaseOperation5Request generates a request for the InputService12TestCaseOperation5 operation.
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation5Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation5Output) {
+ op := &request.Operation{
+ Name: opInputService12TestCaseOperation5,
+ }
+
+ if input == nil {
+ input = &InputService12TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService12TestShapeInputService12TestCaseOperation5Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation5(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation5Output, error) {
+ req, out := c.InputService12TestCaseOperation5Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService12TestCaseOperation6 = "OperationName"
+
+// InputService12TestCaseOperation6Request generates a request for the InputService12TestCaseOperation6 operation.
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation6Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation6Output) {
+ op := &request.Operation{
+ Name: opInputService12TestCaseOperation6,
+ }
+
+ if input == nil {
+ input = &InputService12TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService12TestShapeInputService12TestCaseOperation6Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation6(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation6Output, error) {
+ req, out := c.InputService12TestCaseOperation6Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService12TestShapeInputService12TestCaseOperation1Output struct {
+ metadataInputService12TestShapeInputService12TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService12TestShapeInputService12TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService12TestShapeInputService12TestCaseOperation2Output struct {
+ metadataInputService12TestShapeInputService12TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+type metadataInputService12TestShapeInputService12TestCaseOperation2Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService12TestShapeInputService12TestCaseOperation3Output struct {
+ metadataInputService12TestShapeInputService12TestCaseOperation3Output `json:"-" xml:"-"`
+}
+
+type metadataInputService12TestShapeInputService12TestCaseOperation3Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService12TestShapeInputService12TestCaseOperation4Output struct {
+ metadataInputService12TestShapeInputService12TestCaseOperation4Output `json:"-" xml:"-"`
+}
+
+type metadataInputService12TestShapeInputService12TestCaseOperation4Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService12TestShapeInputService12TestCaseOperation5Output struct {
+ metadataInputService12TestShapeInputService12TestCaseOperation5Output `json:"-" xml:"-"`
+}
+
+type metadataInputService12TestShapeInputService12TestCaseOperation5Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService12TestShapeInputService12TestCaseOperation6Output struct {
+ metadataInputService12TestShapeInputService12TestCaseOperation6Output `json:"-" xml:"-"`
+}
+
+type metadataInputService12TestShapeInputService12TestCaseOperation6Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService12TestShapeInputShape struct {
+ RecursiveStruct *InputService12TestShapeRecursiveStructType `type:"structure"`
+
+ metadataInputService12TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService12TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService12TestShapeRecursiveStructType struct {
+ NoRecurse *string `type:"string"`
+
+ RecursiveList []*InputService12TestShapeRecursiveStructType `type:"list"`
+
+ RecursiveMap map[string]*InputService12TestShapeRecursiveStructType `type:"map"`
+
+ RecursiveStruct *InputService12TestShapeRecursiveStructType `type:"structure"`
+
+ metadataInputService12TestShapeRecursiveStructType `json:"-" xml:"-"`
+}
+
+type metadataInputService12TestShapeRecursiveStructType struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//
+// Tests begin here
+//
+
+func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService1TestShapeInputShape{
+ Bar: aws.String("val2"),
+ Foo: aws.String("val1"),
+ }
+ req, _ := svc.InputService1TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService1ProtocolTestScalarMembersCase2(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService1TestShapeInputShape{
+ Baz: aws.Bool(true),
+ }
+ req, _ := svc.InputService1TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&Baz=true&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService1ProtocolTestScalarMembersCase3(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService1TestShapeInputShape{
+ Baz: aws.Bool(false),
+ }
+ req, _ := svc.InputService1TestCaseOperation3Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&Baz=false&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService2ProtocolTestNestedStructureMembersCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService2TestShapeInputService2TestCaseOperation1Input{
+ StructArg: &InputService2TestShapeStructType{
+ ScalarArg: aws.String("foo"),
+ },
+ }
+ req, _ := svc.InputService2TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&StructArg.ScalarArg=foo&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService3ProtocolTestListTypesCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService3TestShapeInputShape{
+ ListArg: []*string{
+ aws.String("foo"),
+ aws.String("bar"),
+ aws.String("baz"),
+ },
+ }
+ req, _ := svc.InputService3TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&ListArg.member.1=foo&ListArg.member.2=bar&ListArg.member.3=baz&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService3ProtocolTestListTypesCase2(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService3TestShapeInputShape{
+ ListArg: []*string{},
+ }
+ req, _ := svc.InputService3TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&ListArg=&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService4ProtocolTestFlattenedListCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService4TestShapeInputShape{
+ ListArg: []*string{
+ aws.String("a"),
+ aws.String("b"),
+ aws.String("c"),
+ },
+ ScalarArg: aws.String("foo"),
+ }
+ req, _ := svc.InputService4TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&ListArg.1=a&ListArg.2=b&ListArg.3=c&ScalarArg=foo&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService4ProtocolTestFlattenedListCase2(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService4TestShapeInputShape{
+ NamedListArg: []*string{
+ aws.String("a"),
+ },
+ }
+ req, _ := svc.InputService4TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&Foo.1=a&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService5ProtocolTestSerializeFlattenedMapTypeCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService5TestShapeInputService5TestCaseOperation1Input{
+ MapArg: map[string]*string{
+ "key1": aws.String("val1"),
+ "key2": aws.String("val2"),
+ },
+ }
+ req, _ := svc.InputService5TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&MapArg.1.key=key1&MapArg.1.value=val1&MapArg.2.key=key2&MapArg.2.value=val2&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService6ProtocolTestNonFlattenedListWithLocationNameCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService6TestShapeInputService6TestCaseOperation1Input{
+ ListArg: []*string{
+ aws.String("a"),
+ aws.String("b"),
+ aws.String("c"),
+ },
+ }
+ req, _ := svc.InputService6TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&ListArg.item.1=a&ListArg.item.2=b&ListArg.item.3=c&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService7ProtocolTestFlattenedListWithLocationNameCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService7TestShapeInputService7TestCaseOperation1Input{
+ ListArg: []*string{
+ aws.String("a"),
+ aws.String("b"),
+ aws.String("c"),
+ },
+ ScalarArg: aws.String("foo"),
+ }
+ req, _ := svc.InputService7TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&ListArgLocation.1=a&ListArgLocation.2=b&ListArgLocation.3=c&ScalarArg=foo&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService8ProtocolTestSerializeMapTypeCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService8TestShapeInputService8TestCaseOperation1Input{
+ MapArg: map[string]*string{
+ "key1": aws.String("val1"),
+ "key2": aws.String("val2"),
+ },
+ }
+ req, _ := svc.InputService8TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&MapArg.entry.1.key=key1&MapArg.entry.1.value=val1&MapArg.entry.2.key=key2&MapArg.entry.2.value=val2&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService9ProtocolTestSerializeMapTypeWithLocationNameCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService9TestShapeInputService9TestCaseOperation1Input{
+ MapArg: map[string]*string{
+ "key1": aws.String("val1"),
+ "key2": aws.String("val2"),
+ },
+ }
+ req, _ := svc.InputService9TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&MapArg.entry.1.TheKey=key1&MapArg.entry.1.TheValue=val1&MapArg.entry.2.TheKey=key2&MapArg.entry.2.TheValue=val2&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService10ProtocolTestBase64EncodedBlobsCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService10TestShapeInputService10TestCaseOperation1Input{
+ BlobArg: []byte("foo"),
+ }
+ req, _ := svc.InputService10TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService11ProtocolTestTimestampValuesCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService11TestShapeInputService11TestCaseOperation1Input{
+ TimeArg: aws.Time(time.Unix(1422172800, 0)),
+ }
+ req, _ := svc.InputService11TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
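The two cases above reduce to plain standard-library conversions: a blob member is base64-encoded, and a timestamp member is rendered as ISO 8601 (RFC 3339) in UTC. A standalone check of the exact values asserted above, illustrative only and not the SDK's queryutil marshaler:

package main

import (
	"encoding/base64"
	"fmt"
	"time"
)

func main() {
	// BlobArg []byte("foo") is base64-encoded before being written to the query string.
	fmt.Println(base64.StdEncoding.EncodeToString([]byte("foo"))) // Zm9v

	// TimeArg time.Unix(1422172800, 0) is rendered as ISO 8601 in UTC.
	fmt.Println(time.Unix(1422172800, 0).UTC().Format(time.RFC3339)) // 2015-01-25T08:00:00Z
}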
+func TestInputService12ProtocolTestRecursiveShapesCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService12TestShapeInputShape{
+ RecursiveStruct: &InputService12TestShapeRecursiveStructType{
+ NoRecurse: aws.String("foo"),
+ },
+ }
+ req, _ := svc.InputService12TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.NoRecurse=foo&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService12ProtocolTestRecursiveShapesCase2(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService12TestShapeInputShape{
+ RecursiveStruct: &InputService12TestShapeRecursiveStructType{
+ RecursiveStruct: &InputService12TestShapeRecursiveStructType{
+ NoRecurse: aws.String("foo"),
+ },
+ },
+ }
+ req, _ := svc.InputService12TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService12ProtocolTestRecursiveShapesCase3(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService12TestShapeInputShape{
+ RecursiveStruct: &InputService12TestShapeRecursiveStructType{
+ RecursiveStruct: &InputService12TestShapeRecursiveStructType{
+ RecursiveStruct: &InputService12TestShapeRecursiveStructType{
+ RecursiveStruct: &InputService12TestShapeRecursiveStructType{
+ NoRecurse: aws.String("foo"),
+ },
+ },
+ },
+ },
+ }
+ req, _ := svc.InputService12TestCaseOperation3Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveStruct.RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService12ProtocolTestRecursiveShapesCase4(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService12TestShapeInputShape{
+ RecursiveStruct: &InputService12TestShapeRecursiveStructType{
+ RecursiveList: []*InputService12TestShapeRecursiveStructType{
+ {
+ NoRecurse: aws.String("foo"),
+ },
+ {
+ NoRecurse: aws.String("bar"),
+ },
+ },
+ },
+ }
+ req, _ := svc.InputService12TestCaseOperation4Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.NoRecurse=bar&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService12ProtocolTestRecursiveShapesCase5(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService12TestShapeInputShape{
+ RecursiveStruct: &InputService12TestShapeRecursiveStructType{
+ RecursiveList: []*InputService12TestShapeRecursiveStructType{
+ {
+ NoRecurse: aws.String("foo"),
+ },
+ {
+ RecursiveStruct: &InputService12TestShapeRecursiveStructType{
+ NoRecurse: aws.String("bar"),
+ },
+ },
+ },
+ },
+ }
+ req, _ := svc.InputService12TestCaseOperation5Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.RecursiveStruct.NoRecurse=bar&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService12ProtocolTestRecursiveShapesCase6(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService12TestShapeInputShape{
+ RecursiveStruct: &InputService12TestShapeRecursiveStructType{
+ RecursiveMap: map[string]*InputService12TestShapeRecursiveStructType{
+ "bar": {
+ NoRecurse: aws.String("bar"),
+ },
+ "foo": {
+ NoRecurse: aws.String("foo"),
+ },
+ },
+ },
+ }
+ req, _ := svc.InputService12TestCaseOperation6Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ query.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body, _ := ioutil.ReadAll(r.Body)
+ awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveMap.entry.1.key=foo&RecursiveStruct.RecursiveMap.entry.1.value.NoRecurse=foo&RecursiveStruct.RecursiveMap.entry.2.key=bar&RecursiveStruct.RecursiveMap.entry.2.value.NoRecurse=bar&Version=2014-01-01`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
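The build cases in this generated file exercise a handful of query-protocol encodings: non-flattened lists become ListArg.member.N, flattened lists and flattened maps drop the member/entry segment, non-flattened maps expand to MapArg.entry.N.key / MapArg.entry.N.value, and locationName tags rename those segments. A small self-contained sketch of those rules, assuming plain url.Values rather than the SDK's private/protocol/query/queryutil reflection over the struct tags shown above:

package main

import (
	"fmt"
	"net/url"
	"sort"
)

// encodeList writes a string list using the query protocol's list rules:
// non-flattened lists insert an element name ("member" unless overridden by
// locationNameList); flattened lists drop that segment entirely.
func encodeList(v url.Values, prefix, elem string, flattened bool, items []string) {
	for i, item := range items {
		if flattened {
			v.Set(fmt.Sprintf("%s.%d", prefix, i+1), item)
		} else {
			v.Set(fmt.Sprintf("%s.%s.%d", prefix, elem, i+1), item)
		}
	}
}

// encodeMap writes a map as key/value pairs; non-flattened maps nest them
// under "entry", flattened maps index the prefix directly. Keys are sorted
// only to make this sketch deterministic.
func encodeMap(v url.Values, prefix string, flattened bool, m map[string]string) {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for i, k := range keys {
		entry := fmt.Sprintf("%s.entry.%d", prefix, i+1)
		if flattened {
			entry = fmt.Sprintf("%s.%d", prefix, i+1)
		}
		v.Set(entry+".key", k)
		v.Set(entry+".value", m[k])
	}
}

func main() {
	v := url.Values{"Action": {"OperationName"}, "Version": {"2014-01-01"}}
	encodeList(v, "ListArg", "member", false, []string{"foo", "bar", "baz"}) // ListArg.member.1=foo ...
	encodeMap(v, "MapArg", true, map[string]string{"key1": "val1", "key2": "val2"})
	fmt.Println(v.Encode())
}

Printing v.Encode() yields the same parameter names the AssertQuery calls above compare against.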
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go
rename to Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
similarity index 78%
rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal.go
rename to Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
index a374f88b1..1fcab1d1a 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
@@ -1,13 +1,13 @@
package query
-//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/query.json unmarshal_test.go
+//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
import (
"encoding/xml"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)
// Unmarshal unmarshals a response for an AWS Query service.
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_error.go
rename to Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go
new file mode 100644
index 000000000..fe2a58e36
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go
@@ -0,0 +1,1878 @@
+package query_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "encoding/xml"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/awstesting"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+ "github.com/aws/aws-sdk-go/private/signer/v4"
+ "github.com/aws/aws-sdk-go/private/util"
+ "github.com/stretchr/testify/assert"
+)
+
+var _ bytes.Buffer // always import bytes
+var _ http.Request
+var _ json.Marshaler
+var _ time.Time
+var _ xmlutil.XMLNode
+var _ xml.Attr
+var _ = awstesting.GenerateAssertions
+var _ = ioutil.Discard
+var _ = util.Trim("")
+var _ = url.Values{}
+var _ = io.EOF
+var _ = aws.String
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type OutputService1ProtocolTest struct {
+ *client.Client
+}
+
+// NewOutputService1ProtocolTest creates a new instance of the OutputService1ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService1ProtocolTest client from just a session.
+// svc := outputservice1protocoltest.New(mySession)
+//
+// // Create an OutputService1ProtocolTest client with additional configuration
+// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest {
+ c := p.ClientConfig("outputservice1protocoltest", cfgs...)
+ return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest {
+ svc := &OutputService1ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice1protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService1ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService1TestCaseOperation1 = "OperationName"
+
+// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation.
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputService1TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService1TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService1TestShapeOutputService1TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputService1TestCaseOperation1Output, error) {
+ req, out := c.OutputService1TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService1TestShapeOutputService1TestCaseOperation1Input struct {
+ metadataOutputService1TestShapeOutputService1TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService1TestShapeOutputService1TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService1TestShapeOutputService1TestCaseOperation1Output struct {
+ Char *string `type:"character"`
+
+ Double *float64 `type:"double"`
+
+ FalseBool *bool `type:"boolean"`
+
+ Float *float64 `type:"float"`
+
+ Long *int64 `type:"long"`
+
+ Num *int64 `locationName:"FooNum" type:"integer"`
+
+ Str *string `type:"string"`
+
+ Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ TrueBool *bool `type:"boolean"`
+
+ metadataOutputService1TestShapeOutputService1TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService1TestShapeOutputService1TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type OutputService2ProtocolTest struct {
+ *client.Client
+}
+
+// NewOutputService2ProtocolTest creates a new instance of the OutputService2ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a OutputService2ProtocolTest client from just a session.
+// svc := outputservice2protocoltest.New(mySession)
+//
+// // Create a OutputService2ProtocolTest client with additional configuration
+// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest {
+ c := p.ClientConfig("outputservice2protocoltest", cfgs...)
+ return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest {
+ svc := &OutputService2ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice2protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService2ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService2TestCaseOperation1 = "OperationName"
+
+// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation.
+func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService2TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
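+// OutputService2TestCaseOperation1 sends the OutputService2TestCaseOperation1 request
+// and returns the unmarshaled output along with any error from sending the request.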
+func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) {
+ req, out := c.OutputService2TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService2TestShapeOutputService2TestCaseOperation1Input struct {
+ metadataOutputService2TestShapeOutputService2TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService2TestShapeOutputService2TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService2TestShapeOutputService2TestCaseOperation1Output struct {
+ Num *int64 `type:"integer"`
+
+ Str *string `type:"string"`
+
+ metadataOutputService2TestShapeOutputService2TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService2TestShapeOutputService2TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to use concurrently.
+// It is not safe to mutate any of the client's properties, though.
+type OutputService3ProtocolTest struct {
+ *client.Client
+}
+
+// NewOutputService3ProtocolTest creates a new instance of the OutputService3ProtocolTest
+// client with a session. If additional configuration is needed for the client instance,
+// use the optional aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService3ProtocolTest client from just a session.
+// svc := NewOutputService3ProtocolTest(mySession)
+//
+// // Create an OutputService3ProtocolTest client with additional configuration
+// svc := NewOutputService3ProtocolTest(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest {
+ c := p.ClientConfig("outputservice3protocoltest", cfgs...)
+ return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest {
+ svc := &OutputService3ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice3protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService3ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService3TestCaseOperation1 = "OperationName"
+
+// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation.
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService3TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
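+// OutputService3TestCaseOperation1 sends the OutputService3TestCaseOperation1 request
+// and returns the unmarshaled output along with any error from sending the request.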
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) {
+ req, out := c.OutputService3TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService3TestShapeOutputService3TestCaseOperation1Input struct {
+ metadataOutputService3TestShapeOutputService3TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService3TestShapeOutputService3TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService3TestShapeOutputService3TestCaseOperation1Output struct {
+ Blob []byte `type:"blob"`
+
+ metadataOutputService3TestShapeOutputService3TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService3TestShapeOutputService3TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to use concurrently.
+// It is not safe to mutate any of the client's properties, though.
+type OutputService4ProtocolTest struct {
+ *client.Client
+}
+
+// NewOutputService4ProtocolTest creates a new instance of the OutputService4ProtocolTest
+// client with a session. If additional configuration is needed for the client instance,
+// use the optional aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService4ProtocolTest client from just a session.
+// svc := NewOutputService4ProtocolTest(mySession)
+//
+// // Create an OutputService4ProtocolTest client with additional configuration
+// svc := NewOutputService4ProtocolTest(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest {
+ c := p.ClientConfig("outputservice4protocoltest", cfgs...)
+ return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest {
+ svc := &OutputService4ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice4protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService4ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService4TestCaseOperation1 = "OperationName"
+
+// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation.
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputService4TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService4TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService4TestShapeOutputService4TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
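+// OutputService4TestCaseOperation1 sends the OutputService4TestCaseOperation1 request
+// and returns the unmarshaled output along with any error from sending the request.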
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputService4TestCaseOperation1Output, error) {
+ req, out := c.OutputService4TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService4TestShapeOutputService4TestCaseOperation1Input struct {
+ metadataOutputService4TestShapeOutputService4TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService4TestShapeOutputService4TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService4TestShapeOutputService4TestCaseOperation1Output struct {
+ ListMember []*string `type:"list"`
+
+ metadataOutputService4TestShapeOutputService4TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService4TestShapeOutputService4TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to use concurrently.
+// It is not safe to mutate any of the client's properties, though.
+type OutputService5ProtocolTest struct {
+ *client.Client
+}
+
+// NewOutputService5ProtocolTest creates a new instance of the OutputService5ProtocolTest
+// client with a session. If additional configuration is needed for the client instance,
+// use the optional aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService5ProtocolTest client from just a session.
+// svc := NewOutputService5ProtocolTest(mySession)
+//
+// // Create an OutputService5ProtocolTest client with additional configuration
+// svc := NewOutputService5ProtocolTest(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest {
+ c := p.ClientConfig("outputservice5protocoltest", cfgs...)
+ return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest {
+ svc := &OutputService5ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice5protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService5ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService5TestCaseOperation1 = "OperationName"
+
+// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation.
+func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService5TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
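+// OutputService5TestCaseOperation1 sends the OutputService5TestCaseOperation1 request
+// and returns the unmarshaled output along with any error from sending the request.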
+func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) {
+ req, out := c.OutputService5TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService5TestShapeOutputService5TestCaseOperation1Input struct {
+ metadataOutputService5TestShapeOutputService5TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService5TestShapeOutputService5TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService5TestShapeOutputService5TestCaseOperation1Output struct {
+ ListMember []*string `locationNameList:"item" type:"list"`
+
+ metadataOutputService5TestShapeOutputService5TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService5TestShapeOutputService5TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to use concurrently.
+// It is not safe to mutate any of the client's properties, though.
+type OutputService6ProtocolTest struct {
+ *client.Client
+}
+
+// NewOutputService6ProtocolTest creates a new instance of the OutputService6ProtocolTest
+// client with a session. If additional configuration is needed for the client instance,
+// use the optional aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService6ProtocolTest client from just a session.
+// svc := NewOutputService6ProtocolTest(mySession)
+//
+// // Create an OutputService6ProtocolTest client with additional configuration
+// svc := NewOutputService6ProtocolTest(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest {
+ c := p.ClientConfig("outputservice6protocoltest", cfgs...)
+ return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest {
+ svc := &OutputService6ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice6protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService6ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService6TestCaseOperation1 = "OperationName"
+
+// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation.
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService6TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
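+// OutputService6TestCaseOperation1 sends the OutputService6TestCaseOperation1 request
+// and returns the unmarshaled output along with any error from sending the request.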
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) {
+ req, out := c.OutputService6TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService6TestShapeOutputService6TestCaseOperation1Input struct {
+ metadataOutputService6TestShapeOutputService6TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService6TestShapeOutputService6TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService6TestShapeOutputService6TestCaseOperation1Output struct {
+ ListMember []*string `type:"list" flattened:"true"`
+
+ metadataOutputService6TestShapeOutputService6TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService6TestShapeOutputService6TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to use concurrently.
+// It is not safe to mutate any of the client's properties, though.
+type OutputService7ProtocolTest struct {
+ *client.Client
+}
+
+// NewOutputService7ProtocolTest creates a new instance of the OutputService7ProtocolTest
+// client with a session. If additional configuration is needed for the client instance,
+// use the optional aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService7ProtocolTest client from just a session.
+// svc := NewOutputService7ProtocolTest(mySession)
+//
+// // Create an OutputService7ProtocolTest client with additional configuration
+// svc := NewOutputService7ProtocolTest(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService7ProtocolTest {
+ c := p.ClientConfig("outputservice7protocoltest", cfgs...)
+ return newOutputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService7ProtocolTest {
+ svc := &OutputService7ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice7protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService7ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService7TestCaseOperation1 = "OperationName"
+
+// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation.
+func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *request.Request, output *OutputService7TestShapeOutputService7TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService7TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService7TestShapeOutputService7TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
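+// OutputService7TestCaseOperation1 sends the OutputService7TestCaseOperation1 request
+// and returns the unmarshaled output along with any error from sending the request.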
+func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputService7TestCaseOperation1Output, error) {
+ req, out := c.OutputService7TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService7TestShapeOutputService7TestCaseOperation1Input struct {
+ metadataOutputService7TestShapeOutputService7TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService7TestShapeOutputService7TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService7TestShapeOutputService7TestCaseOperation1Output struct {
+ ListMember []*string `type:"list" flattened:"true"`
+
+ metadataOutputService7TestShapeOutputService7TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService7TestShapeOutputService7TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to use concurrently.
+// It is not safe to mutate any of the client's properties, though.
+type OutputService8ProtocolTest struct {
+ *client.Client
+}
+
+// NewOutputService8ProtocolTest creates a new instance of the OutputService8ProtocolTest
+// client with a session. If additional configuration is needed for the client instance,
+// use the optional aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService8ProtocolTest client from just a session.
+// svc := NewOutputService8ProtocolTest(mySession)
+//
+// // Create an OutputService8ProtocolTest client with additional configuration
+// svc := NewOutputService8ProtocolTest(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService8ProtocolTest {
+ c := p.ClientConfig("outputservice8protocoltest", cfgs...)
+ return newOutputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService8ProtocolTest {
+ svc := &OutputService8ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice8protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService8ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService8TestCaseOperation1 = "OperationName"
+
+// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation.
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *request.Request, output *OutputService8TestShapeOutputService8TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService8TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService8TestShapeOutputService8TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
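+// OutputService8TestCaseOperation1 sends the OutputService8TestCaseOperation1 request
+// and returns the unmarshaled output along with any error from sending the request.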
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputService8TestCaseOperation1Output, error) {
+ req, out := c.OutputService8TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService8TestShapeOutputService8TestCaseOperation1Input struct {
+ metadataOutputService8TestShapeOutputService8TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService8TestShapeOutputService8TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService8TestShapeOutputService8TestCaseOperation1Output struct {
+ List []*OutputService8TestShapeStructureShape `type:"list"`
+
+ metadataOutputService8TestShapeOutputService8TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService8TestShapeOutputService8TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService8TestShapeStructureShape struct {
+ Bar *string `type:"string"`
+
+ Baz *string `type:"string"`
+
+ Foo *string `type:"string"`
+
+ metadataOutputService8TestShapeStructureShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService8TestShapeStructureShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to use concurrently.
+// It is not safe to mutate any of the client's properties, though.
+type OutputService9ProtocolTest struct {
+ *client.Client
+}
+
+// NewOutputService9ProtocolTest creates a new instance of the OutputService9ProtocolTest
+// client with a session. If additional configuration is needed for the client instance,
+// use the optional aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService9ProtocolTest client from just a session.
+// svc := NewOutputService9ProtocolTest(mySession)
+//
+// // Create an OutputService9ProtocolTest client with additional configuration
+// svc := NewOutputService9ProtocolTest(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService9ProtocolTest {
+ c := p.ClientConfig("outputservice9protocoltest", cfgs...)
+ return newOutputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService9ProtocolTest {
+ svc := &OutputService9ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice9protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService9ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService9TestCaseOperation1 = "OperationName"
+
+// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation.
+func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *request.Request, output *OutputService9TestShapeOutputService9TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService9TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService9TestShapeOutputService9TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
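+// OutputService9TestCaseOperation1 sends the OutputService9TestCaseOperation1 request
+// and returns the unmarshaled output along with any error from sending the request.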
+func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputService9TestCaseOperation1Output, error) {
+ req, out := c.OutputService9TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService9TestShapeOutputService9TestCaseOperation1Input struct {
+ metadataOutputService9TestShapeOutputService9TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService9TestShapeOutputService9TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService9TestShapeOutputService9TestCaseOperation1Output struct {
+ List []*OutputService9TestShapeStructureShape `type:"list" flattened:"true"`
+
+ metadataOutputService9TestShapeOutputService9TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService9TestShapeOutputService9TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService9TestShapeStructureShape struct {
+ Bar *string `type:"string"`
+
+ Baz *string `type:"string"`
+
+ Foo *string `type:"string"`
+
+ metadataOutputService9TestShapeStructureShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService9TestShapeStructureShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to use concurrently.
+// It is not safe to mutate any of the client's properties, though.
+type OutputService10ProtocolTest struct {
+ *client.Client
+}
+
+// NewOutputService10ProtocolTest creates a new instance of the OutputService10ProtocolTest
+// client with a session. If additional configuration is needed for the client instance,
+// use the optional aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService10ProtocolTest client from just a session.
+// svc := NewOutputService10ProtocolTest(mySession)
+//
+// // Create an OutputService10ProtocolTest client with additional configuration
+// svc := NewOutputService10ProtocolTest(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService10ProtocolTest {
+ c := p.ClientConfig("outputservice10protocoltest", cfgs...)
+ return newOutputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService10ProtocolTest {
+ svc := &OutputService10ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice10protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService10ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService10TestCaseOperation1 = "OperationName"
+
+// OutputService10TestCaseOperation1Request generates a request for the OutputService10TestCaseOperation1 operation.
+func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *request.Request, output *OutputService10TestShapeOutputService10TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService10TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService10TestShapeOutputService10TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService10TestShapeOutputService10TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
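+// OutputService10TestCaseOperation1 sends the OutputService10TestCaseOperation1 request
+// and returns the unmarshaled output along with any error from sending the request.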
+func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (*OutputService10TestShapeOutputService10TestCaseOperation1Output, error) {
+ req, out := c.OutputService10TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService10TestShapeOutputService10TestCaseOperation1Input struct {
+ metadataOutputService10TestShapeOutputService10TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService10TestShapeOutputService10TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService10TestShapeOutputService10TestCaseOperation1Output struct {
+ List []*string `locationNameList:"NamedList" type:"list" flattened:"true"`
+
+ metadataOutputService10TestShapeOutputService10TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService10TestShapeOutputService10TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to use concurrently.
+// It is not safe to mutate any of the client's properties, though.
+type OutputService11ProtocolTest struct {
+ *client.Client
+}
+
+// NewOutputService11ProtocolTest creates a new instance of the OutputService11ProtocolTest
+// client with a session. If additional configuration is needed for the client instance,
+// use the optional aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService11ProtocolTest client from just a session.
+// svc := NewOutputService11ProtocolTest(mySession)
+//
+// // Create an OutputService11ProtocolTest client with additional configuration
+// svc := NewOutputService11ProtocolTest(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService11ProtocolTest {
+ c := p.ClientConfig("outputservice11protocoltest", cfgs...)
+ return newOutputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService11ProtocolTest {
+ svc := &OutputService11ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice11protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService11ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService11TestCaseOperation1 = "OperationName"
+
+// OutputService11TestCaseOperation1Request generates a request for the OutputService11TestCaseOperation1 operation.
+func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *request.Request, output *OutputService11TestShapeOutputService11TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService11TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService11TestShapeOutputService11TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService11TestShapeOutputService11TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
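+// OutputService11TestCaseOperation1 sends the OutputService11TestCaseOperation1 request
+// and returns the unmarshaled output along with any error from sending the request.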
+func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (*OutputService11TestShapeOutputService11TestCaseOperation1Output, error) {
+ req, out := c.OutputService11TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService11TestShapeOutputService11TestCaseOperation1Input struct {
+ metadataOutputService11TestShapeOutputService11TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService11TestShapeOutputService11TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService11TestShapeOutputService11TestCaseOperation1Output struct {
+ Map map[string]*OutputService11TestShapeStructType `type:"map"`
+
+ metadataOutputService11TestShapeOutputService11TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService11TestShapeOutputService11TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService11TestShapeStructType struct {
+ Foo *string `locationName:"foo" type:"string"`
+
+ metadataOutputService11TestShapeStructType `json:"-" xml:"-"`
+}
+
+type metadataOutputService11TestShapeStructType struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to use concurrently.
+// It is not safe to mutate any of the client's properties, though.
+type OutputService12ProtocolTest struct {
+ *client.Client
+}
+
+// NewOutputService12ProtocolTest creates a new instance of the OutputService12ProtocolTest
+// client with a session. If additional configuration is needed for the client instance,
+// use the optional aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService12ProtocolTest client from just a session.
+// svc := NewOutputService12ProtocolTest(mySession)
+//
+// // Create an OutputService12ProtocolTest client with additional configuration
+// svc := NewOutputService12ProtocolTest(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService12ProtocolTest {
+ c := p.ClientConfig("outputservice12protocoltest", cfgs...)
+ return newOutputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService12ProtocolTest {
+ svc := &OutputService12ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice12protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService12ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService12TestCaseOperation1 = "OperationName"
+
+// OutputService12TestCaseOperation1Request generates a request for the OutputService12TestCaseOperation1 operation.
+func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1Request(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (req *request.Request, output *OutputService12TestShapeOutputService12TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService12TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService12TestShapeOutputService12TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService12TestShapeOutputService12TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
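+// OutputService12TestCaseOperation1 sends the OutputService12TestCaseOperation1 request
+// and returns the unmarshaled output along with any error from sending the request.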
+func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (*OutputService12TestShapeOutputService12TestCaseOperation1Output, error) {
+ req, out := c.OutputService12TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService12TestShapeOutputService12TestCaseOperation1Input struct {
+ metadataOutputService12TestShapeOutputService12TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService12TestShapeOutputService12TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService12TestShapeOutputService12TestCaseOperation1Output struct {
+ Map map[string]*string `type:"map" flattened:"true"`
+
+ metadataOutputService12TestShapeOutputService12TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService12TestShapeOutputService12TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to use concurrently.
+// It is not safe to mutate any of the client's properties, though.
+type OutputService13ProtocolTest struct {
+ *client.Client
+}
+
+// NewOutputService13ProtocolTest creates a new instance of the OutputService13ProtocolTest
+// client with a session. If additional configuration is needed for the client instance,
+// use the optional aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService13ProtocolTest client from just a session.
+// svc := NewOutputService13ProtocolTest(mySession)
+//
+// // Create an OutputService13ProtocolTest client with additional configuration
+// svc := NewOutputService13ProtocolTest(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService13ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService13ProtocolTest {
+ c := p.ClientConfig("outputservice13protocoltest", cfgs...)
+ return newOutputService13ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService13ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService13ProtocolTest {
+ svc := &OutputService13ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice13protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService13ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService13TestCaseOperation1 = "OperationName"
+
+// OutputService13TestCaseOperation1Request generates a request for the OutputService13TestCaseOperation1 operation.
+func (c *OutputService13ProtocolTest) OutputService13TestCaseOperation1Request(input *OutputService13TestShapeOutputService13TestCaseOperation1Input) (req *request.Request, output *OutputService13TestShapeOutputService13TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService13TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService13TestShapeOutputService13TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService13TestShapeOutputService13TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
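+// OutputService13TestCaseOperation1 sends the OutputService13TestCaseOperation1 request
+// and returns the unmarshaled output along with any error from sending the request.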
+func (c *OutputService13ProtocolTest) OutputService13TestCaseOperation1(input *OutputService13TestShapeOutputService13TestCaseOperation1Input) (*OutputService13TestShapeOutputService13TestCaseOperation1Output, error) {
+ req, out := c.OutputService13TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService13TestShapeOutputService13TestCaseOperation1Input struct {
+ metadataOutputService13TestShapeOutputService13TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService13TestShapeOutputService13TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService13TestShapeOutputService13TestCaseOperation1Output struct {
+ Map map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"`
+
+ metadataOutputService13TestShapeOutputService13TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService13TestShapeOutputService13TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to use concurrently.
+// It is not safe to mutate any of the client's properties, though.
+type OutputService14ProtocolTest struct {
+ *client.Client
+}
+
+// NewOutputService14ProtocolTest creates a new instance of the OutputService14ProtocolTest
+// client with a session. If additional configuration is needed for the client instance,
+// use the optional aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService14ProtocolTest client from just a session.
+// svc := NewOutputService14ProtocolTest(mySession)
+//
+// // Create an OutputService14ProtocolTest client with additional configuration
+// svc := NewOutputService14ProtocolTest(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService14ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService14ProtocolTest {
+ c := p.ClientConfig("outputservice14protocoltest", cfgs...)
+ return newOutputService14ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService14ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService14ProtocolTest {
+ svc := &OutputService14ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice14protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService14ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService14TestCaseOperation1 = "OperationName"
+
+// OutputService14TestCaseOperation1Request generates a request for the OutputService14TestCaseOperation1 operation.
+func (c *OutputService14ProtocolTest) OutputService14TestCaseOperation1Request(input *OutputService14TestShapeOutputService14TestCaseOperation1Input) (req *request.Request, output *OutputService14TestShapeOutputService14TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService14TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService14TestShapeOutputService14TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService14TestShapeOutputService14TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
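+// OutputService14TestCaseOperation1 sends the OutputService14TestCaseOperation1 request
+// and returns the unmarshaled output along with any error from sending the request.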
+func (c *OutputService14ProtocolTest) OutputService14TestCaseOperation1(input *OutputService14TestShapeOutputService14TestCaseOperation1Input) (*OutputService14TestShapeOutputService14TestCaseOperation1Output, error) {
+ req, out := c.OutputService14TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService14TestShapeOutputService14TestCaseOperation1Input struct {
+ metadataOutputService14TestShapeOutputService14TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService14TestShapeOutputService14TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService14TestShapeOutputService14TestCaseOperation1Output struct {
+ Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map" flattened:"true"`
+
+ metadataOutputService14TestShapeOutputService14TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService14TestShapeOutputService14TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to use concurrently.
+// It is not safe to mutate any of the client's properties, though.
+type OutputService15ProtocolTest struct {
+ *client.Client
+}
+
+// NewOutputService15ProtocolTest creates a new instance of the OutputService15ProtocolTest
+// client with a session. If additional configuration is needed for the client instance,
+// use the optional aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an OutputService15ProtocolTest client from just a session.
+// svc := NewOutputService15ProtocolTest(mySession)
+//
+// // Create an OutputService15ProtocolTest client with additional configuration
+// svc := NewOutputService15ProtocolTest(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService15ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService15ProtocolTest {
+ c := p.ClientConfig("outputservice15protocoltest", cfgs...)
+ return newOutputService15ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService15ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService15ProtocolTest {
+ svc := &OutputService15ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice15protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an OutputService15ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService15ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService15TestCaseOperation1 = "OperationName"
+
+// OutputService15TestCaseOperation1Request generates a request for the OutputService15TestCaseOperation1 operation.
+func (c *OutputService15ProtocolTest) OutputService15TestCaseOperation1Request(input *OutputService15TestShapeOutputService15TestCaseOperation1Input) (req *request.Request, output *OutputService15TestShapeOutputService15TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService15TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService15TestShapeOutputService15TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService15TestShapeOutputService15TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
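+// OutputService15TestCaseOperation1 sends the OutputService15TestCaseOperation1 request
+// and returns the unmarshaled output along with any error from sending the request.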
+func (c *OutputService15ProtocolTest) OutputService15TestCaseOperation1(input *OutputService15TestShapeOutputService15TestCaseOperation1Input) (*OutputService15TestShapeOutputService15TestCaseOperation1Output, error) {
+ req, out := c.OutputService15TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService15TestShapeOutputService15TestCaseOperation1Input struct {
+ metadataOutputService15TestShapeOutputService15TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService15TestShapeOutputService15TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService15TestShapeOutputService15TestCaseOperation1Output struct {
+ Foo *string `type:"string"`
+
+ metadataOutputService15TestShapeOutputService15TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService15TestShapeOutputService15TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//
+// Tests begin here
+//
+
+func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("<OperationNameResponse><OperationNameResult><Str>myname</Str><FooNum>123</FooNum><FalseBool>false</FalseBool><TrueBool>true</TrueBool><Float>1.2</Float><Double>1.3</Double><Long>200</Long><Char>a</Char><Timestamp>2015-01-25T08:00:00Z</Timestamp></OperationNameResult><ResponseMetadata><RequestId>request-id</RequestId></ResponseMetadata></OperationNameResponse>"))
+ req, out := svc.OutputService1TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "a", *out.Char)
+ assert.Equal(t, 1.3, *out.Double)
+ assert.Equal(t, false, *out.FalseBool)
+ assert.Equal(t, 1.2, *out.Float)
+ assert.Equal(t, int64(200), *out.Long)
+ assert.Equal(t, int64(123), *out.Num)
+ assert.Equal(t, "myname", *out.Str)
+ assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String())
+ assert.Equal(t, true, *out.TrueBool)
+
+}
+
+func TestOutputService2ProtocolTestNotAllMembersInResponseCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("<OperationNameResponse><OperationNameResult><Str>myname</Str></OperationNameResult><ResponseMetadata><RequestId>request-id</RequestId></ResponseMetadata></OperationNameResponse>"))
+ req, out := svc.OutputService2TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "myname", *out.Str)
+
+}
+
+func TestOutputService3ProtocolTestBlobCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("<OperationNameResponse><OperationNameResult><Blob>dmFsdWU=</Blob></OperationNameResult><ResponseMetadata><RequestId>requestid</RequestId></ResponseMetadata></OperationNameResponse>"))
+ req, out := svc.OutputService3TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "value", string(out.Blob))
+
+}
+
+func TestOutputService4ProtocolTestListsCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("<OperationNameResponse><OperationNameResult><ListMember><member>abc</member><member>123</member></ListMember></OperationNameResult><ResponseMetadata><RequestId>requestid</RequestId></ResponseMetadata></OperationNameResponse>"))
+ req, out := svc.OutputService4TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.ListMember[0])
+ assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService5ProtocolTestListWithCustomMemberNameCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("abc123requestid"))
+ req, out := svc.OutputService5TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.ListMember[0])
+ assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService6ProtocolTestFlattenedListCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("abc123requestid"))
+ req, out := svc.OutputService6TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.ListMember[0])
+ assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService7ProtocolTestFlattenedSingleElementListCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("abcrequestid"))
+ req, out := svc.OutputService7TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.ListMember[0])
+
+}
+
+func TestOutputService8ProtocolTestListOfStructuresCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid"))
+ req, out := svc.OutputService8TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "firstbar", *out.List[0].Bar)
+ assert.Equal(t, "firstbaz", *out.List[0].Baz)
+ assert.Equal(t, "firstfoo", *out.List[0].Foo)
+ assert.Equal(t, "secondbar", *out.List[1].Bar)
+ assert.Equal(t, "secondbaz", *out.List[1].Baz)
+ assert.Equal(t, "secondfoo", *out.List[1].Foo)
+
+}
+
+func TestOutputService9ProtocolTestFlattenedListOfStructuresCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid"))
+ req, out := svc.OutputService9TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "firstbar", *out.List[0].Bar)
+ assert.Equal(t, "firstbaz", *out.List[0].Baz)
+ assert.Equal(t, "firstfoo", *out.List[0].Foo)
+ assert.Equal(t, "secondbar", *out.List[1].Bar)
+ assert.Equal(t, "secondbaz", *out.List[1].Baz)
+ assert.Equal(t, "secondfoo", *out.List[1].Foo)
+
+}
+
+func TestOutputService10ProtocolTestFlattenedListWithLocationNameCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("abrequestid"))
+ req, out := svc.OutputService10TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "a", *out.List[0])
+ assert.Equal(t, "b", *out.List[1])
+
+}
+
+func TestOutputService11ProtocolTestNormalMapCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("requestid"))
+ req, out := svc.OutputService11TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "bam", *out.Map["baz"].Foo)
+ assert.Equal(t, "bar", *out.Map["qux"].Foo)
+
+}
+
+func TestOutputService12ProtocolTestFlattenedMapCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("requestid"))
+ req, out := svc.OutputService12TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "bam", *out.Map["baz"])
+ assert.Equal(t, "bar", *out.Map["qux"])
+
+}
+
+func TestOutputService13ProtocolTestFlattenedMapInShapeDefinitionCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("quxbarrequestid"))
+ req, out := svc.OutputService13TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "bar", *out.Map["qux"])
+
+}
+
+func TestOutputService14ProtocolTestNamedMapCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService14ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("requestid"))
+ req, out := svc.OutputService14TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "bam", *out.Map["baz"])
+ assert.Equal(t, "bar", *out.Map["qux"])
+
+}
+
+func TestOutputService15ProtocolTestEmptyStringCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("requestid"))
+ req, out := svc.OutputService15TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ query.UnmarshalMeta(req)
+ query.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "", *out.Foo)
+
+}
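
Note: every generated test above follows the same stub-and-assert pattern: build a request from a client, replace req.HTTPResponse with a canned 200 response, run query.UnmarshalMeta and query.Unmarshal, then assert on the decoded output struct. The sketch below shows that pattern outside the generated harness. It is illustrative only: the output shape, the XML body, and the awstesting import path (taken from other files in this change) are assumptions, not part of the vendored files.

package main

// Minimal sketch of the unmarshal pattern used by the generated query
// protocol tests above. The output shape and XML body are hypothetical;
// the awstesting import path is assumed from other files in this change.
import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/awstesting"
	"github.com/aws/aws-sdk-go/private/protocol/query"
)

// exampleOutput mirrors the generated Output shapes: exported fields with
// protocol struct tags plus an embedded SDKShapeTraits metadata struct.
type exampleOutput struct {
	Str *string `type:"string"`

	metadataExampleOutput `json:"-" xml:"-"`
}

type metadataExampleOutput struct {
	SDKShapeTraits bool `type:"structure"`
}

func main() {
	// Stub client and operation; no network traffic is involved.
	svc := awstesting.NewClient()
	op := &request.Operation{Name: "OperationName"}
	out := &exampleOutput{}
	req := svc.NewRequest(op, nil, out)

	// Swap in a canned response body, exactly as the generated tests do.
	body := "<OperationNameResponse><OperationNameResult><Str>myname</Str>" +
		"</OperationNameResult></OperationNameResponse>"
	req.HTTPResponse = &http.Response{
		StatusCode: 200,
		Body:       ioutil.NopCloser(bytes.NewReader([]byte(body))),
		Header:     http.Header{},
	}

	// Same unmarshal steps the generated tests exercise.
	query.UnmarshalMeta(req)
	query.Unmarshal(req)
	if req.Error != nil {
		panic(req.Error)
	}
	if out.Str != nil {
		fmt.Println(*out.Str) // "myname"
	}
}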
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/build.go
rename to Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/payload.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/payload.go
rename to Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/unmarshal.go
rename to Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go
new file mode 100644
index 000000000..29e9230c1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go
@@ -0,0 +1,246 @@
+// +build bench
+
+package restxml_test
+
+import (
+ "testing"
+
+ "bytes"
+ "encoding/xml"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awstesting"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/restxml"
+ "github.com/aws/aws-sdk-go/service/cloudfront"
+)
+
+func BenchmarkRESTXMLBuild_Complex_cloudfrontCreateDistribution(b *testing.B) {
+ params := restxmlBuildCreateDistroParms
+
+ op := &request.Operation{
+ Name: "CreateDistribution",
+ HTTPMethod: "POST",
+ HTTPPath: "/2015-04-17/distribution/{DistributionId}/invalidation",
+ }
+
+ benchRESTXMLBuild(b, op, params)
+}
+
+func BenchmarkRESTXMLBuild_Simple_cloudfrontDeleteStreamingDistribution(b *testing.B) {
+ params := &cloudfront.DeleteDistributionInput{
+ Id: aws.String("string"), // Required
+ IfMatch: aws.String("string"),
+ }
+ op := &request.Operation{
+ Name: "DeleteStreamingDistribution",
+ HTTPMethod: "DELETE",
+ HTTPPath: "/2015-04-17/streaming-distribution/{Id}",
+ }
+ benchRESTXMLBuild(b, op, params)
+}
+
+func BenchmarkEncodingXMLMarshal_Simple_cloudfrontDeleteStreamingDistribution(b *testing.B) {
+ params := &cloudfront.DeleteDistributionInput{
+ Id: aws.String("string"), // Required
+ IfMatch: aws.String("string"),
+ }
+
+ for i := 0; i < b.N; i++ {
+ buf := &bytes.Buffer{}
+ encoder := xml.NewEncoder(buf)
+ if err := encoder.Encode(params); err != nil {
+ b.Fatal("Unexpected error", err)
+ }
+ }
+}
+
+func benchRESTXMLBuild(b *testing.B, op *request.Operation, params interface{}) {
+ svc := awstesting.NewClient()
+ svc.ServiceName = "cloudfront"
+ svc.APIVersion = "2015-04-17"
+
+ for i := 0; i < b.N; i++ {
+ r := svc.NewRequest(op, params, nil)
+ restxml.Build(r)
+ if r.Error != nil {
+ b.Fatal("Unexpected error", r.Error)
+ }
+ }
+}
+
+var restxmlBuildCreateDistroParms = &cloudfront.CreateDistributionInput{
+ DistributionConfig: &cloudfront.DistributionConfig{ // Required
+ CallerReference: aws.String("string"), // Required
+ Comment: aws.String("string"), // Required
+ DefaultCacheBehavior: &cloudfront.DefaultCacheBehavior{ // Required
+ ForwardedValues: &cloudfront.ForwardedValues{ // Required
+ Cookies: &cloudfront.CookiePreference{ // Required
+ Forward: aws.String("ItemSelection"), // Required
+ WhitelistedNames: &cloudfront.CookieNames{
+ Quantity: aws.Int64(1), // Required
+ Items: []*string{
+ aws.String("string"), // Required
+ // More values...
+ },
+ },
+ },
+ QueryString: aws.Bool(true), // Required
+ Headers: &cloudfront.Headers{
+ Quantity: aws.Int64(1), // Required
+ Items: []*string{
+ aws.String("string"), // Required
+ // More values...
+ },
+ },
+ },
+ MinTTL: aws.Int64(1), // Required
+ TargetOriginId: aws.String("string"), // Required
+ TrustedSigners: &cloudfront.TrustedSigners{ // Required
+ Enabled: aws.Bool(true), // Required
+ Quantity: aws.Int64(1), // Required
+ Items: []*string{
+ aws.String("string"), // Required
+ // More values...
+ },
+ },
+ ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required
+ AllowedMethods: &cloudfront.AllowedMethods{
+ Items: []*string{ // Required
+ aws.String("Method"), // Required
+ // More values...
+ },
+ Quantity: aws.Int64(1), // Required
+ CachedMethods: &cloudfront.CachedMethods{
+ Items: []*string{ // Required
+ aws.String("Method"), // Required
+ // More values...
+ },
+ Quantity: aws.Int64(1), // Required
+ },
+ },
+ DefaultTTL: aws.Int64(1),
+ MaxTTL: aws.Int64(1),
+ SmoothStreaming: aws.Bool(true),
+ },
+ Enabled: aws.Bool(true), // Required
+ Origins: &cloudfront.Origins{ // Required
+ Quantity: aws.Int64(1), // Required
+ Items: []*cloudfront.Origin{
+ { // Required
+ DomainName: aws.String("string"), // Required
+ Id: aws.String("string"), // Required
+ CustomOriginConfig: &cloudfront.CustomOriginConfig{
+ HTTPPort: aws.Int64(1), // Required
+ HTTPSPort: aws.Int64(1), // Required
+ OriginProtocolPolicy: aws.String("OriginProtocolPolicy"), // Required
+ },
+ OriginPath: aws.String("string"),
+ S3OriginConfig: &cloudfront.S3OriginConfig{
+ OriginAccessIdentity: aws.String("string"), // Required
+ },
+ },
+ // More values...
+ },
+ },
+ Aliases: &cloudfront.Aliases{
+ Quantity: aws.Int64(1), // Required
+ Items: []*string{
+ aws.String("string"), // Required
+ // More values...
+ },
+ },
+ CacheBehaviors: &cloudfront.CacheBehaviors{
+ Quantity: aws.Int64(1), // Required
+ Items: []*cloudfront.CacheBehavior{
+ { // Required
+ ForwardedValues: &cloudfront.ForwardedValues{ // Required
+ Cookies: &cloudfront.CookiePreference{ // Required
+ Forward: aws.String("ItemSelection"), // Required
+ WhitelistedNames: &cloudfront.CookieNames{
+ Quantity: aws.Int64(1), // Required
+ Items: []*string{
+ aws.String("string"), // Required
+ // More values...
+ },
+ },
+ },
+ QueryString: aws.Bool(true), // Required
+ Headers: &cloudfront.Headers{
+ Quantity: aws.Int64(1), // Required
+ Items: []*string{
+ aws.String("string"), // Required
+ // More values...
+ },
+ },
+ },
+ MinTTL: aws.Int64(1), // Required
+ PathPattern: aws.String("string"), // Required
+ TargetOriginId: aws.String("string"), // Required
+ TrustedSigners: &cloudfront.TrustedSigners{ // Required
+ Enabled: aws.Bool(true), // Required
+ Quantity: aws.Int64(1), // Required
+ Items: []*string{
+ aws.String("string"), // Required
+ // More values...
+ },
+ },
+ ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required
+ AllowedMethods: &cloudfront.AllowedMethods{
+ Items: []*string{ // Required
+ aws.String("Method"), // Required
+ // More values...
+ },
+ Quantity: aws.Int64(1), // Required
+ CachedMethods: &cloudfront.CachedMethods{
+ Items: []*string{ // Required
+ aws.String("Method"), // Required
+ // More values...
+ },
+ Quantity: aws.Int64(1), // Required
+ },
+ },
+ DefaultTTL: aws.Int64(1),
+ MaxTTL: aws.Int64(1),
+ SmoothStreaming: aws.Bool(true),
+ },
+ // More values...
+ },
+ },
+ CustomErrorResponses: &cloudfront.CustomErrorResponses{
+ Quantity: aws.Int64(1), // Required
+ Items: []*cloudfront.CustomErrorResponse{
+ { // Required
+ ErrorCode: aws.Int64(1), // Required
+ ErrorCachingMinTTL: aws.Int64(1),
+ ResponseCode: aws.String("string"),
+ ResponsePagePath: aws.String("string"),
+ },
+ // More values...
+ },
+ },
+ DefaultRootObject: aws.String("string"),
+ Logging: &cloudfront.LoggingConfig{
+ Bucket: aws.String("string"), // Required
+ Enabled: aws.Bool(true), // Required
+ IncludeCookies: aws.Bool(true), // Required
+ Prefix: aws.String("string"), // Required
+ },
+ PriceClass: aws.String("PriceClass"),
+ Restrictions: &cloudfront.Restrictions{
+ GeoRestriction: &cloudfront.GeoRestriction{ // Required
+ Quantity: aws.Int64(1), // Required
+ RestrictionType: aws.String("GeoRestrictionType"), // Required
+ Items: []*string{
+ aws.String("string"), // Required
+ // More values...
+ },
+ },
+ },
+ ViewerCertificate: &cloudfront.ViewerCertificate{
+ CloudFrontDefaultCertificate: aws.Bool(true),
+ IAMCertificateId: aws.String("string"),
+ MinimumProtocolVersion: aws.String("MinimumProtocolVersion"),
+ SSLSupportMethod: aws.String("SSLSupportMethod"),
+ },
+ },
+}
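
Note: build_bench_test.go above is guarded by the "// +build bench" constraint, so it is excluded from ordinary go test runs of the vendored tree and only compiles when the bench build tag is supplied (for example, go test -tags bench -bench . inside the package). Below is a minimal, hypothetical benchmark gated the same way, using only calls that already appear in that file; the operation, path, and the awstesting import path are assumptions, not upstream code.

// +build bench

// Hypothetical stand-alone sketch of the build-tag-gated benchmark pattern
// above; the operation and parameters are illustrative, and the awstesting
// import path is an assumption based on other files in this change.
package restxml_test

import (
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/awstesting"
	"github.com/aws/aws-sdk-go/private/protocol/restxml"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

// BenchmarkRESTXMLBuild_Sketch measures only the restxml request marshaling;
// nothing is sent over the network.
func BenchmarkRESTXMLBuild_Sketch(b *testing.B) {
	svc := awstesting.NewClient()
	svc.ServiceName = "cloudfront"
	svc.APIVersion = "2015-04-17"

	op := &request.Operation{
		Name:       "DeleteDistribution",
		HTTPMethod: "DELETE",
		HTTPPath:   "/2015-04-17/distribution/{Id}",
	}
	params := &cloudfront.DeleteDistributionInput{
		Id:      aws.String("string"),
		IfMatch: aws.String("string"),
	}

	for i := 0; i < b.N; i++ {
		r := svc.NewRequest(op, params, nil)
		restxml.Build(r)
		if r.Error != nil {
			b.Fatal("unexpected error", r.Error)
		}
	}
}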
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restxml/build_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restxml/build_test.go
new file mode 100644
index 000000000..54b2f68fd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restxml/build_test.go
@@ -0,0 +1,3594 @@
+package restxml_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "encoding/xml"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/awstesting"
+ "github.com/aws/aws-sdk-go/private/protocol/restxml"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+ "github.com/aws/aws-sdk-go/private/signer/v4"
+ "github.com/aws/aws-sdk-go/private/util"
+ "github.com/stretchr/testify/assert"
+)
+
+var _ bytes.Buffer // always import bytes
+var _ http.Request
+var _ json.Marshaler
+var _ time.Time
+var _ xmlutil.XMLNode
+var _ xml.Attr
+var _ = awstesting.GenerateAssertions
+var _ = ioutil.Discard
+var _ = util.Trim("")
+var _ = url.Values{}
+var _ = io.EOF
+var _ = aws.String
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService1ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService1ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService1ProtocolTest client from just a session.
+// svc := inputservice1protocoltest.New(mySession)
+//
+// // Create a InputService1ProtocolTest client with additional configuration
+// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest {
+ c := p.ClientConfig("inputservice1protocoltest", cfgs...)
+ return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest {
+ svc := &InputService1ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice1protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService1ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService1TestCaseOperation1 = "OperationName"
+
+// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation.
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService1TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/2014-01-01/hostedzone",
+ }
+
+ if input == nil {
+ input = &InputService1TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService1TestShapeInputService1TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) {
+ req, out := c.InputService1TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService1TestCaseOperation2 = "OperationName"
+
+// InputService1TestCaseOperation2Request generates a request for the InputService1TestCaseOperation2 operation.
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation2Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation2Output) {
+ op := &request.Operation{
+ Name: opInputService1TestCaseOperation2,
+ HTTPMethod: "PUT",
+ HTTPPath: "/2014-01-01/hostedzone",
+ }
+
+ if input == nil {
+ input = &InputService1TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService1TestShapeInputService1TestCaseOperation2Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation2(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation2Output, error) {
+ req, out := c.InputService1TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService1TestCaseOperation3 = "OperationName"
+
+// InputService1TestCaseOperation3Request generates a request for the InputService1TestCaseOperation3 operation.
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation3Request(input *InputService1TestShapeInputService1TestCaseOperation3Input) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation3Output) {
+ op := &request.Operation{
+ Name: opInputService1TestCaseOperation3,
+ HTTPMethod: "GET",
+ HTTPPath: "/2014-01-01/hostedzone",
+ }
+
+ if input == nil {
+ input = &InputService1TestShapeInputService1TestCaseOperation3Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService1TestShapeInputService1TestCaseOperation3Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation3(input *InputService1TestShapeInputService1TestCaseOperation3Input) (*InputService1TestShapeInputService1TestCaseOperation3Output, error) {
+ req, out := c.InputService1TestCaseOperation3Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService1TestShapeInputService1TestCaseOperation1Output struct {
+ metadataInputService1TestShapeInputService1TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService1TestShapeInputService1TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService1TestShapeInputService1TestCaseOperation2Output struct {
+ metadataInputService1TestShapeInputService1TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+type metadataInputService1TestShapeInputService1TestCaseOperation2Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService1TestShapeInputService1TestCaseOperation3Input struct {
+ metadataInputService1TestShapeInputService1TestCaseOperation3Input `json:"-" xml:"-"`
+}
+
+type metadataInputService1TestShapeInputService1TestCaseOperation3Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService1TestShapeInputService1TestCaseOperation3Output struct {
+ metadataInputService1TestShapeInputService1TestCaseOperation3Output `json:"-" xml:"-"`
+}
+
+type metadataInputService1TestShapeInputService1TestCaseOperation3Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService1TestShapeInputShape struct {
+ Description *string `type:"string"`
+
+ Name *string `type:"string"`
+
+ metadataInputService1TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService1TestShapeInputShape struct {
+ SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService2ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService2ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService2ProtocolTest client from just a session.
+// svc := inputservice2protocoltest.New(mySession)
+//
+// // Create a InputService2ProtocolTest client with additional configuration
+// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest {
+ c := p.ClientConfig("inputservice2protocoltest", cfgs...)
+ return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest {
+ svc := &InputService2ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice2protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService2ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService2TestCaseOperation1 = "OperationName"
+
+// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation.
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService2TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/2014-01-01/hostedzone",
+ }
+
+ if input == nil {
+ input = &InputService2TestShapeInputService2TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService2TestShapeInputService2TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) {
+ req, out := c.InputService2TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService2TestShapeInputService2TestCaseOperation1Input struct {
+ First *bool `type:"boolean"`
+
+ Fourth *int64 `type:"integer"`
+
+ Second *bool `type:"boolean"`
+
+ Third *float64 `type:"float"`
+
+ metadataInputService2TestShapeInputService2TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService2TestShapeInputService2TestCaseOperation1Input struct {
+ SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+type InputService2TestShapeInputService2TestCaseOperation1Output struct {
+ metadataInputService2TestShapeInputService2TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService2TestShapeInputService2TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService3ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService3ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService3ProtocolTest client from just a session.
+// svc := inputservice3protocoltest.New(mySession)
+//
+// // Create a InputService3ProtocolTest client with additional configuration
+// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest {
+ c := p.ClientConfig("inputservice3protocoltest", cfgs...)
+ return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest {
+ svc := &InputService3ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice3protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService3ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService3TestCaseOperation1 = "OperationName"
+
+// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation.
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService3TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/2014-01-01/hostedzone",
+ }
+
+ if input == nil {
+ input = &InputService3TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService3TestShapeInputService3TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) {
+ req, out := c.InputService3TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService3TestCaseOperation2 = "OperationName"
+
+// InputService3TestCaseOperation2Request generates a request for the InputService3TestCaseOperation2 operation.
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation2Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation2Output) {
+ op := &request.Operation{
+ Name: opInputService3TestCaseOperation2,
+ HTTPMethod: "POST",
+ HTTPPath: "/2014-01-01/hostedzone",
+ }
+
+ if input == nil {
+ input = &InputService3TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService3TestShapeInputService3TestCaseOperation2Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation2(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation2Output, error) {
+ req, out := c.InputService3TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService3TestShapeInputService3TestCaseOperation1Output struct {
+ metadataInputService3TestShapeInputService3TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService3TestShapeInputService3TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService3TestShapeInputService3TestCaseOperation2Output struct {
+ metadataInputService3TestShapeInputService3TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+type metadataInputService3TestShapeInputService3TestCaseOperation2Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService3TestShapeInputShape struct {
+ Description *string `type:"string"`
+
+ SubStructure *InputService3TestShapeSubStructure `type:"structure"`
+
+ metadataInputService3TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService3TestShapeInputShape struct {
+ SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+type InputService3TestShapeSubStructure struct {
+ Bar *string `type:"string"`
+
+ Foo *string `type:"string"`
+
+ metadataInputService3TestShapeSubStructure `json:"-" xml:"-"`
+}
+
+type metadataInputService3TestShapeSubStructure struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService4ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService4ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService4ProtocolTest client from just a session.
+// svc := inputservice4protocoltest.New(mySession)
+//
+// // Create a InputService4ProtocolTest client with additional configuration
+// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest {
+ c := p.ClientConfig("inputservice4protocoltest", cfgs...)
+ return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest {
+ svc := &InputService4ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice4protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService4ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService4TestCaseOperation1 = "OperationName"
+
+// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation.
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputService4TestCaseOperation1Input) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService4TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/2014-01-01/hostedzone",
+ }
+
+ if input == nil {
+ input = &InputService4TestShapeInputService4TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService4TestShapeInputService4TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputService4TestCaseOperation1Input) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) {
+ req, out := c.InputService4TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService4TestShapeInputService4TestCaseOperation1Input struct {
+ Description *string `type:"string"`
+
+ SubStructure *InputService4TestShapeSubStructure `type:"structure"`
+
+ metadataInputService4TestShapeInputService4TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService4TestShapeInputService4TestCaseOperation1Input struct {
+ SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+type InputService4TestShapeInputService4TestCaseOperation1Output struct {
+ metadataInputService4TestShapeInputService4TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService4TestShapeInputService4TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService4TestShapeSubStructure struct {
+ Bar *string `type:"string"`
+
+ Foo *string `type:"string"`
+
+ metadataInputService4TestShapeSubStructure `json:"-" xml:"-"`
+}
+
+type metadataInputService4TestShapeSubStructure struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService5ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService5ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService5ProtocolTest client from just a session.
+// svc := inputservice5protocoltest.New(mySession)
+//
+// // Create a InputService5ProtocolTest client with additional configuration
+// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest {
+ c := p.ClientConfig("inputservice5protocoltest", cfgs...)
+ return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest {
+ svc := &InputService5ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice5protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService5ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService5TestCaseOperation1 = "OperationName"
+
+// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation.
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputService5TestCaseOperation1Input) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService5TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/2014-01-01/hostedzone",
+ }
+
+ if input == nil {
+ input = &InputService5TestShapeInputService5TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService5TestShapeInputService5TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputService5TestCaseOperation1Input) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) {
+ req, out := c.InputService5TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService5TestShapeInputService5TestCaseOperation1Input struct {
+ ListParam []*string `type:"list"`
+
+ metadataInputService5TestShapeInputService5TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService5TestShapeInputService5TestCaseOperation1Input struct {
+ SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+type InputService5TestShapeInputService5TestCaseOperation1Output struct {
+ metadataInputService5TestShapeInputService5TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService5TestShapeInputService5TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService6ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService6ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService6ProtocolTest client from just a session.
+// svc := inputservice6protocoltest.New(mySession)
+//
+// // Create a InputService6ProtocolTest client with additional configuration
+// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest {
+ c := p.ClientConfig("inputservice6protocoltest", cfgs...)
+ return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest {
+ svc := &InputService6ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice6protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService6ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService6TestCaseOperation1 = "OperationName"
+
+// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation.
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService6TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/2014-01-01/hostedzone",
+ }
+
+ if input == nil {
+ input = &InputService6TestShapeInputService6TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService6TestShapeInputService6TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) {
+ req, out := c.InputService6TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService6TestShapeInputService6TestCaseOperation1Input struct {
+ ListParam []*string `locationName:"AlternateName" locationNameList:"NotMember" type:"list"`
+
+ metadataInputService6TestShapeInputService6TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService6TestShapeInputService6TestCaseOperation1Input struct {
+ SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+type InputService6TestShapeInputService6TestCaseOperation1Output struct {
+ metadataInputService6TestShapeInputService6TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService6TestShapeInputService6TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService7ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService7ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService7ProtocolTest client from just a session.
+// svc := inputservice7protocoltest.New(mySession)
+//
+// // Create a InputService7ProtocolTest client with additional configuration
+// svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest {
+ c := p.ClientConfig("inputservice7protocoltest", cfgs...)
+ return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest {
+ svc := &InputService7ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice7protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService7ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService7TestCaseOperation1 = "OperationName"
+
+// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation.
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputService7TestCaseOperation1Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService7TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/2014-01-01/hostedzone",
+ }
+
+ if input == nil {
+ input = &InputService7TestShapeInputService7TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService7TestShapeInputService7TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputService7TestCaseOperation1Input) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) {
+ req, out := c.InputService7TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService7TestShapeInputService7TestCaseOperation1Input struct {
+ ListParam []*string `type:"list" flattened:"true"`
+
+ metadataInputService7TestShapeInputService7TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService7TestShapeInputService7TestCaseOperation1Input struct {
+ SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+type InputService7TestShapeInputService7TestCaseOperation1Output struct {
+ metadataInputService7TestShapeInputService7TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService7TestShapeInputService7TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService8ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService8ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService8ProtocolTest client from just a session.
+// svc := inputservice8protocoltest.New(mySession)
+//
+// // Create a InputService8ProtocolTest client with additional configuration
+// svc := inputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService8ProtocolTest {
+ c := p.ClientConfig("inputservice8protocoltest", cfgs...)
+ return newInputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService8ProtocolTest {
+ svc := &InputService8ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice8protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService8ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService8TestCaseOperation1 = "OperationName"
+
+// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation.
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputService8TestCaseOperation1Input) (req *request.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService8TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/2014-01-01/hostedzone",
+ }
+
+ if input == nil {
+ input = &InputService8TestShapeInputService8TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService8TestShapeInputService8TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputService8TestCaseOperation1Input) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) {
+ req, out := c.InputService8TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService8TestShapeInputService8TestCaseOperation1Input struct {
+ ListParam []*string `locationName:"item" type:"list" flattened:"true"`
+
+ metadataInputService8TestShapeInputService8TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService8TestShapeInputService8TestCaseOperation1Input struct {
+ SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+type InputService8TestShapeInputService8TestCaseOperation1Output struct {
+ metadataInputService8TestShapeInputService8TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService8TestShapeInputService8TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService9ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService9ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService9ProtocolTest client from just a session.
+// svc := inputservice9protocoltest.New(mySession)
+//
+// // Create a InputService9ProtocolTest client with additional configuration
+// svc := inputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService9ProtocolTest {
+ c := p.ClientConfig("inputservice9protocoltest", cfgs...)
+ return newInputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService9ProtocolTest {
+ svc := &InputService9ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice9protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService9ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService9TestCaseOperation1 = "OperationName"
+
+// InputService9TestCaseOperation1Request generates a request for the InputService9TestCaseOperation1 operation.
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputService9TestCaseOperation1Input) (req *request.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService9TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/2014-01-01/hostedzone",
+ }
+
+ if input == nil {
+ input = &InputService9TestShapeInputService9TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService9TestShapeInputService9TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputService9TestCaseOperation1Input) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) {
+ req, out := c.InputService9TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService9TestShapeInputService9TestCaseOperation1Input struct {
+ ListParam []*InputService9TestShapeSingleFieldStruct `locationName:"item" type:"list" flattened:"true"`
+
+ metadataInputService9TestShapeInputService9TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService9TestShapeInputService9TestCaseOperation1Input struct {
+ SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+type InputService9TestShapeInputService9TestCaseOperation1Output struct {
+ metadataInputService9TestShapeInputService9TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService9TestShapeInputService9TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService9TestShapeSingleFieldStruct struct {
+ Element *string `locationName:"value" type:"string"`
+
+ metadataInputService9TestShapeSingleFieldStruct `json:"-" xml:"-"`
+}
+
+type metadataInputService9TestShapeSingleFieldStruct struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService10ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService10ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService10ProtocolTest client from just a session.
+// svc := inputservice10protocoltest.New(mySession)
+//
+// // Create an InputService10ProtocolTest client with additional configuration
+// svc := inputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService10ProtocolTest {
+ c := p.ClientConfig("inputservice10protocoltest", cfgs...)
+ return newInputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService10ProtocolTest {
+ svc := &InputService10ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice10protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService10ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService10TestCaseOperation1 = "OperationName"
+
+// InputService10TestCaseOperation1Request generates a request for the InputService10TestCaseOperation1 operation.
+func (c *InputService10ProtocolTest) InputService10TestCaseOperation1Request(input *InputService10TestShapeInputService10TestCaseOperation1Input) (req *request.Request, output *InputService10TestShapeInputService10TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService10TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/2014-01-01/hostedzone",
+ }
+
+ if input == nil {
+ input = &InputService10TestShapeInputService10TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService10TestShapeInputService10TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService10ProtocolTest) InputService10TestCaseOperation1(input *InputService10TestShapeInputService10TestCaseOperation1Input) (*InputService10TestShapeInputService10TestCaseOperation1Output, error) {
+ req, out := c.InputService10TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService10TestShapeInputService10TestCaseOperation1Input struct {
+ StructureParam *InputService10TestShapeStructureShape `type:"structure"`
+
+ metadataInputService10TestShapeInputService10TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService10TestShapeInputService10TestCaseOperation1Input struct {
+ SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+type InputService10TestShapeInputService10TestCaseOperation1Output struct {
+ metadataInputService10TestShapeInputService10TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService10TestShapeInputService10TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService10TestShapeStructureShape struct {
+ B []byte `locationName:"b" type:"blob"`
+
+ T *time.Time `locationName:"t" type:"timestamp" timestampFormat:"iso8601"`
+
+ metadataInputService10TestShapeStructureShape `json:"-" xml:"-"`
+}
+
+type metadataInputService10TestShapeStructureShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService11ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService11ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService11ProtocolTest client from just a session.
+// svc := inputservice11protocoltest.New(mySession)
+//
+// // Create an InputService11ProtocolTest client with additional configuration
+// svc := inputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService11ProtocolTest {
+ c := p.ClientConfig("inputservice11protocoltest", cfgs...)
+ return newInputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService11ProtocolTest {
+ svc := &InputService11ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice11protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService11ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService11TestCaseOperation1 = "OperationName"
+
+// InputService11TestCaseOperation1Request generates a request for the InputService11TestCaseOperation1 operation.
+func (c *InputService11ProtocolTest) InputService11TestCaseOperation1Request(input *InputService11TestShapeInputService11TestCaseOperation1Input) (req *request.Request, output *InputService11TestShapeInputService11TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService11TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InputService11TestShapeInputService11TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService11TestShapeInputService11TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService11ProtocolTest) InputService11TestCaseOperation1(input *InputService11TestShapeInputService11TestCaseOperation1Input) (*InputService11TestShapeInputService11TestCaseOperation1Output, error) {
+ req, out := c.InputService11TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService11TestShapeInputService11TestCaseOperation1Input struct {
+ Foo map[string]*string `location:"headers" locationName:"x-foo-" type:"map"`
+
+ metadataInputService11TestShapeInputService11TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService11TestShapeInputService11TestCaseOperation1Input struct {
+ SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+type InputService11TestShapeInputService11TestCaseOperation1Output struct {
+ metadataInputService11TestShapeInputService11TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService11TestShapeInputService11TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService12ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService12ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService12ProtocolTest client from just a session.
+// svc := inputservice12protocoltest.New(mySession)
+//
+// // Create an InputService12ProtocolTest client with additional configuration
+// svc := inputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService12ProtocolTest {
+ c := p.ClientConfig("inputservice12protocoltest", cfgs...)
+ return newInputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService12ProtocolTest {
+ svc := &InputService12ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice12protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService12ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService12TestCaseOperation1 = "OperationName"
+
+// InputService12TestCaseOperation1Request generates a request for the InputService12TestCaseOperation1 operation.
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(input *InputService12TestShapeInputService12TestCaseOperation1Input) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService12TestCaseOperation1,
+ HTTPMethod: "GET",
+ HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}",
+ }
+
+ if input == nil {
+ input = &InputService12TestShapeInputService12TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService12TestShapeInputService12TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *InputService12TestShapeInputService12TestCaseOperation1Input) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) {
+ req, out := c.InputService12TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService12TestShapeInputService12TestCaseOperation1Input struct {
+ PipelineId *string `location:"uri" type:"string"`
+
+ QueryDoc map[string]*string `location:"querystring" type:"map"`
+
+ metadataInputService12TestShapeInputService12TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService12TestShapeInputService12TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService12TestShapeInputService12TestCaseOperation1Output struct {
+ metadataInputService12TestShapeInputService12TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService12TestShapeInputService12TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService13ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService13ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService13ProtocolTest client from just a session.
+// svc := inputservice13protocoltest.New(mySession)
+//
+// // Create an InputService13ProtocolTest client with additional configuration
+// svc := inputservice13protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService13ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService13ProtocolTest {
+ c := p.ClientConfig("inputservice13protocoltest", cfgs...)
+ return newInputService13ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService13ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService13ProtocolTest {
+ svc := &InputService13ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice13protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService13ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService13TestCaseOperation1 = "OperationName"
+
+// InputService13TestCaseOperation1Request generates a request for the InputService13TestCaseOperation1 operation.
+func (c *InputService13ProtocolTest) InputService13TestCaseOperation1Request(input *InputService13TestShapeInputService13TestCaseOperation1Input) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService13TestCaseOperation1,
+ HTTPMethod: "GET",
+ HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}",
+ }
+
+ if input == nil {
+ input = &InputService13TestShapeInputService13TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService13TestShapeInputService13TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService13ProtocolTest) InputService13TestCaseOperation1(input *InputService13TestShapeInputService13TestCaseOperation1Input) (*InputService13TestShapeInputService13TestCaseOperation1Output, error) {
+ req, out := c.InputService13TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService13TestShapeInputService13TestCaseOperation1Input struct {
+ PipelineId *string `location:"uri" type:"string"`
+
+ QueryDoc map[string][]*string `location:"querystring" type:"map"`
+
+ metadataInputService13TestShapeInputService13TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService13TestShapeInputService13TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService13TestShapeInputService13TestCaseOperation1Output struct {
+ metadataInputService13TestShapeInputService13TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService13TestShapeInputService13TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService14ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService14ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService14ProtocolTest client from just a session.
+// svc := inputservice14protocoltest.New(mySession)
+//
+// // Create an InputService14ProtocolTest client with additional configuration
+// svc := inputservice14protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService14ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService14ProtocolTest {
+ c := p.ClientConfig("inputservice14protocoltest", cfgs...)
+ return newInputService14ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService14ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService14ProtocolTest {
+ svc := &InputService14ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice14protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService14ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService14TestCaseOperation1 = "OperationName"
+
+// InputService14TestCaseOperation1Request generates a request for the InputService14TestCaseOperation1 operation.
+func (c *InputService14ProtocolTest) InputService14TestCaseOperation1Request(input *InputService14TestShapeInputService14TestCaseOperation1Input) (req *request.Request, output *InputService14TestShapeInputService14TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService14TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InputService14TestShapeInputService14TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService14TestShapeInputService14TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService14ProtocolTest) InputService14TestCaseOperation1(input *InputService14TestShapeInputService14TestCaseOperation1Input) (*InputService14TestShapeInputService14TestCaseOperation1Output, error) {
+ req, out := c.InputService14TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService14TestShapeInputService14TestCaseOperation1Input struct {
+ Foo *string `locationName:"foo" type:"string"`
+
+ metadataInputService14TestShapeInputService14TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService14TestShapeInputService14TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure" payload:"Foo"`
+}
+
+type InputService14TestShapeInputService14TestCaseOperation1Output struct {
+ metadataInputService14TestShapeInputService14TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService14TestShapeInputService14TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService15ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService15ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService15ProtocolTest client from just a session.
+// svc := inputservice15protocoltest.New(mySession)
+//
+// // Create an InputService15ProtocolTest client with additional configuration
+// svc := inputservice15protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService15ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService15ProtocolTest {
+ c := p.ClientConfig("inputservice15protocoltest", cfgs...)
+ return newInputService15ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService15ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService15ProtocolTest {
+ svc := &InputService15ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice15protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService15ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService15ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService15TestCaseOperation1 = "OperationName"
+
+// InputService15TestCaseOperation1Request generates a request for the InputService15TestCaseOperation1 operation.
+func (c *InputService15ProtocolTest) InputService15TestCaseOperation1Request(input *InputService15TestShapeInputShape) (req *request.Request, output *InputService15TestShapeInputService15TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService15TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InputService15TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService15TestShapeInputService15TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService15ProtocolTest) InputService15TestCaseOperation1(input *InputService15TestShapeInputShape) (*InputService15TestShapeInputService15TestCaseOperation1Output, error) {
+ req, out := c.InputService15TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService15TestCaseOperation2 = "OperationName"
+
+// InputService15TestCaseOperation2Request generates a request for the InputService15TestCaseOperation2 operation.
+func (c *InputService15ProtocolTest) InputService15TestCaseOperation2Request(input *InputService15TestShapeInputShape) (req *request.Request, output *InputService15TestShapeInputService15TestCaseOperation2Output) {
+ op := &request.Operation{
+ Name: opInputService15TestCaseOperation2,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InputService15TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService15TestShapeInputService15TestCaseOperation2Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService15ProtocolTest) InputService15TestCaseOperation2(input *InputService15TestShapeInputShape) (*InputService15TestShapeInputService15TestCaseOperation2Output, error) {
+ req, out := c.InputService15TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService15TestShapeInputService15TestCaseOperation1Output struct {
+ metadataInputService15TestShapeInputService15TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService15TestShapeInputService15TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService15TestShapeInputService15TestCaseOperation2Output struct {
+ metadataInputService15TestShapeInputService15TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+type metadataInputService15TestShapeInputService15TestCaseOperation2Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService15TestShapeInputShape struct {
+ Foo []byte `locationName:"foo" type:"blob"`
+
+ metadataInputService15TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService15TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure" payload:"Foo"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService16ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService16ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService16ProtocolTest client from just a session.
+// svc := inputservice16protocoltest.New(mySession)
+//
+// // Create an InputService16ProtocolTest client with additional configuration
+// svc := inputservice16protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService16ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService16ProtocolTest {
+ c := p.ClientConfig("inputservice16protocoltest", cfgs...)
+ return newInputService16ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService16ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService16ProtocolTest {
+ svc := &InputService16ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice16protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService16ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService16ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService16TestCaseOperation1 = "OperationName"
+
+// InputService16TestCaseOperation1Request generates a request for the InputService16TestCaseOperation1 operation.
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation1Request(input *InputService16TestShapeInputShape) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService16TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InputService16TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService16TestShapeInputService16TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation1(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation1Output, error) {
+ req, out := c.InputService16TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService16TestCaseOperation2 = "OperationName"
+
+// InputService16TestCaseOperation2Request generates a request for the InputService16TestCaseOperation2 operation.
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation2Request(input *InputService16TestShapeInputShape) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation2Output) {
+ op := &request.Operation{
+ Name: opInputService16TestCaseOperation2,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InputService16TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService16TestShapeInputService16TestCaseOperation2Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation2(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation2Output, error) {
+ req, out := c.InputService16TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService16TestCaseOperation3 = "OperationName"
+
+// InputService16TestCaseOperation3Request generates a request for the InputService16TestCaseOperation3 operation.
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation3Request(input *InputService16TestShapeInputShape) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation3Output) {
+ op := &request.Operation{
+ Name: opInputService16TestCaseOperation3,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InputService16TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService16TestShapeInputService16TestCaseOperation3Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation3(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation3Output, error) {
+ req, out := c.InputService16TestCaseOperation3Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService16TestCaseOperation4 = "OperationName"
+
+// InputService16TestCaseOperation4Request generates a request for the InputService16TestCaseOperation4 operation.
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation4Request(input *InputService16TestShapeInputShape) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation4Output) {
+ op := &request.Operation{
+ Name: opInputService16TestCaseOperation4,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InputService16TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService16TestShapeInputService16TestCaseOperation4Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation4(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation4Output, error) {
+ req, out := c.InputService16TestCaseOperation4Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService16TestShapeFooShape struct {
+ Baz *string `locationName:"baz" type:"string"`
+
+ metadataInputService16TestShapeFooShape `json:"-" xml:"-"`
+}
+
+type metadataInputService16TestShapeFooShape struct {
+ SDKShapeTraits bool `locationName:"foo" type:"structure"`
+}
+
+type InputService16TestShapeInputService16TestCaseOperation1Output struct {
+ metadataInputService16TestShapeInputService16TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService16TestShapeInputService16TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService16TestShapeInputService16TestCaseOperation2Output struct {
+ metadataInputService16TestShapeInputService16TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+type metadataInputService16TestShapeInputService16TestCaseOperation2Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService16TestShapeInputService16TestCaseOperation3Output struct {
+ metadataInputService16TestShapeInputService16TestCaseOperation3Output `json:"-" xml:"-"`
+}
+
+type metadataInputService16TestShapeInputService16TestCaseOperation3Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService16TestShapeInputService16TestCaseOperation4Output struct {
+ metadataInputService16TestShapeInputService16TestCaseOperation4Output `json:"-" xml:"-"`
+}
+
+type metadataInputService16TestShapeInputService16TestCaseOperation4Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService16TestShapeInputShape struct {
+ Foo *InputService16TestShapeFooShape `locationName:"foo" type:"structure"`
+
+ metadataInputService16TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService16TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure" payload:"Foo"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService17ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService17ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService17ProtocolTest client from just a session.
+// svc := inputservice17protocoltest.New(mySession)
+//
+// // Create an InputService17ProtocolTest client with additional configuration
+// svc := inputservice17protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService17ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService17ProtocolTest {
+ c := p.ClientConfig("inputservice17protocoltest", cfgs...)
+ return newInputService17ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService17ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService17ProtocolTest {
+ svc := &InputService17ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice17protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService17ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService17ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService17TestCaseOperation1 = "OperationName"
+
+// InputService17TestCaseOperation1Request generates a request for the InputService17TestCaseOperation1 operation.
+func (c *InputService17ProtocolTest) InputService17TestCaseOperation1Request(input *InputService17TestShapeInputService17TestCaseOperation1Input) (req *request.Request, output *InputService17TestShapeInputService17TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService17TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InputService17TestShapeInputService17TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService17TestShapeInputService17TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService17ProtocolTest) InputService17TestCaseOperation1(input *InputService17TestShapeInputService17TestCaseOperation1Input) (*InputService17TestShapeInputService17TestCaseOperation1Output, error) {
+ req, out := c.InputService17TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService17TestShapeGrant struct {
+ Grantee *InputService17TestShapeGrantee `type:"structure"`
+
+ metadataInputService17TestShapeGrant `json:"-" xml:"-"`
+}
+
+type metadataInputService17TestShapeGrant struct {
+ SDKShapeTraits bool `locationName:"Grant" type:"structure"`
+}
+
+type InputService17TestShapeGrantee struct {
+ EmailAddress *string `type:"string"`
+
+ Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true"`
+
+ metadataInputService17TestShapeGrantee `json:"-" xml:"-"`
+}
+
+type metadataInputService17TestShapeGrantee struct {
+ SDKShapeTraits bool `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
+}
+
+type InputService17TestShapeInputService17TestCaseOperation1Input struct {
+ Grant *InputService17TestShapeGrant `locationName:"Grant" type:"structure"`
+
+ metadataInputService17TestShapeInputService17TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService17TestShapeInputService17TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure" payload:"Grant"`
+}
+
+type InputService17TestShapeInputService17TestCaseOperation1Output struct {
+ metadataInputService17TestShapeInputService17TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService17TestShapeInputService17TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService18ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService18ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService18ProtocolTest client from just a session.
+// svc := inputservice18protocoltest.New(mySession)
+//
+// // Create an InputService18ProtocolTest client with additional configuration
+// svc := inputservice18protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService18ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService18ProtocolTest {
+ c := p.ClientConfig("inputservice18protocoltest", cfgs...)
+ return newInputService18ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService18ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService18ProtocolTest {
+ svc := &InputService18ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice18protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService18ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService18ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService18TestCaseOperation1 = "OperationName"
+
+// InputService18TestCaseOperation1Request generates a request for the InputService18TestCaseOperation1 operation.
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation1Request(input *InputService18TestShapeInputService18TestCaseOperation1Input) (req *request.Request, output *InputService18TestShapeInputService18TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService18TestCaseOperation1,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &InputService18TestShapeInputService18TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService18TestShapeInputService18TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation1(input *InputService18TestShapeInputService18TestCaseOperation1Input) (*InputService18TestShapeInputService18TestCaseOperation1Output, error) {
+ req, out := c.InputService18TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService18TestShapeInputService18TestCaseOperation1Input struct {
+ Bucket *string `location:"uri" type:"string"`
+
+ Key *string `location:"uri" type:"string"`
+
+ metadataInputService18TestShapeInputService18TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService18TestShapeInputService18TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService18TestShapeInputService18TestCaseOperation1Output struct {
+ metadataInputService18TestShapeInputService18TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService18TestShapeInputService18TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService19ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService19ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService19ProtocolTest client from just a session.
+// svc := inputservice19protocoltest.New(mySession)
+//
+// // Create an InputService19ProtocolTest client with additional configuration
+// svc := inputservice19protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService19ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService19ProtocolTest {
+ c := p.ClientConfig("inputservice19protocoltest", cfgs...)
+ return newInputService19ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService19ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService19ProtocolTest {
+ svc := &InputService19ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice19protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService19ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService19ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService19TestCaseOperation1 = "OperationName"
+
+// InputService19TestCaseOperation1Request generates a request for the InputService19TestCaseOperation1 operation.
+func (c *InputService19ProtocolTest) InputService19TestCaseOperation1Request(input *InputService19TestShapeInputShape) (req *request.Request, output *InputService19TestShapeInputService19TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService19TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService19TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService19TestShapeInputService19TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService19ProtocolTest) InputService19TestCaseOperation1(input *InputService19TestShapeInputShape) (*InputService19TestShapeInputService19TestCaseOperation1Output, error) {
+ req, out := c.InputService19TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService19TestCaseOperation2 = "OperationName"
+
+// InputService19TestCaseOperation2Request generates a request for the InputService19TestCaseOperation2 operation.
+func (c *InputService19ProtocolTest) InputService19TestCaseOperation2Request(input *InputService19TestShapeInputShape) (req *request.Request, output *InputService19TestShapeInputService19TestCaseOperation2Output) {
+ op := &request.Operation{
+ Name: opInputService19TestCaseOperation2,
+ HTTPMethod: "POST",
+ HTTPPath: "/path?abc=mno",
+ }
+
+ if input == nil {
+ input = &InputService19TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService19TestShapeInputService19TestCaseOperation2Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService19ProtocolTest) InputService19TestCaseOperation2(input *InputService19TestShapeInputShape) (*InputService19TestShapeInputService19TestCaseOperation2Output, error) {
+ req, out := c.InputService19TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService19TestShapeInputService19TestCaseOperation1Output struct {
+ metadataInputService19TestShapeInputService19TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService19TestShapeInputService19TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService19TestShapeInputService19TestCaseOperation2Output struct {
+ metadataInputService19TestShapeInputService19TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+type metadataInputService19TestShapeInputService19TestCaseOperation2Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService19TestShapeInputShape struct {
+ Foo *string `location:"querystring" locationName:"param-name" type:"string"`
+
+ metadataInputService19TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService19TestShapeInputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService20ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService20ProtocolTest client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create an InputService20ProtocolTest client from just a session.
+// svc := inputservice20protocoltest.New(mySession)
+//
+// // Create an InputService20ProtocolTest client with additional configuration
+// svc := inputservice20protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService20ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService20ProtocolTest {
+ c := p.ClientConfig("inputservice20protocoltest", cfgs...)
+ return newInputService20ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService20ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService20ProtocolTest {
+ svc := &InputService20ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice20protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for an InputService20ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService20ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService20TestCaseOperation1 = "OperationName"
+
+// InputService20TestCaseOperation1Request generates a request for the InputService20TestCaseOperation1 operation.
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation1Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService20TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService20TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService20TestShapeInputService20TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation1(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation1Output, error) {
+ req, out := c.InputService20TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService20TestCaseOperation2 = "OperationName"
+
+// InputService20TestCaseOperation2Request generates a request for the InputService20TestCaseOperation2 operation.
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation2Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation2Output) {
+ op := &request.Operation{
+ Name: opInputService20TestCaseOperation2,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService20TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService20TestShapeInputService20TestCaseOperation2Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation2(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation2Output, error) {
+ req, out := c.InputService20TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService20TestCaseOperation3 = "OperationName"
+
+// InputService20TestCaseOperation3Request generates a request for the InputService20TestCaseOperation3 operation.
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation3Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation3Output) {
+ op := &request.Operation{
+ Name: opInputService20TestCaseOperation3,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService20TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService20TestShapeInputService20TestCaseOperation3Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation3(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation3Output, error) {
+ req, out := c.InputService20TestCaseOperation3Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService20TestCaseOperation4 = "OperationName"
+
+// InputService20TestCaseOperation4Request generates a request for the InputService20TestCaseOperation4 operation.
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation4Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation4Output) {
+ op := &request.Operation{
+ Name: opInputService20TestCaseOperation4,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService20TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService20TestShapeInputService20TestCaseOperation4Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation4(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation4Output, error) {
+ req, out := c.InputService20TestCaseOperation4Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService20TestCaseOperation5 = "OperationName"
+
+// InputService20TestCaseOperation5Request generates a request for the InputService20TestCaseOperation5 operation.
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation5Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation5Output) {
+ op := &request.Operation{
+ Name: opInputService20TestCaseOperation5,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService20TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService20TestShapeInputService20TestCaseOperation5Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation5(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation5Output, error) {
+ req, out := c.InputService20TestCaseOperation5Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opInputService20TestCaseOperation6 = "OperationName"
+
+// InputService20TestCaseOperation6Request generates a request for the InputService20TestCaseOperation6 operation.
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation6Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation6Output) {
+ op := &request.Operation{
+ Name: opInputService20TestCaseOperation6,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService20TestShapeInputShape{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService20TestShapeInputService20TestCaseOperation6Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation6(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation6Output, error) {
+ req, out := c.InputService20TestCaseOperation6Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService20TestShapeInputService20TestCaseOperation1Output struct {
+ metadataInputService20TestShapeInputService20TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService20TestShapeInputService20TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService20TestShapeInputService20TestCaseOperation2Output struct {
+ metadataInputService20TestShapeInputService20TestCaseOperation2Output `json:"-" xml:"-"`
+}
+
+type metadataInputService20TestShapeInputService20TestCaseOperation2Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService20TestShapeInputService20TestCaseOperation3Output struct {
+ metadataInputService20TestShapeInputService20TestCaseOperation3Output `json:"-" xml:"-"`
+}
+
+type metadataInputService20TestShapeInputService20TestCaseOperation3Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService20TestShapeInputService20TestCaseOperation4Output struct {
+ metadataInputService20TestShapeInputService20TestCaseOperation4Output `json:"-" xml:"-"`
+}
+
+type metadataInputService20TestShapeInputService20TestCaseOperation4Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService20TestShapeInputService20TestCaseOperation5Output struct {
+ metadataInputService20TestShapeInputService20TestCaseOperation5Output `json:"-" xml:"-"`
+}
+
+type metadataInputService20TestShapeInputService20TestCaseOperation5Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService20TestShapeInputService20TestCaseOperation6Output struct {
+ metadataInputService20TestShapeInputService20TestCaseOperation6Output `json:"-" xml:"-"`
+}
+
+type metadataInputService20TestShapeInputService20TestCaseOperation6Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService20TestShapeInputShape struct {
+ RecursiveStruct *InputService20TestShapeRecursiveStructType `type:"structure"`
+
+ metadataInputService20TestShapeInputShape `json:"-" xml:"-"`
+}
+
+type metadataInputService20TestShapeInputShape struct {
+ SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"`
+}
+
+type InputService20TestShapeRecursiveStructType struct {
+ NoRecurse *string `type:"string"`
+
+ RecursiveList []*InputService20TestShapeRecursiveStructType `type:"list"`
+
+ RecursiveMap map[string]*InputService20TestShapeRecursiveStructType `type:"map"`
+
+ RecursiveStruct *InputService20TestShapeRecursiveStructType `type:"structure"`
+
+ metadataInputService20TestShapeRecursiveStructType `json:"-" xml:"-"`
+}
+
+type metadataInputService20TestShapeRecursiveStructType struct {
+ SDKShapeTraits bool `type:"structure"`
+}
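
// Editor's sketch (illustrative only, not part of the vendored diff): the shape
// above nests itself through RecursiveStruct, RecursiveList and RecursiveMap.
// This hypothetical helper mirrors the RecursiveShapes tests further below and
// runs only the build handler, so nothing is sent over the network.
func exampleInputService20RecursiveBuild() *request.Request {
	svc := NewInputService20ProtocolTest(session.New(), &aws.Config{Endpoint: aws.String("https://test")})

	// One level of nesting; deeper nesting simply repeats the RecursiveStruct field.
	req, _ := svc.InputService20TestCaseOperation1Request(&InputService20TestShapeInputShape{
		RecursiveStruct: &InputService20TestShapeRecursiveStructType{
			NoRecurse: aws.String("foo"),
		},
	})

	// Serialize the input into req.HTTPRequest: POST /path with an XML body
	// rooted at <OperationRequest xmlns="https://foo/">, per the tags above.
	restxml.Build(req)
	return req
}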
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type InputService21ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the InputService21ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a InputService21ProtocolTest client from just a session.
+// svc := inputservice21protocoltest.New(mySession)
+//
+// // Create a InputService21ProtocolTest client with additional configuration
+// svc := inputservice21protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewInputService21ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService21ProtocolTest {
+ c := p.ClientConfig("inputservice21protocoltest", cfgs...)
+ return newInputService21ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newInputService21ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService21ProtocolTest {
+ svc := &InputService21ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "inputservice21protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-01-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a InputService21ProtocolTest operation and runs any
+// custom request initialization.
+func (c *InputService21ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opInputService21TestCaseOperation1 = "OperationName"
+
+// InputService21TestCaseOperation1Request generates a request for the InputService21TestCaseOperation1 operation.
+func (c *InputService21ProtocolTest) InputService21TestCaseOperation1Request(input *InputService21TestShapeInputService21TestCaseOperation1Input) (req *request.Request, output *InputService21TestShapeInputService21TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opInputService21TestCaseOperation1,
+ HTTPMethod: "POST",
+ HTTPPath: "/path",
+ }
+
+ if input == nil {
+ input = &InputService21TestShapeInputService21TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &InputService21TestShapeInputService21TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *InputService21ProtocolTest) InputService21TestCaseOperation1(input *InputService21TestShapeInputService21TestCaseOperation1Input) (*InputService21TestShapeInputService21TestCaseOperation1Output, error) {
+ req, out := c.InputService21TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type InputService21TestShapeInputService21TestCaseOperation1Input struct {
+ TimeArgInHeader *time.Time `location:"header" locationName:"x-amz-timearg" type:"timestamp" timestampFormat:"rfc822"`
+
+ metadataInputService21TestShapeInputService21TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataInputService21TestShapeInputService21TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type InputService21TestShapeInputService21TestCaseOperation1Output struct {
+ metadataInputService21TestShapeInputService21TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataInputService21TestShapeInputService21TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
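
// Editor's sketch (illustrative only, not part of the vendored diff): the
// `location:"header" locationName:"x-amz-timearg" timestampFormat:"rfc822"`
// tags above route TimeArgInHeader into an HTTP header in RFC 822 form, as
// TestInputService21ProtocolTestTimestampInHeaderCase1 below asserts.
func exampleInputService21TimestampHeader() string {
	svc := NewInputService21ProtocolTest(session.New(), &aws.Config{Endpoint: aws.String("https://test")})

	req, _ := svc.InputService21TestCaseOperation1Request(&InputService21TestShapeInputService21TestCaseOperation1Input{
		TimeArgInHeader: aws.Time(time.Unix(1422172800, 0)),
	})
	restxml.Build(req)

	// Expected value per the test below: "Sun, 25 Jan 2015 08:00:00 GMT".
	return req.HTTPRequest.Header.Get("x-amz-timearg")
}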
+
+//
+// Tests begin here
+//
+
+func TestInputService1ProtocolTestBasicXMLSerializationCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService1TestShapeInputShape{
+ Description: aws.String("bar"),
+ Name: aws.String("foo"),
+ }
+ req, _ := svc.InputService1TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `barfoo`, util.Trim(string(body)), InputService1TestShapeInputShape{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
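
// Editor's note (illustrative only, not part of the vendored diff): every build
// test in this file follows the same skeleton; "SomeOperation", expectedXML and
// expectedURL below are placeholders, not identifiers from this package.
//
//	req, _ := svc.SomeOperationRequest(input)  // construct, but never Send()
//	restxml.Build(req)                         // run only the build handler
//	body := util.SortXML(req.HTTPRequest.Body) // canonicalize the XML body
//	awstesting.AssertXML(t, expectedXML, util.Trim(string(body)), shape)
//	awstesting.AssertURL(t, expectedURL, req.HTTPRequest.URL.String())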
+
+func TestInputService1ProtocolTestBasicXMLSerializationCase2(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService1TestShapeInputShape{
+ Description: aws.String("bar"),
+ Name: aws.String("foo"),
+ }
+ req, _ := svc.InputService1TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `barfoo`, util.Trim(string(body)), InputService1TestShapeInputShape{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService1ProtocolTestBasicXMLSerializationCase3(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService1TestShapeInputService1TestCaseOperation3Input{}
+ req, _ := svc.InputService1TestCaseOperation3Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService2ProtocolTestSerializeOtherScalarTypesCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService2TestShapeInputService2TestCaseOperation1Input{
+ First: aws.Bool(true),
+ Fourth: aws.Int64(3),
+ Second: aws.Bool(false),
+ Third: aws.Float64(1.2),
+ }
+ req, _ := svc.InputService2TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `true3false1.2`, util.Trim(string(body)), InputService2TestShapeInputService2TestCaseOperation1Input{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService3ProtocolTestNestedStructuresCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService3TestShapeInputShape{
+ Description: aws.String("baz"),
+ SubStructure: &InputService3TestShapeSubStructure{
+ Bar: aws.String("b"),
+ Foo: aws.String("a"),
+ },
+ }
+ req, _ := svc.InputService3TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `bazba`, util.Trim(string(body)), InputService3TestShapeInputShape{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService3ProtocolTestNestedStructuresCase2(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService3TestShapeInputShape{
+ Description: aws.String("baz"),
+ SubStructure: &InputService3TestShapeSubStructure{
+ Foo: aws.String("a"),
+ },
+ }
+ req, _ := svc.InputService3TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `baza`, util.Trim(string(body)), InputService3TestShapeInputShape{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService4ProtocolTestNestedStructuresCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService4TestShapeInputService4TestCaseOperation1Input{
+ Description: aws.String("baz"),
+ SubStructure: &InputService4TestShapeSubStructure{},
+ }
+ req, _ := svc.InputService4TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `baz`, util.Trim(string(body)), InputService4TestShapeInputService4TestCaseOperation1Input{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService5ProtocolTestNonFlattenedListsCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService5TestShapeInputService5TestCaseOperation1Input{
+ ListParam: []*string{
+ aws.String("one"),
+ aws.String("two"),
+ aws.String("three"),
+ },
+ }
+ req, _ := svc.InputService5TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService5TestShapeInputService5TestCaseOperation1Input{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService6ProtocolTestNonFlattenedListsWithLocationNameCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService6TestShapeInputService6TestCaseOperation1Input{
+ ListParam: []*string{
+ aws.String("one"),
+ aws.String("two"),
+ aws.String("three"),
+ },
+ }
+ req, _ := svc.InputService6TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService6TestShapeInputService6TestCaseOperation1Input{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService7ProtocolTestFlattenedListsCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService7TestShapeInputService7TestCaseOperation1Input{
+ ListParam: []*string{
+ aws.String("one"),
+ aws.String("two"),
+ aws.String("three"),
+ },
+ }
+ req, _ := svc.InputService7TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService7TestShapeInputService7TestCaseOperation1Input{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService8ProtocolTestFlattenedListsWithLocationNameCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService8TestShapeInputService8TestCaseOperation1Input{
+ ListParam: []*string{
+ aws.String("one"),
+ aws.String("two"),
+ aws.String("three"),
+ },
+ }
+ req, _ := svc.InputService8TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService8TestShapeInputService8TestCaseOperation1Input{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService9ProtocolTestListOfStructuresCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService9TestShapeInputService9TestCaseOperation1Input{
+ ListParam: []*InputService9TestShapeSingleFieldStruct{
+ {
+ Element: aws.String("one"),
+ },
+ {
+ Element: aws.String("two"),
+ },
+ {
+ Element: aws.String("three"),
+ },
+ },
+ }
+ req, _ := svc.InputService9TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService9TestShapeInputService9TestCaseOperation1Input{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService10ProtocolTestBlobAndTimestampShapesCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService10TestShapeInputService10TestCaseOperation1Input{
+ StructureParam: &InputService10TestShapeStructureShape{
+ B: []byte("foo"),
+ T: aws.Time(time.Unix(1422172800, 0)),
+ },
+ }
+ req, _ := svc.InputService10TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `Zm9v2015-01-25T08:00:00Z`, util.Trim(string(body)), InputService10TestShapeInputService10TestCaseOperation1Input{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService11ProtocolTestHeaderMapsCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService11TestShapeInputService11TestCaseOperation1Input{
+ Foo: map[string]*string{
+ "a": aws.String("b"),
+ "c": aws.String("d"),
+ },
+ }
+ req, _ := svc.InputService11TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+ assert.Equal(t, "b", r.Header.Get("x-foo-a"))
+ assert.Equal(t, "d", r.Header.Get("x-foo-c"))
+
+}
+
+func TestInputService12ProtocolTestStringToStringMapsInQuerystringCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService12TestShapeInputService12TestCaseOperation1Input{
+ PipelineId: aws.String("foo"),
+ QueryDoc: map[string]*string{
+ "bar": aws.String("baz"),
+ "fizz": aws.String("buzz"),
+ },
+ }
+ req, _ := svc.InputService12TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService13ProtocolTestStringToStringListMapsInQuerystringCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService13TestShapeInputService13TestCaseOperation1Input{
+ PipelineId: aws.String("id"),
+ QueryDoc: map[string][]*string{
+ "fizz": {
+ aws.String("buzz"),
+ aws.String("pop"),
+ },
+ "foo": {
+ aws.String("bar"),
+ aws.String("baz"),
+ },
+ },
+ }
+ req, _ := svc.InputService13TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/id?foo=bar&foo=baz&fizz=buzz&fizz=pop", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService14ProtocolTestStringPayloadCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService14ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService14TestShapeInputService14TestCaseOperation1Input{
+ Foo: aws.String("bar"),
+ }
+ req, _ := svc.InputService14TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, `bar`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService15ProtocolTestBlobPayloadCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService15TestShapeInputShape{
+ Foo: []byte("bar"),
+ }
+ req, _ := svc.InputService15TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ assert.Equal(t, `bar`, util.Trim(string(body)))
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService15ProtocolTestBlobPayloadCase2(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService15TestShapeInputShape{}
+ req, _ := svc.InputService15TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService16ProtocolTestStructurePayloadCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService16ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService16TestShapeInputShape{
+ Foo: &InputService16TestShapeFooShape{
+ Baz: aws.String("bar"),
+ },
+ }
+ req, _ := svc.InputService16TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `bar`, util.Trim(string(body)), InputService16TestShapeInputShape{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService16ProtocolTestStructurePayloadCase2(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService16ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService16TestShapeInputShape{}
+ req, _ := svc.InputService16TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService16ProtocolTestStructurePayloadCase3(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService16ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService16TestShapeInputShape{
+ Foo: &InputService16TestShapeFooShape{},
+ }
+ req, _ := svc.InputService16TestCaseOperation3Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, ``, util.Trim(string(body)), InputService16TestShapeInputShape{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService16ProtocolTestStructurePayloadCase4(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService16ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService16TestShapeInputShape{}
+ req, _ := svc.InputService16TestCaseOperation4Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService17ProtocolTestXMLAttributeCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService17ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService17TestShapeInputService17TestCaseOperation1Input{
+ Grant: &InputService17TestShapeGrant{
+ Grantee: &InputService17TestShapeGrantee{
+ EmailAddress: aws.String("foo@example.com"),
+ Type: aws.String("CanonicalUser"),
+ },
+ },
+ }
+ req, _ := svc.InputService17TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `foo@example.com`, util.Trim(string(body)), InputService17TestShapeInputService17TestCaseOperation1Input{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService18ProtocolTestGreedyKeysCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService18ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService18TestShapeInputService18TestCaseOperation1Input{
+ Bucket: aws.String("my/bucket"),
+ Key: aws.String("testing /123"),
+ }
+ req, _ := svc.InputService18TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/my%2Fbucket/testing%20/123", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService19ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService19ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService19TestShapeInputShape{}
+ req, _ := svc.InputService19TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/path", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService19ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase2(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService19ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService19TestShapeInputShape{
+ Foo: aws.String(""),
+ }
+ req, _ := svc.InputService19TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/path?abc=mno¶m-name=", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService20ProtocolTestRecursiveShapesCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService20TestShapeInputShape{
+ RecursiveStruct: &InputService20TestShapeRecursiveStructType{
+ NoRecurse: aws.String("foo"),
+ },
+ }
+ req, _ := svc.InputService20TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `foo`, util.Trim(string(body)), InputService20TestShapeInputShape{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/path", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService20ProtocolTestRecursiveShapesCase2(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService20TestShapeInputShape{
+ RecursiveStruct: &InputService20TestShapeRecursiveStructType{
+ RecursiveStruct: &InputService20TestShapeRecursiveStructType{
+ NoRecurse: aws.String("foo"),
+ },
+ },
+ }
+ req, _ := svc.InputService20TestCaseOperation2Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `foo`, util.Trim(string(body)), InputService20TestShapeInputShape{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/path", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService20ProtocolTestRecursiveShapesCase3(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService20TestShapeInputShape{
+ RecursiveStruct: &InputService20TestShapeRecursiveStructType{
+ RecursiveStruct: &InputService20TestShapeRecursiveStructType{
+ RecursiveStruct: &InputService20TestShapeRecursiveStructType{
+ RecursiveStruct: &InputService20TestShapeRecursiveStructType{
+ NoRecurse: aws.String("foo"),
+ },
+ },
+ },
+ },
+ }
+ req, _ := svc.InputService20TestCaseOperation3Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `foo`, util.Trim(string(body)), InputService20TestShapeInputShape{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/path", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService20ProtocolTestRecursiveShapesCase4(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService20TestShapeInputShape{
+ RecursiveStruct: &InputService20TestShapeRecursiveStructType{
+ RecursiveList: []*InputService20TestShapeRecursiveStructType{
+ {
+ NoRecurse: aws.String("foo"),
+ },
+ {
+ NoRecurse: aws.String("bar"),
+ },
+ },
+ },
+ }
+ req, _ := svc.InputService20TestCaseOperation4Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `foobar`, util.Trim(string(body)), InputService20TestShapeInputShape{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/path", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService20ProtocolTestRecursiveShapesCase5(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService20TestShapeInputShape{
+ RecursiveStruct: &InputService20TestShapeRecursiveStructType{
+ RecursiveList: []*InputService20TestShapeRecursiveStructType{
+ {
+ NoRecurse: aws.String("foo"),
+ },
+ {
+ RecursiveStruct: &InputService20TestShapeRecursiveStructType{
+ NoRecurse: aws.String("bar"),
+ },
+ },
+ },
+ },
+ }
+ req, _ := svc.InputService20TestCaseOperation5Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `foobar`, util.Trim(string(body)), InputService20TestShapeInputShape{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/path", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService20ProtocolTestRecursiveShapesCase6(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService20TestShapeInputShape{
+ RecursiveStruct: &InputService20TestShapeRecursiveStructType{
+ RecursiveMap: map[string]*InputService20TestShapeRecursiveStructType{
+ "bar": {
+ NoRecurse: aws.String("bar"),
+ },
+ "foo": {
+ NoRecurse: aws.String("foo"),
+ },
+ },
+ },
+ }
+ req, _ := svc.InputService20TestCaseOperation6Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert body
+ assert.NotNil(t, r.Body)
+ body := util.SortXML(r.Body)
+ awstesting.AssertXML(t, `foofoobarbar`, util.Trim(string(body)), InputService20TestShapeInputShape{})
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/path", r.URL.String())
+
+ // assert headers
+
+}
+
+func TestInputService21ProtocolTestTimestampInHeaderCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewInputService21ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ input := &InputService21TestShapeInputService21TestCaseOperation1Input{
+ TimeArgInHeader: aws.Time(time.Unix(1422172800, 0)),
+ }
+ req, _ := svc.InputService21TestCaseOperation1Request(input)
+ r := req.HTTPRequest
+
+ // build request
+ restxml.Build(req)
+ assert.NoError(t, req.Error)
+
+ // assert URL
+ awstesting.AssertURL(t, "https://test/path", r.URL.String())
+
+ // assert headers
+ assert.Equal(t, "Sun, 25 Jan 2015 08:00:00 GMT", r.Header.Get("x-amz-timearg"))
+
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/restxml.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
similarity index 76%
rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/restxml.go
rename to Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
index 1e88f901b..a6bc0c74c 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/restxml.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
@@ -2,8 +2,8 @@
// requests and responses.
package restxml
-//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/rest-xml.json build_test.go
-//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/rest-xml.json unmarshal_test.go
+//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go
+//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
import (
"bytes"
@@ -11,9 +11,9 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/internal/protocol/query"
- "github.com/aws/aws-sdk-go/internal/protocol/rest"
- "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)
// Build builds a request payload for the REST XML protocol.
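
// Editor's note (not part of the vendored diff): the relocated go:generate
// directives above mean the protocol test files are regenerated from the
// rest-xml.json fixtures by running, from within this package directory:
//
//	go generate
//
// which invokes ../../../models/protocol_tests/generate.go to rewrite
// build_test.go and unmarshal_test.go.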
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restxml/unmarshal_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restxml/unmarshal_test.go
new file mode 100644
index 000000000..a88d31922
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restxml/unmarshal_test.go
@@ -0,0 +1,1606 @@
+package restxml_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "encoding/xml"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/awstesting"
+ "github.com/aws/aws-sdk-go/private/protocol/restxml"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+ "github.com/aws/aws-sdk-go/private/signer/v4"
+ "github.com/aws/aws-sdk-go/private/util"
+ "github.com/stretchr/testify/assert"
+)
+
+var _ bytes.Buffer // always import bytes
+var _ http.Request
+var _ json.Marshaler
+var _ time.Time
+var _ xmlutil.XMLNode
+var _ xml.Attr
+var _ = awstesting.GenerateAssertions
+var _ = ioutil.Discard
+var _ = util.Trim("")
+var _ = url.Values{}
+var _ = io.EOF
+var _ = aws.String
+
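// Editor's note (illustrative only, not part of the vendored diff): the blank
// assignments above are a code-generation idiom. The generator always emits the
// full import set, and Go refuses to compile unused imports, so each package is
// referenced once, for example:
//
//	var _ = ioutil.Discard // keeps "io/ioutil" imported even when no case needs it
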
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type OutputService1ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService1ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a OutputService1ProtocolTest client from just a session.
+// svc := outputservice1protocoltest.New(mySession)
+//
+// // Create a OutputService1ProtocolTest client with additional configuration
+// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest {
+ c := p.ClientConfig("outputservice1protocoltest", cfgs...)
+ return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest {
+ svc := &OutputService1ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice1protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService1TestCaseOperation1 = "OperationName"
+
+// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation.
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputShape) {
+ op := &request.Operation{
+ Name: opOutputService1TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService1TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputShape, error) {
+ req, out := c.OutputService1TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opOutputService1TestCaseOperation2 = "OperationName"
+
+// OutputService1TestCaseOperation2Request generates a request for the OutputService1TestCaseOperation2 operation.
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation2Request(input *OutputService1TestShapeOutputService1TestCaseOperation2Input) (req *request.Request, output *OutputService1TestShapeOutputShape) {
+ op := &request.Operation{
+ Name: opOutputService1TestCaseOperation2,
+ }
+
+ if input == nil {
+ input = &OutputService1TestShapeOutputService1TestCaseOperation2Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService1TestShapeOutputShape{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation2(input *OutputService1TestShapeOutputService1TestCaseOperation2Input) (*OutputService1TestShapeOutputShape, error) {
+ req, out := c.OutputService1TestCaseOperation2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService1TestShapeOutputService1TestCaseOperation1Input struct {
+ metadataOutputService1TestShapeOutputService1TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService1TestShapeOutputService1TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService1TestShapeOutputService1TestCaseOperation2Input struct {
+ metadataOutputService1TestShapeOutputService1TestCaseOperation2Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService1TestShapeOutputService1TestCaseOperation2Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService1TestShapeOutputShape struct {
+ Char *string `type:"character"`
+
+ Double *float64 `type:"double"`
+
+ FalseBool *bool `type:"boolean"`
+
+ Float *float64 `type:"float"`
+
+ ImaHeader *string `location:"header" type:"string"`
+
+ ImaHeaderLocation *string `location:"header" locationName:"X-Foo" type:"string"`
+
+ Long *int64 `type:"long"`
+
+ Num *int64 `locationName:"FooNum" type:"integer"`
+
+ Str *string `type:"string"`
+
+ Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ TrueBool *bool `type:"boolean"`
+
+ metadataOutputService1TestShapeOutputShape `json:"-" xml:"-"`
+}
+
+type metadataOutputService1TestShapeOutputShape struct {
+ SDKShapeTraits bool `type:"structure"`
+}
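
// Editor's sketch (illustrative only, not part of the vendored diff): the
// unmarshal handlers registered in newOutputService1ProtocolTestClient populate
// this shape from a canned HTTP response. The header and XML values here are
// hypothetical, chosen only to show which struct tag routes to which field.
func exampleOutputService1Unmarshal() *OutputService1TestShapeOutputShape {
	svc := NewOutputService1ProtocolTest(session.New(), &aws.Config{Endpoint: aws.String("https://test")})

	req, out := svc.OutputService1TestCaseOperation1Request(nil)
	req.HTTPResponse = &http.Response{
		StatusCode: 200,
		// `location:"header"` fields are read from response headers...
		Header: http.Header{"Imaheader": []string{"test"}, "X-Foo": []string{"abc"}},
		// ...while the remaining fields are decoded from the XML body.
		Body: ioutil.NopCloser(bytes.NewReader([]byte(
			"<OperationNameResponse><Str>myname</Str><FooNum>123</FooNum></OperationNameResponse>"))),
	}

	restxml.UnmarshalMeta(req) // headers -> ImaHeader, ImaHeaderLocation
	restxml.Unmarshal(req)     // body    -> Str, Num, and the other scalars
	return out
}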
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type OutputService2ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService2ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a OutputService2ProtocolTest client from just a session.
+// svc := outputservice2protocoltest.New(mySession)
+//
+// // Create a OutputService2ProtocolTest client with additional configuration
+// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest {
+ c := p.ClientConfig("outputservice2protocoltest", cfgs...)
+ return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest {
+ svc := &OutputService2ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice2protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService2TestCaseOperation1 = "OperationName"
+
+// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation.
+func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService2TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) {
+ req, out := c.OutputService2TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService2TestShapeOutputService2TestCaseOperation1Input struct {
+ metadataOutputService2TestShapeOutputService2TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService2TestShapeOutputService2TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService2TestShapeOutputService2TestCaseOperation1Output struct {
+ Blob []byte `type:"blob"`
+
+ metadataOutputService2TestShapeOutputService2TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService2TestShapeOutputService2TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type OutputService3ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService3ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a OutputService3ProtocolTest client from just a session.
+// svc := outputservice3protocoltest.New(mySession)
+//
+// // Create a OutputService3ProtocolTest client with additional configuration
+// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest {
+ c := p.ClientConfig("outputservice3protocoltest", cfgs...)
+ return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest {
+ svc := &OutputService3ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice3protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService3TestCaseOperation1 = "OperationName"
+
+// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation.
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService3TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) {
+ req, out := c.OutputService3TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService3TestShapeOutputService3TestCaseOperation1Input struct {
+ metadataOutputService3TestShapeOutputService3TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService3TestShapeOutputService3TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService3TestShapeOutputService3TestCaseOperation1Output struct {
+ ListMember []*string `type:"list"`
+
+ metadataOutputService3TestShapeOutputService3TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService3TestShapeOutputService3TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type OutputService4ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService4ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a OutputService4ProtocolTest client from just a session.
+// svc := outputservice4protocoltest.New(mySession)
+//
+// // Create a OutputService4ProtocolTest client with additional configuration
+// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest {
+ c := p.ClientConfig("outputservice4protocoltest", cfgs...)
+ return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest {
+ svc := &OutputService4ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice4protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService4TestCaseOperation1 = "OperationName"
+
+// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation.
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputService4TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService4TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService4TestShapeOutputService4TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputService4TestCaseOperation1Output, error) {
+ req, out := c.OutputService4TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService4TestShapeOutputService4TestCaseOperation1Input struct {
+ metadataOutputService4TestShapeOutputService4TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService4TestShapeOutputService4TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService4TestShapeOutputService4TestCaseOperation1Output struct {
+ ListMember []*string `locationNameList:"item" type:"list"`
+
+ metadataOutputService4TestShapeOutputService4TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService4TestShapeOutputService4TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type OutputService5ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService5ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a OutputService5ProtocolTest client from just a session.
+// svc := outputservice5protocoltest.New(mySession)
+//
+// // Create a OutputService5ProtocolTest client with additional configuration
+// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest {
+ c := p.ClientConfig("outputservice5protocoltest", cfgs...)
+ return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest {
+ svc := &OutputService5ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice5protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService5TestCaseOperation1 = "OperationName"
+
+// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation.
+func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService5TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) {
+ req, out := c.OutputService5TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService5TestShapeOutputService5TestCaseOperation1Input struct {
+ metadataOutputService5TestShapeOutputService5TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService5TestShapeOutputService5TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService5TestShapeOutputService5TestCaseOperation1Output struct {
+ ListMember []*string `type:"list" flattened:"true"`
+
+ metadataOutputService5TestShapeOutputService5TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService5TestShapeOutputService5TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type OutputService6ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService6ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a OutputService6ProtocolTest client from just a session.
+// svc := outputservice6protocoltest.New(mySession)
+//
+// // Create a OutputService6ProtocolTest client with additional configuration
+// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest {
+ c := p.ClientConfig("outputservice6protocoltest", cfgs...)
+ return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest {
+ svc := &OutputService6ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice6protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService6TestCaseOperation1 = "OperationName"
+
+// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation.
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService6TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) {
+ req, out := c.OutputService6TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService6TestShapeOutputService6TestCaseOperation1Input struct {
+ metadataOutputService6TestShapeOutputService6TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService6TestShapeOutputService6TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService6TestShapeOutputService6TestCaseOperation1Output struct {
+ Map map[string]*OutputService6TestShapeSingleStructure `type:"map"`
+
+ metadataOutputService6TestShapeOutputService6TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService6TestShapeOutputService6TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService6TestShapeSingleStructure struct {
+ Foo *string `locationName:"foo" type:"string"`
+
+ metadataOutputService6TestShapeSingleStructure `json:"-" xml:"-"`
+}
+
+type metadataOutputService6TestShapeSingleStructure struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to use concurrently.
+// However, it is not safe to mutate any of the client's properties.
+type OutputService7ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService7ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a OutputService7ProtocolTest client from just a session.
+// svc := outputservice7protocoltest.New(mySession)
+//
+// // Create a OutputService7ProtocolTest client with additional configuration
+// svc := outputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService7ProtocolTest {
+ c := p.ClientConfig("outputservice7protocoltest", cfgs...)
+ return newOutputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService7ProtocolTest {
+ svc := &OutputService7ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice7protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService7TestCaseOperation1 = "OperationName"
+
+// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation.
+func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *request.Request, output *OutputService7TestShapeOutputService7TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService7TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService7TestShapeOutputService7TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputService7TestCaseOperation1Output, error) {
+ req, out := c.OutputService7TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService7TestShapeOutputService7TestCaseOperation1Input struct {
+ metadataOutputService7TestShapeOutputService7TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService7TestShapeOutputService7TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService7TestShapeOutputService7TestCaseOperation1Output struct {
+ Map map[string]*string `type:"map" flattened:"true"`
+
+ metadataOutputService7TestShapeOutputService7TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService7TestShapeOutputService7TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to use concurrently.
+// However, it is not safe to mutate any of the client's properties.
+type OutputService8ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService8ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a OutputService8ProtocolTest client from just a session.
+// svc := outputservice8protocoltest.New(mySession)
+//
+// // Create a OutputService8ProtocolTest client with additional configuration
+// svc := outputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService8ProtocolTest {
+ c := p.ClientConfig("outputservice8protocoltest", cfgs...)
+ return newOutputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService8ProtocolTest {
+ svc := &OutputService8ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice8protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService8TestCaseOperation1 = "OperationName"
+
+// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation.
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *request.Request, output *OutputService8TestShapeOutputService8TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService8TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService8TestShapeOutputService8TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputService8TestCaseOperation1Output, error) {
+ req, out := c.OutputService8TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService8TestShapeOutputService8TestCaseOperation1Input struct {
+ metadataOutputService8TestShapeOutputService8TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService8TestShapeOutputService8TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService8TestShapeOutputService8TestCaseOperation1Output struct {
+ Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map"`
+
+ metadataOutputService8TestShapeOutputService8TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService8TestShapeOutputService8TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to use concurrently.
+// However, it is not safe to mutate any of the client's properties.
+type OutputService9ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService9ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a OutputService9ProtocolTest client from just a session.
+// svc := outputservice9protocoltest.New(mySession)
+//
+// // Create a OutputService9ProtocolTest client with additional configuration
+// svc := outputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService9ProtocolTest {
+ c := p.ClientConfig("outputservice9protocoltest", cfgs...)
+ return newOutputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService9ProtocolTest {
+ svc := &OutputService9ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice9protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService9TestCaseOperation1 = "OperationName"
+
+// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation.
+func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *request.Request, output *OutputService9TestShapeOutputService9TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService9TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService9TestShapeOutputService9TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputService9TestCaseOperation1Output, error) {
+ req, out := c.OutputService9TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService9TestShapeOutputService9TestCaseOperation1Input struct {
+ metadataOutputService9TestShapeOutputService9TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService9TestShapeOutputService9TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService9TestShapeOutputService9TestCaseOperation1Output struct {
+ Data *OutputService9TestShapeSingleStructure `type:"structure"`
+
+ Header *string `location:"header" locationName:"X-Foo" type:"string"`
+
+ metadataOutputService9TestShapeOutputService9TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService9TestShapeOutputService9TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure" payload:"Data"`
+}
+
+type OutputService9TestShapeSingleStructure struct {
+ Foo *string `type:"string"`
+
+ metadataOutputService9TestShapeSingleStructure `json:"-" xml:"-"`
+}
+
+type metadataOutputService9TestShapeSingleStructure struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to use concurrently.
+// However, it is not safe to mutate any of the client's properties.
+type OutputService10ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService10ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a OutputService10ProtocolTest client from just a session.
+// svc := outputservice10protocoltest.New(mySession)
+//
+// // Create a OutputService10ProtocolTest client with additional configuration
+// svc := outputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService10ProtocolTest {
+ c := p.ClientConfig("outputservice10protocoltest", cfgs...)
+ return newOutputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService10ProtocolTest {
+ svc := &OutputService10ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice10protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a OutputService10ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService10TestCaseOperation1 = "OperationName"
+
+// OutputService10TestCaseOperation1Request generates a request for the OutputService10TestCaseOperation1 operation.
+func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *request.Request, output *OutputService10TestShapeOutputService10TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService10TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService10TestShapeOutputService10TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService10TestShapeOutputService10TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (*OutputService10TestShapeOutputService10TestCaseOperation1Output, error) {
+ req, out := c.OutputService10TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService10TestShapeOutputService10TestCaseOperation1Input struct {
+ metadataOutputService10TestShapeOutputService10TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService10TestShapeOutputService10TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService10TestShapeOutputService10TestCaseOperation1Output struct {
+ Stream []byte `type:"blob"`
+
+ metadataOutputService10TestShapeOutputService10TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService10TestShapeOutputService10TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure" payload:"Stream"`
+}
+
+// The service client's operations are safe to use concurrently.
+// However, it is not safe to mutate any of the client's properties.
+type OutputService11ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService11ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a OutputService11ProtocolTest client from just a session.
+// svc := outputservice11protocoltest.New(mySession)
+//
+// // Create a OutputService11ProtocolTest client with additional configuration
+// svc := outputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService11ProtocolTest {
+ c := p.ClientConfig("outputservice11protocoltest", cfgs...)
+ return newOutputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService11ProtocolTest {
+ svc := &OutputService11ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice11protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a OutputService11ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService11TestCaseOperation1 = "OperationName"
+
+// OutputService11TestCaseOperation1Request generates a request for the OutputService11TestCaseOperation1 operation.
+func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *request.Request, output *OutputService11TestShapeOutputService11TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService11TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService11TestShapeOutputService11TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService11TestShapeOutputService11TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (*OutputService11TestShapeOutputService11TestCaseOperation1Output, error) {
+ req, out := c.OutputService11TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService11TestShapeOutputService11TestCaseOperation1Input struct {
+ metadataOutputService11TestShapeOutputService11TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService11TestShapeOutputService11TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService11TestShapeOutputService11TestCaseOperation1Output struct {
+ Char *string `location:"header" locationName:"x-char" type:"character"`
+
+ Double *float64 `location:"header" locationName:"x-double" type:"double"`
+
+ FalseBool *bool `location:"header" locationName:"x-false-bool" type:"boolean"`
+
+ Float *float64 `location:"header" locationName:"x-float" type:"float"`
+
+ Integer *int64 `location:"header" locationName:"x-int" type:"integer"`
+
+ Long *int64 `location:"header" locationName:"x-long" type:"long"`
+
+ Str *string `location:"header" locationName:"x-str" type:"string"`
+
+ Timestamp *time.Time `location:"header" locationName:"x-timestamp" type:"timestamp" timestampFormat:"iso8601"`
+
+ TrueBool *bool `location:"header" locationName:"x-true-bool" type:"boolean"`
+
+ metadataOutputService11TestShapeOutputService11TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService11TestShapeOutputService11TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+// The service client's operations are safe to use concurrently.
+// However, it is not safe to mutate any of the client's properties.
+type OutputService12ProtocolTest struct {
+ *client.Client
+}
+
+// New creates a new instance of the OutputService12ProtocolTest client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a OutputService12ProtocolTest client from just a session.
+// svc := outputservice12protocoltest.New(mySession)
+//
+// // Create a OutputService12ProtocolTest client with additional configuration
+// svc := outputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func NewOutputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService12ProtocolTest {
+ c := p.ClientConfig("outputservice12protocoltest", cfgs...)
+ return newOutputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newOutputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService12ProtocolTest {
+ svc := &OutputService12ProtocolTest{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "outputservice12protocoltest",
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+
+ return svc
+}
+
+// newRequest creates a new request for a OutputService12ProtocolTest operation and runs any
+// custom request initialization.
+func (c *OutputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ return req
+}
+
+const opOutputService12TestCaseOperation1 = "OperationName"
+
+// OutputService12TestCaseOperation1Request generates a request for the OutputService12TestCaseOperation1 operation.
+func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1Request(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (req *request.Request, output *OutputService12TestShapeOutputService12TestCaseOperation1Output) {
+ op := &request.Operation{
+ Name: opOutputService12TestCaseOperation1,
+ }
+
+ if input == nil {
+ input = &OutputService12TestShapeOutputService12TestCaseOperation1Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &OutputService12TestShapeOutputService12TestCaseOperation1Output{}
+ req.Data = output
+ return
+}
+
+func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (*OutputService12TestShapeOutputService12TestCaseOperation1Output, error) {
+ req, out := c.OutputService12TestCaseOperation1Request(input)
+ err := req.Send()
+ return out, err
+}
+
+type OutputService12TestShapeOutputService12TestCaseOperation1Input struct {
+ metadataOutputService12TestShapeOutputService12TestCaseOperation1Input `json:"-" xml:"-"`
+}
+
+type metadataOutputService12TestShapeOutputService12TestCaseOperation1Input struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+type OutputService12TestShapeOutputService12TestCaseOperation1Output struct {
+ Foo *string `type:"string"`
+
+ metadataOutputService12TestShapeOutputService12TestCaseOperation1Output `json:"-" xml:"-"`
+}
+
+type metadataOutputService12TestShapeOutputService12TestCaseOperation1Output struct {
+ SDKShapeTraits bool `type:"structure"`
+}
+
+//
+// Tests begin here
+//
+
+func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("myname123falsetrue1.21.3200a2015-01-25T08:00:00Z"))
+ req, out := svc.OutputService1TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+ req.HTTPResponse.Header.Set("ImaHeader", "test")
+ req.HTTPResponse.Header.Set("X-Foo", "abc")
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "a", *out.Char)
+ assert.Equal(t, 1.3, *out.Double)
+ assert.Equal(t, false, *out.FalseBool)
+ assert.Equal(t, 1.2, *out.Float)
+ assert.Equal(t, "test", *out.ImaHeader)
+ assert.Equal(t, "abc", *out.ImaHeaderLocation)
+ assert.Equal(t, int64(200), *out.Long)
+ assert.Equal(t, int64(123), *out.Num)
+ assert.Equal(t, "myname", *out.Str)
+ assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String())
+ assert.Equal(t, true, *out.TrueBool)
+
+}
+
+func TestOutputService1ProtocolTestScalarMembersCase2(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("123falsetrue1.21.3200a2015-01-25T08:00:00Z"))
+ req, out := svc.OutputService1TestCaseOperation2Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+ req.HTTPResponse.Header.Set("ImaHeader", "test")
+ req.HTTPResponse.Header.Set("X-Foo", "abc")
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "a", *out.Char)
+ assert.Equal(t, 1.3, *out.Double)
+ assert.Equal(t, false, *out.FalseBool)
+ assert.Equal(t, 1.2, *out.Float)
+ assert.Equal(t, "test", *out.ImaHeader)
+ assert.Equal(t, "abc", *out.ImaHeaderLocation)
+ assert.Equal(t, int64(200), *out.Long)
+ assert.Equal(t, int64(123), *out.Num)
+ assert.Equal(t, "", *out.Str)
+ assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String())
+ assert.Equal(t, true, *out.TrueBool)
+
+}
+
+func TestOutputService2ProtocolTestBlobCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("dmFsdWU="))
+ req, out := svc.OutputService2TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "value", string(out.Blob))
+
+}
+
+func TestOutputService3ProtocolTestListsCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("abc123"))
+ req, out := svc.OutputService3TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.ListMember[0])
+ assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService4ProtocolTestListWithCustomMemberNameCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+	buf := bytes.NewReader([]byte("<OperationNameResult><ListMember><item>abc</item><item>123</item></ListMember></OperationNameResult>"))
+ req, out := svc.OutputService4TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.ListMember[0])
+ assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService5ProtocolTestFlattenedListCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("abc123"))
+ req, out := svc.OutputService5TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.ListMember[0])
+ assert.Equal(t, "123", *out.ListMember[1])
+
+}
+
+func TestOutputService6ProtocolTestNormalMapCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte(""))
+ req, out := svc.OutputService6TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "bam", *out.Map["baz"].Foo)
+ assert.Equal(t, "bar", *out.Map["qux"].Foo)
+
+}
+
+func TestOutputService7ProtocolTestFlattenedMapCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte(""))
+ req, out := svc.OutputService7TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "bam", *out.Map["baz"])
+ assert.Equal(t, "bar", *out.Map["qux"])
+
+}
+
+func TestOutputService8ProtocolTestNamedMapCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte(""))
+ req, out := svc.OutputService8TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "bam", *out.Map["baz"])
+ assert.Equal(t, "bar", *out.Map["qux"])
+
+}
+
+func TestOutputService9ProtocolTestXMLPayloadCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("abc"))
+ req, out := svc.OutputService9TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+ req.HTTPResponse.Header.Set("X-Foo", "baz")
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", *out.Data.Foo)
+ assert.Equal(t, "baz", *out.Header)
+
+}
+
+func TestOutputService10ProtocolTestStreamingPayloadCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("abc"))
+ req, out := svc.OutputService10TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "abc", string(out.Stream))
+
+}
+
+func TestOutputService11ProtocolTestScalarMembersInHeadersCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte(""))
+ req, out := svc.OutputService11TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+ req.HTTPResponse.Header.Set("x-char", "a")
+ req.HTTPResponse.Header.Set("x-double", "1.5")
+ req.HTTPResponse.Header.Set("x-false-bool", "false")
+ req.HTTPResponse.Header.Set("x-float", "1.5")
+ req.HTTPResponse.Header.Set("x-int", "1")
+ req.HTTPResponse.Header.Set("x-long", "100")
+ req.HTTPResponse.Header.Set("x-str", "string")
+ req.HTTPResponse.Header.Set("x-timestamp", "Sun, 25 Jan 2015 08:00:00 GMT")
+ req.HTTPResponse.Header.Set("x-true-bool", "true")
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "a", *out.Char)
+ assert.Equal(t, 1.5, *out.Double)
+ assert.Equal(t, false, *out.FalseBool)
+ assert.Equal(t, 1.5, *out.Float)
+ assert.Equal(t, int64(1), *out.Integer)
+ assert.Equal(t, int64(100), *out.Long)
+ assert.Equal(t, "string", *out.Str)
+ assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String())
+ assert.Equal(t, true, *out.TrueBool)
+
+}
+
+func TestOutputService12ProtocolTestEmptyStringCase1(t *testing.T) {
+ sess := session.New()
+ svc := NewOutputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
+
+ buf := bytes.NewReader([]byte("requestid"))
+ req, out := svc.OutputService12TestCaseOperation1Request(nil)
+ req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
+
+ // set headers
+
+ // unmarshal response
+ restxml.UnmarshalMeta(req)
+ restxml.Unmarshal(req)
+ assert.NoError(t, req.Error)
+
+ // assert response
+ assert.NotNil(t, out) // ensure out variable is used
+ assert.Equal(t, "", *out.Foo)
+
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/build.go
rename to Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go
rename to Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go
rename to Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/functional_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/functional_test.go
new file mode 100644
index 000000000..51a26d6ad
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/functional_test.go
@@ -0,0 +1,42 @@
+package v4_test
+
+import (
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/awstesting/unit"
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
+func TestPresignHandler(t *testing.T) {
+ svc := s3.New(unit.Session)
+ req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
+ Bucket: aws.String("bucket"),
+ Key: aws.String("key"),
+ ContentDisposition: aws.String("a+b c$d"),
+ ACL: aws.String("public-read"),
+ })
+ req.Time = time.Unix(0, 0)
+ urlstr, err := req.Presign(5 * time.Minute)
+
+ assert.NoError(t, err)
+
+ expectedDate := "19700101T000000Z"
+ expectedHeaders := "host;x-amz-acl"
+ expectedSig := "7edcb4e3a1bf12f4989018d75acbe3a7f03df24bd6f3112602d59fc551f0e4e2"
+ expectedCred := "AKID/19700101/mock-region/s3/aws4_request"
+
+ u, _ := url.Parse(urlstr)
+ urlQ := u.Query()
+ assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature"))
+ assert.Equal(t, expectedCred, urlQ.Get("X-Amz-Credential"))
+ assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders"))
+ assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date"))
+ assert.Equal(t, "300", urlQ.Get("X-Amz-Expires"))
+
+ assert.NotContains(t, urlstr, "+") // + encoded as %20
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go
similarity index 95%
rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4.go
rename to Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go
index fc7bc3535..dc176f312 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go
@@ -17,7 +17,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/internal/protocol/rest"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
)
const (
@@ -67,18 +67,18 @@ type signer struct {
func Sign(req *request.Request) {
// If the request does not need to be signed ignore the signing of the
// request if the AnonymousCredentials object is used.
- if req.Service.Config.Credentials == credentials.AnonymousCredentials {
+ if req.Config.Credentials == credentials.AnonymousCredentials {
return
}
- region := req.Service.SigningRegion
+ region := req.ClientInfo.SigningRegion
if region == "" {
- region = aws.StringValue(req.Service.Config.Region)
+ region = aws.StringValue(req.Config.Region)
}
- name := req.Service.SigningName
+ name := req.ClientInfo.SigningName
if name == "" {
- name = req.Service.ServiceName
+ name = req.ClientInfo.ServiceName
}
s := signer{
@@ -89,9 +89,9 @@ func Sign(req *request.Request) {
Body: req.Body,
ServiceName: name,
Region: region,
- Credentials: req.Service.Config.Credentials,
- Debug: req.Service.Config.LogLevel.Value(),
- Logger: req.Service.Config.Logger,
+ Credentials: req.Config.Credentials,
+ Debug: req.Config.LogLevel.Value(),
+ Logger: req.Config.Logger,
}
req.Error = s.sign()
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4_test.go
new file mode 100644
index 000000000..5a5b72402
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4_test.go
@@ -0,0 +1,249 @@
+package v4
+
+import (
+ "net/http"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awstesting"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+func buildSigner(serviceName string, region string, signTime time.Time, expireTime time.Duration, body string) signer {
+ endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
+ reader := strings.NewReader(body)
+ req, _ := http.NewRequest("POST", endpoint, reader)
+ req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()"
+ req.Header.Add("X-Amz-Target", "prefix.Operation")
+ req.Header.Add("Content-Type", "application/x-amz-json-1.0")
+ req.Header.Add("Content-Length", string(len(body)))
+ req.Header.Add("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)")
+
+ return signer{
+ Request: req,
+ Time: signTime,
+ ExpireTime: expireTime,
+ Query: req.URL.Query(),
+ Body: reader,
+ ServiceName: serviceName,
+ Region: region,
+ Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"),
+ }
+}
+
+func removeWS(text string) string {
+ text = strings.Replace(text, " ", "", -1)
+ text = strings.Replace(text, "\n", "", -1)
+ text = strings.Replace(text, "\t", "", -1)
+ return text
+}
+
+func assertEqual(t *testing.T, expected, given string) {
+ if removeWS(expected) != removeWS(given) {
+ t.Errorf("\nExpected: %s\nGiven: %s", expected, given)
+ }
+}
+
+func TestPresignRequest(t *testing.T) {
+ signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 300*time.Second, "{}")
+ signer.sign()
+
+ expectedDate := "19700101T000000Z"
+ expectedHeaders := "host;x-amz-meta-other-header;x-amz-target"
+ expectedSig := "5eeedebf6f995145ce56daa02902d10485246d3defb34f97b973c1f40ab82d36"
+ expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request"
+
+ q := signer.Request.URL.Query()
+ assert.Equal(t, expectedSig, q.Get("X-Amz-Signature"))
+ assert.Equal(t, expectedCred, q.Get("X-Amz-Credential"))
+ assert.Equal(t, expectedHeaders, q.Get("X-Amz-SignedHeaders"))
+ assert.Equal(t, expectedDate, q.Get("X-Amz-Date"))
+}
+
+func TestSignRequest(t *testing.T) {
+ signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 0, "{}")
+ signer.sign()
+
+ expectedDate := "19700101T000000Z"
+ expectedSig := "AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/dynamodb/aws4_request, SignedHeaders=host;x-amz-date;x-amz-meta-other-header;x-amz-security-token;x-amz-target, Signature=69ada33fec48180dab153576e4dd80c4e04124f80dda3eccfed8a67c2b91ed5e"
+
+ q := signer.Request.Header
+ assert.Equal(t, expectedSig, q.Get("Authorization"))
+ assert.Equal(t, expectedDate, q.Get("X-Amz-Date"))
+}
+
+func TestSignEmptyBody(t *testing.T) {
+ signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "")
+ signer.Body = nil
+ signer.sign()
+ hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
+ assert.Equal(t, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hash)
+}
+
+func TestSignBody(t *testing.T) {
+ signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "hello")
+ signer.sign()
+ hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
+ assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash)
+}
+
+func TestSignSeekedBody(t *testing.T) {
+ signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, " hello")
+ signer.Body.Read(make([]byte, 3)) // consume first 3 bytes so body is now "hello"
+ signer.sign()
+ hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
+ assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash)
+
+ start, _ := signer.Body.Seek(0, 1)
+ assert.Equal(t, int64(3), start)
+}
+
+func TestPresignEmptyBodyS3(t *testing.T) {
+ signer := buildSigner("s3", "us-east-1", time.Now(), 5*time.Minute, "hello")
+ signer.sign()
+ hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
+ assert.Equal(t, "UNSIGNED-PAYLOAD", hash)
+}
+
+func TestSignPrecomputedBodyChecksum(t *testing.T) {
+ signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "hello")
+ signer.Request.Header.Set("X-Amz-Content-Sha256", "PRECOMPUTED")
+ signer.sign()
+ hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
+ assert.Equal(t, "PRECOMPUTED", hash)
+}
+
+func TestAnonymousCredentials(t *testing.T) {
+ svc := awstesting.NewClient(&aws.Config{Credentials: credentials.AnonymousCredentials})
+ r := svc.NewRequest(
+ &request.Operation{
+ Name: "BatchGetItem",
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ },
+ nil,
+ nil,
+ )
+ Sign(r)
+
+ urlQ := r.HTTPRequest.URL.Query()
+ assert.Empty(t, urlQ.Get("X-Amz-Signature"))
+ assert.Empty(t, urlQ.Get("X-Amz-Credential"))
+ assert.Empty(t, urlQ.Get("X-Amz-SignedHeaders"))
+ assert.Empty(t, urlQ.Get("X-Amz-Date"))
+
+ hQ := r.HTTPRequest.Header
+ assert.Empty(t, hQ.Get("Authorization"))
+ assert.Empty(t, hQ.Get("X-Amz-Date"))
+}
+
+func TestIgnoreResignRequestWithValidCreds(t *testing.T) {
+ svc := awstesting.NewClient(&aws.Config{
+ Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"),
+ Region: aws.String("us-west-2"),
+ })
+ r := svc.NewRequest(
+ &request.Operation{
+ Name: "BatchGetItem",
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ },
+ nil,
+ nil,
+ )
+
+ Sign(r)
+ sig := r.HTTPRequest.Header.Get("Authorization")
+
+ Sign(r)
+ assert.Equal(t, sig, r.HTTPRequest.Header.Get("Authorization"))
+}
+
+func TestIgnorePreResignRequestWithValidCreds(t *testing.T) {
+ svc := awstesting.NewClient(&aws.Config{
+ Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"),
+ Region: aws.String("us-west-2"),
+ })
+ r := svc.NewRequest(
+ &request.Operation{
+ Name: "BatchGetItem",
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ },
+ nil,
+ nil,
+ )
+ r.ExpireTime = time.Minute * 10
+
+ Sign(r)
+ sig := r.HTTPRequest.Header.Get("X-Amz-Signature")
+
+ Sign(r)
+ assert.Equal(t, sig, r.HTTPRequest.Header.Get("X-Amz-Signature"))
+}
+
+func TestResignRequestExpiredCreds(t *testing.T) {
+ creds := credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")
+ svc := awstesting.NewClient(&aws.Config{Credentials: creds})
+ r := svc.NewRequest(
+ &request.Operation{
+ Name: "BatchGetItem",
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ },
+ nil,
+ nil,
+ )
+ Sign(r)
+ querySig := r.HTTPRequest.Header.Get("Authorization")
+
+ creds.Expire()
+
+ Sign(r)
+ assert.NotEqual(t, querySig, r.HTTPRequest.Header.Get("Authorization"))
+}
+
+func TestPreResignRequestExpiredCreds(t *testing.T) {
+ provider := &credentials.StaticProvider{credentials.Value{"AKID", "SECRET", "SESSION"}}
+ creds := credentials.NewCredentials(provider)
+ svc := awstesting.NewClient(&aws.Config{Credentials: creds})
+ r := svc.NewRequest(
+ &request.Operation{
+ Name: "BatchGetItem",
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ },
+ nil,
+ nil,
+ )
+ r.ExpireTime = time.Minute * 10
+
+ Sign(r)
+ querySig := r.HTTPRequest.URL.Query().Get("X-Amz-Signature")
+
+ creds.Expire()
+ r.Time = time.Now().Add(time.Hour * 48)
+
+ Sign(r)
+ assert.NotEqual(t, querySig, r.HTTPRequest.URL.Query().Get("X-Amz-Signature"))
+}
+
+func BenchmarkPresignRequest(b *testing.B) {
+ signer := buildSigner("dynamodb", "us-east-1", time.Now(), 300*time.Second, "{}")
+ for i := 0; i < b.N; i++ {
+ signer.sign()
+ }
+}
+
+func BenchmarkSignRequest(b *testing.B) {
+ signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "{}")
+ for i := 0; i < b.N; i++ {
+ signer.sign()
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/customizations.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/customizations.go
index 99f082047..9e94fe671 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/customizations.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/customizations.go
@@ -3,8 +3,10 @@ package ec2
import (
"time"
+ "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/endpoints"
)
func init() {
@@ -20,38 +22,34 @@ func fillPresignedURL(r *request.Request) {
return
}
- params := r.Params.(*CopySnapshotInput)
+ origParams := r.Params.(*CopySnapshotInput)
// Stop if PresignedURL/DestinationRegion is set
- if params.PresignedUrl != nil || params.DestinationRegion != nil {
+ if origParams.PresignedUrl != nil || origParams.DestinationRegion != nil {
return
}
- // First generate a copy of parameters
- r.Params = awsutil.CopyOf(r.Params)
- params = r.Params.(*CopySnapshotInput)
+ origParams.DestinationRegion = r.Config.Region
+ newParams := awsutil.CopyOf(r.Params).(*CopySnapshotInput)
- // Set destination region. Avoids infinite handler loop.
- // Also needed to sign sub-request.
- params.DestinationRegion = r.Service.Config.Region
-
- // Create a new client pointing at source region.
- // We will use this to presign the CopySnapshot request against
- // the source region
- config := r.Service.Config.Copy().
+ // Create a new request based on the existing request. We will use this to
+ // presign the CopySnapshot request against the source region.
+ cfg := r.Config.Copy(aws.NewConfig().
WithEndpoint("").
- WithRegion(*params.SourceRegion)
+ WithRegion(aws.StringValue(origParams.SourceRegion)))
- client := New(config)
+ clientInfo := r.ClientInfo
+ clientInfo.Endpoint, clientInfo.SigningRegion = endpoints.EndpointForRegion(
+ clientInfo.ServiceName, aws.StringValue(cfg.Region), aws.BoolValue(cfg.DisableSSL))
// Presign a CopySnapshot request with modified params
- req, _ := client.CopySnapshotRequest(params)
- url, err := req.Presign(300 * time.Second) // 5 minutes should be enough.
-
- if err != nil { // bubble error back up to original request
+ req := request.New(*cfg, clientInfo, r.Handlers, r.Retryer, r.Operation, newParams, r.Data)
+ url, err := req.Presign(5 * time.Minute) // 5 minutes should be enough.
+ if err != nil { // bubble error back up to original request
r.Error = err
+ return
}
// We have our URL, set it on params
- params.PresignedUrl = &url
+ origParams.PresignedUrl = &url
}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go
new file mode 100644
index 000000000..11d1ca214
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go
@@ -0,0 +1,35 @@
+package ec2_test
+
+import (
+ "io/ioutil"
+ "net/url"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/awstesting/unit"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestCopySnapshotPresignedURL(t *testing.T) {
+ svc := ec2.New(unit.Session, &aws.Config{Region: aws.String("us-west-2")})
+
+ assert.NotPanics(t, func() {
+ // Doesn't panic on nil input
+ req, _ := svc.CopySnapshotRequest(nil)
+ req.Sign()
+ })
+
+ req, _ := svc.CopySnapshotRequest(&ec2.CopySnapshotInput{
+ SourceRegion: aws.String("us-west-1"),
+ SourceSnapshotId: aws.String("snap-id"),
+ })
+ req.Sign()
+
+ b, _ := ioutil.ReadAll(req.HTTPRequest.Body)
+ q, _ := url.ParseQuery(string(b))
+ url, _ := url.QueryUnescape(q.Get("PresignedUrl"))
+ assert.Equal(t, "us-west-2", q.Get("DestinationRegion"))
+ assert.Equal(t, "us-west-1", q.Get("SourceRegion"))
+ assert.Regexp(t, `^https://ec2\.us-west-1\.amazonaws\.com/.+&DestinationRegion=us-west-2`, url)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go
index 88a209d16..c123466cc 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go
@@ -758,3 +758,5 @@ type EC2API interface {
UnmonitorInstances(*ec2.UnmonitorInstancesInput) (*ec2.UnmonitorInstancesOutput, error)
}
+
+var _ EC2API = (*ec2.EC2)(nil)
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/examples_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/examples_test.go
new file mode 100644
index 000000000..86557fc15
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/examples_test.go
@@ -0,0 +1,5188 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package ec2_test
+
+import (
+ "bytes"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/ec2"
+)
+
+var _ time.Duration
+var _ bytes.Buffer
+
+func ExampleEC2_AcceptVpcPeeringConnection() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.AcceptVpcPeeringConnectionInput{
+ DryRun: aws.Bool(true),
+ VpcPeeringConnectionId: aws.String("String"),
+ }
+ resp, err := svc.AcceptVpcPeeringConnection(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_AllocateAddress() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.AllocateAddressInput{
+ Domain: aws.String("DomainType"),
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.AllocateAddress(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_AssignPrivateIpAddresses() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.AssignPrivateIpAddressesInput{
+ NetworkInterfaceId: aws.String("String"), // Required
+ AllowReassignment: aws.Bool(true),
+ PrivateIpAddresses: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ SecondaryPrivateIpAddressCount: aws.Int64(1),
+ }
+ resp, err := svc.AssignPrivateIpAddresses(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_AssociateAddress() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.AssociateAddressInput{
+ AllocationId: aws.String("String"),
+ AllowReassociation: aws.Bool(true),
+ DryRun: aws.Bool(true),
+ InstanceId: aws.String("String"),
+ NetworkInterfaceId: aws.String("String"),
+ PrivateIpAddress: aws.String("String"),
+ PublicIp: aws.String("String"),
+ }
+ resp, err := svc.AssociateAddress(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_AssociateDhcpOptions() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.AssociateDhcpOptionsInput{
+ DhcpOptionsId: aws.String("String"), // Required
+ VpcId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.AssociateDhcpOptions(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_AssociateRouteTable() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.AssociateRouteTableInput{
+ RouteTableId: aws.String("String"), // Required
+ SubnetId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.AssociateRouteTable(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_AttachClassicLinkVpc() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.AttachClassicLinkVpcInput{
+ Groups: []*string{ // Required
+ aws.String("String"), // Required
+ // More values...
+ },
+ InstanceId: aws.String("String"), // Required
+ VpcId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.AttachClassicLinkVpc(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_AttachInternetGateway() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.AttachInternetGatewayInput{
+ InternetGatewayId: aws.String("String"), // Required
+ VpcId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.AttachInternetGateway(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_AttachNetworkInterface() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.AttachNetworkInterfaceInput{
+ DeviceIndex: aws.Int64(1), // Required
+ InstanceId: aws.String("String"), // Required
+ NetworkInterfaceId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.AttachNetworkInterface(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_AttachVolume() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.AttachVolumeInput{
+ Device: aws.String("String"), // Required
+ InstanceId: aws.String("String"), // Required
+ VolumeId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.AttachVolume(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_AttachVpnGateway() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.AttachVpnGatewayInput{
+ VpcId: aws.String("String"), // Required
+ VpnGatewayId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.AttachVpnGateway(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_AuthorizeSecurityGroupEgress() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.AuthorizeSecurityGroupEgressInput{
+ GroupId: aws.String("String"), // Required
+ CidrIp: aws.String("String"),
+ DryRun: aws.Bool(true),
+ FromPort: aws.Int64(1),
+ IpPermissions: []*ec2.IpPermission{
+ { // Required
+ FromPort: aws.Int64(1),
+ IpProtocol: aws.String("String"),
+ IpRanges: []*ec2.IpRange{
+ { // Required
+ CidrIp: aws.String("String"),
+ },
+ // More values...
+ },
+ PrefixListIds: []*ec2.PrefixListId{
+ { // Required
+ PrefixListId: aws.String("String"),
+ },
+ // More values...
+ },
+ ToPort: aws.Int64(1),
+ UserIdGroupPairs: []*ec2.UserIdGroupPair{
+ { // Required
+ GroupId: aws.String("String"),
+ GroupName: aws.String("String"),
+ UserId: aws.String("String"),
+ },
+ // More values...
+ },
+ },
+ // More values...
+ },
+ IpProtocol: aws.String("String"),
+ SourceSecurityGroupName: aws.String("String"),
+ SourceSecurityGroupOwnerId: aws.String("String"),
+ ToPort: aws.Int64(1),
+ }
+ resp, err := svc.AuthorizeSecurityGroupEgress(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_AuthorizeSecurityGroupIngress() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.AuthorizeSecurityGroupIngressInput{
+ CidrIp: aws.String("String"),
+ DryRun: aws.Bool(true),
+ FromPort: aws.Int64(1),
+ GroupId: aws.String("String"),
+ GroupName: aws.String("String"),
+ IpPermissions: []*ec2.IpPermission{
+ { // Required
+ FromPort: aws.Int64(1),
+ IpProtocol: aws.String("String"),
+ IpRanges: []*ec2.IpRange{
+ { // Required
+ CidrIp: aws.String("String"),
+ },
+ // More values...
+ },
+ PrefixListIds: []*ec2.PrefixListId{
+ { // Required
+ PrefixListId: aws.String("String"),
+ },
+ // More values...
+ },
+ ToPort: aws.Int64(1),
+ UserIdGroupPairs: []*ec2.UserIdGroupPair{
+ { // Required
+ GroupId: aws.String("String"),
+ GroupName: aws.String("String"),
+ UserId: aws.String("String"),
+ },
+ // More values...
+ },
+ },
+ // More values...
+ },
+ IpProtocol: aws.String("String"),
+ SourceSecurityGroupName: aws.String("String"),
+ SourceSecurityGroupOwnerId: aws.String("String"),
+ ToPort: aws.Int64(1),
+ }
+ resp, err := svc.AuthorizeSecurityGroupIngress(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_BundleInstance() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.BundleInstanceInput{
+ InstanceId: aws.String("String"), // Required
+ Storage: &ec2.Storage{ // Required
+ S3: &ec2.S3Storage{
+ AWSAccessKeyId: aws.String("String"),
+ Bucket: aws.String("String"),
+ Prefix: aws.String("String"),
+ UploadPolicy: []byte("PAYLOAD"),
+ UploadPolicySignature: aws.String("String"),
+ },
+ },
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.BundleInstance(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CancelBundleTask() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CancelBundleTaskInput{
+ BundleId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.CancelBundleTask(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CancelConversionTask() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CancelConversionTaskInput{
+ ConversionTaskId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ ReasonMessage: aws.String("String"),
+ }
+ resp, err := svc.CancelConversionTask(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CancelExportTask() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CancelExportTaskInput{
+ ExportTaskId: aws.String("String"), // Required
+ }
+ resp, err := svc.CancelExportTask(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CancelImportTask() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CancelImportTaskInput{
+ CancelReason: aws.String("String"),
+ DryRun: aws.Bool(true),
+ ImportTaskId: aws.String("String"),
+ }
+ resp, err := svc.CancelImportTask(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CancelReservedInstancesListing() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CancelReservedInstancesListingInput{
+ ReservedInstancesListingId: aws.String("String"), // Required
+ }
+ resp, err := svc.CancelReservedInstancesListing(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CancelSpotFleetRequests() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CancelSpotFleetRequestsInput{
+ SpotFleetRequestIds: []*string{ // Required
+ aws.String("String"), // Required
+ // More values...
+ },
+ TerminateInstances: aws.Bool(true), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.CancelSpotFleetRequests(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CancelSpotInstanceRequests() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CancelSpotInstanceRequestsInput{
+ SpotInstanceRequestIds: []*string{ // Required
+ aws.String("String"), // Required
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.CancelSpotInstanceRequests(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ConfirmProductInstance() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ConfirmProductInstanceInput{
+ InstanceId: aws.String("String"), // Required
+ ProductCode: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.ConfirmProductInstance(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CopyImage() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CopyImageInput{
+ Name: aws.String("String"), // Required
+ SourceImageId: aws.String("String"), // Required
+ SourceRegion: aws.String("String"), // Required
+ ClientToken: aws.String("String"),
+ Description: aws.String("String"),
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.CopyImage(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CopySnapshot() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CopySnapshotInput{
+ SourceRegion: aws.String("String"), // Required
+ SourceSnapshotId: aws.String("String"), // Required
+ Description: aws.String("String"),
+ DestinationRegion: aws.String("String"),
+ DryRun: aws.Bool(true),
+ Encrypted: aws.Bool(true),
+ KmsKeyId: aws.String("String"),
+ PresignedUrl: aws.String("String"),
+ }
+ resp, err := svc.CopySnapshot(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateCustomerGateway() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateCustomerGatewayInput{
+ BgpAsn: aws.Int64(1), // Required
+ PublicIp: aws.String("String"), // Required
+ Type: aws.String("GatewayType"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.CreateCustomerGateway(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateDhcpOptions() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateDhcpOptionsInput{
+ DhcpConfigurations: []*ec2.NewDhcpConfiguration{ // Required
+ { // Required
+ Key: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.CreateDhcpOptions(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateFlowLogs() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateFlowLogsInput{
+ DeliverLogsPermissionArn: aws.String("String"), // Required
+ LogGroupName: aws.String("String"), // Required
+ ResourceIds: []*string{ // Required
+ aws.String("String"), // Required
+ // More values...
+ },
+ ResourceType: aws.String("FlowLogsResourceType"), // Required
+ TrafficType: aws.String("TrafficType"), // Required
+ ClientToken: aws.String("String"),
+ }
+ resp, err := svc.CreateFlowLogs(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateImage() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateImageInput{
+ InstanceId: aws.String("String"), // Required
+ Name: aws.String("String"), // Required
+ BlockDeviceMappings: []*ec2.BlockDeviceMapping{
+ { // Required
+ DeviceName: aws.String("String"),
+ Ebs: &ec2.EbsBlockDevice{
+ DeleteOnTermination: aws.Bool(true),
+ Encrypted: aws.Bool(true),
+ Iops: aws.Int64(1),
+ SnapshotId: aws.String("String"),
+ VolumeSize: aws.Int64(1),
+ VolumeType: aws.String("VolumeType"),
+ },
+ NoDevice: aws.String("String"),
+ VirtualName: aws.String("String"),
+ },
+ // More values...
+ },
+ Description: aws.String("String"),
+ DryRun: aws.Bool(true),
+ NoReboot: aws.Bool(true),
+ }
+ resp, err := svc.CreateImage(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateInstanceExportTask() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateInstanceExportTaskInput{
+ InstanceId: aws.String("String"), // Required
+ Description: aws.String("String"),
+ ExportToS3Task: &ec2.ExportToS3TaskSpecification{
+ ContainerFormat: aws.String("ContainerFormat"),
+ DiskImageFormat: aws.String("DiskImageFormat"),
+ S3Bucket: aws.String("String"),
+ S3Prefix: aws.String("String"),
+ },
+ TargetEnvironment: aws.String("ExportEnvironment"),
+ }
+ resp, err := svc.CreateInstanceExportTask(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateInternetGateway() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateInternetGatewayInput{
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.CreateInternetGateway(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateKeyPair() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateKeyPairInput{
+ KeyName: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.CreateKeyPair(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateNetworkAcl() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateNetworkAclInput{
+ VpcId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.CreateNetworkAcl(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateNetworkAclEntry() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateNetworkAclEntryInput{
+ CidrBlock: aws.String("String"), // Required
+ Egress: aws.Bool(true), // Required
+ NetworkAclId: aws.String("String"), // Required
+ Protocol: aws.String("String"), // Required
+ RuleAction: aws.String("RuleAction"), // Required
+ RuleNumber: aws.Int64(1), // Required
+ DryRun: aws.Bool(true),
+ IcmpTypeCode: &ec2.IcmpTypeCode{
+ Code: aws.Int64(1),
+ Type: aws.Int64(1),
+ },
+ PortRange: &ec2.PortRange{
+ From: aws.Int64(1),
+ To: aws.Int64(1),
+ },
+ }
+ resp, err := svc.CreateNetworkAclEntry(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateNetworkInterface() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateNetworkInterfaceInput{
+ SubnetId: aws.String("String"), // Required
+ Description: aws.String("String"),
+ DryRun: aws.Bool(true),
+ Groups: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ PrivateIpAddress: aws.String("String"),
+ PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{
+ { // Required
+ PrivateIpAddress: aws.String("String"), // Required
+ Primary: aws.Bool(true),
+ },
+ // More values...
+ },
+ SecondaryPrivateIpAddressCount: aws.Int64(1),
+ }
+ resp, err := svc.CreateNetworkInterface(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreatePlacementGroup() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreatePlacementGroupInput{
+ GroupName: aws.String("String"), // Required
+ Strategy: aws.String("PlacementStrategy"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.CreatePlacementGroup(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateReservedInstancesListing() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateReservedInstancesListingInput{
+ ClientToken: aws.String("String"), // Required
+ InstanceCount: aws.Int64(1), // Required
+ PriceSchedules: []*ec2.PriceScheduleSpecification{ // Required
+ { // Required
+ CurrencyCode: aws.String("CurrencyCodeValues"),
+ Price: aws.Float64(1.0),
+ Term: aws.Int64(1),
+ },
+ // More values...
+ },
+ ReservedInstancesId: aws.String("String"), // Required
+ }
+ resp, err := svc.CreateReservedInstancesListing(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateRoute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateRouteInput{
+ DestinationCidrBlock: aws.String("String"), // Required
+ RouteTableId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ GatewayId: aws.String("String"),
+ InstanceId: aws.String("String"),
+ NetworkInterfaceId: aws.String("String"),
+ VpcPeeringConnectionId: aws.String("String"),
+ }
+ resp, err := svc.CreateRoute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateRouteTable() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateRouteTableInput{
+ VpcId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.CreateRouteTable(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateSecurityGroup() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateSecurityGroupInput{
+ Description: aws.String("String"), // Required
+ GroupName: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ VpcId: aws.String("String"),
+ }
+ resp, err := svc.CreateSecurityGroup(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateSnapshot() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateSnapshotInput{
+ VolumeId: aws.String("String"), // Required
+ Description: aws.String("String"),
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.CreateSnapshot(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateSpotDatafeedSubscription() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateSpotDatafeedSubscriptionInput{
+ Bucket: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ Prefix: aws.String("String"),
+ }
+ resp, err := svc.CreateSpotDatafeedSubscription(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateSubnet() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateSubnetInput{
+ CidrBlock: aws.String("String"), // Required
+ VpcId: aws.String("String"), // Required
+ AvailabilityZone: aws.String("String"),
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.CreateSubnet(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateTags() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateTagsInput{
+ Resources: []*string{ // Required
+ aws.String("String"), // Required
+ // More values...
+ },
+ Tags: []*ec2.Tag{ // Required
+ { // Required
+ Key: aws.String("String"),
+ Value: aws.String("String"),
+ },
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.CreateTags(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateVolume() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateVolumeInput{
+ AvailabilityZone: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ Encrypted: aws.Bool(true),
+ Iops: aws.Int64(1),
+ KmsKeyId: aws.String("String"),
+ Size: aws.Int64(1),
+ SnapshotId: aws.String("String"),
+ VolumeType: aws.String("VolumeType"),
+ }
+ resp, err := svc.CreateVolume(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateVpc() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateVpcInput{
+ CidrBlock: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ InstanceTenancy: aws.String("Tenancy"),
+ }
+ resp, err := svc.CreateVpc(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateVpcEndpoint() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateVpcEndpointInput{
+ ServiceName: aws.String("String"), // Required
+ VpcId: aws.String("String"), // Required
+ ClientToken: aws.String("String"),
+ DryRun: aws.Bool(true),
+ PolicyDocument: aws.String("String"),
+ RouteTableIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.CreateVpcEndpoint(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateVpcPeeringConnection() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateVpcPeeringConnectionInput{
+ DryRun: aws.Bool(true),
+ PeerOwnerId: aws.String("String"),
+ PeerVpcId: aws.String("String"),
+ VpcId: aws.String("String"),
+ }
+ resp, err := svc.CreateVpcPeeringConnection(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateVpnConnection() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateVpnConnectionInput{
+ CustomerGatewayId: aws.String("String"), // Required
+ Type: aws.String("String"), // Required
+ VpnGatewayId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ Options: &ec2.VpnConnectionOptionsSpecification{
+ StaticRoutesOnly: aws.Bool(true),
+ },
+ }
+ resp, err := svc.CreateVpnConnection(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateVpnConnectionRoute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateVpnConnectionRouteInput{
+ DestinationCidrBlock: aws.String("String"), // Required
+ VpnConnectionId: aws.String("String"), // Required
+ }
+ resp, err := svc.CreateVpnConnectionRoute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_CreateVpnGateway() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.CreateVpnGatewayInput{
+ Type: aws.String("GatewayType"), // Required
+ AvailabilityZone: aws.String("String"),
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.CreateVpnGateway(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteCustomerGateway() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteCustomerGatewayInput{
+ CustomerGatewayId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeleteCustomerGateway(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteDhcpOptions() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteDhcpOptionsInput{
+ DhcpOptionsId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeleteDhcpOptions(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteFlowLogs() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteFlowLogsInput{
+ FlowLogIds: []*string{ // Required
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DeleteFlowLogs(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteInternetGateway() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteInternetGatewayInput{
+ InternetGatewayId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeleteInternetGateway(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteKeyPair() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteKeyPairInput{
+ KeyName: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeleteKeyPair(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteNetworkAcl() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteNetworkAclInput{
+ NetworkAclId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeleteNetworkAcl(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteNetworkAclEntry() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteNetworkAclEntryInput{
+ Egress: aws.Bool(true), // Required
+ NetworkAclId: aws.String("String"), // Required
+ RuleNumber: aws.Int64(1), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeleteNetworkAclEntry(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteNetworkInterface() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteNetworkInterfaceInput{
+ NetworkInterfaceId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeleteNetworkInterface(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeletePlacementGroup() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeletePlacementGroupInput{
+ GroupName: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeletePlacementGroup(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteRoute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteRouteInput{
+ DestinationCidrBlock: aws.String("String"), // Required
+ RouteTableId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeleteRoute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteRouteTable() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteRouteTableInput{
+ RouteTableId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeleteRouteTable(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteSecurityGroup() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteSecurityGroupInput{
+ DryRun: aws.Bool(true),
+ GroupId: aws.String("String"),
+ GroupName: aws.String("String"),
+ }
+ resp, err := svc.DeleteSecurityGroup(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteSnapshot() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteSnapshotInput{
+ SnapshotId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeleteSnapshot(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteSpotDatafeedSubscription() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteSpotDatafeedSubscriptionInput{
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeleteSpotDatafeedSubscription(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteSubnet() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteSubnetInput{
+ SubnetId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeleteSubnet(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteTags() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteTagsInput{
+ Resources: []*string{ // Required
+ aws.String("String"), // Required
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ Tags: []*ec2.Tag{
+ { // Required
+ Key: aws.String("String"),
+ Value: aws.String("String"),
+ },
+ // More values...
+ },
+ }
+ resp, err := svc.DeleteTags(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteVolume() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteVolumeInput{
+ VolumeId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeleteVolume(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteVpc() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteVpcInput{
+ VpcId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeleteVpc(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteVpcEndpoints() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteVpcEndpointsInput{
+ VpcEndpointIds: []*string{ // Required
+ aws.String("String"), // Required
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeleteVpcEndpoints(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteVpcPeeringConnection() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteVpcPeeringConnectionInput{
+ VpcPeeringConnectionId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeleteVpcPeeringConnection(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteVpnConnection() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteVpnConnectionInput{
+ VpnConnectionId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeleteVpnConnection(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteVpnConnectionRoute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteVpnConnectionRouteInput{
+ DestinationCidrBlock: aws.String("String"), // Required
+ VpnConnectionId: aws.String("String"), // Required
+ }
+ resp, err := svc.DeleteVpnConnectionRoute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeleteVpnGateway() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeleteVpnGatewayInput{
+ VpnGatewayId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeleteVpnGateway(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DeregisterImage() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DeregisterImageInput{
+ ImageId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DeregisterImage(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeAccountAttributes() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeAccountAttributesInput{
+ AttributeNames: []*string{
+ aws.String("AccountAttributeName"), // Required
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DescribeAccountAttributes(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeAddresses() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeAddressesInput{
+ AllocationIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ PublicIps: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeAddresses(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeAvailabilityZones() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeAvailabilityZonesInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ ZoneNames: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeAvailabilityZones(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeBundleTasks() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeBundleTasksInput{
+ BundleIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeBundleTasks(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeClassicLinkInstances() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeClassicLinkInstancesInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ InstanceIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ MaxResults: aws.Int64(1),
+ NextToken: aws.String("String"),
+ }
+ resp, err := svc.DescribeClassicLinkInstances(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeConversionTasks() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeConversionTasksInput{
+ ConversionTaskIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeConversionTasks(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeCustomerGateways() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeCustomerGatewaysInput{
+ CustomerGatewayIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeCustomerGateways(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeDhcpOptions() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeDhcpOptionsInput{
+ DhcpOptionsIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeDhcpOptions(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeExportTasks() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeExportTasksInput{
+ ExportTaskIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeExportTasks(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeFlowLogs() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeFlowLogsInput{
+ Filter: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ FlowLogIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ MaxResults: aws.Int64(1),
+ NextToken: aws.String("String"),
+ }
+ resp, err := svc.DescribeFlowLogs(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeImageAttribute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeImageAttributeInput{
+ Attribute: aws.String("ImageAttributeName"), // Required
+ ImageId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DescribeImageAttribute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeImages() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeImagesInput{
+ DryRun: aws.Bool(true),
+ ExecutableUsers: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ ImageIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ Owners: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeImages(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeImportImageTasks() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeImportImageTasksInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ ImportTaskIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ MaxResults: aws.Int64(1),
+ NextToken: aws.String("String"),
+ }
+ resp, err := svc.DescribeImportImageTasks(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeImportSnapshotTasks() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeImportSnapshotTasksInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ ImportTaskIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ MaxResults: aws.Int64(1),
+ NextToken: aws.String("String"),
+ }
+ resp, err := svc.DescribeImportSnapshotTasks(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeInstanceAttribute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeInstanceAttributeInput{
+ Attribute: aws.String("InstanceAttributeName"), // Required
+ InstanceId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DescribeInstanceAttribute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeInstanceStatus() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeInstanceStatusInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ IncludeAllInstances: aws.Bool(true),
+ InstanceIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ MaxResults: aws.Int64(1),
+ NextToken: aws.String("String"),
+ }
+ resp, err := svc.DescribeInstanceStatus(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeInstances() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeInstancesInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ InstanceIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ MaxResults: aws.Int64(1),
+ NextToken: aws.String("String"),
+ }
+ resp, err := svc.DescribeInstances(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeInternetGateways() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeInternetGatewaysInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ InternetGatewayIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeInternetGateways(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeKeyPairs() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeKeyPairsInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ KeyNames: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeKeyPairs(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeMovingAddresses() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeMovingAddressesInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ MaxResults: aws.Int64(1),
+ NextToken: aws.String("String"),
+ PublicIps: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeMovingAddresses(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeNetworkAcls() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeNetworkAclsInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ NetworkAclIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeNetworkAcls(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeNetworkInterfaceAttribute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeNetworkInterfaceAttributeInput{
+ NetworkInterfaceId: aws.String("String"), // Required
+ Attribute: aws.String("NetworkInterfaceAttribute"),
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DescribeNetworkInterfaceAttribute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeNetworkInterfaces() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeNetworkInterfacesInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ NetworkInterfaceIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeNetworkInterfaces(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribePlacementGroups() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribePlacementGroupsInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ GroupNames: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribePlacementGroups(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribePrefixLists() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribePrefixListsInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ MaxResults: aws.Int64(1),
+ NextToken: aws.String("String"),
+ PrefixListIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribePrefixLists(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeRegions() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeRegionsInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ RegionNames: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeRegions(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeReservedInstances() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeReservedInstancesInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ OfferingType: aws.String("OfferingTypeValues"),
+ ReservedInstancesIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeReservedInstances(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeReservedInstancesListings() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeReservedInstancesListingsInput{
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ ReservedInstancesId: aws.String("String"),
+ ReservedInstancesListingId: aws.String("String"),
+ }
+ resp, err := svc.DescribeReservedInstancesListings(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeReservedInstancesModifications() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeReservedInstancesModificationsInput{
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ NextToken: aws.String("String"),
+ ReservedInstancesModificationIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeReservedInstancesModifications(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeReservedInstancesOfferings() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeReservedInstancesOfferingsInput{
+ AvailabilityZone: aws.String("String"),
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ IncludeMarketplace: aws.Bool(true),
+ InstanceTenancy: aws.String("Tenancy"),
+ InstanceType: aws.String("InstanceType"),
+ MaxDuration: aws.Int64(1),
+ MaxInstanceCount: aws.Int64(1),
+ MaxResults: aws.Int64(1),
+ MinDuration: aws.Int64(1),
+ NextToken: aws.String("String"),
+ OfferingType: aws.String("OfferingTypeValues"),
+ ProductDescription: aws.String("RIProductDescription"),
+ ReservedInstancesOfferingIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeReservedInstancesOfferings(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeRouteTables() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeRouteTablesInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ RouteTableIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeRouteTables(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeSecurityGroups() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeSecurityGroupsInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ GroupIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ GroupNames: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeSecurityGroups(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeSnapshotAttribute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeSnapshotAttributeInput{
+ Attribute: aws.String("SnapshotAttributeName"), // Required
+ SnapshotId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DescribeSnapshotAttribute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeSnapshots() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeSnapshotsInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ MaxResults: aws.Int64(1),
+ NextToken: aws.String("String"),
+ OwnerIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ RestorableByUserIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ SnapshotIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeSnapshots(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeSpotDatafeedSubscription() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeSpotDatafeedSubscriptionInput{
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DescribeSpotDatafeedSubscription(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeSpotFleetInstances() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeSpotFleetInstancesInput{
+ SpotFleetRequestId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ MaxResults: aws.Int64(1),
+ NextToken: aws.String("String"),
+ }
+ resp, err := svc.DescribeSpotFleetInstances(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeSpotFleetRequestHistory() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeSpotFleetRequestHistoryInput{
+ SpotFleetRequestId: aws.String("String"), // Required
+ StartTime: aws.Time(time.Now()), // Required
+ DryRun: aws.Bool(true),
+ EventType: aws.String("EventType"),
+ MaxResults: aws.Int64(1),
+ NextToken: aws.String("String"),
+ }
+ resp, err := svc.DescribeSpotFleetRequestHistory(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeSpotFleetRequests() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeSpotFleetRequestsInput{
+ DryRun: aws.Bool(true),
+ MaxResults: aws.Int64(1),
+ NextToken: aws.String("String"),
+ SpotFleetRequestIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeSpotFleetRequests(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeSpotInstanceRequests() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeSpotInstanceRequestsInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ SpotInstanceRequestIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeSpotInstanceRequests(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeSpotPriceHistory() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeSpotPriceHistoryInput{
+ AvailabilityZone: aws.String("String"),
+ DryRun: aws.Bool(true),
+ EndTime: aws.Time(time.Now()),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ InstanceTypes: []*string{
+ aws.String("InstanceType"), // Required
+ // More values...
+ },
+ MaxResults: aws.Int64(1),
+ NextToken: aws.String("String"),
+ ProductDescriptions: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ StartTime: aws.Time(time.Now()),
+ }
+ resp, err := svc.DescribeSpotPriceHistory(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeSubnets() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeSubnetsInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ SubnetIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeSubnets(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeTags() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeTagsInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ MaxResults: aws.Int64(1),
+ NextToken: aws.String("String"),
+ }
+ resp, err := svc.DescribeTags(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeVolumeAttribute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeVolumeAttributeInput{
+ VolumeId: aws.String("String"), // Required
+ Attribute: aws.String("VolumeAttributeName"),
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DescribeVolumeAttribute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeVolumeStatus() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeVolumeStatusInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ MaxResults: aws.Int64(1),
+ NextToken: aws.String("String"),
+ VolumeIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeVolumeStatus(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeVolumes() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeVolumesInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ MaxResults: aws.Int64(1),
+ NextToken: aws.String("String"),
+ VolumeIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeVolumes(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeVpcAttribute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeVpcAttributeInput{
+ VpcId: aws.String("String"), // Required
+ Attribute: aws.String("VpcAttributeName"),
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DescribeVpcAttribute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeVpcClassicLink() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeVpcClassicLinkInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ VpcIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeVpcClassicLink(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeVpcEndpointServices() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeVpcEndpointServicesInput{
+ DryRun: aws.Bool(true),
+ MaxResults: aws.Int64(1),
+ NextToken: aws.String("String"),
+ }
+ resp, err := svc.DescribeVpcEndpointServices(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeVpcEndpoints() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeVpcEndpointsInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ MaxResults: aws.Int64(1),
+ NextToken: aws.String("String"),
+ VpcEndpointIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeVpcEndpoints(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeVpcPeeringConnections() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeVpcPeeringConnectionsInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ VpcPeeringConnectionIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeVpcPeeringConnections(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeVpcs() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeVpcsInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ VpcIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeVpcs(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeVpnConnections() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeVpnConnectionsInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ VpnConnectionIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeVpnConnections(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DescribeVpnGateways() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DescribeVpnGatewaysInput{
+ DryRun: aws.Bool(true),
+ Filters: []*ec2.Filter{
+ { // Required
+ Name: aws.String("String"),
+ Values: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ VpnGatewayIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.DescribeVpnGateways(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DetachClassicLinkVpc() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DetachClassicLinkVpcInput{
+ InstanceId: aws.String("String"), // Required
+ VpcId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DetachClassicLinkVpc(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DetachInternetGateway() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DetachInternetGatewayInput{
+ InternetGatewayId: aws.String("String"), // Required
+ VpcId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DetachInternetGateway(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DetachNetworkInterface() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DetachNetworkInterfaceInput{
+ AttachmentId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ Force: aws.Bool(true),
+ }
+ resp, err := svc.DetachNetworkInterface(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DetachVolume() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DetachVolumeInput{
+ VolumeId: aws.String("String"), // Required
+ Device: aws.String("String"),
+ DryRun: aws.Bool(true),
+ Force: aws.Bool(true),
+ InstanceId: aws.String("String"),
+ }
+ resp, err := svc.DetachVolume(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DetachVpnGateway() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DetachVpnGatewayInput{
+ VpcId: aws.String("String"), // Required
+ VpnGatewayId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DetachVpnGateway(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DisableVgwRoutePropagation() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DisableVgwRoutePropagationInput{
+ GatewayId: aws.String("String"), // Required
+ RouteTableId: aws.String("String"), // Required
+ }
+ resp, err := svc.DisableVgwRoutePropagation(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DisableVpcClassicLink() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DisableVpcClassicLinkInput{
+ VpcId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DisableVpcClassicLink(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DisassociateAddress() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DisassociateAddressInput{
+ AssociationId: aws.String("String"),
+ DryRun: aws.Bool(true),
+ PublicIp: aws.String("String"),
+ }
+ resp, err := svc.DisassociateAddress(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_DisassociateRouteTable() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.DisassociateRouteTableInput{
+ AssociationId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.DisassociateRouteTable(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_EnableVgwRoutePropagation() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.EnableVgwRoutePropagationInput{
+ GatewayId: aws.String("String"), // Required
+ RouteTableId: aws.String("String"), // Required
+ }
+ resp, err := svc.EnableVgwRoutePropagation(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_EnableVolumeIO() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.EnableVolumeIOInput{
+ VolumeId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.EnableVolumeIO(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_EnableVpcClassicLink() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.EnableVpcClassicLinkInput{
+ VpcId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.EnableVpcClassicLink(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_GetConsoleOutput() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.GetConsoleOutputInput{
+ InstanceId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.GetConsoleOutput(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_GetPasswordData() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.GetPasswordDataInput{
+ InstanceId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.GetPasswordData(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ImportImage() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ImportImageInput{
+ Architecture: aws.String("String"),
+ ClientData: &ec2.ClientData{
+ Comment: aws.String("String"),
+ UploadEnd: aws.Time(time.Now()),
+ UploadSize: aws.Float64(1.0),
+ UploadStart: aws.Time(time.Now()),
+ },
+ ClientToken: aws.String("String"),
+ Description: aws.String("String"),
+ DiskContainers: []*ec2.ImageDiskContainer{
+ { // Required
+ Description: aws.String("String"),
+ DeviceName: aws.String("String"),
+ Format: aws.String("String"),
+ SnapshotId: aws.String("String"),
+ Url: aws.String("String"),
+ UserBucket: &ec2.UserBucket{
+ S3Bucket: aws.String("String"),
+ S3Key: aws.String("String"),
+ },
+ },
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ Hypervisor: aws.String("String"),
+ LicenseType: aws.String("String"),
+ Platform: aws.String("String"),
+ RoleName: aws.String("String"),
+ }
+ resp, err := svc.ImportImage(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ImportInstance() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ImportInstanceInput{
+ Platform: aws.String("PlatformValues"), // Required
+ Description: aws.String("String"),
+ DiskImages: []*ec2.DiskImage{
+ { // Required
+ Description: aws.String("String"),
+ Image: &ec2.DiskImageDetail{
+ Bytes: aws.Int64(1), // Required
+ Format: aws.String("DiskImageFormat"), // Required
+ ImportManifestUrl: aws.String("String"), // Required
+ },
+ Volume: &ec2.VolumeDetail{
+ Size: aws.Int64(1), // Required
+ },
+ },
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ LaunchSpecification: &ec2.ImportInstanceLaunchSpecification{
+ AdditionalInfo: aws.String("String"),
+ Architecture: aws.String("ArchitectureValues"),
+ GroupIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ GroupNames: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ InstanceInitiatedShutdownBehavior: aws.String("ShutdownBehavior"),
+ InstanceType: aws.String("InstanceType"),
+ Monitoring: aws.Bool(true),
+ Placement: &ec2.Placement{
+ AvailabilityZone: aws.String("String"),
+ GroupName: aws.String("String"),
+ Tenancy: aws.String("Tenancy"),
+ },
+ PrivateIpAddress: aws.String("String"),
+ SubnetId: aws.String("String"),
+ UserData: &ec2.UserData{
+ Data: aws.String("String"),
+ },
+ },
+ }
+ resp, err := svc.ImportInstance(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ImportKeyPair() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ImportKeyPairInput{
+ KeyName: aws.String("String"), // Required
+ PublicKeyMaterial: []byte("PAYLOAD"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.ImportKeyPair(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ImportSnapshot() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ImportSnapshotInput{
+ ClientData: &ec2.ClientData{
+ Comment: aws.String("String"),
+ UploadEnd: aws.Time(time.Now()),
+ UploadSize: aws.Float64(1.0),
+ UploadStart: aws.Time(time.Now()),
+ },
+ ClientToken: aws.String("String"),
+ Description: aws.String("String"),
+ DiskContainer: &ec2.SnapshotDiskContainer{
+ Description: aws.String("String"),
+ Format: aws.String("String"),
+ Url: aws.String("String"),
+ UserBucket: &ec2.UserBucket{
+ S3Bucket: aws.String("String"),
+ S3Key: aws.String("String"),
+ },
+ },
+ DryRun: aws.Bool(true),
+ RoleName: aws.String("String"),
+ }
+ resp, err := svc.ImportSnapshot(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ImportVolume() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ImportVolumeInput{
+ AvailabilityZone: aws.String("String"), // Required
+ Image: &ec2.DiskImageDetail{ // Required
+ Bytes: aws.Int64(1), // Required
+ Format: aws.String("DiskImageFormat"), // Required
+ ImportManifestUrl: aws.String("String"), // Required
+ },
+ Volume: &ec2.VolumeDetail{ // Required
+ Size: aws.Int64(1), // Required
+ },
+ Description: aws.String("String"),
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.ImportVolume(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ModifyImageAttribute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ModifyImageAttributeInput{
+ ImageId: aws.String("String"), // Required
+ Attribute: aws.String("String"),
+ Description: &ec2.AttributeValue{
+ Value: aws.String("String"),
+ },
+ DryRun: aws.Bool(true),
+ LaunchPermission: &ec2.LaunchPermissionModifications{
+ Add: []*ec2.LaunchPermission{
+ { // Required
+ Group: aws.String("PermissionGroup"),
+ UserId: aws.String("String"),
+ },
+ // More values...
+ },
+ Remove: []*ec2.LaunchPermission{
+ { // Required
+ Group: aws.String("PermissionGroup"),
+ UserId: aws.String("String"),
+ },
+ // More values...
+ },
+ },
+ OperationType: aws.String("OperationType"),
+ ProductCodes: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ UserGroups: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ UserIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ Value: aws.String("String"),
+ }
+ resp, err := svc.ModifyImageAttribute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ModifyInstanceAttribute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ModifyInstanceAttributeInput{
+ InstanceId: aws.String("String"), // Required
+ Attribute: aws.String("InstanceAttributeName"),
+ BlockDeviceMappings: []*ec2.InstanceBlockDeviceMappingSpecification{
+ { // Required
+ DeviceName: aws.String("String"),
+ Ebs: &ec2.EbsInstanceBlockDeviceSpecification{
+ DeleteOnTermination: aws.Bool(true),
+ VolumeId: aws.String("String"),
+ },
+ NoDevice: aws.String("String"),
+ VirtualName: aws.String("String"),
+ },
+ // More values...
+ },
+ DisableApiTermination: &ec2.AttributeBooleanValue{
+ Value: aws.Bool(true),
+ },
+ DryRun: aws.Bool(true),
+ EbsOptimized: &ec2.AttributeBooleanValue{
+ Value: aws.Bool(true),
+ },
+ Groups: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ InstanceInitiatedShutdownBehavior: &ec2.AttributeValue{
+ Value: aws.String("String"),
+ },
+ InstanceType: &ec2.AttributeValue{
+ Value: aws.String("String"),
+ },
+ Kernel: &ec2.AttributeValue{
+ Value: aws.String("String"),
+ },
+ Ramdisk: &ec2.AttributeValue{
+ Value: aws.String("String"),
+ },
+ SourceDestCheck: &ec2.AttributeBooleanValue{
+ Value: aws.Bool(true),
+ },
+ SriovNetSupport: &ec2.AttributeValue{
+ Value: aws.String("String"),
+ },
+ UserData: &ec2.BlobAttributeValue{
+ Value: []byte("PAYLOAD"),
+ },
+ Value: aws.String("String"),
+ }
+ resp, err := svc.ModifyInstanceAttribute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ModifyNetworkInterfaceAttribute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ModifyNetworkInterfaceAttributeInput{
+ NetworkInterfaceId: aws.String("String"), // Required
+ Attachment: &ec2.NetworkInterfaceAttachmentChanges{
+ AttachmentId: aws.String("String"),
+ DeleteOnTermination: aws.Bool(true),
+ },
+ Description: &ec2.AttributeValue{
+ Value: aws.String("String"),
+ },
+ DryRun: aws.Bool(true),
+ Groups: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ SourceDestCheck: &ec2.AttributeBooleanValue{
+ Value: aws.Bool(true),
+ },
+ }
+ resp, err := svc.ModifyNetworkInterfaceAttribute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ModifyReservedInstances() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ModifyReservedInstancesInput{
+ ReservedInstancesIds: []*string{ // Required
+ aws.String("String"), // Required
+ // More values...
+ },
+ TargetConfigurations: []*ec2.ReservedInstancesConfiguration{ // Required
+ { // Required
+ AvailabilityZone: aws.String("String"),
+ InstanceCount: aws.Int64(1),
+ InstanceType: aws.String("InstanceType"),
+ Platform: aws.String("String"),
+ },
+ // More values...
+ },
+ ClientToken: aws.String("String"),
+ }
+ resp, err := svc.ModifyReservedInstances(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ModifySnapshotAttribute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ModifySnapshotAttributeInput{
+ SnapshotId: aws.String("String"), // Required
+ Attribute: aws.String("SnapshotAttributeName"),
+ CreateVolumePermission: &ec2.CreateVolumePermissionModifications{
+ Add: []*ec2.CreateVolumePermission{
+ { // Required
+ Group: aws.String("PermissionGroup"),
+ UserId: aws.String("String"),
+ },
+ // More values...
+ },
+ Remove: []*ec2.CreateVolumePermission{
+ { // Required
+ Group: aws.String("PermissionGroup"),
+ UserId: aws.String("String"),
+ },
+ // More values...
+ },
+ },
+ DryRun: aws.Bool(true),
+ GroupNames: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ OperationType: aws.String("OperationType"),
+ UserIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.ModifySnapshotAttribute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ModifySpotFleetRequest() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ModifySpotFleetRequestInput{
+ SpotFleetRequestId: aws.String("String"), // Required
+ ExcessCapacityTerminationPolicy: aws.String("ExcessCapacityTerminationPolicy"),
+ TargetCapacity: aws.Int64(1),
+ }
+ resp, err := svc.ModifySpotFleetRequest(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ModifySubnetAttribute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ModifySubnetAttributeInput{
+ SubnetId: aws.String("String"), // Required
+ MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{
+ Value: aws.Bool(true),
+ },
+ }
+ resp, err := svc.ModifySubnetAttribute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ModifyVolumeAttribute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ModifyVolumeAttributeInput{
+ VolumeId: aws.String("String"), // Required
+ AutoEnableIO: &ec2.AttributeBooleanValue{
+ Value: aws.Bool(true),
+ },
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.ModifyVolumeAttribute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ModifyVpcAttribute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ModifyVpcAttributeInput{
+ VpcId: aws.String("String"), // Required
+ EnableDnsHostnames: &ec2.AttributeBooleanValue{
+ Value: aws.Bool(true),
+ },
+ EnableDnsSupport: &ec2.AttributeBooleanValue{
+ Value: aws.Bool(true),
+ },
+ }
+ resp, err := svc.ModifyVpcAttribute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ModifyVpcEndpoint() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ModifyVpcEndpointInput{
+ VpcEndpointId: aws.String("String"), // Required
+ AddRouteTableIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ PolicyDocument: aws.String("String"),
+ RemoveRouteTableIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ ResetPolicy: aws.Bool(true),
+ }
+ resp, err := svc.ModifyVpcEndpoint(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_MonitorInstances() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.MonitorInstancesInput{
+ InstanceIds: []*string{ // Required
+ aws.String("String"), // Required
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.MonitorInstances(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_MoveAddressToVpc() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.MoveAddressToVpcInput{
+ PublicIp: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.MoveAddressToVpc(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_PurchaseReservedInstancesOffering() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.PurchaseReservedInstancesOfferingInput{
+ InstanceCount: aws.Int64(1), // Required
+ ReservedInstancesOfferingId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ LimitPrice: &ec2.ReservedInstanceLimitPrice{
+ Amount: aws.Float64(1.0),
+ CurrencyCode: aws.String("CurrencyCodeValues"),
+ },
+ }
+ resp, err := svc.PurchaseReservedInstancesOffering(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_RebootInstances() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.RebootInstancesInput{
+ InstanceIds: []*string{ // Required
+ aws.String("String"), // Required
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.RebootInstances(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_RegisterImage() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.RegisterImageInput{
+ Name: aws.String("String"), // Required
+ Architecture: aws.String("ArchitectureValues"),
+ BlockDeviceMappings: []*ec2.BlockDeviceMapping{
+ { // Required
+ DeviceName: aws.String("String"),
+ Ebs: &ec2.EbsBlockDevice{
+ DeleteOnTermination: aws.Bool(true),
+ Encrypted: aws.Bool(true),
+ Iops: aws.Int64(1),
+ SnapshotId: aws.String("String"),
+ VolumeSize: aws.Int64(1),
+ VolumeType: aws.String("VolumeType"),
+ },
+ NoDevice: aws.String("String"),
+ VirtualName: aws.String("String"),
+ },
+ // More values...
+ },
+ Description: aws.String("String"),
+ DryRun: aws.Bool(true),
+ ImageLocation: aws.String("String"),
+ KernelId: aws.String("String"),
+ RamdiskId: aws.String("String"),
+ RootDeviceName: aws.String("String"),
+ SriovNetSupport: aws.String("String"),
+ VirtualizationType: aws.String("String"),
+ }
+ resp, err := svc.RegisterImage(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_RejectVpcPeeringConnection() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.RejectVpcPeeringConnectionInput{
+ VpcPeeringConnectionId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.RejectVpcPeeringConnection(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ReleaseAddress() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ReleaseAddressInput{
+ AllocationId: aws.String("String"),
+ DryRun: aws.Bool(true),
+ PublicIp: aws.String("String"),
+ }
+ resp, err := svc.ReleaseAddress(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ReplaceNetworkAclAssociation() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ReplaceNetworkAclAssociationInput{
+ AssociationId: aws.String("String"), // Required
+ NetworkAclId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.ReplaceNetworkAclAssociation(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ReplaceNetworkAclEntry() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ReplaceNetworkAclEntryInput{
+ CidrBlock: aws.String("String"), // Required
+ Egress: aws.Bool(true), // Required
+ NetworkAclId: aws.String("String"), // Required
+ Protocol: aws.String("String"), // Required
+ RuleAction: aws.String("RuleAction"), // Required
+ RuleNumber: aws.Int64(1), // Required
+ DryRun: aws.Bool(true),
+ IcmpTypeCode: &ec2.IcmpTypeCode{
+ Code: aws.Int64(1),
+ Type: aws.Int64(1),
+ },
+ PortRange: &ec2.PortRange{
+ From: aws.Int64(1),
+ To: aws.Int64(1),
+ },
+ }
+ resp, err := svc.ReplaceNetworkAclEntry(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ReplaceRoute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ReplaceRouteInput{
+ DestinationCidrBlock: aws.String("String"), // Required
+ RouteTableId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ GatewayId: aws.String("String"),
+ InstanceId: aws.String("String"),
+ NetworkInterfaceId: aws.String("String"),
+ VpcPeeringConnectionId: aws.String("String"),
+ }
+ resp, err := svc.ReplaceRoute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ReplaceRouteTableAssociation() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ReplaceRouteTableAssociationInput{
+ AssociationId: aws.String("String"), // Required
+ RouteTableId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.ReplaceRouteTableAssociation(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ReportInstanceStatus() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ReportInstanceStatusInput{
+ Instances: []*string{ // Required
+ aws.String("String"), // Required
+ // More values...
+ },
+ ReasonCodes: []*string{ // Required
+ aws.String("ReportInstanceReasonCodes"), // Required
+ // More values...
+ },
+ Status: aws.String("ReportStatusType"), // Required
+ Description: aws.String("String"),
+ DryRun: aws.Bool(true),
+ EndTime: aws.Time(time.Now()),
+ StartTime: aws.Time(time.Now()),
+ }
+ resp, err := svc.ReportInstanceStatus(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_RequestSpotFleet() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.RequestSpotFleetInput{
+ SpotFleetRequestConfig: &ec2.SpotFleetRequestConfigData{ // Required
+ IamFleetRole: aws.String("String"), // Required
+ LaunchSpecifications: []*ec2.SpotFleetLaunchSpecification{ // Required
+ { // Required
+ AddressingType: aws.String("String"),
+ BlockDeviceMappings: []*ec2.BlockDeviceMapping{
+ { // Required
+ DeviceName: aws.String("String"),
+ Ebs: &ec2.EbsBlockDevice{
+ DeleteOnTermination: aws.Bool(true),
+ Encrypted: aws.Bool(true),
+ Iops: aws.Int64(1),
+ SnapshotId: aws.String("String"),
+ VolumeSize: aws.Int64(1),
+ VolumeType: aws.String("VolumeType"),
+ },
+ NoDevice: aws.String("String"),
+ VirtualName: aws.String("String"),
+ },
+ // More values...
+ },
+ EbsOptimized: aws.Bool(true),
+ IamInstanceProfile: &ec2.IamInstanceProfileSpecification{
+ Arn: aws.String("String"),
+ Name: aws.String("String"),
+ },
+ ImageId: aws.String("String"),
+ InstanceType: aws.String("InstanceType"),
+ KernelId: aws.String("String"),
+ KeyName: aws.String("String"),
+ Monitoring: &ec2.SpotFleetMonitoring{
+ Enabled: aws.Bool(true),
+ },
+ NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{
+ { // Required
+ AssociatePublicIpAddress: aws.Bool(true),
+ DeleteOnTermination: aws.Bool(true),
+ Description: aws.String("String"),
+ DeviceIndex: aws.Int64(1),
+ Groups: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ NetworkInterfaceId: aws.String("String"),
+ PrivateIpAddress: aws.String("String"),
+ PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{
+ { // Required
+ PrivateIpAddress: aws.String("String"), // Required
+ Primary: aws.Bool(true),
+ },
+ // More values...
+ },
+ SecondaryPrivateIpAddressCount: aws.Int64(1),
+ SubnetId: aws.String("String"),
+ },
+ // More values...
+ },
+ Placement: &ec2.SpotPlacement{
+ AvailabilityZone: aws.String("String"),
+ GroupName: aws.String("String"),
+ },
+ RamdiskId: aws.String("String"),
+ SecurityGroups: []*ec2.GroupIdentifier{
+ { // Required
+ GroupId: aws.String("String"),
+ GroupName: aws.String("String"),
+ },
+ // More values...
+ },
+ SpotPrice: aws.String("String"),
+ SubnetId: aws.String("String"),
+ UserData: aws.String("String"),
+ WeightedCapacity: aws.Float64(1.0),
+ },
+ // More values...
+ },
+ SpotPrice: aws.String("String"), // Required
+ TargetCapacity: aws.Int64(1), // Required
+ AllocationStrategy: aws.String("AllocationStrategy"),
+ ClientToken: aws.String("String"),
+ ExcessCapacityTerminationPolicy: aws.String("ExcessCapacityTerminationPolicy"),
+ TerminateInstancesWithExpiration: aws.Bool(true),
+ ValidFrom: aws.Time(time.Now()),
+ ValidUntil: aws.Time(time.Now()),
+ },
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.RequestSpotFleet(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_RequestSpotInstances() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.RequestSpotInstancesInput{
+ SpotPrice: aws.String("String"), // Required
+ AvailabilityZoneGroup: aws.String("String"),
+ BlockDurationMinutes: aws.Int64(1),
+ ClientToken: aws.String("String"),
+ DryRun: aws.Bool(true),
+ InstanceCount: aws.Int64(1),
+ LaunchGroup: aws.String("String"),
+ LaunchSpecification: &ec2.RequestSpotLaunchSpecification{
+ AddressingType: aws.String("String"),
+ BlockDeviceMappings: []*ec2.BlockDeviceMapping{
+ { // Required
+ DeviceName: aws.String("String"),
+ Ebs: &ec2.EbsBlockDevice{
+ DeleteOnTermination: aws.Bool(true),
+ Encrypted: aws.Bool(true),
+ Iops: aws.Int64(1),
+ SnapshotId: aws.String("String"),
+ VolumeSize: aws.Int64(1),
+ VolumeType: aws.String("VolumeType"),
+ },
+ NoDevice: aws.String("String"),
+ VirtualName: aws.String("String"),
+ },
+ // More values...
+ },
+ EbsOptimized: aws.Bool(true),
+ IamInstanceProfile: &ec2.IamInstanceProfileSpecification{
+ Arn: aws.String("String"),
+ Name: aws.String("String"),
+ },
+ ImageId: aws.String("String"),
+ InstanceType: aws.String("InstanceType"),
+ KernelId: aws.String("String"),
+ KeyName: aws.String("String"),
+ Monitoring: &ec2.RunInstancesMonitoringEnabled{
+ Enabled: aws.Bool(true), // Required
+ },
+ NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{
+ { // Required
+ AssociatePublicIpAddress: aws.Bool(true),
+ DeleteOnTermination: aws.Bool(true),
+ Description: aws.String("String"),
+ DeviceIndex: aws.Int64(1),
+ Groups: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ NetworkInterfaceId: aws.String("String"),
+ PrivateIpAddress: aws.String("String"),
+ PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{
+ { // Required
+ PrivateIpAddress: aws.String("String"), // Required
+ Primary: aws.Bool(true),
+ },
+ // More values...
+ },
+ SecondaryPrivateIpAddressCount: aws.Int64(1),
+ SubnetId: aws.String("String"),
+ },
+ // More values...
+ },
+ Placement: &ec2.SpotPlacement{
+ AvailabilityZone: aws.String("String"),
+ GroupName: aws.String("String"),
+ },
+ RamdiskId: aws.String("String"),
+ SecurityGroupIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ SecurityGroups: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ SubnetId: aws.String("String"),
+ UserData: aws.String("String"),
+ },
+ Type: aws.String("SpotInstanceType"),
+ ValidFrom: aws.Time(time.Now()),
+ ValidUntil: aws.Time(time.Now()),
+ }
+ resp, err := svc.RequestSpotInstances(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ResetImageAttribute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ResetImageAttributeInput{
+ Attribute: aws.String("ResetImageAttributeName"), // Required
+ ImageId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.ResetImageAttribute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ResetInstanceAttribute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ResetInstanceAttributeInput{
+ Attribute: aws.String("InstanceAttributeName"), // Required
+ InstanceId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.ResetInstanceAttribute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ResetNetworkInterfaceAttribute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ResetNetworkInterfaceAttributeInput{
+ NetworkInterfaceId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ SourceDestCheck: aws.String("String"),
+ }
+ resp, err := svc.ResetNetworkInterfaceAttribute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_ResetSnapshotAttribute() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.ResetSnapshotAttributeInput{
+ Attribute: aws.String("SnapshotAttributeName"), // Required
+ SnapshotId: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.ResetSnapshotAttribute(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_RestoreAddressToClassic() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.RestoreAddressToClassicInput{
+ PublicIp: aws.String("String"), // Required
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.RestoreAddressToClassic(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_RevokeSecurityGroupEgress() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.RevokeSecurityGroupEgressInput{
+ GroupId: aws.String("String"), // Required
+ CidrIp: aws.String("String"),
+ DryRun: aws.Bool(true),
+ FromPort: aws.Int64(1),
+ IpPermissions: []*ec2.IpPermission{
+ { // Required
+ FromPort: aws.Int64(1),
+ IpProtocol: aws.String("String"),
+ IpRanges: []*ec2.IpRange{
+ { // Required
+ CidrIp: aws.String("String"),
+ },
+ // More values...
+ },
+ PrefixListIds: []*ec2.PrefixListId{
+ { // Required
+ PrefixListId: aws.String("String"),
+ },
+ // More values...
+ },
+ ToPort: aws.Int64(1),
+ UserIdGroupPairs: []*ec2.UserIdGroupPair{
+ { // Required
+ GroupId: aws.String("String"),
+ GroupName: aws.String("String"),
+ UserId: aws.String("String"),
+ },
+ // More values...
+ },
+ },
+ // More values...
+ },
+ IpProtocol: aws.String("String"),
+ SourceSecurityGroupName: aws.String("String"),
+ SourceSecurityGroupOwnerId: aws.String("String"),
+ ToPort: aws.Int64(1),
+ }
+ resp, err := svc.RevokeSecurityGroupEgress(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_RevokeSecurityGroupIngress() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.RevokeSecurityGroupIngressInput{
+ CidrIp: aws.String("String"),
+ DryRun: aws.Bool(true),
+ FromPort: aws.Int64(1),
+ GroupId: aws.String("String"),
+ GroupName: aws.String("String"),
+ IpPermissions: []*ec2.IpPermission{
+ { // Required
+ FromPort: aws.Int64(1),
+ IpProtocol: aws.String("String"),
+ IpRanges: []*ec2.IpRange{
+ { // Required
+ CidrIp: aws.String("String"),
+ },
+ // More values...
+ },
+ PrefixListIds: []*ec2.PrefixListId{
+ { // Required
+ PrefixListId: aws.String("String"),
+ },
+ // More values...
+ },
+ ToPort: aws.Int64(1),
+ UserIdGroupPairs: []*ec2.UserIdGroupPair{
+ { // Required
+ GroupId: aws.String("String"),
+ GroupName: aws.String("String"),
+ UserId: aws.String("String"),
+ },
+ // More values...
+ },
+ },
+ // More values...
+ },
+ IpProtocol: aws.String("String"),
+ SourceSecurityGroupName: aws.String("String"),
+ SourceSecurityGroupOwnerId: aws.String("String"),
+ ToPort: aws.Int64(1),
+ }
+ resp, err := svc.RevokeSecurityGroupIngress(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_RunInstances() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.RunInstancesInput{
+ ImageId: aws.String("String"), // Required
+ MaxCount: aws.Int64(1), // Required
+ MinCount: aws.Int64(1), // Required
+ AdditionalInfo: aws.String("String"),
+ BlockDeviceMappings: []*ec2.BlockDeviceMapping{
+ { // Required
+ DeviceName: aws.String("String"),
+ Ebs: &ec2.EbsBlockDevice{
+ DeleteOnTermination: aws.Bool(true),
+ Encrypted: aws.Bool(true),
+ Iops: aws.Int64(1),
+ SnapshotId: aws.String("String"),
+ VolumeSize: aws.Int64(1),
+ VolumeType: aws.String("VolumeType"),
+ },
+ NoDevice: aws.String("String"),
+ VirtualName: aws.String("String"),
+ },
+ // More values...
+ },
+ ClientToken: aws.String("String"),
+ DisableApiTermination: aws.Bool(true),
+ DryRun: aws.Bool(true),
+ EbsOptimized: aws.Bool(true),
+ IamInstanceProfile: &ec2.IamInstanceProfileSpecification{
+ Arn: aws.String("String"),
+ Name: aws.String("String"),
+ },
+ InstanceInitiatedShutdownBehavior: aws.String("ShutdownBehavior"),
+ InstanceType: aws.String("InstanceType"),
+ KernelId: aws.String("String"),
+ KeyName: aws.String("String"),
+ Monitoring: &ec2.RunInstancesMonitoringEnabled{
+ Enabled: aws.Bool(true), // Required
+ },
+ NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{
+ { // Required
+ AssociatePublicIpAddress: aws.Bool(true),
+ DeleteOnTermination: aws.Bool(true),
+ Description: aws.String("String"),
+ DeviceIndex: aws.Int64(1),
+ Groups: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ NetworkInterfaceId: aws.String("String"),
+ PrivateIpAddress: aws.String("String"),
+ PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{
+ { // Required
+ PrivateIpAddress: aws.String("String"), // Required
+ Primary: aws.Bool(true),
+ },
+ // More values...
+ },
+ SecondaryPrivateIpAddressCount: aws.Int64(1),
+ SubnetId: aws.String("String"),
+ },
+ // More values...
+ },
+ Placement: &ec2.Placement{
+ AvailabilityZone: aws.String("String"),
+ GroupName: aws.String("String"),
+ Tenancy: aws.String("Tenancy"),
+ },
+ PrivateIpAddress: aws.String("String"),
+ RamdiskId: aws.String("String"),
+ SecurityGroupIds: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ SecurityGroups: []*string{
+ aws.String("String"), // Required
+ // More values...
+ },
+ SubnetId: aws.String("String"),
+ UserData: aws.String("String"),
+ }
+ resp, err := svc.RunInstances(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_StartInstances() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.StartInstancesInput{
+ InstanceIds: []*string{ // Required
+ aws.String("String"), // Required
+ // More values...
+ },
+ AdditionalInfo: aws.String("String"),
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.StartInstances(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_StopInstances() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.StopInstancesInput{
+ InstanceIds: []*string{ // Required
+ aws.String("String"), // Required
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ Force: aws.Bool(true),
+ }
+ resp, err := svc.StopInstances(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_TerminateInstances() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.TerminateInstancesInput{
+ InstanceIds: []*string{ // Required
+ aws.String("String"), // Required
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.TerminateInstances(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_UnassignPrivateIpAddresses() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.UnassignPrivateIpAddressesInput{
+ NetworkInterfaceId: aws.String("String"), // Required
+ PrivateIpAddresses: []*string{ // Required
+ aws.String("String"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.UnassignPrivateIpAddresses(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleEC2_UnmonitorInstances() {
+ svc := ec2.New(session.New())
+
+ params := &ec2.UnmonitorInstancesInput{
+ InstanceIds: []*string{ // Required
+ aws.String("String"), // Required
+ // More values...
+ },
+ DryRun: aws.Bool(true),
+ }
+ resp, err := svc.UnmonitorInstances(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/service.go
index 51a8aef7c..2ff4220f7 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/service.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/service.go
@@ -4,52 +4,75 @@ package ec2
import (
"github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/defaults"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/aws/service"
- "github.com/aws/aws-sdk-go/aws/service/serviceinfo"
- "github.com/aws/aws-sdk-go/internal/protocol/ec2query"
- "github.com/aws/aws-sdk-go/internal/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/ec2query"
+ "github.com/aws/aws-sdk-go/private/signer/v4"
)
// Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity
// in the Amazon Web Services (AWS) cloud. Using Amazon EC2 eliminates your
// need to invest in hardware up front, so you can develop and deploy applications
// faster.
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
type EC2 struct {
- *service.Service
+ *client.Client
}
-// Used for custom service initialization logic
-var initService func(*service.Service)
+// Used for custom client initialization logic
+var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
-// New returns a new EC2 client.
-func New(config *aws.Config) *EC2 {
- service := &service.Service{
- ServiceInfo: serviceinfo.ServiceInfo{
- Config: defaults.DefaultConfig.Merge(config),
- ServiceName: "ec2",
- APIVersion: "2015-10-01",
- },
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "ec2"
+
+// New creates a new instance of the EC2 client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a EC2 client from just a session.
+// svc := ec2.New(mySession)
+//
+// // Create a EC2 client with additional configuration
+// svc := ec2.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2 {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *EC2 {
+ svc := &EC2{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2015-10-01",
+ },
+ handlers,
+ ),
}
- service.Initialize()
// Handlers
- service.Handlers.Sign.PushBack(v4.Sign)
- service.Handlers.Build.PushBack(ec2query.Build)
- service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
- service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
- service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(ec2query.Build)
+ svc.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
- // Run custom service initialization if present
- if initService != nil {
- initService(service)
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
}
- return &EC2{service}
+ return svc
}
// newRequest creates a new request for a EC2 operation and runs any
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/iam/examples_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/iam/examples_test.go
new file mode 100644
index 000000000..95e009af9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/iam/examples_test.go
@@ -0,0 +1,2364 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package iam_test
+
+import (
+ "bytes"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/iam"
+)
+
+var _ time.Duration
+var _ bytes.Buffer
+
+func ExampleIAM_AddClientIDToOpenIDConnectProvider() {
+ svc := iam.New(session.New())
+
+ params := &iam.AddClientIDToOpenIDConnectProviderInput{
+ ClientID: aws.String("clientIDType"), // Required
+ OpenIDConnectProviderArn: aws.String("arnType"), // Required
+ }
+ resp, err := svc.AddClientIDToOpenIDConnectProvider(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_AddRoleToInstanceProfile() {
+ svc := iam.New(session.New())
+
+ params := &iam.AddRoleToInstanceProfileInput{
+ InstanceProfileName: aws.String("instanceProfileNameType"), // Required
+ RoleName: aws.String("roleNameType"), // Required
+ }
+ resp, err := svc.AddRoleToInstanceProfile(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_AddUserToGroup() {
+ svc := iam.New(session.New())
+
+ params := &iam.AddUserToGroupInput{
+ GroupName: aws.String("groupNameType"), // Required
+ UserName: aws.String("existingUserNameType"), // Required
+ }
+ resp, err := svc.AddUserToGroup(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_AttachGroupPolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.AttachGroupPolicyInput{
+ GroupName: aws.String("groupNameType"), // Required
+ PolicyArn: aws.String("arnType"), // Required
+ }
+ resp, err := svc.AttachGroupPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_AttachRolePolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.AttachRolePolicyInput{
+ PolicyArn: aws.String("arnType"), // Required
+ RoleName: aws.String("roleNameType"), // Required
+ }
+ resp, err := svc.AttachRolePolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_AttachUserPolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.AttachUserPolicyInput{
+ PolicyArn: aws.String("arnType"), // Required
+ UserName: aws.String("userNameType"), // Required
+ }
+ resp, err := svc.AttachUserPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ChangePassword() {
+ svc := iam.New(session.New())
+
+ params := &iam.ChangePasswordInput{
+ NewPassword: aws.String("passwordType"), // Required
+ OldPassword: aws.String("passwordType"), // Required
+ }
+ resp, err := svc.ChangePassword(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_CreateAccessKey() {
+ svc := iam.New(session.New())
+
+ params := &iam.CreateAccessKeyInput{
+ UserName: aws.String("existingUserNameType"),
+ }
+ resp, err := svc.CreateAccessKey(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_CreateAccountAlias() {
+ svc := iam.New(session.New())
+
+ params := &iam.CreateAccountAliasInput{
+ AccountAlias: aws.String("accountAliasType"), // Required
+ }
+ resp, err := svc.CreateAccountAlias(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_CreateGroup() {
+ svc := iam.New(session.New())
+
+ params := &iam.CreateGroupInput{
+ GroupName: aws.String("groupNameType"), // Required
+ Path: aws.String("pathType"),
+ }
+ resp, err := svc.CreateGroup(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_CreateInstanceProfile() {
+ svc := iam.New(session.New())
+
+ params := &iam.CreateInstanceProfileInput{
+ InstanceProfileName: aws.String("instanceProfileNameType"), // Required
+ Path: aws.String("pathType"),
+ }
+ resp, err := svc.CreateInstanceProfile(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_CreateLoginProfile() {
+ svc := iam.New(session.New())
+
+ params := &iam.CreateLoginProfileInput{
+ Password: aws.String("passwordType"), // Required
+ UserName: aws.String("userNameType"), // Required
+ PasswordResetRequired: aws.Bool(true),
+ }
+ resp, err := svc.CreateLoginProfile(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_CreateOpenIDConnectProvider() {
+ svc := iam.New(session.New())
+
+ params := &iam.CreateOpenIDConnectProviderInput{
+ ThumbprintList: []*string{ // Required
+ aws.String("thumbprintType"), // Required
+ // More values...
+ },
+ Url: aws.String("OpenIDConnectProviderUrlType"), // Required
+ ClientIDList: []*string{
+ aws.String("clientIDType"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.CreateOpenIDConnectProvider(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_CreatePolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.CreatePolicyInput{
+ PolicyDocument: aws.String("policyDocumentType"), // Required
+ PolicyName: aws.String("policyNameType"), // Required
+ Description: aws.String("policyDescriptionType"),
+ Path: aws.String("policyPathType"),
+ }
+ resp, err := svc.CreatePolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_CreatePolicyVersion() {
+ svc := iam.New(session.New())
+
+ params := &iam.CreatePolicyVersionInput{
+ PolicyArn: aws.String("arnType"), // Required
+ PolicyDocument: aws.String("policyDocumentType"), // Required
+ SetAsDefault: aws.Bool(true),
+ }
+ resp, err := svc.CreatePolicyVersion(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_CreateRole() {
+ svc := iam.New(session.New())
+
+ params := &iam.CreateRoleInput{
+ AssumeRolePolicyDocument: aws.String("policyDocumentType"), // Required
+ RoleName: aws.String("roleNameType"), // Required
+ Path: aws.String("pathType"),
+ }
+ resp, err := svc.CreateRole(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_CreateSAMLProvider() {
+ svc := iam.New(session.New())
+
+ params := &iam.CreateSAMLProviderInput{
+ Name: aws.String("SAMLProviderNameType"), // Required
+ SAMLMetadataDocument: aws.String("SAMLMetadataDocumentType"), // Required
+ }
+ resp, err := svc.CreateSAMLProvider(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_CreateUser() {
+ svc := iam.New(session.New())
+
+ params := &iam.CreateUserInput{
+ UserName: aws.String("userNameType"), // Required
+ Path: aws.String("pathType"),
+ }
+ resp, err := svc.CreateUser(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_CreateVirtualMFADevice() {
+ svc := iam.New(session.New())
+
+ params := &iam.CreateVirtualMFADeviceInput{
+ VirtualMFADeviceName: aws.String("virtualMFADeviceName"), // Required
+ Path: aws.String("pathType"),
+ }
+ resp, err := svc.CreateVirtualMFADevice(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeactivateMFADevice() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeactivateMFADeviceInput{
+ SerialNumber: aws.String("serialNumberType"), // Required
+ UserName: aws.String("existingUserNameType"), // Required
+ }
+ resp, err := svc.DeactivateMFADevice(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeleteAccessKey() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeleteAccessKeyInput{
+ AccessKeyId: aws.String("accessKeyIdType"), // Required
+ UserName: aws.String("existingUserNameType"),
+ }
+ resp, err := svc.DeleteAccessKey(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeleteAccountAlias() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeleteAccountAliasInput{
+ AccountAlias: aws.String("accountAliasType"), // Required
+ }
+ resp, err := svc.DeleteAccountAlias(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeleteAccountPasswordPolicy() {
+ svc := iam.New(session.New())
+
+ var params *iam.DeleteAccountPasswordPolicyInput
+ resp, err := svc.DeleteAccountPasswordPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeleteGroup() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeleteGroupInput{
+ GroupName: aws.String("groupNameType"), // Required
+ }
+ resp, err := svc.DeleteGroup(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeleteGroupPolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeleteGroupPolicyInput{
+ GroupName: aws.String("groupNameType"), // Required
+ PolicyName: aws.String("policyNameType"), // Required
+ }
+ resp, err := svc.DeleteGroupPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeleteInstanceProfile() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeleteInstanceProfileInput{
+ InstanceProfileName: aws.String("instanceProfileNameType"), // Required
+ }
+ resp, err := svc.DeleteInstanceProfile(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeleteLoginProfile() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeleteLoginProfileInput{
+ UserName: aws.String("userNameType"), // Required
+ }
+ resp, err := svc.DeleteLoginProfile(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeleteOpenIDConnectProvider() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeleteOpenIDConnectProviderInput{
+ OpenIDConnectProviderArn: aws.String("arnType"), // Required
+ }
+ resp, err := svc.DeleteOpenIDConnectProvider(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeletePolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeletePolicyInput{
+ PolicyArn: aws.String("arnType"), // Required
+ }
+ resp, err := svc.DeletePolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeletePolicyVersion() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeletePolicyVersionInput{
+ PolicyArn: aws.String("arnType"), // Required
+ VersionId: aws.String("policyVersionIdType"), // Required
+ }
+ resp, err := svc.DeletePolicyVersion(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeleteRole() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeleteRoleInput{
+ RoleName: aws.String("roleNameType"), // Required
+ }
+ resp, err := svc.DeleteRole(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeleteRolePolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeleteRolePolicyInput{
+ PolicyName: aws.String("policyNameType"), // Required
+ RoleName: aws.String("roleNameType"), // Required
+ }
+ resp, err := svc.DeleteRolePolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeleteSAMLProvider() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeleteSAMLProviderInput{
+ SAMLProviderArn: aws.String("arnType"), // Required
+ }
+ resp, err := svc.DeleteSAMLProvider(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeleteSSHPublicKey() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeleteSSHPublicKeyInput{
+ SSHPublicKeyId: aws.String("publicKeyIdType"), // Required
+ UserName: aws.String("userNameType"), // Required
+ }
+ resp, err := svc.DeleteSSHPublicKey(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeleteServerCertificate() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeleteServerCertificateInput{
+ ServerCertificateName: aws.String("serverCertificateNameType"), // Required
+ }
+ resp, err := svc.DeleteServerCertificate(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeleteSigningCertificate() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeleteSigningCertificateInput{
+ CertificateId: aws.String("certificateIdType"), // Required
+ UserName: aws.String("existingUserNameType"),
+ }
+ resp, err := svc.DeleteSigningCertificate(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeleteUser() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeleteUserInput{
+ UserName: aws.String("existingUserNameType"), // Required
+ }
+ resp, err := svc.DeleteUser(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeleteUserPolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeleteUserPolicyInput{
+ PolicyName: aws.String("policyNameType"), // Required
+ UserName: aws.String("existingUserNameType"), // Required
+ }
+ resp, err := svc.DeleteUserPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DeleteVirtualMFADevice() {
+ svc := iam.New(session.New())
+
+ params := &iam.DeleteVirtualMFADeviceInput{
+ SerialNumber: aws.String("serialNumberType"), // Required
+ }
+ resp, err := svc.DeleteVirtualMFADevice(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DetachGroupPolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.DetachGroupPolicyInput{
+ GroupName: aws.String("groupNameType"), // Required
+ PolicyArn: aws.String("arnType"), // Required
+ }
+ resp, err := svc.DetachGroupPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DetachRolePolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.DetachRolePolicyInput{
+ PolicyArn: aws.String("arnType"), // Required
+ RoleName: aws.String("roleNameType"), // Required
+ }
+ resp, err := svc.DetachRolePolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_DetachUserPolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.DetachUserPolicyInput{
+ PolicyArn: aws.String("arnType"), // Required
+ UserName: aws.String("userNameType"), // Required
+ }
+ resp, err := svc.DetachUserPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_EnableMFADevice() {
+ svc := iam.New(session.New())
+
+ params := &iam.EnableMFADeviceInput{
+ AuthenticationCode1: aws.String("authenticationCodeType"), // Required
+ AuthenticationCode2: aws.String("authenticationCodeType"), // Required
+ SerialNumber: aws.String("serialNumberType"), // Required
+ UserName: aws.String("existingUserNameType"), // Required
+ }
+ resp, err := svc.EnableMFADevice(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GenerateCredentialReport() {
+ svc := iam.New(session.New())
+
+ var params *iam.GenerateCredentialReportInput
+ resp, err := svc.GenerateCredentialReport(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetAccessKeyLastUsed() {
+ svc := iam.New(session.New())
+
+ params := &iam.GetAccessKeyLastUsedInput{
+ AccessKeyId: aws.String("accessKeyIdType"), // Required
+ }
+ resp, err := svc.GetAccessKeyLastUsed(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetAccountAuthorizationDetails() {
+ svc := iam.New(session.New())
+
+ params := &iam.GetAccountAuthorizationDetailsInput{
+ Filter: []*string{
+ aws.String("EntityType"), // Required
+ // More values...
+ },
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ }
+ resp, err := svc.GetAccountAuthorizationDetails(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetAccountPasswordPolicy() {
+ svc := iam.New(session.New())
+
+ var params *iam.GetAccountPasswordPolicyInput
+ resp, err := svc.GetAccountPasswordPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetAccountSummary() {
+ svc := iam.New(session.New())
+
+ var params *iam.GetAccountSummaryInput
+ resp, err := svc.GetAccountSummary(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetContextKeysForCustomPolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.GetContextKeysForCustomPolicyInput{
+ PolicyInputList: []*string{ // Required
+ aws.String("policyDocumentType"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.GetContextKeysForCustomPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetContextKeysForPrincipalPolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.GetContextKeysForPrincipalPolicyInput{
+ PolicySourceArn: aws.String("arnType"), // Required
+ PolicyInputList: []*string{
+ aws.String("policyDocumentType"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.GetContextKeysForPrincipalPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetCredentialReport() {
+ svc := iam.New(session.New())
+
+ var params *iam.GetCredentialReportInput
+ resp, err := svc.GetCredentialReport(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetGroup() {
+ svc := iam.New(session.New())
+
+ params := &iam.GetGroupInput{
+ GroupName: aws.String("groupNameType"), // Required
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ }
+ resp, err := svc.GetGroup(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetGroupPolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.GetGroupPolicyInput{
+ GroupName: aws.String("groupNameType"), // Required
+ PolicyName: aws.String("policyNameType"), // Required
+ }
+ resp, err := svc.GetGroupPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetInstanceProfile() {
+ svc := iam.New(session.New())
+
+ params := &iam.GetInstanceProfileInput{
+ InstanceProfileName: aws.String("instanceProfileNameType"), // Required
+ }
+ resp, err := svc.GetInstanceProfile(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetLoginProfile() {
+ svc := iam.New(session.New())
+
+ params := &iam.GetLoginProfileInput{
+ UserName: aws.String("userNameType"), // Required
+ }
+ resp, err := svc.GetLoginProfile(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetOpenIDConnectProvider() {
+ svc := iam.New(session.New())
+
+ params := &iam.GetOpenIDConnectProviderInput{
+ OpenIDConnectProviderArn: aws.String("arnType"), // Required
+ }
+ resp, err := svc.GetOpenIDConnectProvider(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetPolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.GetPolicyInput{
+ PolicyArn: aws.String("arnType"), // Required
+ }
+ resp, err := svc.GetPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetPolicyVersion() {
+ svc := iam.New(session.New())
+
+ params := &iam.GetPolicyVersionInput{
+ PolicyArn: aws.String("arnType"), // Required
+ VersionId: aws.String("policyVersionIdType"), // Required
+ }
+ resp, err := svc.GetPolicyVersion(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetRole() {
+ svc := iam.New(session.New())
+
+ params := &iam.GetRoleInput{
+ RoleName: aws.String("roleNameType"), // Required
+ }
+ resp, err := svc.GetRole(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetRolePolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.GetRolePolicyInput{
+ PolicyName: aws.String("policyNameType"), // Required
+ RoleName: aws.String("roleNameType"), // Required
+ }
+ resp, err := svc.GetRolePolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetSAMLProvider() {
+ svc := iam.New(session.New())
+
+ params := &iam.GetSAMLProviderInput{
+ SAMLProviderArn: aws.String("arnType"), // Required
+ }
+ resp, err := svc.GetSAMLProvider(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetSSHPublicKey() {
+ svc := iam.New(session.New())
+
+ params := &iam.GetSSHPublicKeyInput{
+ Encoding: aws.String("encodingType"), // Required
+ SSHPublicKeyId: aws.String("publicKeyIdType"), // Required
+ UserName: aws.String("userNameType"), // Required
+ }
+ resp, err := svc.GetSSHPublicKey(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetServerCertificate() {
+ svc := iam.New(session.New())
+
+ params := &iam.GetServerCertificateInput{
+ ServerCertificateName: aws.String("serverCertificateNameType"), // Required
+ }
+ resp, err := svc.GetServerCertificate(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetUser() {
+ svc := iam.New(session.New())
+
+ params := &iam.GetUserInput{
+ UserName: aws.String("existingUserNameType"),
+ }
+ resp, err := svc.GetUser(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_GetUserPolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.GetUserPolicyInput{
+ PolicyName: aws.String("policyNameType"), // Required
+ UserName: aws.String("existingUserNameType"), // Required
+ }
+ resp, err := svc.GetUserPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListAccessKeys() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListAccessKeysInput{
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ UserName: aws.String("existingUserNameType"),
+ }
+ resp, err := svc.ListAccessKeys(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListAccountAliases() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListAccountAliasesInput{
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ }
+ resp, err := svc.ListAccountAliases(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListAttachedGroupPolicies() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListAttachedGroupPoliciesInput{
+ GroupName: aws.String("groupNameType"), // Required
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ PathPrefix: aws.String("policyPathType"),
+ }
+ resp, err := svc.ListAttachedGroupPolicies(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListAttachedRolePolicies() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListAttachedRolePoliciesInput{
+ RoleName: aws.String("roleNameType"), // Required
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ PathPrefix: aws.String("policyPathType"),
+ }
+ resp, err := svc.ListAttachedRolePolicies(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListAttachedUserPolicies() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListAttachedUserPoliciesInput{
+ UserName: aws.String("userNameType"), // Required
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ PathPrefix: aws.String("policyPathType"),
+ }
+ resp, err := svc.ListAttachedUserPolicies(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListEntitiesForPolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListEntitiesForPolicyInput{
+ PolicyArn: aws.String("arnType"), // Required
+ EntityFilter: aws.String("EntityType"),
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ PathPrefix: aws.String("pathType"),
+ }
+ resp, err := svc.ListEntitiesForPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListGroupPolicies() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListGroupPoliciesInput{
+ GroupName: aws.String("groupNameType"), // Required
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ }
+ resp, err := svc.ListGroupPolicies(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListGroups() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListGroupsInput{
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ PathPrefix: aws.String("pathPrefixType"),
+ }
+ resp, err := svc.ListGroups(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListGroupsForUser() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListGroupsForUserInput{
+ UserName: aws.String("existingUserNameType"), // Required
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ }
+ resp, err := svc.ListGroupsForUser(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListInstanceProfiles() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListInstanceProfilesInput{
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ PathPrefix: aws.String("pathPrefixType"),
+ }
+ resp, err := svc.ListInstanceProfiles(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListInstanceProfilesForRole() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListInstanceProfilesForRoleInput{
+ RoleName: aws.String("roleNameType"), // Required
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ }
+ resp, err := svc.ListInstanceProfilesForRole(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListMFADevices() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListMFADevicesInput{
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ UserName: aws.String("existingUserNameType"),
+ }
+ resp, err := svc.ListMFADevices(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListOpenIDConnectProviders() {
+ svc := iam.New(session.New())
+
+ var params *iam.ListOpenIDConnectProvidersInput
+ resp, err := svc.ListOpenIDConnectProviders(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListPolicies() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListPoliciesInput{
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ OnlyAttached: aws.Bool(true),
+ PathPrefix: aws.String("policyPathType"),
+ Scope: aws.String("policyScopeType"),
+ }
+ resp, err := svc.ListPolicies(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListPolicyVersions() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListPolicyVersionsInput{
+ PolicyArn: aws.String("arnType"), // Required
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ }
+ resp, err := svc.ListPolicyVersions(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListRolePolicies() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListRolePoliciesInput{
+ RoleName: aws.String("roleNameType"), // Required
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ }
+ resp, err := svc.ListRolePolicies(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListRoles() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListRolesInput{
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ PathPrefix: aws.String("pathPrefixType"),
+ }
+ resp, err := svc.ListRoles(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListSAMLProviders() {
+ svc := iam.New(session.New())
+
+ var params *iam.ListSAMLProvidersInput
+ resp, err := svc.ListSAMLProviders(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListSSHPublicKeys() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListSSHPublicKeysInput{
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ UserName: aws.String("userNameType"),
+ }
+ resp, err := svc.ListSSHPublicKeys(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListServerCertificates() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListServerCertificatesInput{
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ PathPrefix: aws.String("pathPrefixType"),
+ }
+ resp, err := svc.ListServerCertificates(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListSigningCertificates() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListSigningCertificatesInput{
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ UserName: aws.String("existingUserNameType"),
+ }
+ resp, err := svc.ListSigningCertificates(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListUserPolicies() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListUserPoliciesInput{
+ UserName: aws.String("existingUserNameType"), // Required
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ }
+ resp, err := svc.ListUserPolicies(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListUsers() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListUsersInput{
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ PathPrefix: aws.String("pathPrefixType"),
+ }
+ resp, err := svc.ListUsers(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ListVirtualMFADevices() {
+ svc := iam.New(session.New())
+
+ params := &iam.ListVirtualMFADevicesInput{
+ AssignmentStatus: aws.String("assignmentStatusType"),
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ }
+ resp, err := svc.ListVirtualMFADevices(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_PutGroupPolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.PutGroupPolicyInput{
+ GroupName: aws.String("groupNameType"), // Required
+ PolicyDocument: aws.String("policyDocumentType"), // Required
+ PolicyName: aws.String("policyNameType"), // Required
+ }
+ resp, err := svc.PutGroupPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_PutRolePolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.PutRolePolicyInput{
+ PolicyDocument: aws.String("policyDocumentType"), // Required
+ PolicyName: aws.String("policyNameType"), // Required
+ RoleName: aws.String("roleNameType"), // Required
+ }
+ resp, err := svc.PutRolePolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_PutUserPolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.PutUserPolicyInput{
+ PolicyDocument: aws.String("policyDocumentType"), // Required
+ PolicyName: aws.String("policyNameType"), // Required
+ UserName: aws.String("existingUserNameType"), // Required
+ }
+ resp, err := svc.PutUserPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_RemoveClientIDFromOpenIDConnectProvider() {
+ svc := iam.New(session.New())
+
+ params := &iam.RemoveClientIDFromOpenIDConnectProviderInput{
+ ClientID: aws.String("clientIDType"), // Required
+ OpenIDConnectProviderArn: aws.String("arnType"), // Required
+ }
+ resp, err := svc.RemoveClientIDFromOpenIDConnectProvider(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_RemoveRoleFromInstanceProfile() {
+ svc := iam.New(session.New())
+
+ params := &iam.RemoveRoleFromInstanceProfileInput{
+ InstanceProfileName: aws.String("instanceProfileNameType"), // Required
+ RoleName: aws.String("roleNameType"), // Required
+ }
+ resp, err := svc.RemoveRoleFromInstanceProfile(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_RemoveUserFromGroup() {
+ svc := iam.New(session.New())
+
+ params := &iam.RemoveUserFromGroupInput{
+ GroupName: aws.String("groupNameType"), // Required
+ UserName: aws.String("existingUserNameType"), // Required
+ }
+ resp, err := svc.RemoveUserFromGroup(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_ResyncMFADevice() {
+ svc := iam.New(session.New())
+
+ params := &iam.ResyncMFADeviceInput{
+ AuthenticationCode1: aws.String("authenticationCodeType"), // Required
+ AuthenticationCode2: aws.String("authenticationCodeType"), // Required
+ SerialNumber: aws.String("serialNumberType"), // Required
+ UserName: aws.String("existingUserNameType"), // Required
+ }
+ resp, err := svc.ResyncMFADevice(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_SetDefaultPolicyVersion() {
+ svc := iam.New(session.New())
+
+ params := &iam.SetDefaultPolicyVersionInput{
+ PolicyArn: aws.String("arnType"), // Required
+ VersionId: aws.String("policyVersionIdType"), // Required
+ }
+ resp, err := svc.SetDefaultPolicyVersion(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_SimulateCustomPolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.SimulateCustomPolicyInput{
+ ActionNames: []*string{ // Required
+ aws.String("ActionNameType"), // Required
+ // More values...
+ },
+ PolicyInputList: []*string{ // Required
+ aws.String("policyDocumentType"), // Required
+ // More values...
+ },
+ CallerArn: aws.String("ResourceNameType"),
+ ContextEntries: []*iam.ContextEntry{
+ { // Required
+ ContextKeyName: aws.String("ContextKeyNameType"),
+ ContextKeyType: aws.String("ContextKeyTypeEnum"),
+ ContextKeyValues: []*string{
+ aws.String("ContextKeyValueType"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ ResourceArns: []*string{
+ aws.String("ResourceNameType"), // Required
+ // More values...
+ },
+ ResourceOwner: aws.String("ResourceNameType"),
+ ResourcePolicy: aws.String("policyDocumentType"),
+ }
+ resp, err := svc.SimulateCustomPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_SimulatePrincipalPolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.SimulatePrincipalPolicyInput{
+ ActionNames: []*string{ // Required
+ aws.String("ActionNameType"), // Required
+ // More values...
+ },
+ PolicySourceArn: aws.String("arnType"), // Required
+ CallerArn: aws.String("ResourceNameType"),
+ ContextEntries: []*iam.ContextEntry{
+ { // Required
+ ContextKeyName: aws.String("ContextKeyNameType"),
+ ContextKeyType: aws.String("ContextKeyTypeEnum"),
+ ContextKeyValues: []*string{
+ aws.String("ContextKeyValueType"), // Required
+ // More values...
+ },
+ },
+ // More values...
+ },
+ Marker: aws.String("markerType"),
+ MaxItems: aws.Int64(1),
+ PolicyInputList: []*string{
+ aws.String("policyDocumentType"), // Required
+ // More values...
+ },
+ ResourceArns: []*string{
+ aws.String("ResourceNameType"), // Required
+ // More values...
+ },
+ ResourceOwner: aws.String("ResourceNameType"),
+ ResourcePolicy: aws.String("policyDocumentType"),
+ }
+ resp, err := svc.SimulatePrincipalPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_UpdateAccessKey() {
+ svc := iam.New(session.New())
+
+ params := &iam.UpdateAccessKeyInput{
+ AccessKeyId: aws.String("accessKeyIdType"), // Required
+ Status: aws.String("statusType"), // Required
+ UserName: aws.String("existingUserNameType"),
+ }
+ resp, err := svc.UpdateAccessKey(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_UpdateAccountPasswordPolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.UpdateAccountPasswordPolicyInput{
+ AllowUsersToChangePassword: aws.Bool(true),
+ HardExpiry: aws.Bool(true),
+ MaxPasswordAge: aws.Int64(1),
+ MinimumPasswordLength: aws.Int64(1),
+ PasswordReusePrevention: aws.Int64(1),
+ RequireLowercaseCharacters: aws.Bool(true),
+ RequireNumbers: aws.Bool(true),
+ RequireSymbols: aws.Bool(true),
+ RequireUppercaseCharacters: aws.Bool(true),
+ }
+ resp, err := svc.UpdateAccountPasswordPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_UpdateAssumeRolePolicy() {
+ svc := iam.New(session.New())
+
+ params := &iam.UpdateAssumeRolePolicyInput{
+ PolicyDocument: aws.String("policyDocumentType"), // Required
+ RoleName: aws.String("roleNameType"), // Required
+ }
+ resp, err := svc.UpdateAssumeRolePolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_UpdateGroup() {
+ svc := iam.New(session.New())
+
+ params := &iam.UpdateGroupInput{
+ GroupName: aws.String("groupNameType"), // Required
+ NewGroupName: aws.String("groupNameType"),
+ NewPath: aws.String("pathType"),
+ }
+ resp, err := svc.UpdateGroup(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_UpdateLoginProfile() {
+ svc := iam.New(session.New())
+
+ params := &iam.UpdateLoginProfileInput{
+ UserName: aws.String("userNameType"), // Required
+ Password: aws.String("passwordType"),
+ PasswordResetRequired: aws.Bool(true),
+ }
+ resp, err := svc.UpdateLoginProfile(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_UpdateOpenIDConnectProviderThumbprint() {
+ svc := iam.New(session.New())
+
+ params := &iam.UpdateOpenIDConnectProviderThumbprintInput{
+ OpenIDConnectProviderArn: aws.String("arnType"), // Required
+ ThumbprintList: []*string{ // Required
+ aws.String("thumbprintType"), // Required
+ // More values...
+ },
+ }
+ resp, err := svc.UpdateOpenIDConnectProviderThumbprint(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_UpdateSAMLProvider() {
+ svc := iam.New(session.New())
+
+ params := &iam.UpdateSAMLProviderInput{
+ SAMLMetadataDocument: aws.String("SAMLMetadataDocumentType"), // Required
+ SAMLProviderArn: aws.String("arnType"), // Required
+ }
+ resp, err := svc.UpdateSAMLProvider(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_UpdateSSHPublicKey() {
+ svc := iam.New(session.New())
+
+ params := &iam.UpdateSSHPublicKeyInput{
+ SSHPublicKeyId: aws.String("publicKeyIdType"), // Required
+ Status: aws.String("statusType"), // Required
+ UserName: aws.String("userNameType"), // Required
+ }
+ resp, err := svc.UpdateSSHPublicKey(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_UpdateServerCertificate() {
+ svc := iam.New(session.New())
+
+ params := &iam.UpdateServerCertificateInput{
+ ServerCertificateName: aws.String("serverCertificateNameType"), // Required
+ NewPath: aws.String("pathType"),
+ NewServerCertificateName: aws.String("serverCertificateNameType"),
+ }
+ resp, err := svc.UpdateServerCertificate(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_UpdateSigningCertificate() {
+ svc := iam.New(session.New())
+
+ params := &iam.UpdateSigningCertificateInput{
+ CertificateId: aws.String("certificateIdType"), // Required
+ Status: aws.String("statusType"), // Required
+ UserName: aws.String("existingUserNameType"),
+ }
+ resp, err := svc.UpdateSigningCertificate(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_UpdateUser() {
+ svc := iam.New(session.New())
+
+ params := &iam.UpdateUserInput{
+ UserName: aws.String("existingUserNameType"), // Required
+ NewPath: aws.String("pathType"),
+ NewUserName: aws.String("userNameType"),
+ }
+ resp, err := svc.UpdateUser(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_UploadSSHPublicKey() {
+ svc := iam.New(session.New())
+
+ params := &iam.UploadSSHPublicKeyInput{
+ SSHPublicKeyBody: aws.String("publicKeyMaterialType"), // Required
+ UserName: aws.String("userNameType"), // Required
+ }
+ resp, err := svc.UploadSSHPublicKey(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_UploadServerCertificate() {
+ svc := iam.New(session.New())
+
+ params := &iam.UploadServerCertificateInput{
+ CertificateBody: aws.String("certificateBodyType"), // Required
+ PrivateKey: aws.String("privateKeyType"), // Required
+ ServerCertificateName: aws.String("serverCertificateNameType"), // Required
+ CertificateChain: aws.String("certificateChainType"),
+ Path: aws.String("pathType"),
+ }
+ resp, err := svc.UploadServerCertificate(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleIAM_UploadSigningCertificate() {
+ svc := iam.New(session.New())
+
+ params := &iam.UploadSigningCertificateInput{
+ CertificateBody: aws.String("certificateBodyType"), // Required
+ UserName: aws.String("existingUserNameType"),
+ }
+ resp, err := svc.UploadSigningCertificate(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/iam/iamiface/interface.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/iam/iamiface/interface.go
index d2dcd0ca2..3df15410a 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/iam/iamiface/interface.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/iam/iamiface/interface.go
@@ -506,3 +506,5 @@ type IAMAPI interface {
UploadSigningCertificate(*iam.UploadSigningCertificateInput) (*iam.UploadSigningCertificateOutput, error)
}
+
+var _ IAMAPI = (*iam.IAM)(nil)
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/iam/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/iam/service.go
index 23ea20a6a..e8cfb9c6f 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/iam/service.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/iam/service.go
@@ -4,12 +4,11 @@ package iam
import (
"github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/defaults"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/aws/service"
- "github.com/aws/aws-sdk-go/aws/service/serviceinfo"
- "github.com/aws/aws-sdk-go/internal/protocol/query"
- "github.com/aws/aws-sdk-go/internal/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+ "github.com/aws/aws-sdk-go/private/signer/v4"
)
// AWS Identity and Access Management (IAM) is a web service that you can use
@@ -60,40 +59,64 @@ import (
// secure your AWS resources. Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html).
// This set of topics walk you through the process of signing a request using
// an access key ID and secret access key.
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
type IAM struct {
- *service.Service
+ *client.Client
}
-// Used for custom service initialization logic
-var initService func(*service.Service)
+// Used for custom client initialization logic
+var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
-// New returns a new IAM client.
-func New(config *aws.Config) *IAM {
- service := &service.Service{
- ServiceInfo: serviceinfo.ServiceInfo{
- Config: defaults.DefaultConfig.Merge(config),
- ServiceName: "iam",
- APIVersion: "2010-05-08",
- },
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "iam"
+
+// New creates a new instance of the IAM client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an IAM client from just a session.
+//     svc := iam.New(mySession)
+//
+//     // Create an IAM client with additional configuration
+//     svc := iam.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *IAM {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *IAM {
+ svc := &IAM{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2010-05-08",
+ },
+ handlers,
+ ),
}
- service.Initialize()
// Handlers
- service.Handlers.Sign.PushBack(v4.Sign)
- service.Handlers.Build.PushBack(query.Build)
- service.Handlers.Unmarshal.PushBack(query.Unmarshal)
- service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
- service.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
- // Run custom service initialization if present
- if initService != nil {
- initService(service)
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
}
- return &IAM{service}
+ return svc
}
// newRequest creates a new request for a IAM operation and runs any
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
index 0feec1ce7..c3a2702da 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
@@ -30,13 +30,13 @@ func buildGetBucketLocation(r *request.Request) {
}
func populateLocationConstraint(r *request.Request) {
- if r.ParamsFilled() && aws.StringValue(r.Service.Config.Region) != "us-east-1" {
+ if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" {
in := r.Params.(*CreateBucketInput)
if in.CreateBucketConfiguration == nil {
r.Params = awsutil.CopyOf(r.Params)
in = r.Params.(*CreateBucketInput)
in.CreateBucketConfiguration = &CreateBucketConfiguration{
- LocationConstraint: r.Service.Config.Region,
+ LocationConstraint: r.Config.Region,
}
}
}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go
new file mode 100644
index 000000000..83baa00fe
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go
@@ -0,0 +1,75 @@
+package s3_test
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/awstesting/unit"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/stretchr/testify/assert"
+)
+
+var s3LocationTests = []struct {
+ body string
+ loc string
+}{
+ {``, ``},
+ {`EU`, `EU`},
+}
+
+func TestGetBucketLocation(t *testing.T) {
+ for _, test := range s3LocationTests {
+ s := s3.New(unit.Session)
+ s.Handlers.Send.Clear()
+ s.Handlers.Send.PushBack(func(r *request.Request) {
+ reader := ioutil.NopCloser(bytes.NewReader([]byte(test.body)))
+ r.HTTPResponse = &http.Response{StatusCode: 200, Body: reader}
+ })
+
+ resp, err := s.GetBucketLocation(&s3.GetBucketLocationInput{Bucket: aws.String("bucket")})
+ assert.NoError(t, err)
+ if test.loc == "" {
+ assert.Nil(t, resp.LocationConstraint)
+ } else {
+ assert.Equal(t, test.loc, *resp.LocationConstraint)
+ }
+ }
+}
+
+func TestPopulateLocationConstraint(t *testing.T) {
+ s := s3.New(unit.Session)
+ in := &s3.CreateBucketInput{
+ Bucket: aws.String("bucket"),
+ }
+ req, _ := s.CreateBucketRequest(in)
+ err := req.Build()
+ assert.NoError(t, err)
+ assert.Equal(t, "mock-region", awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint")[0])
+ assert.Nil(t, in.CreateBucketConfiguration) // don't modify original params
+}
+
+func TestNoPopulateLocationConstraintIfProvided(t *testing.T) {
+ s := s3.New(unit.Session)
+ req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{
+ Bucket: aws.String("bucket"),
+ CreateBucketConfiguration: &s3.CreateBucketConfiguration{},
+ })
+ err := req.Build()
+ assert.NoError(t, err)
+ assert.Equal(t, 0, len(awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint")))
+}
+
+func TestNoPopulateLocationConstraintIfClassic(t *testing.T) {
+ s := s3.New(unit.Session, &aws.Config{Region: aws.String("us-east-1")})
+ req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{
+ Bucket: aws.String("bucket"),
+ })
+ err := req.Build()
+ assert.NoError(t, err)
+ assert.Equal(t, 0, len(awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint")))
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations.go
index db3005e1a..eaa4af8ea 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations.go
@@ -1,22 +1,22 @@
package s3
import (
+ "github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/aws/service"
)
func init() {
- initService = func(s *service.Service) {
+ initClient = func(c *client.Client) {
// Support building custom host-style bucket endpoints
- s.Handlers.Build.PushFront(updateHostWithBucket)
+ c.Handlers.Build.PushFront(updateHostWithBucket)
// Require SSL when using SSE keys
- s.Handlers.Validate.PushBack(validateSSERequiresSSL)
- s.Handlers.Build.PushBack(computeSSEKeys)
+ c.Handlers.Validate.PushBack(validateSSERequiresSSL)
+ c.Handlers.Build.PushBack(computeSSEKeys)
// S3 uses custom error unmarshaling logic
- s.Handlers.UnmarshalError.Clear()
- s.Handlers.UnmarshalError.PushBack(unmarshalError)
+ c.Handlers.UnmarshalError.Clear()
+ c.Handlers.UnmarshalError.PushBack(unmarshalError)
}
initRequest = func(r *request.Request) {
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations_test.go
new file mode 100644
index 000000000..5c60822a7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations_test.go
@@ -0,0 +1,92 @@
+package s3_test
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "io/ioutil"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/awstesting/unit"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/stretchr/testify/assert"
+)
+
+func assertMD5(t *testing.T, req *request.Request) {
+ err := req.Build()
+ assert.NoError(t, err)
+
+ b, _ := ioutil.ReadAll(req.HTTPRequest.Body)
+ out := md5.Sum(b)
+ assert.NotEmpty(t, b)
+ assert.Equal(t, base64.StdEncoding.EncodeToString(out[:]), req.HTTPRequest.Header.Get("Content-MD5"))
+}
+
+func TestMD5InPutBucketCors(t *testing.T) {
+ svc := s3.New(unit.Session)
+ req, _ := svc.PutBucketCorsRequest(&s3.PutBucketCorsInput{
+ Bucket: aws.String("bucketname"),
+ CORSConfiguration: &s3.CORSConfiguration{
+ CORSRules: []*s3.CORSRule{
+ {
+ AllowedMethods: []*string{aws.String("GET")},
+ AllowedOrigins: []*string{aws.String("*")},
+ },
+ },
+ },
+ })
+ assertMD5(t, req)
+}
+
+func TestMD5InPutBucketLifecycle(t *testing.T) {
+ svc := s3.New(unit.Session)
+ req, _ := svc.PutBucketLifecycleRequest(&s3.PutBucketLifecycleInput{
+ Bucket: aws.String("bucketname"),
+ LifecycleConfiguration: &s3.LifecycleConfiguration{
+ Rules: []*s3.Rule{
+ {
+ ID: aws.String("ID"),
+ Prefix: aws.String("Prefix"),
+ Status: aws.String("Enabled"),
+ },
+ },
+ },
+ })
+ assertMD5(t, req)
+}
+
+func TestMD5InPutBucketPolicy(t *testing.T) {
+ svc := s3.New(unit.Session)
+ req, _ := svc.PutBucketPolicyRequest(&s3.PutBucketPolicyInput{
+ Bucket: aws.String("bucketname"),
+ Policy: aws.String("{}"),
+ })
+ assertMD5(t, req)
+}
+
+func TestMD5InPutBucketTagging(t *testing.T) {
+ svc := s3.New(unit.Session)
+ req, _ := svc.PutBucketTaggingRequest(&s3.PutBucketTaggingInput{
+ Bucket: aws.String("bucketname"),
+ Tagging: &s3.Tagging{
+ TagSet: []*s3.Tag{
+ {Key: aws.String("KEY"), Value: aws.String("VALUE")},
+ },
+ },
+ })
+ assertMD5(t, req)
+}
+
+func TestMD5InDeleteObjects(t *testing.T) {
+ svc := s3.New(unit.Session)
+ req, _ := svc.DeleteObjectsRequest(&s3.DeleteObjectsInput{
+ Bucket: aws.String("bucketname"),
+ Delete: &s3.Delete{
+ Objects: []*s3.ObjectIdentifier{
+ {Key: aws.String("key")},
+ },
+ },
+ })
+ assertMD5(t, req)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/examples_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/examples_test.go
new file mode 100644
index 000000000..90cb2e63c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/examples_test.go
@@ -0,0 +1,1600 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package s3_test
+
+import (
+ "bytes"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
+var _ time.Duration
+var _ bytes.Buffer
+
+func ExampleS3_AbortMultipartUpload() {
+ svc := s3.New(session.New())
+
+ params := &s3.AbortMultipartUploadInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ UploadId: aws.String("MultipartUploadId"), // Required
+ RequestPayer: aws.String("RequestPayer"),
+ }
+ resp, err := svc.AbortMultipartUpload(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_CompleteMultipartUpload() {
+ svc := s3.New(session.New())
+
+ params := &s3.CompleteMultipartUploadInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ UploadId: aws.String("MultipartUploadId"), // Required
+ MultipartUpload: &s3.CompletedMultipartUpload{
+ Parts: []*s3.CompletedPart{
+ { // Required
+ ETag: aws.String("ETag"),
+ PartNumber: aws.Int64(1),
+ },
+ // More values...
+ },
+ },
+ RequestPayer: aws.String("RequestPayer"),
+ }
+ resp, err := svc.CompleteMultipartUpload(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_CopyObject() {
+ svc := s3.New(session.New())
+
+ params := &s3.CopyObjectInput{
+ Bucket: aws.String("BucketName"), // Required
+ CopySource: aws.String("CopySource"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ ACL: aws.String("ObjectCannedACL"),
+ CacheControl: aws.String("CacheControl"),
+ ContentDisposition: aws.String("ContentDisposition"),
+ ContentEncoding: aws.String("ContentEncoding"),
+ ContentLanguage: aws.String("ContentLanguage"),
+ ContentType: aws.String("ContentType"),
+ CopySourceIfMatch: aws.String("CopySourceIfMatch"),
+ CopySourceIfModifiedSince: aws.Time(time.Now()),
+ CopySourceIfNoneMatch: aws.String("CopySourceIfNoneMatch"),
+ CopySourceIfUnmodifiedSince: aws.Time(time.Now()),
+ CopySourceSSECustomerAlgorithm: aws.String("CopySourceSSECustomerAlgorithm"),
+ CopySourceSSECustomerKey: aws.String("CopySourceSSECustomerKey"),
+ CopySourceSSECustomerKeyMD5: aws.String("CopySourceSSECustomerKeyMD5"),
+ Expires: aws.Time(time.Now()),
+ GrantFullControl: aws.String("GrantFullControl"),
+ GrantRead: aws.String("GrantRead"),
+ GrantReadACP: aws.String("GrantReadACP"),
+ GrantWriteACP: aws.String("GrantWriteACP"),
+ Metadata: map[string]*string{
+ "Key": aws.String("MetadataValue"), // Required
+ // More values...
+ },
+ MetadataDirective: aws.String("MetadataDirective"),
+ RequestPayer: aws.String("RequestPayer"),
+ SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"),
+ SSECustomerKey: aws.String("SSECustomerKey"),
+ SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"),
+ SSEKMSKeyId: aws.String("SSEKMSKeyId"),
+ ServerSideEncryption: aws.String("ServerSideEncryption"),
+ StorageClass: aws.String("StorageClass"),
+ WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"),
+ }
+ resp, err := svc.CopyObject(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_CreateBucket() {
+ svc := s3.New(session.New())
+
+ params := &s3.CreateBucketInput{
+ Bucket: aws.String("BucketName"), // Required
+ ACL: aws.String("BucketCannedACL"),
+ CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+ LocationConstraint: aws.String("BucketLocationConstraint"),
+ },
+ GrantFullControl: aws.String("GrantFullControl"),
+ GrantRead: aws.String("GrantRead"),
+ GrantReadACP: aws.String("GrantReadACP"),
+ GrantWrite: aws.String("GrantWrite"),
+ GrantWriteACP: aws.String("GrantWriteACP"),
+ }
+ resp, err := svc.CreateBucket(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_CreateMultipartUpload() {
+ svc := s3.New(session.New())
+
+ params := &s3.CreateMultipartUploadInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ ACL: aws.String("ObjectCannedACL"),
+ CacheControl: aws.String("CacheControl"),
+ ContentDisposition: aws.String("ContentDisposition"),
+ ContentEncoding: aws.String("ContentEncoding"),
+ ContentLanguage: aws.String("ContentLanguage"),
+ ContentType: aws.String("ContentType"),
+ Expires: aws.Time(time.Now()),
+ GrantFullControl: aws.String("GrantFullControl"),
+ GrantRead: aws.String("GrantRead"),
+ GrantReadACP: aws.String("GrantReadACP"),
+ GrantWriteACP: aws.String("GrantWriteACP"),
+ Metadata: map[string]*string{
+ "Key": aws.String("MetadataValue"), // Required
+ // More values...
+ },
+ RequestPayer: aws.String("RequestPayer"),
+ SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"),
+ SSECustomerKey: aws.String("SSECustomerKey"),
+ SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"),
+ SSEKMSKeyId: aws.String("SSEKMSKeyId"),
+ ServerSideEncryption: aws.String("ServerSideEncryption"),
+ StorageClass: aws.String("StorageClass"),
+ WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"),
+ }
+ resp, err := svc.CreateMultipartUpload(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_DeleteBucket() {
+ svc := s3.New(session.New())
+
+ params := &s3.DeleteBucketInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.DeleteBucket(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_DeleteBucketCors() {
+ svc := s3.New(session.New())
+
+ params := &s3.DeleteBucketCorsInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.DeleteBucketCors(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_DeleteBucketLifecycle() {
+ svc := s3.New(session.New())
+
+ params := &s3.DeleteBucketLifecycleInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.DeleteBucketLifecycle(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_DeleteBucketPolicy() {
+ svc := s3.New(session.New())
+
+ params := &s3.DeleteBucketPolicyInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.DeleteBucketPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_DeleteBucketReplication() {
+ svc := s3.New(session.New())
+
+ params := &s3.DeleteBucketReplicationInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.DeleteBucketReplication(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_DeleteBucketTagging() {
+ svc := s3.New(session.New())
+
+ params := &s3.DeleteBucketTaggingInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.DeleteBucketTagging(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_DeleteBucketWebsite() {
+ svc := s3.New(session.New())
+
+ params := &s3.DeleteBucketWebsiteInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.DeleteBucketWebsite(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_DeleteObject() {
+ svc := s3.New(session.New())
+
+ params := &s3.DeleteObjectInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ MFA: aws.String("MFA"),
+ RequestPayer: aws.String("RequestPayer"),
+ VersionId: aws.String("ObjectVersionId"),
+ }
+ resp, err := svc.DeleteObject(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_DeleteObjects() {
+ svc := s3.New(session.New())
+
+ params := &s3.DeleteObjectsInput{
+ Bucket: aws.String("BucketName"), // Required
+ Delete: &s3.Delete{ // Required
+ Objects: []*s3.ObjectIdentifier{ // Required
+ { // Required
+ Key: aws.String("ObjectKey"), // Required
+ VersionId: aws.String("ObjectVersionId"),
+ },
+ // More values...
+ },
+ Quiet: aws.Bool(true),
+ },
+ MFA: aws.String("MFA"),
+ RequestPayer: aws.String("RequestPayer"),
+ }
+ resp, err := svc.DeleteObjects(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_GetBucketAcl() {
+ svc := s3.New(session.New())
+
+ params := &s3.GetBucketAclInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketAcl(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_GetBucketCors() {
+ svc := s3.New(session.New())
+
+ params := &s3.GetBucketCorsInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketCors(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_GetBucketLifecycle() {
+ svc := s3.New(session.New())
+
+ params := &s3.GetBucketLifecycleInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketLifecycle(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_GetBucketLifecycleConfiguration() {
+ svc := s3.New(session.New())
+
+ params := &s3.GetBucketLifecycleConfigurationInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketLifecycleConfiguration(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_GetBucketLocation() {
+ svc := s3.New(session.New())
+
+ params := &s3.GetBucketLocationInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketLocation(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_GetBucketLogging() {
+ svc := s3.New(session.New())
+
+ params := &s3.GetBucketLoggingInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketLogging(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_GetBucketNotification() {
+ svc := s3.New(session.New())
+
+ params := &s3.GetBucketNotificationConfigurationRequest{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketNotification(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_GetBucketNotificationConfiguration() {
+ svc := s3.New(session.New())
+
+ params := &s3.GetBucketNotificationConfigurationRequest{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketNotificationConfiguration(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_GetBucketPolicy() {
+ svc := s3.New(session.New())
+
+ params := &s3.GetBucketPolicyInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_GetBucketReplication() {
+ svc := s3.New(session.New())
+
+ params := &s3.GetBucketReplicationInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketReplication(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_GetBucketRequestPayment() {
+ svc := s3.New(session.New())
+
+ params := &s3.GetBucketRequestPaymentInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketRequestPayment(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_GetBucketTagging() {
+ svc := s3.New(session.New())
+
+ params := &s3.GetBucketTaggingInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketTagging(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_GetBucketVersioning() {
+ svc := s3.New(session.New())
+
+ params := &s3.GetBucketVersioningInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketVersioning(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_GetBucketWebsite() {
+ svc := s3.New(session.New())
+
+ params := &s3.GetBucketWebsiteInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.GetBucketWebsite(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_GetObject() {
+ svc := s3.New(session.New())
+
+ params := &s3.GetObjectInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ IfMatch: aws.String("IfMatch"),
+ IfModifiedSince: aws.Time(time.Now()),
+ IfNoneMatch: aws.String("IfNoneMatch"),
+ IfUnmodifiedSince: aws.Time(time.Now()),
+ Range: aws.String("Range"),
+ RequestPayer: aws.String("RequestPayer"),
+ ResponseCacheControl: aws.String("ResponseCacheControl"),
+ ResponseContentDisposition: aws.String("ResponseContentDisposition"),
+ ResponseContentEncoding: aws.String("ResponseContentEncoding"),
+ ResponseContentLanguage: aws.String("ResponseContentLanguage"),
+ ResponseContentType: aws.String("ResponseContentType"),
+ ResponseExpires: aws.Time(time.Now()),
+ SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"),
+ SSECustomerKey: aws.String("SSECustomerKey"),
+ SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"),
+ VersionId: aws.String("ObjectVersionId"),
+ }
+ resp, err := svc.GetObject(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_GetObjectAcl() {
+ svc := s3.New(session.New())
+
+ params := &s3.GetObjectAclInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ RequestPayer: aws.String("RequestPayer"),
+ VersionId: aws.String("ObjectVersionId"),
+ }
+ resp, err := svc.GetObjectAcl(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_GetObjectTorrent() {
+ svc := s3.New(session.New())
+
+ params := &s3.GetObjectTorrentInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ RequestPayer: aws.String("RequestPayer"),
+ }
+ resp, err := svc.GetObjectTorrent(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_HeadBucket() {
+ svc := s3.New(session.New())
+
+ params := &s3.HeadBucketInput{
+ Bucket: aws.String("BucketName"), // Required
+ }
+ resp, err := svc.HeadBucket(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_HeadObject() {
+ svc := s3.New(session.New())
+
+ params := &s3.HeadObjectInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ IfMatch: aws.String("IfMatch"),
+ IfModifiedSince: aws.Time(time.Now()),
+ IfNoneMatch: aws.String("IfNoneMatch"),
+ IfUnmodifiedSince: aws.Time(time.Now()),
+ Range: aws.String("Range"),
+ RequestPayer: aws.String("RequestPayer"),
+ SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"),
+ SSECustomerKey: aws.String("SSECustomerKey"),
+ SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"),
+ VersionId: aws.String("ObjectVersionId"),
+ }
+ resp, err := svc.HeadObject(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_ListBuckets() {
+ svc := s3.New(session.New())
+
+ var params *s3.ListBucketsInput
+ resp, err := svc.ListBuckets(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_ListMultipartUploads() {
+ svc := s3.New(session.New())
+
+ params := &s3.ListMultipartUploadsInput{
+ Bucket: aws.String("BucketName"), // Required
+ Delimiter: aws.String("Delimiter"),
+ EncodingType: aws.String("EncodingType"),
+ KeyMarker: aws.String("KeyMarker"),
+ MaxUploads: aws.Int64(1),
+ Prefix: aws.String("Prefix"),
+ UploadIdMarker: aws.String("UploadIdMarker"),
+ }
+ resp, err := svc.ListMultipartUploads(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_ListObjectVersions() {
+ svc := s3.New(session.New())
+
+ params := &s3.ListObjectVersionsInput{
+ Bucket: aws.String("BucketName"), // Required
+ Delimiter: aws.String("Delimiter"),
+ EncodingType: aws.String("EncodingType"),
+ KeyMarker: aws.String("KeyMarker"),
+ MaxKeys: aws.Int64(1),
+ Prefix: aws.String("Prefix"),
+ VersionIdMarker: aws.String("VersionIdMarker"),
+ }
+ resp, err := svc.ListObjectVersions(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_ListObjects() {
+ svc := s3.New(session.New())
+
+ params := &s3.ListObjectsInput{
+ Bucket: aws.String("BucketName"), // Required
+ Delimiter: aws.String("Delimiter"),
+ EncodingType: aws.String("EncodingType"),
+ Marker: aws.String("Marker"),
+ MaxKeys: aws.Int64(1),
+ Prefix: aws.String("Prefix"),
+ }
+ resp, err := svc.ListObjects(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_ListParts() {
+ svc := s3.New(session.New())
+
+ params := &s3.ListPartsInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ UploadId: aws.String("MultipartUploadId"), // Required
+ MaxParts: aws.Int64(1),
+ PartNumberMarker: aws.Int64(1),
+ RequestPayer: aws.String("RequestPayer"),
+ }
+ resp, err := svc.ListParts(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_PutBucketAcl() {
+ svc := s3.New(session.New())
+
+ params := &s3.PutBucketAclInput{
+ Bucket: aws.String("BucketName"), // Required
+ ACL: aws.String("BucketCannedACL"),
+ AccessControlPolicy: &s3.AccessControlPolicy{
+ Grants: []*s3.Grant{
+ { // Required
+ Grantee: &s3.Grantee{
+ Type: aws.String("Type"), // Required
+ DisplayName: aws.String("DisplayName"),
+ EmailAddress: aws.String("EmailAddress"),
+ ID: aws.String("ID"),
+ URI: aws.String("URI"),
+ },
+ Permission: aws.String("Permission"),
+ },
+ // More values...
+ },
+ Owner: &s3.Owner{
+ DisplayName: aws.String("DisplayName"),
+ ID: aws.String("ID"),
+ },
+ },
+ GrantFullControl: aws.String("GrantFullControl"),
+ GrantRead: aws.String("GrantRead"),
+ GrantReadACP: aws.String("GrantReadACP"),
+ GrantWrite: aws.String("GrantWrite"),
+ GrantWriteACP: aws.String("GrantWriteACP"),
+ }
+ resp, err := svc.PutBucketAcl(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_PutBucketCors() {
+ svc := s3.New(session.New())
+
+ params := &s3.PutBucketCorsInput{
+ Bucket: aws.String("BucketName"), // Required
+ CORSConfiguration: &s3.CORSConfiguration{ // Required
+ CORSRules: []*s3.CORSRule{ // Required
+ { // Required
+ AllowedMethods: []*string{ // Required
+ aws.String("AllowedMethod"), // Required
+ // More values...
+ },
+ AllowedOrigins: []*string{ // Required
+ aws.String("AllowedOrigin"), // Required
+ // More values...
+ },
+ AllowedHeaders: []*string{
+ aws.String("AllowedHeader"), // Required
+ // More values...
+ },
+ ExposeHeaders: []*string{
+ aws.String("ExposeHeader"), // Required
+ // More values...
+ },
+ MaxAgeSeconds: aws.Int64(1),
+ },
+ // More values...
+ },
+ },
+ }
+ resp, err := svc.PutBucketCors(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_PutBucketLifecycle() {
+ svc := s3.New(session.New())
+
+ params := &s3.PutBucketLifecycleInput{
+ Bucket: aws.String("BucketName"), // Required
+ LifecycleConfiguration: &s3.LifecycleConfiguration{
+ Rules: []*s3.Rule{ // Required
+ { // Required
+ Prefix: aws.String("Prefix"), // Required
+ Status: aws.String("ExpirationStatus"), // Required
+ Expiration: &s3.LifecycleExpiration{
+ Date: aws.Time(time.Now()),
+ Days: aws.Int64(1),
+ },
+ ID: aws.String("ID"),
+ NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{
+ NoncurrentDays: aws.Int64(1),
+ },
+ NoncurrentVersionTransition: &s3.NoncurrentVersionTransition{
+ NoncurrentDays: aws.Int64(1),
+ StorageClass: aws.String("TransitionStorageClass"),
+ },
+ Transition: &s3.Transition{
+ Date: aws.Time(time.Now()),
+ Days: aws.Int64(1),
+ StorageClass: aws.String("TransitionStorageClass"),
+ },
+ },
+ // More values...
+ },
+ },
+ }
+ resp, err := svc.PutBucketLifecycle(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_PutBucketLifecycleConfiguration() {
+ svc := s3.New(session.New())
+
+ params := &s3.PutBucketLifecycleConfigurationInput{
+ Bucket: aws.String("BucketName"), // Required
+ LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
+ Rules: []*s3.LifecycleRule{ // Required
+ { // Required
+ Prefix: aws.String("Prefix"), // Required
+ Status: aws.String("ExpirationStatus"), // Required
+ Expiration: &s3.LifecycleExpiration{
+ Date: aws.Time(time.Now()),
+ Days: aws.Int64(1),
+ },
+ ID: aws.String("ID"),
+ NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{
+ NoncurrentDays: aws.Int64(1),
+ },
+ NoncurrentVersionTransitions: []*s3.NoncurrentVersionTransition{
+ { // Required
+ NoncurrentDays: aws.Int64(1),
+ StorageClass: aws.String("TransitionStorageClass"),
+ },
+ // More values...
+ },
+ Transitions: []*s3.Transition{
+ { // Required
+ Date: aws.Time(time.Now()),
+ Days: aws.Int64(1),
+ StorageClass: aws.String("TransitionStorageClass"),
+ },
+ // More values...
+ },
+ },
+ // More values...
+ },
+ },
+ }
+ resp, err := svc.PutBucketLifecycleConfiguration(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_PutBucketLogging() {
+ svc := s3.New(session.New())
+
+ params := &s3.PutBucketLoggingInput{
+ Bucket: aws.String("BucketName"), // Required
+ BucketLoggingStatus: &s3.BucketLoggingStatus{ // Required
+ LoggingEnabled: &s3.LoggingEnabled{
+ TargetBucket: aws.String("TargetBucket"),
+ TargetGrants: []*s3.TargetGrant{
+ { // Required
+ Grantee: &s3.Grantee{
+ Type: aws.String("Type"), // Required
+ DisplayName: aws.String("DisplayName"),
+ EmailAddress: aws.String("EmailAddress"),
+ ID: aws.String("ID"),
+ URI: aws.String("URI"),
+ },
+ Permission: aws.String("BucketLogsPermission"),
+ },
+ // More values...
+ },
+ TargetPrefix: aws.String("TargetPrefix"),
+ },
+ },
+ }
+ resp, err := svc.PutBucketLogging(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_PutBucketNotification() {
+ svc := s3.New(session.New())
+
+ params := &s3.PutBucketNotificationInput{
+ Bucket: aws.String("BucketName"), // Required
+ NotificationConfiguration: &s3.NotificationConfigurationDeprecated{ // Required
+ CloudFunctionConfiguration: &s3.CloudFunctionConfiguration{
+ CloudFunction: aws.String("CloudFunction"),
+ Event: aws.String("Event"),
+ Events: []*string{
+ aws.String("Event"), // Required
+ // More values...
+ },
+ Id: aws.String("NotificationId"),
+ InvocationRole: aws.String("CloudFunctionInvocationRole"),
+ },
+ QueueConfiguration: &s3.QueueConfigurationDeprecated{
+ Event: aws.String("Event"),
+ Events: []*string{
+ aws.String("Event"), // Required
+ // More values...
+ },
+ Id: aws.String("NotificationId"),
+ Queue: aws.String("QueueArn"),
+ },
+ TopicConfiguration: &s3.TopicConfigurationDeprecated{
+ Event: aws.String("Event"),
+ Events: []*string{
+ aws.String("Event"), // Required
+ // More values...
+ },
+ Id: aws.String("NotificationId"),
+ Topic: aws.String("TopicArn"),
+ },
+ },
+ }
+ resp, err := svc.PutBucketNotification(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_PutBucketNotificationConfiguration() {
+ svc := s3.New(session.New())
+
+ params := &s3.PutBucketNotificationConfigurationInput{
+ Bucket: aws.String("BucketName"), // Required
+ NotificationConfiguration: &s3.NotificationConfiguration{ // Required
+ LambdaFunctionConfigurations: []*s3.LambdaFunctionConfiguration{
+ { // Required
+ Events: []*string{ // Required
+ aws.String("Event"), // Required
+ // More values...
+ },
+ LambdaFunctionArn: aws.String("LambdaFunctionArn"), // Required
+ Filter: &s3.NotificationConfigurationFilter{
+ Key: &s3.KeyFilter{
+ FilterRules: []*s3.FilterRule{
+ { // Required
+ Name: aws.String("FilterRuleName"),
+ Value: aws.String("FilterRuleValue"),
+ },
+ // More values...
+ },
+ },
+ },
+ Id: aws.String("NotificationId"),
+ },
+ // More values...
+ },
+ QueueConfigurations: []*s3.QueueConfiguration{
+ { // Required
+ Events: []*string{ // Required
+ aws.String("Event"), // Required
+ // More values...
+ },
+ QueueArn: aws.String("QueueArn"), // Required
+ Filter: &s3.NotificationConfigurationFilter{
+ Key: &s3.KeyFilter{
+ FilterRules: []*s3.FilterRule{
+ { // Required
+ Name: aws.String("FilterRuleName"),
+ Value: aws.String("FilterRuleValue"),
+ },
+ // More values...
+ },
+ },
+ },
+ Id: aws.String("NotificationId"),
+ },
+ // More values...
+ },
+ TopicConfigurations: []*s3.TopicConfiguration{
+ { // Required
+ Events: []*string{ // Required
+ aws.String("Event"), // Required
+ // More values...
+ },
+ TopicArn: aws.String("TopicArn"), // Required
+ Filter: &s3.NotificationConfigurationFilter{
+ Key: &s3.KeyFilter{
+ FilterRules: []*s3.FilterRule{
+ { // Required
+ Name: aws.String("FilterRuleName"),
+ Value: aws.String("FilterRuleValue"),
+ },
+ // More values...
+ },
+ },
+ },
+ Id: aws.String("NotificationId"),
+ },
+ // More values...
+ },
+ },
+ }
+ resp, err := svc.PutBucketNotificationConfiguration(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_PutBucketPolicy() {
+ svc := s3.New(session.New())
+
+ params := &s3.PutBucketPolicyInput{
+ Bucket: aws.String("BucketName"), // Required
+ Policy: aws.String("Policy"), // Required
+ }
+ resp, err := svc.PutBucketPolicy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_PutBucketReplication() {
+ svc := s3.New(session.New())
+
+ params := &s3.PutBucketReplicationInput{
+ Bucket: aws.String("BucketName"), // Required
+ ReplicationConfiguration: &s3.ReplicationConfiguration{ // Required
+ Role: aws.String("Role"), // Required
+ Rules: []*s3.ReplicationRule{ // Required
+ { // Required
+ Destination: &s3.Destination{ // Required
+ Bucket: aws.String("BucketName"), // Required
+ StorageClass: aws.String("StorageClass"),
+ },
+ Prefix: aws.String("Prefix"), // Required
+ Status: aws.String("ReplicationRuleStatus"), // Required
+ ID: aws.String("ID"),
+ },
+ // More values...
+ },
+ },
+ }
+ resp, err := svc.PutBucketReplication(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_PutBucketRequestPayment() {
+ svc := s3.New(session.New())
+
+ params := &s3.PutBucketRequestPaymentInput{
+ Bucket: aws.String("BucketName"), // Required
+ RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{ // Required
+ Payer: aws.String("Payer"), // Required
+ },
+ }
+ resp, err := svc.PutBucketRequestPayment(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_PutBucketTagging() {
+ svc := s3.New(session.New())
+
+ params := &s3.PutBucketTaggingInput{
+ Bucket: aws.String("BucketName"), // Required
+ Tagging: &s3.Tagging{ // Required
+ TagSet: []*s3.Tag{ // Required
+ { // Required
+ Key: aws.String("ObjectKey"), // Required
+ Value: aws.String("Value"), // Required
+ },
+ // More values...
+ },
+ },
+ }
+ resp, err := svc.PutBucketTagging(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_PutBucketVersioning() {
+ svc := s3.New(session.New())
+
+ params := &s3.PutBucketVersioningInput{
+ Bucket: aws.String("BucketName"), // Required
+ VersioningConfiguration: &s3.VersioningConfiguration{ // Required
+ MFADelete: aws.String("MFADelete"),
+ Status: aws.String("BucketVersioningStatus"),
+ },
+ MFA: aws.String("MFA"),
+ }
+ resp, err := svc.PutBucketVersioning(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_PutBucketWebsite() {
+ svc := s3.New(session.New())
+
+ params := &s3.PutBucketWebsiteInput{
+ Bucket: aws.String("BucketName"), // Required
+ WebsiteConfiguration: &s3.WebsiteConfiguration{ // Required
+ ErrorDocument: &s3.ErrorDocument{
+ Key: aws.String("ObjectKey"), // Required
+ },
+ IndexDocument: &s3.IndexDocument{
+ Suffix: aws.String("Suffix"), // Required
+ },
+ RedirectAllRequestsTo: &s3.RedirectAllRequestsTo{
+ HostName: aws.String("HostName"), // Required
+ Protocol: aws.String("Protocol"),
+ },
+ RoutingRules: []*s3.RoutingRule{
+ { // Required
+ Redirect: &s3.Redirect{ // Required
+ HostName: aws.String("HostName"),
+ HttpRedirectCode: aws.String("HttpRedirectCode"),
+ Protocol: aws.String("Protocol"),
+ ReplaceKeyPrefixWith: aws.String("ReplaceKeyPrefixWith"),
+ ReplaceKeyWith: aws.String("ReplaceKeyWith"),
+ },
+ Condition: &s3.Condition{
+ HttpErrorCodeReturnedEquals: aws.String("HttpErrorCodeReturnedEquals"),
+ KeyPrefixEquals: aws.String("KeyPrefixEquals"),
+ },
+ },
+ // More values...
+ },
+ },
+ }
+ resp, err := svc.PutBucketWebsite(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_PutObject() {
+ svc := s3.New(session.New())
+
+ params := &s3.PutObjectInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ ACL: aws.String("ObjectCannedACL"),
+ Body: bytes.NewReader([]byte("PAYLOAD")),
+ CacheControl: aws.String("CacheControl"),
+ ContentDisposition: aws.String("ContentDisposition"),
+ ContentEncoding: aws.String("ContentEncoding"),
+ ContentLanguage: aws.String("ContentLanguage"),
+ ContentLength: aws.Int64(1),
+ ContentType: aws.String("ContentType"),
+ Expires: aws.Time(time.Now()),
+ GrantFullControl: aws.String("GrantFullControl"),
+ GrantRead: aws.String("GrantRead"),
+ GrantReadACP: aws.String("GrantReadACP"),
+ GrantWriteACP: aws.String("GrantWriteACP"),
+ Metadata: map[string]*string{
+ "Key": aws.String("MetadataValue"), // Required
+ // More values...
+ },
+ RequestPayer: aws.String("RequestPayer"),
+ SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"),
+ SSECustomerKey: aws.String("SSECustomerKey"),
+ SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"),
+ SSEKMSKeyId: aws.String("SSEKMSKeyId"),
+ ServerSideEncryption: aws.String("ServerSideEncryption"),
+ StorageClass: aws.String("StorageClass"),
+ WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"),
+ }
+ resp, err := svc.PutObject(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_PutObjectAcl() {
+ svc := s3.New(session.New())
+
+ params := &s3.PutObjectAclInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ ACL: aws.String("ObjectCannedACL"),
+ AccessControlPolicy: &s3.AccessControlPolicy{
+ Grants: []*s3.Grant{
+ { // Required
+ Grantee: &s3.Grantee{
+ Type: aws.String("Type"), // Required
+ DisplayName: aws.String("DisplayName"),
+ EmailAddress: aws.String("EmailAddress"),
+ ID: aws.String("ID"),
+ URI: aws.String("URI"),
+ },
+ Permission: aws.String("Permission"),
+ },
+ // More values...
+ },
+ Owner: &s3.Owner{
+ DisplayName: aws.String("DisplayName"),
+ ID: aws.String("ID"),
+ },
+ },
+ GrantFullControl: aws.String("GrantFullControl"),
+ GrantRead: aws.String("GrantRead"),
+ GrantReadACP: aws.String("GrantReadACP"),
+ GrantWrite: aws.String("GrantWrite"),
+ GrantWriteACP: aws.String("GrantWriteACP"),
+ RequestPayer: aws.String("RequestPayer"),
+ }
+ resp, err := svc.PutObjectAcl(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_RestoreObject() {
+ svc := s3.New(session.New())
+
+ params := &s3.RestoreObjectInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ RequestPayer: aws.String("RequestPayer"),
+ RestoreRequest: &s3.RestoreRequest{
+ Days: aws.Int64(1), // Required
+ },
+ VersionId: aws.String("ObjectVersionId"),
+ }
+ resp, err := svc.RestoreObject(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_UploadPart() {
+ svc := s3.New(session.New())
+
+ params := &s3.UploadPartInput{
+ Bucket: aws.String("BucketName"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ PartNumber: aws.Int64(1), // Required
+ UploadId: aws.String("MultipartUploadId"), // Required
+ Body: bytes.NewReader([]byte("PAYLOAD")),
+ ContentLength: aws.Int64(1),
+ RequestPayer: aws.String("RequestPayer"),
+ SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"),
+ SSECustomerKey: aws.String("SSECustomerKey"),
+ SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"),
+ ServerSideEncryption: aws.String("UploadPartRequestServerSideEncryption"),
+ }
+ resp, err := svc.UploadPart(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleS3_UploadPartCopy() {
+ svc := s3.New(session.New())
+
+ params := &s3.UploadPartCopyInput{
+ Bucket: aws.String("BucketName"), // Required
+ CopySource: aws.String("CopySource"), // Required
+ Key: aws.String("ObjectKey"), // Required
+ PartNumber: aws.Int64(1), // Required
+ UploadId: aws.String("MultipartUploadId"), // Required
+ CopySourceIfMatch: aws.String("CopySourceIfMatch"),
+ CopySourceIfModifiedSince: aws.Time(time.Now()),
+ CopySourceIfNoneMatch: aws.String("CopySourceIfNoneMatch"),
+ CopySourceIfUnmodifiedSince: aws.Time(time.Now()),
+ CopySourceRange: aws.String("CopySourceRange"),
+ CopySourceSSECustomerAlgorithm: aws.String("CopySourceSSECustomerAlgorithm"),
+ CopySourceSSECustomerKey: aws.String("CopySourceSSECustomerKey"),
+ CopySourceSSECustomerKeyMD5: aws.String("CopySourceSSECustomerKeyMD5"),
+ RequestPayer: aws.String("RequestPayer"),
+ SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"),
+ SSECustomerKey: aws.String("SSECustomerKey"),
+ SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"),
+ }
+ resp, err := svc.UploadPartCopy(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
index 47c8495e8..859171b5e 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -24,7 +24,7 @@ func dnsCompatibleBucketName(bucket string) bool {
// the host. This is false if S3ForcePathStyle is explicitly set or if the
// bucket is not DNS compatible.
func hostStyleBucketName(r *request.Request, bucket string) bool {
- if aws.BoolValue(r.Service.Config.S3ForcePathStyle) {
+ if aws.BoolValue(r.Config.S3ForcePathStyle) {
return false
}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go
new file mode 100644
index 000000000..9c8e81c38
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go
@@ -0,0 +1,75 @@
+package s3_test
+
+import (
+ "net/url"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/awstesting/unit"
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
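+// s3BucketTest pairs a bucket name with the request URL the S3 client is
+// expected to build for it.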
+type s3BucketTest struct {
+ bucket string
+ url string
+}
+
+var (
+ sslTests = []s3BucketTest{
+ {"abc", "https://abc.s3.mock-region.amazonaws.com/"},
+ {"a$b$c", "https://s3.mock-region.amazonaws.com/a%24b%24c"},
+ {"a.b.c", "https://s3.mock-region.amazonaws.com/a.b.c"},
+ {"a..bc", "https://s3.mock-region.amazonaws.com/a..bc"},
+ }
+
+ nosslTests = []s3BucketTest{
+ {"a.b.c", "http://a.b.c.s3.mock-region.amazonaws.com/"},
+ {"a..bc", "http://s3.mock-region.amazonaws.com/a..bc"},
+ }
+
+ forcepathTests = []s3BucketTest{
+ {"abc", "https://s3.mock-region.amazonaws.com/abc"},
+ {"a$b$c", "https://s3.mock-region.amazonaws.com/a%24b%24c"},
+ {"a.b.c", "https://s3.mock-region.amazonaws.com/a.b.c"},
+ {"a..bc", "https://s3.mock-region.amazonaws.com/a..bc"},
+ }
+)
+
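+// runTests builds a ListObjects request for each test case and asserts that
+// the generated URL matches the expected host- or path-style form.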
+func runTests(t *testing.T, svc *s3.S3, tests []s3BucketTest) {
+ for _, test := range tests {
+ req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{Bucket: &test.bucket})
+ req.Build()
+ assert.Equal(t, test.url, req.HTTPRequest.URL.String())
+ }
+}
+
+func TestHostStyleBucketBuild(t *testing.T) {
+ s := s3.New(unit.Session)
+ runTests(t, s, sslTests)
+}
+
+func TestHostStyleBucketBuildNoSSL(t *testing.T) {
+ s := s3.New(unit.Session, &aws.Config{DisableSSL: aws.Bool(true)})
+ runTests(t, s, nosslTests)
+}
+
+func TestPathStyleBucketBuild(t *testing.T) {
+ s := s3.New(unit.Session, &aws.Config{S3ForcePathStyle: aws.Bool(true)})
+ runTests(t, s, forcepathTests)
+}
+
+func TestHostStyleBucketGetBucketLocation(t *testing.T) {
+ s := s3.New(unit.Session)
+ req, _ := s.GetBucketLocationRequest(&s3.GetBucketLocationInput{
+ Bucket: aws.String("bucket"),
+ })
+
+ req.Build()
+ require.NoError(t, req.Error)
+ u, _ := url.Parse(req.HTTPRequest.URL.String())
+ assert.NotContains(t, u.Host, "bucket")
+ assert.Contains(t, u.Path, "bucket")
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go
index d51ef693a..9e66afb34 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go
@@ -242,3 +242,5 @@ type S3API interface {
UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)
}
+
+var _ S3API = (*s3.S3)(nil)
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go
new file mode 100644
index 000000000..229c0d63b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go
@@ -0,0 +1,3 @@
+// Package s3manager provides utilities to upload and download objects from
+// S3 concurrently. Helpful for when working with large objects.
+package s3manager
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
index e98b00d47..7ac7cf2e6 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
@@ -9,27 +9,23 @@ import (
"time"
"github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
)
// DefaultDownloadPartSize is the default range of bytes to get at a time when
// using Download().
-var DefaultDownloadPartSize int64 = 1024 * 1024 * 5
+const DefaultDownloadPartSize = 1024 * 1024 * 5
// DefaultDownloadConcurrency is the default number of goroutines to spin up
// when using Download().
-var DefaultDownloadConcurrency = 5
+const DefaultDownloadConcurrency = 5
-// DefaultDownloadOptions is the default set of options used when opts is nil
-// in Download().
-var DefaultDownloadOptions = &DownloadOptions{
- PartSize: DefaultDownloadPartSize,
- Concurrency: DefaultDownloadConcurrency,
-}
-
-// DownloadOptions keeps tracks of extra options to pass to an Download() call.
-type DownloadOptions struct {
+// Downloader is the structure that calls Download(). It is safe to call Download()
+// on this structure for multiple objects and across concurrent goroutines.
+// It is not safe to mutate the Downloader's properties concurrently.
+type Downloader struct {
// The buffer size (in bytes) to use when buffering data into chunks and
// sending them as parts to S3. The minimum allowed part size is 5MB, and
// if this value is set to zero, the DefaultPartSize value will be used.
@@ -39,45 +35,96 @@ type DownloadOptions struct {
// If this is set to zero, the DefaultConcurrency value will be used.
Concurrency int
- // An S3 client to use when performing downloads. Leave this as nil to use
- // a default client.
+ // An S3 client to use when performing downloads.
S3 s3iface.S3API
}
-// NewDownloader creates a new Downloader structure that downloads an object
-// from S3 in concurrent chunks. Pass in an optional DownloadOptions struct
-// to customize the downloader behavior.
-func NewDownloader(opts *DownloadOptions) *Downloader {
- if opts == nil {
- opts = DefaultDownloadOptions
+// NewDownloader creates a new Downloader instance to download objects from
+// S3 in concurrent chunks. Pass in additional functional options to customize
+// the downloader behavior. Requires a client.ConfigProvider in order to create
+// an S3 service client. The session.Session satisfies the client.ConfigProvider
+// interface.
+//
+// Example:
+// // The session the S3 Downloader will use
+// sess := session.New()
+//
+// // Create a downloader with the session and default options
+// downloader := s3manager.NewDownloader(sess)
+//
+// // Create a downloader with the session and custom options
+//     downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
+// d.PartSize = 64 * 1024 * 1024 // 64MB per part
+// })
+func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {
+ d := &Downloader{
+ S3: s3.New(c),
+ PartSize: DefaultDownloadPartSize,
+ Concurrency: DefaultDownloadConcurrency,
}
- return &Downloader{opts: opts}
+ for _, option := range options {
+ option(d)
+ }
+
+ return d
}
-// The Downloader structure that calls Download(). It is safe to call Download()
-// on this structure for multiple objects and across concurrent goroutines.
-type Downloader struct {
- opts *DownloadOptions
+// NewDownloaderWithClient creates a new Downloader instance to download
+// objects from S3 in concurrent chunks. Pass in additional functional
+// options to customize the downloader behavior. Requires an S3 service client
+// to make S3 API calls.
+//
+// Example:
+// // The S3 client the S3 Downloader will use
+//     s3Svc := s3.New(session.New())
+//
+// // Create a downloader with the s3 client and default options
+// downloader := s3manager.NewDownloaderWithClient(s3Svc)
+//
+// // Create a downloader with the s3 client and custom options
+//     downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) {
+// d.PartSize = 64 * 1024 * 1024 // 64MB per part
+// })
+func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {
+ d := &Downloader{
+ S3: svc,
+ PartSize: DefaultDownloadPartSize,
+ Concurrency: DefaultDownloadConcurrency,
+ }
+ for _, option := range options {
+ option(d)
+ }
+
+ return d
}
// Download downloads an object in S3 and writes the payload into w using
// concurrent GET requests.
//
-// It is safe to call this method for multiple objects and across concurrent
-// goroutines.
+// Additional functional options can be provided to configure the individual
+// download. These options are copies of the Downloader instance Download is
+// called from. Modifying the options will not impact the original Downloader instance.
+//
+// It is safe to call this method concurrently across goroutines.
//
// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
// downloads, or in memory []byte wrapper using aws.WriteAtBuffer.
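+//
+// Illustrative example (the bucket, key, and downloader variable are placeholders):
+//
+//     buf := &aws.WriteAtBuffer{} // in-memory io.WriterAt
+//     n, err := downloader.Download(buf, &s3.GetObjectInput{
+//         Bucket: aws.String("myBucket"),
+//         Key:    aws.String("myKey"),
+//     })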
-func (d *Downloader) Download(w io.WriterAt, input *s3.GetObjectInput) (n int64, err error) {
- impl := downloader{w: w, in: input, opts: *d.opts}
+func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
+ impl := downloader{w: w, in: input, ctx: d}
+
+ for _, option := range options {
+ option(&impl.ctx)
+ }
+
return impl.download()
}
// downloader is the implementation structure used internally by Downloader.
type downloader struct {
- opts DownloadOptions
- in *s3.GetObjectInput
- w io.WriterAt
+ ctx Downloader
+
+ in *s3.GetObjectInput
+ w io.WriterAt
wg sync.WaitGroup
m sync.Mutex
@@ -92,16 +139,12 @@ type downloader struct {
func (d *downloader) init() {
d.totalBytes = -1
- if d.opts.Concurrency == 0 {
- d.opts.Concurrency = DefaultDownloadConcurrency
+ if d.ctx.Concurrency == 0 {
+ d.ctx.Concurrency = DefaultDownloadConcurrency
}
- if d.opts.PartSize == 0 {
- d.opts.PartSize = DefaultDownloadPartSize
- }
-
- if d.opts.S3 == nil {
- d.opts.S3 = s3.New(nil)
+ if d.ctx.PartSize == 0 {
+ d.ctx.PartSize = DefaultDownloadPartSize
}
}
@@ -111,8 +154,8 @@ func (d *downloader) download() (n int64, err error) {
d.init()
// Spin up workers
- ch := make(chan dlchunk, d.opts.Concurrency)
- for i := 0; i < d.opts.Concurrency; i++ {
+ ch := make(chan dlchunk, d.ctx.Concurrency)
+ for i := 0; i < d.ctx.Concurrency; i++ {
d.wg.Add(1)
go d.downloadPart(ch)
}
@@ -136,8 +179,8 @@ func (d *downloader) download() (n int64, err error) {
}
// Queue the next range of bytes to read.
- ch <- dlchunk{w: d.w, start: d.pos, size: d.opts.PartSize}
- d.pos += d.opts.PartSize
+ ch <- dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize}
+ d.pos += d.ctx.PartSize
}
// Wait for completion
@@ -171,7 +214,7 @@ func (d *downloader) downloadPart(ch chan dlchunk) {
chunk.start, chunk.start+chunk.size-1)
in.Range = &rng
- resp, err := d.opts.S3.GetObject(in)
+ resp, err := d.ctx.S3.GetObject(in)
if err != nil {
d.seterr(err)
} else {
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go
new file mode 100644
index 000000000..f67b23658
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go
@@ -0,0 +1,144 @@
+package s3manager_test
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "regexp"
+ "strconv"
+ "sync"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/awstesting/unit"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3manager"
+)
+
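+// dlLoggingSvc returns an S3 client whose Send handler serves ranged GETs
+// from the supplied in-memory data, recording the operation names and Range
+// parameters of every request it receives.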
+func dlLoggingSvc(data []byte) (*s3.S3, *[]string, *[]string) {
+ var m sync.Mutex
+ names := []string{}
+ ranges := []string{}
+
+ svc := s3.New(unit.Session)
+ svc.Handlers.Send.Clear()
+ svc.Handlers.Send.PushBack(func(r *request.Request) {
+ m.Lock()
+ defer m.Unlock()
+
+ names = append(names, r.Operation.Name)
+ ranges = append(ranges, *r.Params.(*s3.GetObjectInput).Range)
+
+ rerng := regexp.MustCompile(`bytes=(\d+)-(\d+)`)
+ rng := rerng.FindStringSubmatch(r.HTTPRequest.Header.Get("Range"))
+ start, _ := strconv.ParseInt(rng[1], 10, 64)
+ fin, _ := strconv.ParseInt(rng[2], 10, 64)
+ fin++
+
+ if fin > int64(len(data)) {
+ fin = int64(len(data))
+ }
+
+ r.HTTPResponse = &http.Response{
+ StatusCode: 200,
+ Body: ioutil.NopCloser(bytes.NewReader(data[start:fin])),
+ Header: http.Header{},
+ }
+ r.HTTPResponse.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d",
+ start, fin, len(data)))
+ })
+
+ return svc, &names, &ranges
+}
+
+func TestDownloadOrder(t *testing.T) {
+ s, names, ranges := dlLoggingSvc(buf12MB)
+
+ d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) {
+ d.Concurrency = 1
+ })
+ w := &aws.WriteAtBuffer{}
+ n, err := d.Download(w, &s3.GetObjectInput{
+ Bucket: aws.String("bucket"),
+ Key: aws.String("key"),
+ })
+
+ assert.Nil(t, err)
+ assert.Equal(t, int64(len(buf12MB)), n)
+ assert.Equal(t, []string{"GetObject", "GetObject", "GetObject"}, *names)
+ assert.Equal(t, []string{"bytes=0-5242879", "bytes=5242880-10485759", "bytes=10485760-15728639"}, *ranges)
+
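+	// The source buffer is zero-filled, so the downloaded bytes should sum to zero.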
+ count := 0
+ for _, b := range w.Bytes() {
+ count += int(b)
+ }
+ assert.Equal(t, 0, count)
+}
+
+func TestDownloadZero(t *testing.T) {
+ s, names, ranges := dlLoggingSvc([]byte{})
+
+ d := s3manager.NewDownloaderWithClient(s)
+ w := &aws.WriteAtBuffer{}
+ n, err := d.Download(w, &s3.GetObjectInput{
+ Bucket: aws.String("bucket"),
+ Key: aws.String("key"),
+ })
+
+ assert.Nil(t, err)
+ assert.Equal(t, int64(0), n)
+ assert.Equal(t, []string{"GetObject"}, *names)
+ assert.Equal(t, []string{"bytes=0-5242879"}, *ranges)
+}
+
+func TestDownloadSetPartSize(t *testing.T) {
+ s, names, ranges := dlLoggingSvc([]byte{1, 2, 3})
+
+ d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) {
+ d.Concurrency = 1
+ d.PartSize = 1
+ })
+ w := &aws.WriteAtBuffer{}
+ n, err := d.Download(w, &s3.GetObjectInput{
+ Bucket: aws.String("bucket"),
+ Key: aws.String("key"),
+ })
+
+ assert.Nil(t, err)
+ assert.Equal(t, int64(3), n)
+ assert.Equal(t, []string{"GetObject", "GetObject", "GetObject"}, *names)
+ assert.Equal(t, []string{"bytes=0-0", "bytes=1-1", "bytes=2-2"}, *ranges)
+ assert.Equal(t, []byte{1, 2, 3}, w.Bytes())
+}
+
+func TestDownloadError(t *testing.T) {
+ s, names, _ := dlLoggingSvc([]byte{1, 2, 3})
+
+ num := 0
+ s.Handlers.Send.PushBack(func(r *request.Request) {
+ num++
+ if num > 1 {
+ r.HTTPResponse.StatusCode = 400
+ r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
+ }
+ })
+
+ d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) {
+ d.Concurrency = 1
+ d.PartSize = 1
+ })
+ w := &aws.WriteAtBuffer{}
+ n, err := d.Download(w, &s3.GetObjectInput{
+ Bucket: aws.String("bucket"),
+ Key: aws.String("key"),
+ })
+
+ assert.NotNil(t, err)
+ assert.Equal(t, int64(1), n)
+ assert.Equal(t, []string{"GetObject", "GetObject"}, *names)
+ assert.Equal(t, []byte{1}, w.Bytes())
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/shared_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/shared_test.go
new file mode 100644
index 000000000..b5b613143
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/shared_test.go
@@ -0,0 +1,4 @@
+package s3manager_test
+
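+// buf12MB is large enough to force a multipart upload with the default 5MB
+// part size; buf2MB fits in a single PutObject request.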
+var buf12MB = make([]byte, 1024*1024*12)
+var buf2MB = make([]byte, 1024*1024*2)
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
index 3f2158c95..b43c80cfd 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
@@ -10,34 +10,26 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
)
// MaxUploadParts is the maximum allowed number of parts in a multi-part upload
// on Amazon S3.
-var MaxUploadParts = 10000
+const MaxUploadParts = 10000
// MinUploadPartSize is the minimum allowed part size when uploading a part to
// Amazon S3.
-var MinUploadPartSize int64 = 1024 * 1024 * 5
+const MinUploadPartSize int64 = 1024 * 1024 * 5
// DefaultUploadPartSize is the default part size to buffer chunks of a
// payload into.
-var DefaultUploadPartSize = MinUploadPartSize
+const DefaultUploadPartSize = MinUploadPartSize
// DefaultUploadConcurrency is the default number of goroutines to spin up when
// using Upload().
-var DefaultUploadConcurrency = 5
-
-// DefaultUploadOptions is the default set of options used when opts is nil in
-// Upload().
-var DefaultUploadOptions = &UploadOptions{
- PartSize: DefaultUploadPartSize,
- Concurrency: DefaultUploadConcurrency,
- LeavePartsOnError: false,
- S3: nil,
-}
+const DefaultUploadConcurrency = 5
// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned
// will satisfy this interface when a multi part upload failed to upload all
@@ -205,8 +197,10 @@ type UploadOutput struct {
UploadID string
}
-// UploadOptions keeps tracks of extra options to pass to an Upload() call.
-type UploadOptions struct {
+// Uploader is the structure that calls Upload(). It is safe to call Upload()
+// on this structure for multiple objects and across concurrent goroutines.
+// It is not safe to mutate the Uploader's properties concurrently.
+type Uploader struct {
// The buffer size (in bytes) to use when buffering data into chunks and
// sending them as parts to S3. The minimum allowed part size is 5MB, and
// if this value is set to zero, the DefaultPartSize value will be used.
@@ -224,43 +218,121 @@ type UploadOptions struct {
// space usage on S3 and will add additional costs if not cleaned up.
LeavePartsOnError bool
- // The client to use when uploading to S3. Leave this as nil to use the
- // default S3 client.
+	// MaxUploadParts is the maximum number of parts that will be uploaded to S3,
+	// and is used to calculate the part size of the object to be uploaded.
+	// E.g.: a 5GB file with MaxUploadParts set to 100 will be uploaded as
+	// 100 parts of 50MB each, subject to the package's MaxUploadParts limit
+	// (10,000 parts).
+ MaxUploadParts int
+
+ // The client to use when uploading to S3.
S3 s3iface.S3API
}
-// NewUploader creates a new Uploader object to upload data to S3. Pass in
-// an optional opts structure to customize the uploader behavior.
-func NewUploader(opts *UploadOptions) *Uploader {
- if opts == nil {
- opts = DefaultUploadOptions
+// NewUploader creates a new Uploader instance to upload objects to S3. Pass in
+// additional functional options to customize the uploader's behavior. Requires a
+// client.ConfigProvider in order to create an S3 service client. The session.Session
+// satisfies the client.ConfigProvider interface.
+//
+// Example:
+// // The session the S3 Uploader will use
+// sess := session.New()
+//
+// // Create an uploader with the session and default options
+// uploader := s3manager.NewUploader(sess)
+//
+// // Create an uploader with the session and custom options
+//     uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
+// u.PartSize = 64 * 1024 * 1024 // 64MB per part
+// })
+func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
+ u := &Uploader{
+ S3: s3.New(c),
+ PartSize: DefaultUploadPartSize,
+ Concurrency: DefaultUploadConcurrency,
+ LeavePartsOnError: false,
+ MaxUploadParts: MaxUploadParts,
}
- return &Uploader{opts: opts}
+
+ for _, option := range options {
+ option(u)
+ }
+
+ return u
}
-// The Uploader structure that calls Upload(). It is safe to call Upload()
-// on this structure for multiple objects and across concurrent goroutines.
-type Uploader struct {
- opts *UploadOptions
+// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in
+// additional functional options to customize the uploader's behavior. Requires
+// an S3 service client to make S3 API calls.
+//
+// Example:
+// // S3 service client the Upload manager will use.
+// s3Svc := s3.New(session.New())
+//
+// // Create an uploader with S3 client and default options
+// uploader := s3manager.NewUploaderWithClient(s3Svc)
+//
+// // Create an uploader with S3 client and custom options
+// uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) {
+// u.PartSize = 64 * 1024 * 1024 // 64MB per part
+// })
+func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
+ u := &Uploader{
+ S3: svc,
+ PartSize: DefaultUploadPartSize,
+ Concurrency: DefaultUploadConcurrency,
+ LeavePartsOnError: false,
+ MaxUploadParts: MaxUploadParts,
+ }
+
+ for _, option := range options {
+ option(u)
+ }
+
+ return u
}
// Upload uploads an object to S3, intelligently buffering large files into
// smaller chunks and sending them in parallel across multiple goroutines. You
-// can configure the buffer size and concurrency through the opts parameter.
+// can configure the buffer size and concurrency through the Uploader's parameters.
//
-// If opts is set to nil, DefaultUploadOptions will be used.
+// Additional functional options can be provided to configure the individual
+// upload. These options are copies of the Uploader instance Upload is called from.
+// Modifying the options will not impact the original Uploader instance.
//
-// It is safe to call this method for multiple objects and across concurrent
-// goroutines.
-func (u *Uploader) Upload(input *UploadInput) (*UploadOutput, error) {
- i := uploader{in: input, opts: *u.opts}
+// It is safe to call this method concurrently across goroutines.
+//
+// Example:
+// // Upload input parameters
+// upParams := &s3manager.UploadInput{
+// Bucket: &bucketName,
+// Key: &keyName,
+// Body: file,
+// }
+//
+// // Perform an upload.
+// result, err := uploader.Upload(upParams)
+//
+//     // Perform an upload with options different than those in the Uploader.
+// result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) {
+// u.PartSize = 10 * 1024 * 1024 // 10MB part size
+//         u.LeavePartsOnError = true    // Don't delete the parts if the upload fails.
+// })
+func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) {
+ i := uploader{in: input, ctx: u}
+
+ for _, option := range options {
+ option(&i.ctx)
+ }
+
return i.upload()
}
// internal structure to manage an upload to S3.
type uploader struct {
- in *UploadInput
- opts UploadOptions
+ ctx Uploader
+
+ in *UploadInput
readerPos int64 // current reader position
totalSize int64 // set to -1 if the size is not known
@@ -271,7 +343,7 @@ type uploader struct {
func (u *uploader) upload() (*UploadOutput, error) {
u.init()
- if u.opts.PartSize < MinUploadPartSize {
+ if u.ctx.PartSize < MinUploadPartSize {
msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize)
return nil, awserr.New("ConfigError", msg, nil)
}
@@ -290,14 +362,11 @@ func (u *uploader) upload() (*UploadOutput, error) {
// init will initialize all default options.
func (u *uploader) init() {
- if u.opts.S3 == nil {
- u.opts.S3 = s3.New(nil)
+ if u.ctx.Concurrency == 0 {
+ u.ctx.Concurrency = DefaultUploadConcurrency
}
- if u.opts.Concurrency == 0 {
- u.opts.Concurrency = DefaultUploadConcurrency
- }
- if u.opts.PartSize == 0 {
- u.opts.PartSize = DefaultUploadPartSize
+ if u.ctx.PartSize == 0 {
+ u.ctx.PartSize = DefaultUploadPartSize
}
// Try to get the total size for some optimizations
@@ -320,9 +389,12 @@ func (u *uploader) initSize() {
}
u.totalSize = n
- // try to adjust partSize if it is too small
- if u.totalSize/u.opts.PartSize >= int64(MaxUploadParts) {
- u.opts.PartSize = u.totalSize / int64(MaxUploadParts)
+ // Try to adjust partSize if it is too small and account for
+ // integer division truncation.
+ if u.totalSize/u.ctx.PartSize >= int64(u.ctx.MaxUploadParts) {
+		// Add one to the part size to account for remainders
+		// during the size calculation, e.g. an odd number of bytes.
+ u.ctx.PartSize = (u.totalSize / int64(u.ctx.MaxUploadParts)) + 1
}
}
}
@@ -336,14 +408,14 @@ func (u *uploader) nextReader() (io.ReadSeeker, error) {
case io.ReaderAt:
var err error
- n := u.opts.PartSize
+ n := u.ctx.PartSize
if u.totalSize >= 0 {
bytesLeft := u.totalSize - u.readerPos
if bytesLeft == 0 {
err = io.EOF
n = bytesLeft
- } else if bytesLeft <= u.opts.PartSize {
+ } else if bytesLeft <= u.ctx.PartSize {
err = io.ErrUnexpectedEOF
n = bytesLeft
}
@@ -355,7 +427,7 @@ func (u *uploader) nextReader() (io.ReadSeeker, error) {
return buf, err
default:
- packet := make([]byte, u.opts.PartSize)
+ packet := make([]byte, u.ctx.PartSize)
n, err := io.ReadFull(u.in.Body, packet)
u.readerPos += int64(n)
@@ -371,7 +443,7 @@ func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) {
awsutil.Copy(params, u.in)
params.Body = buf
- req, out := u.opts.S3.PutObjectRequest(params)
+ req, out := u.ctx.S3.PutObjectRequest(params)
if err := req.Send(); err != nil {
return nil, err
}
@@ -414,15 +486,15 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
awsutil.Copy(params, u.in)
// Create the multipart
- resp, err := u.opts.S3.CreateMultipartUpload(params)
+ resp, err := u.ctx.S3.CreateMultipartUpload(params)
if err != nil {
return nil, err
}
u.uploadID = *resp.UploadId
// Create the workers
- ch := make(chan chunk, u.opts.Concurrency)
- for i := 0; i < u.opts.Concurrency; i++ {
+ ch := make(chan chunk, u.ctx.Concurrency)
+ for i := 0; i < u.ctx.Concurrency; i++ {
u.wg.Add(1)
go u.readChunk(ch)
}
@@ -434,13 +506,18 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
// Read and queue the rest of the parts
for u.geterr() == nil {
// This upload exceeded maximum number of supported parts, error now.
- if num > int64(MaxUploadParts) {
- msg := fmt.Sprintf("exceeded total allowed parts (%d). "+
- "Adjust PartSize to fit in this limit", MaxUploadParts)
+ if num > int64(u.ctx.MaxUploadParts) || num > int64(MaxUploadParts) {
+ var msg string
+ if num > int64(u.ctx.MaxUploadParts) {
+ msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+ u.ctx.MaxUploadParts)
+ } else {
+ msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+ MaxUploadParts)
+ }
u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
break
}
-
num++
buf, err := u.nextReader()
@@ -502,7 +579,7 @@ func (u *multiuploader) readChunk(ch chan chunk) {
// send performs an UploadPart request and keeps track of the completed
// part information.
func (u *multiuploader) send(c chunk) error {
- resp, err := u.opts.S3.UploadPart(&s3.UploadPartInput{
+ resp, err := u.ctx.S3.UploadPart(&s3.UploadPartInput{
Bucket: u.in.Bucket,
Key: u.in.Key,
Body: c.buf,
@@ -542,11 +619,11 @@ func (u *multiuploader) seterr(e error) {
// fail will abort the multipart unless LeavePartsOnError is set to true.
func (u *multiuploader) fail() {
- if u.opts.LeavePartsOnError {
+ if u.ctx.LeavePartsOnError {
return
}
- u.opts.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
+ u.ctx.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
Bucket: u.in.Bucket,
Key: u.in.Key,
UploadId: &u.uploadID,
@@ -563,7 +640,7 @@ func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
// Parts must be sorted in PartNumber order.
sort.Sort(u.parts)
- resp, err := u.opts.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+ resp, err := u.ctx.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
Bucket: u.in.Bucket,
Key: u.in.Key,
UploadId: &u.uploadID,
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go
new file mode 100644
index 000000000..cd127ebfb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go
@@ -0,0 +1,469 @@
+package s3manager_test
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "sort"
+ "strings"
+ "sync"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/awstesting/unit"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3manager"
+ "github.com/stretchr/testify/assert"
+)
+
+var emptyList = []string{}
+
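+// val extracts a single value from a request parameter struct using an
+// awsutil path expression.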
+func val(i interface{}, s string) interface{} {
+ return awsutil.ValuesAtPath(i, s)[0]
+}
+
+func contains(src []string, s string) bool {
+ for _, v := range src {
+ if s == v {
+ return true
+ }
+ }
+ return false
+}
+
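+// loggingSvc returns an S3 client with a stubbed Send handler that records
+// each operation name and its parameters (unless listed in ignoreOps) and
+// returns minimal successful responses for the upload-related operations.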
+func loggingSvc(ignoreOps []string) (*s3.S3, *[]string, *[]interface{}) {
+ var m sync.Mutex
+ partNum := 0
+ names := []string{}
+ params := []interface{}{}
+ svc := s3.New(unit.Session)
+ svc.Handlers.Unmarshal.Clear()
+ svc.Handlers.UnmarshalMeta.Clear()
+ svc.Handlers.UnmarshalError.Clear()
+ svc.Handlers.Send.Clear()
+ svc.Handlers.Send.PushBack(func(r *request.Request) {
+ m.Lock()
+ defer m.Unlock()
+
+ if !contains(ignoreOps, r.Operation.Name) {
+ names = append(names, r.Operation.Name)
+ params = append(params, r.Params)
+ }
+
+ r.HTTPResponse = &http.Response{
+ StatusCode: 200,
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+
+ switch data := r.Data.(type) {
+ case *s3.CreateMultipartUploadOutput:
+ data.UploadId = aws.String("UPLOAD-ID")
+ case *s3.UploadPartOutput:
+ partNum++
+ data.ETag = aws.String(fmt.Sprintf("ETAG%d", partNum))
+ case *s3.CompleteMultipartUploadOutput:
+ data.Location = aws.String("https://location")
+ data.VersionId = aws.String("VERSION-ID")
+ case *s3.PutObjectOutput:
+ data.VersionId = aws.String("VERSION-ID")
+ }
+ })
+
+	return svc, &names, &params
+}
+
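+// buflen reads the io.Reader to completion and returns the number of bytes read.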
+func buflen(i interface{}) int {
+ r := i.(io.Reader)
+ b, _ := ioutil.ReadAll(r)
+ return len(b)
+}
+
+func TestUploadOrderMulti(t *testing.T) {
+ s, ops, args := loggingSvc(emptyList)
+ u := s3manager.NewUploaderWithClient(s)
+
+ resp, err := u.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(buf12MB),
+ ServerSideEncryption: aws.String("AES256"),
+ ContentType: aws.String("content/type"),
+ })
+
+ assert.NoError(t, err)
+ assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops)
+ assert.Equal(t, "https://location", resp.Location)
+ assert.Equal(t, "UPLOAD-ID", resp.UploadID)
+ assert.Equal(t, aws.String("VERSION-ID"), resp.VersionID)
+
+ // Validate input values
+
+ // UploadPart
+ assert.Equal(t, "UPLOAD-ID", val((*args)[1], "UploadId"))
+ assert.Equal(t, "UPLOAD-ID", val((*args)[2], "UploadId"))
+ assert.Equal(t, "UPLOAD-ID", val((*args)[3], "UploadId"))
+
+ // CompleteMultipartUpload
+ assert.Equal(t, "UPLOAD-ID", val((*args)[4], "UploadId"))
+ assert.Equal(t, int64(1), val((*args)[4], "MultipartUpload.Parts[0].PartNumber"))
+ assert.Equal(t, int64(2), val((*args)[4], "MultipartUpload.Parts[1].PartNumber"))
+ assert.Equal(t, int64(3), val((*args)[4], "MultipartUpload.Parts[2].PartNumber"))
+ assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[0].ETag"))
+ assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[1].ETag"))
+ assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[2].ETag"))
+
+ // Custom headers
+ assert.Equal(t, "AES256", val((*args)[0], "ServerSideEncryption"))
+ assert.Equal(t, "content/type", val((*args)[0], "ContentType"))
+}
+
+func TestUploadOrderMultiDifferentPartSize(t *testing.T) {
+ s, ops, args := loggingSvc(emptyList)
+ mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
+ u.PartSize = 1024 * 1024 * 7
+ u.Concurrency = 1
+ })
+ _, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(buf12MB),
+ })
+
+ assert.NoError(t, err)
+ assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops)
+
+ // Part lengths
+ assert.Equal(t, 1024*1024*7, buflen(val((*args)[1], "Body")))
+ assert.Equal(t, 1024*1024*5, buflen(val((*args)[2], "Body")))
+}
+
+func TestUploadIncreasePartSize(t *testing.T) {
+ s, ops, args := loggingSvc(emptyList)
+ mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
+ u.Concurrency = 1
+ u.MaxUploadParts = 2
+ })
+ _, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(buf12MB),
+ })
+
+ assert.NoError(t, err)
+ assert.Equal(t, int64(s3manager.DefaultDownloadPartSize), mgr.PartSize)
+ assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops)
+
+ // Part lengths
+ assert.Equal(t, (1024*1024*6)+1, buflen(val((*args)[1], "Body")))
+ assert.Equal(t, (1024*1024*6)-1, buflen(val((*args)[2], "Body")))
+}
+
+func TestUploadFailIfPartSizeTooSmall(t *testing.T) {
+ mgr := s3manager.NewUploader(unit.Session, func(u *s3manager.Uploader) {
+ u.PartSize = 5
+ })
+ resp, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(buf12MB),
+ })
+
+ assert.Nil(t, resp)
+ assert.NotNil(t, err)
+
+ aerr := err.(awserr.Error)
+ assert.Equal(t, "ConfigError", aerr.Code())
+ assert.Contains(t, aerr.Message(), "part size must be at least")
+}
+
+func TestUploadOrderSingle(t *testing.T) {
+ s, ops, args := loggingSvc(emptyList)
+ mgr := s3manager.NewUploaderWithClient(s)
+ resp, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(buf2MB),
+ ServerSideEncryption: aws.String("AES256"),
+ ContentType: aws.String("content/type"),
+ })
+
+ assert.NoError(t, err)
+ assert.Equal(t, []string{"PutObject"}, *ops)
+ assert.NotEqual(t, "", resp.Location)
+ assert.Equal(t, aws.String("VERSION-ID"), resp.VersionID)
+ assert.Equal(t, "", resp.UploadID)
+ assert.Equal(t, "AES256", val((*args)[0], "ServerSideEncryption"))
+ assert.Equal(t, "content/type", val((*args)[0], "ContentType"))
+}
+
+func TestUploadOrderSingleFailure(t *testing.T) {
+ s, ops, _ := loggingSvc(emptyList)
+ s.Handlers.Send.PushBack(func(r *request.Request) {
+ r.HTTPResponse.StatusCode = 400
+ })
+ mgr := s3manager.NewUploaderWithClient(s)
+ resp, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(buf2MB),
+ })
+
+ assert.Error(t, err)
+ assert.Equal(t, []string{"PutObject"}, *ops)
+ assert.Nil(t, resp)
+}
+
+func TestUploadOrderZero(t *testing.T) {
+ s, ops, args := loggingSvc(emptyList)
+ mgr := s3manager.NewUploaderWithClient(s)
+ resp, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(make([]byte, 0)),
+ })
+
+ assert.NoError(t, err)
+ assert.Equal(t, []string{"PutObject"}, *ops)
+ assert.NotEqual(t, "", resp.Location)
+ assert.Equal(t, "", resp.UploadID)
+ assert.Equal(t, 0, buflen(val((*args)[0], "Body")))
+}
+
+func TestUploadOrderMultiFailure(t *testing.T) {
+ s, ops, _ := loggingSvc(emptyList)
+ s.Handlers.Send.PushBack(func(r *request.Request) {
+ switch t := r.Data.(type) {
+ case *s3.UploadPartOutput:
+ if *t.ETag == "ETAG2" {
+ r.HTTPResponse.StatusCode = 400
+ }
+ }
+ })
+
+ mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
+ u.Concurrency = 1
+ })
+ _, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(buf12MB),
+ })
+
+ assert.Error(t, err)
+ assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "AbortMultipartUpload"}, *ops)
+}
+
+func TestUploadOrderMultiFailureOnComplete(t *testing.T) {
+ s, ops, _ := loggingSvc(emptyList)
+ s.Handlers.Send.PushBack(func(r *request.Request) {
+ switch r.Data.(type) {
+ case *s3.CompleteMultipartUploadOutput:
+ r.HTTPResponse.StatusCode = 400
+ }
+ })
+
+ mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
+ u.Concurrency = 1
+ })
+ _, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(buf12MB),
+ })
+
+ assert.Error(t, err)
+ assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart",
+ "UploadPart", "CompleteMultipartUpload", "AbortMultipartUpload"}, *ops)
+}
+
+func TestUploadOrderMultiFailureOnCreate(t *testing.T) {
+ s, ops, _ := loggingSvc(emptyList)
+ s.Handlers.Send.PushBack(func(r *request.Request) {
+ switch r.Data.(type) {
+ case *s3.CreateMultipartUploadOutput:
+ r.HTTPResponse.StatusCode = 400
+ }
+ })
+
+ mgr := s3manager.NewUploaderWithClient(s)
+ _, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(make([]byte, 1024*1024*12)),
+ })
+
+ assert.Error(t, err)
+ assert.Equal(t, []string{"CreateMultipartUpload"}, *ops)
+}
+
+func TestUploadOrderMultiFailureLeaveParts(t *testing.T) {
+ s, ops, _ := loggingSvc(emptyList)
+ s.Handlers.Send.PushBack(func(r *request.Request) {
+ switch data := r.Data.(type) {
+ case *s3.UploadPartOutput:
+ if *data.ETag == "ETAG2" {
+ r.HTTPResponse.StatusCode = 400
+ }
+ }
+ })
+
+ mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
+ u.Concurrency = 1
+ u.LeavePartsOnError = true
+ })
+ _, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: bytes.NewReader(make([]byte, 1024*1024*12)),
+ })
+
+ assert.Error(t, err)
+ assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart"}, *ops)
+}
+
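+// failreader is an io.Reader that returns an error on its times-th call to
+// Read; earlier calls report a full buffer without writing any data.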
+type failreader struct {
+ times int
+ failCount int
+}
+
+func (f *failreader) Read(b []byte) (int, error) {
+ f.failCount++
+ if f.failCount >= f.times {
+ return 0, fmt.Errorf("random failure")
+ }
+ return len(b), nil
+}
+
+func TestUploadOrderReadFail1(t *testing.T) {
+ s, ops, _ := loggingSvc(emptyList)
+ mgr := s3manager.NewUploaderWithClient(s)
+ _, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: &failreader{times: 1},
+ })
+
+ assert.Equal(t, "ReadRequestBody", err.(awserr.Error).Code())
+ assert.EqualError(t, err.(awserr.Error).OrigErr(), "random failure")
+ assert.Equal(t, []string{}, *ops)
+}
+
+func TestUploadOrderReadFail2(t *testing.T) {
+ s, ops, _ := loggingSvc([]string{"UploadPart"})
+ mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
+ u.Concurrency = 1
+ })
+ _, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: &failreader{times: 2},
+ })
+
+ assert.Equal(t, "ReadRequestBody", err.(awserr.Error).Code())
+ assert.EqualError(t, err.(awserr.Error).OrigErr(), "random failure")
+ assert.Equal(t, []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops)
+}
+
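+// sizedReader is an io.Reader that reports exactly size bytes read in total
+// (without writing any data) before returning io.EOF, simulating a
+// non-seekable streaming body.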
+type sizedReader struct {
+ size int
+ cur int
+}
+
+func (s *sizedReader) Read(p []byte) (n int, err error) {
+ if s.cur >= s.size {
+ return 0, io.EOF
+ }
+
+ n = len(p)
+ s.cur += len(p)
+ if s.cur > s.size {
+ n -= s.cur - s.size
+ }
+
+ return
+}
+
+func TestUploadOrderMultiBufferedReader(t *testing.T) {
+ s, ops, args := loggingSvc(emptyList)
+ mgr := s3manager.NewUploaderWithClient(s)
+ _, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: &sizedReader{size: 1024 * 1024 * 12},
+ })
+
+ assert.NoError(t, err)
+ assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops)
+
+ // Part lengths
+ parts := []int{
+ buflen(val((*args)[1], "Body")),
+ buflen(val((*args)[2], "Body")),
+ buflen(val((*args)[3], "Body")),
+ }
+ sort.Ints(parts)
+ assert.Equal(t, []int{1024 * 1024 * 2, 1024 * 1024 * 5, 1024 * 1024 * 5}, parts)
+}
+
+func TestUploadOrderMultiBufferedReaderExceedTotalParts(t *testing.T) {
+ s, ops, _ := loggingSvc([]string{"UploadPart"})
+ mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
+ u.Concurrency = 1
+ u.MaxUploadParts = 2
+ })
+ resp, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: &sizedReader{size: 1024 * 1024 * 12},
+ })
+
+ assert.Error(t, err)
+ assert.Nil(t, resp)
+ assert.Equal(t, []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops)
+
+ aerr := err.(awserr.Error)
+ assert.Equal(t, "TotalPartsExceeded", aerr.Code())
+ assert.Contains(t, aerr.Message(), "configured MaxUploadParts (2)")
+}
+
+func TestUploadOrderSingleBufferedReader(t *testing.T) {
+ s, ops, _ := loggingSvc(emptyList)
+ mgr := s3manager.NewUploaderWithClient(s)
+ resp, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: &sizedReader{size: 1024 * 1024 * 2},
+ })
+
+ assert.NoError(t, err)
+ assert.Equal(t, []string{"PutObject"}, *ops)
+ assert.NotEqual(t, "", resp.Location)
+ assert.Equal(t, "", resp.UploadID)
+}
+
+func TestUploadZeroLenObject(t *testing.T) {
+ requestMade := false
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ requestMade = true
+ w.WriteHeader(http.StatusOK)
+ }))
+ mgr := s3manager.NewUploaderWithClient(s3.New(unit.Session, &aws.Config{
+ Endpoint: aws.String(server.URL),
+ }))
+ resp, err := mgr.Upload(&s3manager.UploadInput{
+ Bucket: aws.String("Bucket"),
+ Key: aws.String("Key"),
+ Body: strings.NewReader(""),
+ })
+
+ assert.NoError(t, err)
+ assert.True(t, requestMade)
+ assert.NotEqual(t, "", resp.Location)
+ assert.Equal(t, "", resp.UploadID)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/service.go
index daeca34c2..e80d95429 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/service.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -4,49 +4,72 @@ package s3
import (
"github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/defaults"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/aws/service"
- "github.com/aws/aws-sdk-go/aws/service/serviceinfo"
- "github.com/aws/aws-sdk-go/internal/protocol/restxml"
- "github.com/aws/aws-sdk-go/internal/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/restxml"
+ "github.com/aws/aws-sdk-go/private/signer/v4"
)
// S3 is a client for Amazon S3.
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
type S3 struct {
- *service.Service
+ *client.Client
}
-// Used for custom service initialization logic
-var initService func(*service.Service)
+// Used for custom client initialization logic
+var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
-// New returns a new S3 client.
-func New(config *aws.Config) *S3 {
- service := &service.Service{
- ServiceInfo: serviceinfo.ServiceInfo{
- Config: defaults.DefaultConfig.Merge(config),
- ServiceName: "s3",
- APIVersion: "2006-03-01",
- },
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "s3"
+
+// New creates a new instance of the S3 client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a S3 client from just a session.
+// svc := s3.New(mySession)
+//
+// // Create a S3 client with additional configuration
+// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *S3 {
+ svc := &S3{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2006-03-01",
+ },
+ handlers,
+ ),
}
- service.Initialize()
// Handlers
- service.Handlers.Sign.PushBack(v4.Sign)
- service.Handlers.Build.PushBack(restxml.Build)
- service.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
- service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
- service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(restxml.Build)
+ svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError)
- // Run custom service initialization if present
- if initService != nil {
- initService(service)
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
}
- return &S3{service}
+ return svc
}
// newRequest creates a new request for a S3 operation and runs any
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse_test.go
new file mode 100644
index 000000000..5f1ca64bf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse_test.go
@@ -0,0 +1,79 @@
+package s3_test
+
+import (
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/awstesting/unit"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSSECustomerKeyOverHTTPError(t *testing.T) {
+ s := s3.New(unit.Session, &aws.Config{DisableSSL: aws.Bool(true)})
+ req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{
+ Bucket: aws.String("bucket"),
+ CopySource: aws.String("bucket/source"),
+ Key: aws.String("dest"),
+ SSECustomerKey: aws.String("key"),
+ })
+ err := req.Build()
+
+ assert.Error(t, err)
+ assert.Equal(t, "ConfigError", err.(awserr.Error).Code())
+ assert.Contains(t, err.(awserr.Error).Message(), "cannot send SSE keys over HTTP")
+}
+
+func TestCopySourceSSECustomerKeyOverHTTPError(t *testing.T) {
+ s := s3.New(unit.Session, &aws.Config{DisableSSL: aws.Bool(true)})
+ req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{
+ Bucket: aws.String("bucket"),
+ CopySource: aws.String("bucket/source"),
+ Key: aws.String("dest"),
+ CopySourceSSECustomerKey: aws.String("key"),
+ })
+ err := req.Build()
+
+ assert.Error(t, err)
+ assert.Equal(t, "ConfigError", err.(awserr.Error).Code())
+ assert.Contains(t, err.(awserr.Error).Message(), "cannot send SSE keys over HTTP")
+}
+
+func TestComputeSSEKeys(t *testing.T) {
+ s := s3.New(unit.Session)
+ req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{
+ Bucket: aws.String("bucket"),
+ CopySource: aws.String("bucket/source"),
+ Key: aws.String("dest"),
+ SSECustomerKey: aws.String("key"),
+ CopySourceSSECustomerKey: aws.String("key"),
+ })
+ err := req.Build()
+
+ assert.NoError(t, err)
+ assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key"))
+ assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key"))
+ assert.Equal(t, "PG4LipwVIkqCKLmpjKFTHQ==", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key-md5"))
+ assert.Equal(t, "PG4LipwVIkqCKLmpjKFTHQ==", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key-md5"))
+}
+
+func TestComputeSSEKeysShortcircuit(t *testing.T) {
+ s := s3.New(unit.Session)
+ req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{
+ Bucket: aws.String("bucket"),
+ CopySource: aws.String("bucket/source"),
+ Key: aws.String("dest"),
+ SSECustomerKey: aws.String("key"),
+ CopySourceSSECustomerKey: aws.String("key"),
+ SSECustomerKeyMD5: aws.String("MD5"),
+ CopySourceSSECustomerKeyMD5: aws.String("MD5"),
+ })
+ err := req.Build()
+
+ assert.NoError(t, err)
+ assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key"))
+ assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key"))
+ assert.Equal(t, "MD5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key-md5"))
+ assert.Equal(t, "MD5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key-md5"))
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/statusok_error_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/statusok_error_test.go
new file mode 100644
index 000000000..f508cd153
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/statusok_error_test.go
@@ -0,0 +1,130 @@
+package s3_test
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/awstesting/unit"
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
+const errMsg = `<Error><Code>ErrorCode</Code><Message>message body</Message><RequestId>requestID</RequestId><HostId>hostID=</HostId></Error>`
+
+var lastModifiedTime = time.Date(2009, 11, 23, 0, 0, 0, 0, time.UTC)
+
+func TestCopyObjectNoError(t *testing.T) {
+ const successMsg = `
+<?xml version="1.0" encoding="UTF-8"?>
+<CopyObjectResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><LastModified>2009-11-23T0:00:00Z</LastModified><ETag>"1da64c7f13d1e8dbeaea40b905fd586c"</ETag></CopyObjectResult>`
+
+ res, err := newCopyTestSvc(successMsg).CopyObject(&s3.CopyObjectInput{
+ Bucket: aws.String("bucketname"),
+ CopySource: aws.String("bucketname/exists.txt"),
+ Key: aws.String("destination.txt"),
+ })
+
+ require.NoError(t, err)
+
+ assert.Equal(t, fmt.Sprintf(`%q`, "1da64c7f13d1e8dbeaea40b905fd586c"), *res.CopyObjectResult.ETag)
+ assert.Equal(t, lastModifiedTime, *res.CopyObjectResult.LastModified)
+}
+
+func TestCopyObjectError(t *testing.T) {
+ _, err := newCopyTestSvc(errMsg).CopyObject(&s3.CopyObjectInput{
+ Bucket: aws.String("bucketname"),
+ CopySource: aws.String("bucketname/doesnotexist.txt"),
+ Key: aws.String("destination.txt"),
+ })
+
+ require.Error(t, err)
+ e := err.(awserr.Error)
+
+ assert.Equal(t, "ErrorCode", e.Code())
+ assert.Equal(t, "message body", e.Message())
+}
+
+func TestUploadPartCopySuccess(t *testing.T) {
+ const successMsg = `
+<?xml version="1.0" encoding="UTF-8"?>
+<CopyPartResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><LastModified>2009-11-23T0:00:00Z</LastModified><ETag>"1da64c7f13d1e8dbeaea40b905fd586c"</ETag></CopyPartResult>`
+
+ res, err := newCopyTestSvc(successMsg).UploadPartCopy(&s3.UploadPartCopyInput{
+ Bucket: aws.String("bucketname"),
+ CopySource: aws.String("bucketname/doesnotexist.txt"),
+ Key: aws.String("destination.txt"),
+ PartNumber: aws.Int64(0),
+ UploadId: aws.String("uploadID"),
+ })
+
+ require.NoError(t, err)
+
+ assert.Equal(t, fmt.Sprintf(`%q`, "1da64c7f13d1e8dbeaea40b905fd586c"), *res.CopyPartResult.ETag)
+ assert.Equal(t, lastModifiedTime, *res.CopyPartResult.LastModified)
+}
+
+func TestUploadPartCopyError(t *testing.T) {
+ _, err := newCopyTestSvc(errMsg).UploadPartCopy(&s3.UploadPartCopyInput{
+ Bucket: aws.String("bucketname"),
+ CopySource: aws.String("bucketname/doesnotexist.txt"),
+ Key: aws.String("destination.txt"),
+ PartNumber: aws.Int64(0),
+ UploadId: aws.String("uploadID"),
+ })
+
+ require.Error(t, err)
+ e := err.(awserr.Error)
+
+ assert.Equal(t, "ErrorCode", e.Code())
+ assert.Equal(t, "message body", e.Message())
+}
+
+func TestCompleteMultipartUploadSuccess(t *testing.T) {
+ const successMsg = `
+<?xml version="1.0" encoding="UTF-8"?>
+<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Location>locationName</Location><Bucket>bucketName</Bucket><Key>keyName</Key><ETag>"etagVal"</ETag></CompleteMultipartUploadResult>`
+ res, err := newCopyTestSvc(successMsg).CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+ Bucket: aws.String("bucketname"),
+ Key: aws.String("key"),
+ UploadId: aws.String("uploadID"),
+ })
+
+ require.NoError(t, err)
+
+ assert.Equal(t, `"etagVal"`, *res.ETag)
+ assert.Equal(t, "bucketName", *res.Bucket)
+ assert.Equal(t, "keyName", *res.Key)
+ assert.Equal(t, "locationName", *res.Location)
+}
+
+func TestCompleteMultipartUploadError(t *testing.T) {
+ _, err := newCopyTestSvc(errMsg).CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+ Bucket: aws.String("bucketname"),
+ Key: aws.String("key"),
+ UploadId: aws.String("uploadID"),
+ })
+
+ require.Error(t, err)
+ e := err.(awserr.Error)
+
+ assert.Equal(t, "ErrorCode", e.Code())
+ assert.Equal(t, "message body", e.Message())
+}
+
+func newCopyTestSvc(errMsg string) *s3.S3 {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, errMsg, http.StatusOK)
+ }))
+ return s3.New(unit.Session, aws.NewConfig().
+ WithEndpoint(server.URL).
+ WithDisableSSL(true).
+ WithMaxRetries(0).
+ WithS3ForcePathStyle(true))
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
index ed4505bf3..30470ac11 100644
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
@@ -23,7 +23,7 @@ func unmarshalError(r *request.Request) {
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
r.Error = awserr.New("BucketRegionError",
- fmt.Sprintf("incorrect region, the bucket is not in '%s' region", aws.StringValue(r.Service.Config.Region)), nil)
+ fmt.Sprintf("incorrect region, the bucket is not in '%s' region", aws.StringValue(r.Config.Region)), nil)
return
}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go
new file mode 100644
index 000000000..c4cce13c5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go
@@ -0,0 +1,53 @@
+package s3_test
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/awstesting/unit"
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
+var s3StatusCodeErrorTests = []struct {
+ scode int
+ status string
+ body string
+ code string
+ message string
+}{
+ {301, "Moved Permanently", "", "BucketRegionError", "incorrect region, the bucket is not in 'mock-region' region"},
+ {403, "Forbidden", "", "Forbidden", "Forbidden"},
+ {400, "Bad Request", "", "BadRequest", "Bad Request"},
+ {404, "Not Found", "", "NotFound", "Not Found"},
+ {500, "Internal Error", "", "InternalError", "Internal Error"},
+}
+
+func TestStatusCodeError(t *testing.T) {
+ for _, test := range s3StatusCodeErrorTests {
+ s := s3.New(unit.Session)
+ s.Handlers.Send.Clear()
+ s.Handlers.Send.PushBack(func(r *request.Request) {
+ body := ioutil.NopCloser(bytes.NewReader([]byte(test.body)))
+ r.HTTPResponse = &http.Response{
+ ContentLength: int64(len(test.body)),
+ StatusCode: test.scode,
+ Status: test.status,
+ Body: body,
+ }
+ })
+ _, err := s.PutBucketAcl(&s3.PutBucketAclInput{
+ Bucket: aws.String("bucket"), ACL: aws.String("public-read"),
+ })
+
+ assert.Error(t, err)
+ assert.Equal(t, test.code, err.(awserr.Error).Code())
+ assert.Equal(t, test.message, err.(awserr.Error).Message())
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child_test.go
new file mode 100644
index 000000000..26223ff1c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child_test.go
@@ -0,0 +1,73 @@
+package etcd
+
+import "testing"
+
+func TestAddChild(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("fooDir", true)
+ c.Delete("nonexistentDir", true)
+ }()
+
+ c.CreateDir("fooDir", 5)
+
+ _, err := c.AddChild("fooDir", "v0", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = c.AddChild("fooDir", "v1", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := c.Get("fooDir", true, false)
+ // The child with v0 should precede the child with v1 because it's added
+ // earlier, so it should have a lower key.
+ if !(len(resp.Node.Nodes) == 2 && (resp.Node.Nodes[0].Value == "v0" && resp.Node.Nodes[1].Value == "v1")) {
+ t.Fatalf("AddChild 1 failed. There should be two children whose values are v0 and v1, respectively."+
+ " The response was: %#v", resp)
+ }
+
+ // Creating a child under a nonexistent directory should succeed.
+ // The directory should be created.
+ resp, err = c.AddChild("nonexistentDir", "foo", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestAddChildDir(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("fooDir", true)
+ c.Delete("nonexistentDir", true)
+ }()
+
+ c.CreateDir("fooDir", 5)
+
+ _, err := c.AddChildDir("fooDir", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = c.AddChildDir("fooDir", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := c.Get("fooDir", true, false)
+ // The first child directory should precede the second because it was added
+ // earlier, so it should have a lower key.
+ if !(len(resp.Node.Nodes) == 2 && (len(resp.Node.Nodes[0].Nodes) == 0 && len(resp.Node.Nodes[1].Nodes) == 0)) {
+ t.Fatalf("AddChildDir 1 failed. There should be two empty child directories."+
+ " The response was: %#v", resp)
+ }
+
+ // Creating a child under a nonexistent directory should succeed.
+ // The directory should be created.
+ resp, err = c.AddChildDir("nonexistentDir", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client_test.go
new file mode 100644
index 000000000..4720d8d69
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client_test.go
@@ -0,0 +1,108 @@
+package etcd
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "net/url"
+ "os"
+ "testing"
+)
+
+// To pass this test, we need to create a cluster of 3 machines.
+// The servers should be listening on localhost:4001, 4002, and 4003.
+func TestSync(t *testing.T) {
+ fmt.Println("Make sure there are three nodes at 0.0.0.0:4001-4003")
+
+ // Explicit trailing slash to ensure this doesn't reproduce:
+ // https://github.com/coreos/go-etcd/issues/82
+ c := NewClient([]string{"http://127.0.0.1:4001/"})
+
+ success := c.SyncCluster()
+ if !success {
+ t.Fatal("cannot sync machines")
+ }
+
+ for _, m := range c.GetCluster() {
+ u, err := url.Parse(m)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if u.Scheme != "http" {
+ t.Fatal("scheme must be http")
+ }
+
+ host, _, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if host != "localhost" {
+ t.Fatal("Host must be localhost")
+ }
+ }
+
+ badMachines := []string{"abc", "edef"}
+
+ success = c.SetCluster(badMachines)
+
+ if success {
+ t.Fatal("should not sync on bad machines")
+ }
+
+ goodMachines := []string{"127.0.0.1:4002"}
+
+ success = c.SetCluster(goodMachines)
+
+ if !success {
+ t.Fatal("cannot sync machines")
+ } else {
+ fmt.Println(c.cluster.Machines)
+ }
+
+}
+
+func TestPersistence(t *testing.T) {
+ c := NewClient(nil)
+ c.SyncCluster()
+
+ fo, err := os.Create("config.json")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := fo.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ c.SetPersistence(fo)
+ err = c.saveConfig()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ c2, err := NewClientFromFile("config.json")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Verify that the two clients have the same config
+ b1, _ := json.Marshal(c)
+ b2, _ := json.Marshal(c2)
+
+ if string(b1) != string(b2) {
+ t.Fatalf("The two configs should be equal!")
+ }
+}
+
+func TestClientRetry(t *testing.T) {
+ c := NewClient([]string{"http://strange", "http://127.0.0.1:4001"})
+ // use first endpoint as the picked url
+ c.cluster.picked = 0
+ if _, err := c.Set("foo", "bar", 5); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := c.Delete("foo", true); err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go
new file mode 100644
index 000000000..223e50f29
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go
@@ -0,0 +1,46 @@
+package etcd
+
+import (
+ "testing"
+)
+
+func TestCompareAndDelete(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("foo", true)
+ }()
+
+ c.Set("foo", "bar", 5)
+
+ // This should succeed with a correct prevValue
+ resp, err := c.CompareAndDelete("foo", "bar", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+ t.Fatalf("CompareAndDelete 1 prevNode failed: %#v", resp)
+ }
+
+ resp, _ = c.Set("foo", "bar", 5)
+ // This should fail because it gives an incorrect prevValue
+ _, err = c.CompareAndDelete("foo", "xxx", 0)
+ if err == nil {
+ t.Fatalf("CompareAndDelete 2 should have failed. The response is: %#v", resp)
+ }
+
+ // This should succeed because it gives a correct prevIndex
+ resp, err = c.CompareAndDelete("foo", "", resp.Node.ModifiedIndex)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+ t.Fatalf("CompareAndDelete 3 prevNode failed: %#v", resp)
+ }
+
+ c.Set("foo", "bar", 5)
+ // This should fail because it gives an incorrect prevIndex
+ resp, err = c.CompareAndDelete("foo", "", 29817514)
+ if err == nil {
+ t.Fatalf("CompareAndDelete 4 should have failed. The response is: %#v", resp)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go
new file mode 100644
index 000000000..14a1b00f5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go
@@ -0,0 +1,57 @@
+package etcd
+
+import (
+ "testing"
+)
+
+func TestCompareAndSwap(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("foo", true)
+ }()
+
+ c.Set("foo", "bar", 5)
+
+ // This should succeed
+ resp, err := c.CompareAndSwap("foo", "bar2", 5, "bar", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
+ t.Fatalf("CompareAndSwap 1 failed: %#v", resp)
+ }
+
+ if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+ t.Fatalf("CompareAndSwap 1 prevNode failed: %#v", resp)
+ }
+
+ // This should fail because it gives an incorrect prevValue
+ resp, err = c.CompareAndSwap("foo", "bar3", 5, "xxx", 0)
+ if err == nil {
+ t.Fatalf("CompareAndSwap 2 should have failed. The response is: %#v", resp)
+ }
+
+ resp, err = c.Set("foo", "bar", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // This should succeed
+ resp, err = c.CompareAndSwap("foo", "bar2", 5, "", resp.Node.ModifiedIndex)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
+ t.Fatalf("CompareAndSwap 3 failed: %#v", resp)
+ }
+
+ if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+ t.Fatalf("CompareAndSwap 3 prevNode failed: %#v", resp)
+ }
+
+ // This should fail because it gives an incorrect prevIndex
+ resp, err = c.CompareAndSwap("foo", "bar3", 5, "", 29817514)
+ if err == nil {
+ t.Fatalf("CompareAndSwap 4 should have failed. The response is: %#v", resp)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug_test.go
new file mode 100644
index 000000000..97f6d1110
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug_test.go
@@ -0,0 +1,28 @@
+package etcd
+
+import (
+ "testing"
+)
+
+type Foo struct{}
+type Bar struct {
+ one string
+ two int
+}
+
+// Tests that logs don't panic with arbitrary interfaces
+func TestDebug(t *testing.T) {
+ f := &Foo{}
+ b := &Bar{"asfd", 3}
+ for _, test := range []interface{}{
+ 1234,
+ "asdf",
+ f,
+ b,
+ } {
+ logger.Debug(test)
+ logger.Debugf("something, %s", test)
+ logger.Warning(test)
+ logger.Warningf("something, %s", test)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete_test.go
new file mode 100644
index 000000000..590497155
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete_test.go
@@ -0,0 +1,81 @@
+package etcd
+
+import (
+ "testing"
+)
+
+func TestDelete(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("foo", true)
+ }()
+
+ c.Set("foo", "bar", 5)
+ resp, err := c.Delete("foo", false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !(resp.Node.Value == "") {
+ t.Fatalf("Delete failed with %s", resp.Node.Value)
+ }
+
+ if !(resp.PrevNode.Value == "bar") {
+ t.Fatalf("Delete PrevNode failed with %s", resp.Node.Value)
+ }
+
+ resp, err = c.Delete("foo", false)
+ if err == nil {
+ t.Fatalf("Delete should have failed because the key foo did not exist. "+
+ "The response was: %v", resp)
+ }
+}
+
+func TestDeleteAll(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("foo", true)
+ c.Delete("fooDir", true)
+ }()
+
+ c.SetDir("foo", 5)
+ // test delete an empty dir
+ resp, err := c.DeleteDir("foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !(resp.Node.Value == "") {
+ t.Fatalf("DeleteAll 1 failed: %#v", resp)
+ }
+
+ if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") {
+ t.Fatalf("DeleteAll 1 PrevNode failed: %#v", resp)
+ }
+
+ c.CreateDir("fooDir", 5)
+ c.Set("fooDir/foo", "bar", 5)
+ _, err = c.DeleteDir("fooDir")
+ if err == nil {
+ t.Fatal("should not be able to delete a non-empty dir with DeleteDir")
+ }
+
+ resp, err = c.Delete("fooDir", true)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !(resp.Node.Value == "") {
+ t.Fatalf("DeleteAll 2 failed: %#v", resp)
+ }
+
+ if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") {
+ t.Fatalf("DeleteAll 2 PrevNode failed: %#v", resp)
+ }
+
+ resp, err = c.Delete("foo", true)
+ if err == nil {
+ t.Fatalf("DeleteAll should have failed because the key foo did not exist. "+
+ "The response was: %v", resp)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get_test.go
new file mode 100644
index 000000000..279c4e26f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get_test.go
@@ -0,0 +1,131 @@
+package etcd
+
+import (
+ "reflect"
+ "testing"
+)
+
+// cleanNode scrubs Expiration, ModifiedIndex and CreatedIndex of a node.
+func cleanNode(n *Node) {
+ n.Expiration = nil
+ n.ModifiedIndex = 0
+ n.CreatedIndex = 0
+}
+
+// cleanResult scrubs a result object two levels deep of Expiration,
+// ModifiedIndex and CreatedIndex.
+func cleanResult(result *Response) {
+ // TODO(philips): make this recursive.
+ cleanNode(result.Node)
+ for i := range result.Node.Nodes {
+ cleanNode(result.Node.Nodes[i])
+ for j := range result.Node.Nodes[i].Nodes {
+ cleanNode(result.Node.Nodes[i].Nodes[j])
+ }
+ }
+}
+
+func TestGet(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("foo", true)
+ }()
+
+ c.Set("foo", "bar", 5)
+
+ result, err := c.Get("foo", false, false)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if result.Node.Key != "/foo" || result.Node.Value != "bar" {
+ t.Fatalf("Get failed with %s %s %v", result.Node.Key, result.Node.Value, result.Node.TTL)
+ }
+
+ result, err = c.Get("goo", false, false)
+ if err == nil {
+ t.Fatalf("should not be able to get a nonexistent key")
+ }
+}
+
+func TestGetAll(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("fooDir", true)
+ }()
+
+ c.CreateDir("fooDir", 5)
+ c.Set("fooDir/k0", "v0", 5)
+ c.Set("fooDir/k1", "v1", 5)
+
+ // Return kv-pairs in sorted order
+ result, err := c.Get("fooDir", true, false)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expected := Nodes{
+ &Node{
+ Key: "/fooDir/k0",
+ Value: "v0",
+ TTL: 5,
+ },
+ &Node{
+ Key: "/fooDir/k1",
+ Value: "v1",
+ TTL: 5,
+ },
+ }
+
+ cleanResult(result)
+
+ if !reflect.DeepEqual(result.Node.Nodes, expected) {
+ t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected)
+ }
+
+ // Test the `recursive` option
+ c.CreateDir("fooDir/childDir", 5)
+ c.Set("fooDir/childDir/k2", "v2", 5)
+
+ // Return kv-pairs in sorted order
+ result, err = c.Get("fooDir", true, true)
+
+ cleanResult(result)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expected = Nodes{
+ &Node{
+ Key: "/fooDir/childDir",
+ Dir: true,
+ Nodes: Nodes{
+ &Node{
+ Key: "/fooDir/childDir/k2",
+ Value: "v2",
+ TTL: 5,
+ },
+ },
+ TTL: 5,
+ },
+ &Node{
+ Key: "/fooDir/k0",
+ Value: "v0",
+ TTL: 5,
+ },
+ &Node{
+ Key: "/fooDir/k1",
+ Value: "v1",
+ TTL: 5,
+ },
+ }
+
+ cleanResult(result)
+
+ if !reflect.DeepEqual(result.Node.Nodes, expected) {
+ t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/member_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/member_test.go
new file mode 100644
index 000000000..53ebdd4bf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/member_test.go
@@ -0,0 +1,71 @@
+package etcd
+
+import (
+ "encoding/json"
+ "reflect"
+ "testing"
+)
+
+func TestMemberCollectionUnmarshal(t *testing.T) {
+ tests := []struct {
+ body []byte
+ want memberCollection
+ }{
+ {
+ body: []byte(`{"members":[]}`),
+ want: memberCollection([]Member{}),
+ },
+ {
+ body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
+ want: memberCollection(
+ []Member{
+ {
+ ID: "2745e2525fce8fe",
+ Name: "node3",
+ PeerURLs: []string{
+ "http://127.0.0.1:7003",
+ },
+ ClientURLs: []string{
+ "http://127.0.0.1:4003",
+ },
+ },
+ {
+ ID: "42134f434382925",
+ Name: "node1",
+ PeerURLs: []string{
+ "http://127.0.0.1:2380",
+ "http://127.0.0.1:7001",
+ },
+ ClientURLs: []string{
+ "http://127.0.0.1:2379",
+ "http://127.0.0.1:4001",
+ },
+ },
+ {
+ ID: "94088180e21eb87b",
+ Name: "node2",
+ PeerURLs: []string{
+ "http://127.0.0.1:7002",
+ },
+ ClientURLs: []string{
+ "http://127.0.0.1:4002",
+ },
+ },
+ },
+ ),
+ },
+ }
+
+ for i, tt := range tests {
+ var got memberCollection
+ err := json.Unmarshal(tt.body, &got)
+ if err != nil {
+ t.Errorf("#%d: unexpected error: %v", i, err)
+ continue
+ }
+
+ if !reflect.DeepEqual(tt.want, got) {
+ t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.want, got)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests_test.go
new file mode 100644
index 000000000..7a2bd190a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests_test.go
@@ -0,0 +1,22 @@
+package etcd
+
+import "testing"
+
+func TestKeyToPath(t *testing.T) {
+ tests := []struct {
+ key string
+ wpath string
+ }{
+ {"", "keys/"},
+ {"foo", "keys/foo"},
+ {"foo/bar", "keys/foo/bar"},
+ {"%z", "keys/%25z"},
+ {"/", "keys/"},
+ }
+ for i, tt := range tests {
+ path := keyToPath(tt.key)
+ if path != tt.wpath {
+ t.Errorf("#%d: path = %s, want %s", i, path, tt.wpath)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/response.generated.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/response.generated.go
index 0701dc09d..95d2cd99d 100644
--- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/response.generated.go
+++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/response.generated.go
@@ -16,10 +16,18 @@ import (
)
const (
- codecSelferC_UTF81978 = 1
- codecSelferC_RAW1978 = 0
+ // ----- content types ----
+ codecSelferC_UTF81978 = 1
+ codecSelferC_RAW1978 = 0
+ // ----- value types used ----
codecSelferValueTypeArray1978 = 10
codecSelferValueTypeMap1978 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1978 = 2
+ codecSelfer_containerMapValue1978 = 3
+ codecSelfer_containerMapEnd1978 = 4
+ codecSelfer_containerArrayElem1978 = 6
+ codecSelfer_containerArrayEnd1978 = 7
)
var (
@@ -30,10 +38,10 @@ var (
type codecSelfer1978 struct{}
func init() {
- if codec1978.GenVersion != 4 {
+ if codec1978.GenVersion != 5 {
_, file, _, _ := runtime.Caller(0)
err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
- 4, codec1978.GenVersion, file)
+ 5, codec1978.GenVersion, file)
panic(err)
}
if false { // reference the types, but skip this branch at build/run time
@@ -86,18 +94,21 @@ func (x *RawResponse) CodecEncodeSelf(e *codec1978.Encoder) {
var yyq4 [3]bool
_, _, _ = yysep4, yyq4, yy2arr4
const yyr4 bool = false
+ var yynn4 int
if yyr4 || yy2arr4 {
r.EncodeArrayStart(3)
} else {
- var yynn4 int = 3
+ yynn4 = 3
for _, b := range yyq4 {
if b {
yynn4++
}
}
r.EncodeMapStart(yynn4)
+ yynn4 = 0
}
if yyr4 || yy2arr4 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
yym6 := z.EncBinary()
_ = yym6
if false {
@@ -105,7 +116,9 @@ func (x *RawResponse) CodecEncodeSelf(e *codec1978.Encoder) {
r.EncodeInt(int64(x.StatusCode))
}
} else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
r.EncodeString(codecSelferC_UTF81978, string("StatusCode"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
yym7 := z.EncBinary()
_ = yym7
if false {
@@ -114,6 +127,7 @@ func (x *RawResponse) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
if yyr4 || yy2arr4 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
if x.Body == nil {
r.EncodeNil()
} else {
@@ -125,7 +139,9 @@ func (x *RawResponse) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
} else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
r.EncodeString(codecSelferC_UTF81978, string("Body"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
if x.Body == nil {
r.EncodeNil()
} else {
@@ -138,6 +154,7 @@ func (x *RawResponse) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
if yyr4 || yy2arr4 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
if x.Header == nil {
r.EncodeNil()
} else {
@@ -150,7 +167,9 @@ func (x *RawResponse) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
} else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
r.EncodeString(codecSelferC_UTF81978, string("Header"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
if x.Header == nil {
r.EncodeNil()
} else {
@@ -163,8 +182,10 @@ func (x *RawResponse) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
}
- if yysep4 {
- r.EncodeEnd()
+ if yyr4 || yy2arr4 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1978)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1978)
}
}
}
@@ -179,17 +200,18 @@ func (x *RawResponse) CodecDecodeSelf(d *codec1978.Decoder) {
if false {
} else if z.HasExtensions() && z.DecExt(x) {
} else {
- if r.IsContainerType(codecSelferValueTypeMap1978) {
+ yyct15 := r.ContainerType()
+ if yyct15 == codecSelferValueTypeMap1978 {
yyl15 := r.ReadMapStart()
if yyl15 == 0 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerMapEnd1978)
} else {
x.codecDecodeSelfFromMap(yyl15, d)
}
- } else if r.IsContainerType(codecSelferValueTypeArray1978) {
+ } else if yyct15 == codecSelferValueTypeArray1978 {
yyl15 := r.ReadArrayStart()
if yyl15 == 0 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
} else {
x.codecDecodeSelfFromArray(yyl15, d)
}
@@ -216,8 +238,10 @@ func (x *RawResponse) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
break
}
}
+ z.DecSendContainerState(codecSelfer_containerMapKey1978)
yys16Slc = r.DecodeBytes(yys16Slc, true, true)
yys16 := string(yys16Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1978)
switch yys16 {
case "StatusCode":
if r.TryDecodeAsNil() {
@@ -254,9 +278,7 @@ func (x *RawResponse) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
z.DecStructFieldNotFound(-1, yys16)
} // end switch yys16
} // end for yyj16
- if !yyhl16 {
- r.ReadEnd()
- }
+ z.DecSendContainerState(codecSelfer_containerMapEnd1978)
}
func (x *RawResponse) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
@@ -273,9 +295,10 @@ func (x *RawResponse) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
yyb22 = r.CheckBreak()
}
if yyb22 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
return
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
if r.TryDecodeAsNil() {
x.StatusCode = 0
} else {
@@ -288,9 +311,10 @@ func (x *RawResponse) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
yyb22 = r.CheckBreak()
}
if yyb22 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
return
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
if r.TryDecodeAsNil() {
x.Body = nil
} else {
@@ -309,9 +333,10 @@ func (x *RawResponse) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
yyb22 = r.CheckBreak()
}
if yyb22 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
return
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
if r.TryDecodeAsNil() {
x.Header = nil
} else {
@@ -334,9 +359,10 @@ func (x *RawResponse) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if yyb22 {
break
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
z.DecStructFieldNotFound(yyj22-1, "")
}
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
}
func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
@@ -357,18 +383,21 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
_, _, _ = yysep29, yyq29, yy2arr29
const yyr29 bool = false
yyq29[2] = x.PrevNode != nil
+ var yynn29 int
if yyr29 || yy2arr29 {
r.EncodeArrayStart(6)
} else {
- var yynn29 int = 5
+ yynn29 = 5
for _, b := range yyq29 {
if b {
yynn29++
}
}
r.EncodeMapStart(yynn29)
+ yynn29 = 0
}
if yyr29 || yy2arr29 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
yym31 := z.EncBinary()
_ = yym31
if false {
@@ -376,7 +405,9 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
r.EncodeString(codecSelferC_UTF81978, string(x.Action))
}
} else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
r.EncodeString(codecSelferC_UTF81978, string("action"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
yym32 := z.EncBinary()
_ = yym32
if false {
@@ -385,13 +416,16 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
if yyr29 || yy2arr29 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
if x.Node == nil {
r.EncodeNil()
} else {
x.Node.CodecEncodeSelf(e)
}
} else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
r.EncodeString(codecSelferC_UTF81978, string("node"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
if x.Node == nil {
r.EncodeNil()
} else {
@@ -399,6 +433,7 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
if yyr29 || yy2arr29 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
if yyq29[2] {
if x.PrevNode == nil {
r.EncodeNil()
@@ -410,7 +445,9 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
}
} else {
if yyq29[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
r.EncodeString(codecSelferC_UTF81978, string("prevNode"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
if x.PrevNode == nil {
r.EncodeNil()
} else {
@@ -419,6 +456,7 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
if yyr29 || yy2arr29 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
yym36 := z.EncBinary()
_ = yym36
if false {
@@ -426,7 +464,9 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
r.EncodeUint(uint64(x.EtcdIndex))
}
} else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
r.EncodeString(codecSelferC_UTF81978, string("etcdIndex"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
yym37 := z.EncBinary()
_ = yym37
if false {
@@ -435,6 +475,7 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
if yyr29 || yy2arr29 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
yym39 := z.EncBinary()
_ = yym39
if false {
@@ -442,7 +483,9 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
r.EncodeUint(uint64(x.RaftIndex))
}
} else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
r.EncodeString(codecSelferC_UTF81978, string("raftIndex"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
yym40 := z.EncBinary()
_ = yym40
if false {
@@ -451,6 +494,7 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
if yyr29 || yy2arr29 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
yym42 := z.EncBinary()
_ = yym42
if false {
@@ -458,7 +502,9 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
r.EncodeUint(uint64(x.RaftTerm))
}
} else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
r.EncodeString(codecSelferC_UTF81978, string("raftTerm"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
yym43 := z.EncBinary()
_ = yym43
if false {
@@ -466,8 +512,10 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
r.EncodeUint(uint64(x.RaftTerm))
}
}
- if yysep29 {
- r.EncodeEnd()
+ if yyr29 || yy2arr29 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1978)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1978)
}
}
}
@@ -482,17 +530,18 @@ func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) {
if false {
} else if z.HasExtensions() && z.DecExt(x) {
} else {
- if r.IsContainerType(codecSelferValueTypeMap1978) {
+ yyct45 := r.ContainerType()
+ if yyct45 == codecSelferValueTypeMap1978 {
yyl45 := r.ReadMapStart()
if yyl45 == 0 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerMapEnd1978)
} else {
x.codecDecodeSelfFromMap(yyl45, d)
}
- } else if r.IsContainerType(codecSelferValueTypeArray1978) {
+ } else if yyct45 == codecSelferValueTypeArray1978 {
yyl45 := r.ReadArrayStart()
if yyl45 == 0 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
} else {
x.codecDecodeSelfFromArray(yyl45, d)
}
@@ -519,8 +568,10 @@ func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
break
}
}
+ z.DecSendContainerState(codecSelfer_containerMapKey1978)
yys46Slc = r.DecodeBytes(yys46Slc, true, true)
yys46 := string(yys46Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1978)
switch yys46 {
case "action":
if r.TryDecodeAsNil() {
@@ -572,9 +623,7 @@ func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
z.DecStructFieldNotFound(-1, yys46)
} // end switch yys46
} // end for yyj46
- if !yyhl46 {
- r.ReadEnd()
- }
+ z.DecSendContainerState(codecSelfer_containerMapEnd1978)
}
func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
@@ -591,9 +640,10 @@ func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
yyb53 = r.CheckBreak()
}
if yyb53 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
return
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
if r.TryDecodeAsNil() {
x.Action = ""
} else {
@@ -606,9 +656,10 @@ func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
yyb53 = r.CheckBreak()
}
if yyb53 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
return
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
if r.TryDecodeAsNil() {
if x.Node != nil {
x.Node = nil
@@ -626,9 +677,10 @@ func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
yyb53 = r.CheckBreak()
}
if yyb53 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
return
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
if r.TryDecodeAsNil() {
if x.PrevNode != nil {
x.PrevNode = nil
@@ -646,9 +698,10 @@ func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
yyb53 = r.CheckBreak()
}
if yyb53 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
return
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
if r.TryDecodeAsNil() {
x.EtcdIndex = 0
} else {
@@ -661,9 +714,10 @@ func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
yyb53 = r.CheckBreak()
}
if yyb53 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
return
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
if r.TryDecodeAsNil() {
x.RaftIndex = 0
} else {
@@ -676,9 +730,10 @@ func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
yyb53 = r.CheckBreak()
}
if yyb53 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
return
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
if r.TryDecodeAsNil() {
x.RaftTerm = 0
} else {
@@ -694,9 +749,10 @@ func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if yyb53 {
break
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
z.DecStructFieldNotFound(yyj53-1, "")
}
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
}
func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
@@ -723,18 +779,21 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
yyq61[5] = len(x.Nodes) != 0
yyq61[6] = x.ModifiedIndex != 0
yyq61[7] = x.CreatedIndex != 0
+ var yynn61 int
if yyr61 || yy2arr61 {
r.EncodeArrayStart(8)
} else {
- var yynn61 int = 1
+ yynn61 = 1
for _, b := range yyq61 {
if b {
yynn61++
}
}
r.EncodeMapStart(yynn61)
+ yynn61 = 0
}
if yyr61 || yy2arr61 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
yym63 := z.EncBinary()
_ = yym63
if false {
@@ -742,7 +801,9 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
r.EncodeString(codecSelferC_UTF81978, string(x.Key))
}
} else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
r.EncodeString(codecSelferC_UTF81978, string("key"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
yym64 := z.EncBinary()
_ = yym64
if false {
@@ -751,6 +812,7 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
if yyr61 || yy2arr61 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
if yyq61[1] {
yym66 := z.EncBinary()
_ = yym66
@@ -763,7 +825,9 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
}
} else {
if yyq61[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
r.EncodeString(codecSelferC_UTF81978, string("value"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
yym67 := z.EncBinary()
_ = yym67
if false {
@@ -773,6 +837,7 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
if yyr61 || yy2arr61 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
if yyq61[2] {
yym69 := z.EncBinary()
_ = yym69
@@ -785,7 +850,9 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
}
} else {
if yyq61[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
r.EncodeString(codecSelferC_UTF81978, string("dir"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
yym70 := z.EncBinary()
_ = yym70
if false {
@@ -795,6 +862,7 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
if yyr61 || yy2arr61 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
if yyq61[3] {
if x.Expiration == nil {
r.EncodeNil()
@@ -818,7 +886,9 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
}
} else {
if yyq61[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
r.EncodeString(codecSelferC_UTF81978, string("expiration"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
if x.Expiration == nil {
r.EncodeNil()
} else {
@@ -839,6 +909,7 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
if yyr61 || yy2arr61 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
if yyq61[4] {
yym77 := z.EncBinary()
_ = yym77
@@ -851,7 +922,9 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
}
} else {
if yyq61[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
r.EncodeString(codecSelferC_UTF81978, string("ttl"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
yym78 := z.EncBinary()
_ = yym78
if false {
@@ -861,6 +934,7 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
if yyr61 || yy2arr61 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
if yyq61[5] {
if x.Nodes == nil {
r.EncodeNil()
@@ -872,7 +946,9 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
}
} else {
if yyq61[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
r.EncodeString(codecSelferC_UTF81978, string("nodes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
if x.Nodes == nil {
r.EncodeNil()
} else {
@@ -881,6 +957,7 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
if yyr61 || yy2arr61 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
if yyq61[6] {
yym81 := z.EncBinary()
_ = yym81
@@ -893,7 +970,9 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
}
} else {
if yyq61[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
r.EncodeString(codecSelferC_UTF81978, string("modifiedIndex"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
yym82 := z.EncBinary()
_ = yym82
if false {
@@ -903,6 +982,7 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
if yyr61 || yy2arr61 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
if yyq61[7] {
yym84 := z.EncBinary()
_ = yym84
@@ -915,7 +995,9 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
}
} else {
if yyq61[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
r.EncodeString(codecSelferC_UTF81978, string("createdIndex"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
yym85 := z.EncBinary()
_ = yym85
if false {
@@ -924,8 +1006,10 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
}
- if yysep61 {
- r.EncodeEnd()
+ if yyr61 || yy2arr61 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1978)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1978)
}
}
}
@@ -940,17 +1024,18 @@ func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) {
if false {
} else if z.HasExtensions() && z.DecExt(x) {
} else {
- if r.IsContainerType(codecSelferValueTypeMap1978) {
+ yyct87 := r.ContainerType()
+ if yyct87 == codecSelferValueTypeMap1978 {
yyl87 := r.ReadMapStart()
if yyl87 == 0 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerMapEnd1978)
} else {
x.codecDecodeSelfFromMap(yyl87, d)
}
- } else if r.IsContainerType(codecSelferValueTypeArray1978) {
+ } else if yyct87 == codecSelferValueTypeArray1978 {
yyl87 := r.ReadArrayStart()
if yyl87 == 0 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
} else {
x.codecDecodeSelfFromArray(yyl87, d)
}
@@ -977,8 +1062,10 @@ func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
break
}
}
+ z.DecSendContainerState(codecSelfer_containerMapKey1978)
yys88Slc = r.DecodeBytes(yys88Slc, true, true)
yys88 := string(yys88Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1978)
switch yys88 {
case "key":
if r.TryDecodeAsNil() {
@@ -1050,9 +1137,7 @@ func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
z.DecStructFieldNotFound(-1, yys88)
} // end switch yys88
} // end for yyj88
- if !yyhl88 {
- r.ReadEnd()
- }
+ z.DecSendContainerState(codecSelfer_containerMapEnd1978)
}
func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
@@ -1069,9 +1154,10 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
yyb99 = r.CheckBreak()
}
if yyb99 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
return
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
if r.TryDecodeAsNil() {
x.Key = ""
} else {
@@ -1084,9 +1170,10 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
yyb99 = r.CheckBreak()
}
if yyb99 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
return
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
if r.TryDecodeAsNil() {
x.Value = ""
} else {
@@ -1099,9 +1186,10 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
yyb99 = r.CheckBreak()
}
if yyb99 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
return
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
if r.TryDecodeAsNil() {
x.Dir = false
} else {
@@ -1114,9 +1202,10 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
yyb99 = r.CheckBreak()
}
if yyb99 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
return
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
if r.TryDecodeAsNil() {
if x.Expiration != nil {
x.Expiration = nil
@@ -1146,9 +1235,10 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
yyb99 = r.CheckBreak()
}
if yyb99 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
return
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
if r.TryDecodeAsNil() {
x.TTL = 0
} else {
@@ -1161,9 +1251,10 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
yyb99 = r.CheckBreak()
}
if yyb99 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
return
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
if r.TryDecodeAsNil() {
x.Nodes = nil
} else {
@@ -1177,9 +1268,10 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
yyb99 = r.CheckBreak()
}
if yyb99 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
return
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
if r.TryDecodeAsNil() {
x.ModifiedIndex = 0
} else {
@@ -1192,9 +1284,10 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
yyb99 = r.CheckBreak()
}
if yyb99 {
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
return
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
if r.TryDecodeAsNil() {
x.CreatedIndex = 0
} else {
@@ -1210,9 +1303,10 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if yyb99 {
break
}
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
z.DecStructFieldNotFound(yyj99-1, "")
}
- r.ReadEnd()
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
}
func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) {
@@ -1251,12 +1345,14 @@ func (x codecSelfer1978) enchttp_Header(v pkg1_http.Header, e *codec1978.Encoder
_, _, _ = h, z, r
r.EncodeMapStart(len(v))
for yyk112, yyv112 := range v {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
yym113 := z.EncBinary()
_ = yym113
if false {
} else {
r.EncodeString(codecSelferC_UTF81978, string(yyk112))
}
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
if yyv112 == nil {
r.EncodeNil()
} else {
@@ -1268,7 +1364,7 @@ func (x codecSelfer1978) enchttp_Header(v pkg1_http.Header, e *codec1978.Encoder
}
}
}
- r.EncodeEnd()
+ z.EncSendContainerState(codecSelfer_containerMapEnd1978)
}
func (x codecSelfer1978) dechttp_Header(v *pkg1_http.Header, d *codec1978.Decoder) {
@@ -1278,21 +1374,33 @@ func (x codecSelfer1978) dechttp_Header(v *pkg1_http.Header, d *codec1978.Decode
yyv115 := *v
yyl115 := r.ReadMapStart()
+ yybh115 := z.DecBasicHandle()
if yyv115 == nil {
- yyrl115, _ := z.DecInferLen(yyl115, z.DecBasicHandle().MaxInitLen, 40)
+ yyrl115, _ := z.DecInferLen(yyl115, yybh115.MaxInitLen, 40)
yyv115 = make(map[string][]string, yyrl115)
*v = yyv115
}
+ var yymk115 string
+ var yymv115 []string
+ var yymg115 bool
+ if yybh115.MapValueReset {
+ yymg115 = true
+ }
if yyl115 > 0 {
for yyj115 := 0; yyj115 < yyl115; yyj115++ {
- var yymk115 string
+ z.DecSendContainerState(codecSelfer_containerMapKey1978)
if r.TryDecodeAsNil() {
yymk115 = ""
} else {
yymk115 = string(r.DecodeString())
}
- yymv115 := yyv115[yymk115]
+ if yymg115 {
+ yymv115 = yyv115[yymk115]
+ } else {
+ yymv115 = nil
+ }
+ z.DecSendContainerState(codecSelfer_containerMapValue1978)
if r.TryDecodeAsNil() {
yymv115 = nil
} else {
@@ -1311,14 +1419,19 @@ func (x codecSelfer1978) dechttp_Header(v *pkg1_http.Header, d *codec1978.Decode
}
} else if yyl115 < 0 {
for yyj115 := 0; !r.CheckBreak(); yyj115++ {
- var yymk115 string
+ z.DecSendContainerState(codecSelfer_containerMapKey1978)
if r.TryDecodeAsNil() {
yymk115 = ""
} else {
yymk115 = string(r.DecodeString())
}
- yymv115 := yyv115[yymk115]
+ if yymg115 {
+ yymv115 = yyv115[yymk115]
+ } else {
+ yymv115 = nil
+ }
+ z.DecSendContainerState(codecSelfer_containerMapValue1978)
if r.TryDecodeAsNil() {
yymv115 = nil
} else {
@@ -1335,8 +1448,8 @@ func (x codecSelfer1978) dechttp_Header(v *pkg1_http.Header, d *codec1978.Decode
yyv115[yymk115] = yymv115
}
}
- r.ReadEnd()
} // else len==0: TODO: Should we clear map entries?
+ z.DecSendContainerState(codecSelfer_containerMapEnd1978)
}
func (x codecSelfer1978) encNodes(v Nodes, e *codec1978.Encoder) {
@@ -1345,13 +1458,14 @@ func (x codecSelfer1978) encNodes(v Nodes, e *codec1978.Encoder) {
_, _, _ = h, z, r
r.EncodeArrayStart(len(v))
for _, yyv122 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
if yyv122 == nil {
r.EncodeNil()
} else {
yyv122.CodecEncodeSelf(e)
}
}
- r.EncodeEnd()
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1978)
}
func (x codecSelfer1978) decNodes(v *Nodes, d *codec1978.Decoder) {
@@ -1361,39 +1475,44 @@ func (x codecSelfer1978) decNodes(v *Nodes, d *codec1978.Decoder) {
yyv123 := *v
yyh123, yyl123 := z.DecSliceHelperStart()
-
- var yyrr123, yyrl123 int
- var yyc123, yyrt123 bool
- _, _, _ = yyc123, yyrt123, yyrl123
- yyrr123 = yyl123
-
- if yyv123 == nil {
- if yyrl123, yyrt123 = z.DecInferLen(yyl123, z.DecBasicHandle().MaxInitLen, 8); yyrt123 {
- yyrr123 = yyrl123
- }
- yyv123 = make(Nodes, yyrl123)
- yyc123 = true
- }
-
+ var yyc123 bool
if yyl123 == 0 {
- if len(yyv123) != 0 {
+ if yyv123 == nil {
+ yyv123 = []*Node{}
+ yyc123 = true
+ } else if len(yyv123) != 0 {
yyv123 = yyv123[:0]
yyc123 = true
}
} else if yyl123 > 0 {
-
+ var yyrr123, yyrl123 int
+ var yyrt123 bool
if yyl123 > cap(yyv123) {
- yyrl123, yyrt123 = z.DecInferLen(yyl123, z.DecBasicHandle().MaxInitLen, 8)
- yyv123 = make([]*Node, yyrl123)
- yyc123 = true
+ yyrg123 := len(yyv123) > 0
+ yyv2123 := yyv123
+ yyrl123, yyrt123 = z.DecInferLen(yyl123, z.DecBasicHandle().MaxInitLen, 8)
+ if yyrt123 {
+ if yyrl123 <= cap(yyv123) {
+ yyv123 = yyv123[:yyrl123]
+ } else {
+ yyv123 = make([]*Node, yyrl123)
+ }
+ } else {
+ yyv123 = make([]*Node, yyrl123)
+ }
+ yyc123 = true
yyrr123 = len(yyv123)
+ if yyrg123 {
+ copy(yyv123, yyv2123)
+ }
} else if yyl123 != len(yyv123) {
yyv123 = yyv123[:yyl123]
yyc123 = true
}
yyj123 := 0
for ; yyj123 < yyrr123; yyj123++ {
+ yyh123.ElemContainerState(yyj123)
if r.TryDecodeAsNil() {
if yyv123[yyj123] != nil {
*yyv123[yyj123] = Node{}
@@ -1410,6 +1529,7 @@ func (x codecSelfer1978) decNodes(v *Nodes, d *codec1978.Decoder) {
if yyrt123 {
for ; yyj123 < yyl123; yyj123++ {
yyv123 = append(yyv123, nil)
+ yyh123.ElemContainerState(yyj123)
if r.TryDecodeAsNil() {
if yyv123[yyj123] != nil {
*yyv123[yyj123] = Node{}
@@ -1426,12 +1546,14 @@ func (x codecSelfer1978) decNodes(v *Nodes, d *codec1978.Decoder) {
}
} else {
- for yyj123 := 0; !r.CheckBreak(); yyj123++ {
+ yyj123 := 0
+ for ; !r.CheckBreak(); yyj123++ {
+
if yyj123 >= len(yyv123) {
yyv123 = append(yyv123, nil) // var yyz123 *Node
yyc123 = true
}
-
+ yyh123.ElemContainerState(yyj123)
if yyj123 < len(yyv123) {
if r.TryDecodeAsNil() {
if yyv123[yyj123] != nil {
@@ -1450,10 +1572,16 @@ func (x codecSelfer1978) decNodes(v *Nodes, d *codec1978.Decoder) {
}
}
- yyh123.End()
+ if yyj123 < len(yyv123) {
+ yyv123 = yyv123[:yyj123]
+ yyc123 = true
+ } else if yyj123 == 0 && yyv123 == nil {
+ yyv123 = []*Node{}
+ yyc123 = true
+ }
}
+ yyh123.End()
if yyc123 {
*v = yyv123
}
-
}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/response_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/response_test.go
new file mode 100644
index 000000000..23e0c56eb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/response_test.go
@@ -0,0 +1,75 @@
+package etcd
+
+import (
+ "net/http"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/ugorji/go/codec"
+)
+
+func createTestNode(size int) *Node {
+ return &Node{
+ Key: strings.Repeat("a", 30),
+ Value: strings.Repeat("a", size),
+ TTL: 123456789,
+ ModifiedIndex: 123456,
+ CreatedIndex: 123456,
+ }
+}
+
+func createTestNodeWithChildren(children, size int) *Node {
+ node := createTestNode(size)
+ for i := 0; i < children; i++ {
+ node.Nodes = append(node.Nodes, createTestNode(size))
+ }
+ return node
+}
+
+func createTestResponse(children, size int) *Response {
+ return &Response{
+ Action: "aaaaa",
+ Node: createTestNodeWithChildren(children, size),
+ PrevNode: nil,
+ EtcdIndex: 123456,
+ RaftIndex: 123456,
+ RaftTerm: 123456,
+ }
+}
+
+func benchmarkResponseUnmarshalling(b *testing.B, children, size int) {
+ response := createTestResponse(children, size)
+
+ rr := RawResponse{http.StatusOK, make([]byte, 0), http.Header{}}
+ codec.NewEncoderBytes(&rr.Body, new(codec.JsonHandle)).Encode(response)
+
+ b.ResetTimer()
+ newResponse := new(Response)
+ var err error
+ for i := 0; i < b.N; i++ {
+ if newResponse, err = rr.Unmarshal(); err != nil {
+ b.Errorf("Error: %v", err)
+ }
+
+ }
+ if !reflect.DeepEqual(response.Node, newResponse.Node) {
+ b.Errorf("Unexpected difference in a parsed response: \n%+v\n%+v", response, newResponse)
+ }
+}
+
+func BenchmarkSmallResponseUnmarshal(b *testing.B) {
+ benchmarkResponseUnmarshalling(b, 30, 20)
+}
+
+func BenchmarkManySmallResponseUnmarshal(b *testing.B) {
+ benchmarkResponseUnmarshalling(b, 3000, 20)
+}
+
+func BenchmarkMediumResponseUnmarshal(b *testing.B) {
+ benchmarkResponseUnmarshalling(b, 300, 200)
+}
+
+func BenchmarkLargeResponseUnmarshal(b *testing.B) {
+ benchmarkResponseUnmarshalling(b, 3000, 2000)
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go
new file mode 100644
index 000000000..87c86b830
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go
@@ -0,0 +1,42 @@
+package etcd
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestSetCurlChan(t *testing.T) {
+ c := NewClient(nil)
+ c.OpenCURL()
+
+ defer func() {
+ c.Delete("foo", true)
+ }()
+
+ _, err := c.Set("foo", "bar", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expected := fmt.Sprintf("curl -X PUT %s/v2/keys/foo -d value=bar -d ttl=5",
+ c.cluster.pick())
+ actual := c.RecvCURL()
+ if expected != actual {
+ t.Fatalf(`Command "%s" is not equal to expected value "%s"`,
+ actual, expected)
+ }
+
+ c.SetConsistency(STRONG_CONSISTENCY)
+ _, err = c.Get("foo", false, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expected = fmt.Sprintf("curl -X GET %s/v2/keys/foo?quorum=true&recursive=false&sorted=false",
+ c.cluster.pick())
+ actual = c.RecvCURL()
+ if expected != actual {
+ t.Fatalf(`Command "%s" is not equal to expected value "%s"`,
+ actual, expected)
+ }
+}
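
OpenCURL and RecvCURL, exercised by this test, record the curl equivalent of every request the client sends. A minimal sketch of using them as a debugging aid, assuming a reachable etcd endpoint (the key and value are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-etcd/etcd"
)

func main() {
	c := etcd.NewClient(nil)

	// Start recording the curl command for each request, as the test does.
	c.OpenCURL()

	if _, err := c.Set("foo", "bar", 5); err != nil {
		log.Fatal(err)
	}

	// RecvCURL returns the recorded command for the last request, e.g.
	// "curl -X PUT <endpoint>/v2/keys/foo -d value=bar -d ttl=5".
	fmt.Println(c.RecvCURL())
}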
diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create_test.go
new file mode 100644
index 000000000..ced0f06e7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create_test.go
@@ -0,0 +1,241 @@
+package etcd
+
+import (
+ "testing"
+)
+
+func TestSet(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("foo", true)
+ }()
+
+ resp, err := c.Set("foo", "bar", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.Node.Key != "/foo" || resp.Node.Value != "bar" || resp.Node.TTL != 5 {
+ t.Fatalf("Set 1 failed: %#v", resp)
+ }
+ if resp.PrevNode != nil {
+ t.Fatalf("Set 1 PrevNode failed: %#v", resp)
+ }
+
+ resp, err = c.Set("foo", "bar2", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.Node.Key == "/foo" && resp.Node.Value == "bar2" && resp.Node.TTL == 5) {
+ t.Fatalf("Set 2 failed: %#v", resp)
+ }
+ if resp.PrevNode.Key != "/foo" || resp.PrevNode.Value != "bar" || resp.Node.TTL != 5 {
+ t.Fatalf("Set 2 PrevNode failed: %#v", resp)
+ }
+}
+
+func TestUpdate(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("foo", true)
+ c.Delete("nonexistent", true)
+ }()
+
+ resp, err := c.Set("foo", "bar", 5)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // This should succeed.
+ resp, err = c.Update("foo", "wakawaka", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !(resp.Action == "update" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
+ t.Fatalf("Update 1 failed: %#v", resp)
+ }
+ if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.Node.TTL == 5) {
+ t.Fatalf("Update 1 prevValue failed: %#v", resp)
+ }
+
+ // This should fail because the key does not exist.
+ resp, err = c.Update("nonexistent", "whatever", 5)
+ if err == nil {
+ t.Fatalf("The key %v did not exist, so the update should have failed."+
+ "The response was: %#v", resp.Node.Key, resp)
+ }
+}
+
+func TestCreate(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("newKey", true)
+ }()
+
+ newKey := "/newKey"
+ newValue := "/newValue"
+
+ // This should succeed
+ resp, err := c.Create(newKey, newValue, 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !(resp.Action == "create" && resp.Node.Key == newKey &&
+ resp.Node.Value == newValue && resp.Node.TTL == 5) {
+ t.Fatalf("Create 1 failed: %#v", resp)
+ }
+ if resp.PrevNode != nil {
+ t.Fatalf("Create 1 PrevNode failed: %#v", resp)
+ }
+
+ // This should fail, because the key is already there
+ resp, err = c.Create(newKey, newValue, 5)
+ if err == nil {
+ t.Fatalf("The key %v did exist, so the creation should have failed."+
+ "The response was: %#v", resp.Node.Key, resp)
+ }
+}
+
+func TestCreateInOrder(t *testing.T) {
+ c := NewClient(nil)
+ dir := "/queue"
+ defer func() {
+ c.DeleteDir(dir)
+ }()
+
+ var firstKey, secondKey string
+
+ resp, err := c.CreateInOrder(dir, "1", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !(resp.Action == "create" && resp.Node.Value == "1" && resp.Node.TTL == 5) {
+ t.Fatalf("Create 1 failed: %#v", resp)
+ }
+
+ firstKey = resp.Node.Key
+
+ resp, err = c.CreateInOrder(dir, "2", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !(resp.Action == "create" && resp.Node.Value == "2" && resp.Node.TTL == 5) {
+ t.Fatalf("Create 2 failed: %#v", resp)
+ }
+
+ secondKey = resp.Node.Key
+
+ if firstKey >= secondKey {
+ t.Fatalf("Expected first key to be greater than second key, but %s is not greater than %s",
+ firstKey, secondKey)
+ }
+}
+
+func TestSetDir(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("foo", true)
+ c.Delete("fooDir", true)
+ }()
+
+ resp, err := c.CreateDir("fooDir", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.Node.Key == "/fooDir" && resp.Node.Value == "" && resp.Node.TTL == 5) {
+ t.Fatalf("SetDir 1 failed: %#v", resp)
+ }
+ if resp.PrevNode != nil {
+ t.Fatalf("SetDir 1 PrevNode failed: %#v", resp)
+ }
+
+ // This should fail because /fooDir already points to a directory
+ resp, err = c.CreateDir("/fooDir", 5)
+ if err == nil {
+ t.Fatalf("fooDir already points to a directory, so SetDir should have failed."+
+ "The response was: %#v", resp)
+ }
+
+ _, err = c.Set("foo", "bar", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // This should succeed
+ // It should replace the key
+ resp, err = c.SetDir("foo", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.Node.Key == "/foo" && resp.Node.Value == "" && resp.Node.TTL == 5) {
+ t.Fatalf("SetDir 2 failed: %#v", resp)
+ }
+ if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.PrevNode.TTL == 5) {
+ t.Fatalf("SetDir 2 failed: %#v", resp)
+ }
+}
+
+func TestUpdateDir(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("fooDir", true)
+ }()
+
+ resp, err := c.CreateDir("fooDir", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // This should succeed.
+ resp, err = c.UpdateDir("fooDir", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !(resp.Action == "update" && resp.Node.Key == "/fooDir" &&
+ resp.Node.Value == "" && resp.Node.TTL == 5) {
+ t.Fatalf("UpdateDir 1 failed: %#v", resp)
+ }
+ if !(resp.PrevNode.Key == "/fooDir" && resp.PrevNode.Dir == true && resp.PrevNode.TTL == 5) {
+ t.Fatalf("UpdateDir 1 PrevNode failed: %#v", resp)
+ }
+
+ // This should fail because the key does not exist.
+ resp, err = c.UpdateDir("nonexistentDir", 5)
+ if err == nil {
+ t.Fatalf("The key %v did not exist, so the update should have failed."+
+ "The response was: %#v", resp.Node.Key, resp)
+ }
+}
+
+func TestCreateDir(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("fooDir", true)
+ }()
+
+ // This should succeed
+ resp, err := c.CreateDir("fooDir", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !(resp.Action == "create" && resp.Node.Key == "/fooDir" &&
+ resp.Node.Value == "" && resp.Node.TTL == 5) {
+ t.Fatalf("CreateDir 1 failed: %#v", resp)
+ }
+ if resp.PrevNode != nil {
+ t.Fatalf("CreateDir 1 PrevNode failed: %#v", resp)
+ }
+
+ // This should fail, because the key is already there
+ resp, err = c.CreateDir("fooDir", 5)
+ if err == nil {
+ t.Fatalf("The key %v did exist, so the creation should have failed."+
+ "The response was: %#v", resp.Node.Key, resp)
+ }
+}
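
The tests above pin down the difference between Set (unconditional write), Create (fails if the key already exists) and Update (fails if the key does not exist), plus their directory variants. A minimal sketch of that distinction from client code, assuming a reachable etcd endpoint; the key "/example" and its values are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-etcd/etcd"
)

func main() {
	c := etcd.NewClient(nil)

	// Create only succeeds for a key that does not exist yet.
	if _, err := c.Create("/example", "v1", 5); err != nil {
		log.Fatal(err)
	}

	// Set overwrites unconditionally; Update requires the key to exist.
	if _, err := c.Set("/example", "v2", 5); err != nil {
		log.Fatal(err)
	}
	resp, err := c.Update("/example", "v3", 5)
	if err != nil {
		log.Fatal(err)
	}

	// PrevNode carries the value that was replaced, as TestSet asserts.
	fmt.Println(resp.Node.Value, "replaced", resp.PrevNode.Value)

	c.Delete("/example", true)
}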
diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch_test.go
new file mode 100644
index 000000000..43e1dfeb8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch_test.go
@@ -0,0 +1,119 @@
+package etcd
+
+import (
+ "fmt"
+ "runtime"
+ "testing"
+ "time"
+)
+
+func TestWatch(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("watch_foo", true)
+ }()
+
+ go setHelper("watch_foo", "bar", c)
+
+ resp, err := c.Watch("watch_foo", 0, false, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") {
+ t.Fatalf("Watch 1 failed: %#v", resp)
+ }
+
+ go setHelper("watch_foo", "bar", c)
+
+ resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, false, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") {
+ t.Fatalf("Watch 2 failed: %#v", resp)
+ }
+
+ routineNum := runtime.NumGoroutine()
+
+ ch := make(chan *Response, 10)
+ stop := make(chan bool, 1)
+
+ go setLoop("watch_foo", "bar", c)
+
+ go receiver(ch, stop)
+
+ _, err = c.Watch("watch_foo", 0, false, ch, stop)
+ if err != ErrWatchStoppedByUser {
+ t.Fatalf("Watch returned a non-user stop error")
+ }
+
+ if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum {
+ t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum)
+ }
+}
+
+func TestWatchAll(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("watch_foo", true)
+ }()
+
+ go setHelper("watch_foo/foo", "bar", c)
+
+ resp, err := c.Watch("watch_foo", 0, true, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") {
+ t.Fatalf("WatchAll 1 failed: %#v", resp)
+ }
+
+ go setHelper("watch_foo/foo", "bar", c)
+
+ resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, true, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") {
+ t.Fatalf("WatchAll 2 failed: %#v", resp)
+ }
+
+ ch := make(chan *Response, 10)
+ stop := make(chan bool, 1)
+
+ routineNum := runtime.NumGoroutine()
+
+ go setLoop("watch_foo/foo", "bar", c)
+
+ go receiver(ch, stop)
+
+ _, err = c.Watch("watch_foo", 0, true, ch, stop)
+ if err != ErrWatchStoppedByUser {
+ t.Fatalf("Watch returned a non-user stop error")
+ }
+
+ if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum {
+ t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum)
+ }
+}
+
+func setHelper(key, value string, c *Client) {
+ time.Sleep(time.Second)
+ c.Set(key, value, 100)
+}
+
+func setLoop(key, value string, c *Client) {
+ time.Sleep(time.Second)
+ for i := 0; i < 10; i++ {
+ newValue := fmt.Sprintf("%s_%v", value, i)
+ c.Set(key, newValue, 100)
+ time.Sleep(time.Second / 10)
+ }
+}
+
+func receiver(c chan *Response, stop chan bool) {
+ for i := 0; i < 10; i++ {
+ <-c
+ }
+ stop <- true
+}
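
The watch tests stream changes into a channel and terminate the watch through a stop channel, with ErrWatchStoppedByUser signalling a clean shutdown. A minimal sketch of that pattern in client code, assuming a reachable etcd endpoint; the prefix and the event count are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-etcd/etcd"
)

func main() {
	c := etcd.NewClient(nil)

	ch := make(chan *etcd.Response, 10)
	stop := make(chan bool, 1)

	// Consume a fixed number of events, then ask the watch to stop,
	// mirroring the receiver helper in the test above.
	go func() {
		for i := 0; i < 10; i++ {
			resp := <-ch
			fmt.Println(resp.Node.Key, "=", resp.Node.Value)
		}
		stop <- true
	}()

	// Watch blocks, pushing every change under "watch_foo" into ch, and
	// returns ErrWatchStoppedByUser once the stop channel fires.
	if _, err := c.Watch("watch_foo", 0, true, ch, stop); err != etcd.ErrWatchStoppedByUser {
		log.Println("watch ended unexpectedly:", err)
	}
}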
diff --git a/Godeps/_workspace/src/github.com/duosecurity/duo_api_golang/authapi/authapi_test.go b/Godeps/_workspace/src/github.com/duosecurity/duo_api_golang/authapi/authapi_test.go
new file mode 100644
index 000000000..45fbbaf95
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/duosecurity/duo_api_golang/authapi/authapi_test.go
@@ -0,0 +1,497 @@
+package authapi
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/duosecurity/duo_api_golang"
+)
+
+func buildAuthApi(url string) *AuthApi {
+ ikey := "eyekey"
+ skey := "esskey"
+ host := strings.Split(url, "//")[1]
+ userAgent := "GoTestClient"
+ return NewAuthApi(*duoapi.NewDuoApi(ikey,
+ skey,
+ host,
+ userAgent,
+ duoapi.SetTimeout(1*time.Second),
+ duoapi.SetInsecure()))
+}
+
+// Timeouts are set to 1 second. Take 15 seconds to respond and verify
+// that the client times out.
+func TestTimeout(t *testing.T) {
+ ts := httptest.NewTLSServer(http.HandlerFunc(func (w http.ResponseWriter, r *http.Request) {
+ time.Sleep(15*time.Second)
+ }))
+
+ duo := buildAuthApi(ts.URL)
+
+ start := time.Now()
+ _, err := duo.Ping()
+ duration := time.Since(start)
+ if duration.Seconds() > 2 {
+ t.Error("Timeout took %d seconds", duration.Seconds())
+ }
+ if err == nil {
+ t.Error("Expected timeout error.")
+ }
+}
+
+// Test a successful ping request / response.
+func TestPing(t *testing.T) {
+ ts := httptest.NewTLSServer(http.HandlerFunc(func (w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintln(w, `
+ {
+ "stat": "OK",
+ "response": {
+ "time": 1357020061,
+ "unexpected_parameter" : "blah"
+ }
+ }`)
+ }))
+ defer ts.Close()
+
+ duo := buildAuthApi(ts.URL)
+
+ result, err := duo.Ping()
+ if err != nil {
+ t.Error("Unexpected error from Ping call" + err.Error())
+ }
+ if result.Stat != "OK" {
+ t.Error("Expected OK, but got " + result.Stat)
+ }
+ if result.Response.Time != 1357020061 {
+ t.Errorf("Expected 1357020061, but got %d", result.Response.Time)
+ }
+}
+
+// Test a successful Check request / response.
+func TestCheck(t *testing.T) {
+ ts := httptest.NewTLSServer(
+ http.HandlerFunc(
+ func (w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintln(w, `
+ {
+ "stat": "OK",
+ "response": {
+ "time": 1357020061
+ }
+ }`)}))
+ defer ts.Close()
+
+ duo := buildAuthApi(ts.URL)
+
+ result, err := duo.Check()
+ if err != nil {
+ t.Error("Failed TestCheck: " + err.Error())
+ }
+ if result.Stat != "OK" {
+ t.Error("Expected OK, but got " + result.Stat)
+ }
+ if result.Response.Time != 1357020061 {
+ t.Errorf("Expected 1357020061, but got %d", result.Response.Time)
+ }
+}
+
+// Test a successful logo request / response.
+func TestLogo(t *testing.T) {
+ ts := httptest.NewTLSServer(
+ http.HandlerFunc(
+ func (w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "image/png")
+ w.Write([]byte("\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00" +
+ "\x00\x00\x01\x00\x00\x00\x01\x08\x06\x00" +
+ "\x00\x00\x1f\x15\xc4\x89\x00\x00\x00\nIDATx" +
+ "\x9cc\x00\x01\x00\x00\x05\x00\x01\r\n-\xb4\x00" +
+ "\x00\x00\x00IEND\xaeB`\x82"))
+ }))
+ defer ts.Close()
+
+ duo := buildAuthApi(ts.URL)
+
+ _, err := duo.Logo()
+ if err != nil {
+ t.Error("Failed TestCheck: " + err.Error())
+ }
+}
+
+// Test a failed logo request / response.
+func TestLogoError(t *testing.T) {
+ ts := httptest.NewTLSServer(
+ http.HandlerFunc(
+ func (w http.ResponseWriter, r *http.Request) {
+ // Return a 400, as if the logo was not found.
+ w.WriteHeader(400)
+ fmt.Fprintln(w, `
+ {
+ "stat": "FAIL",
+ "code": 40002,
+ "message": "Logo not found",
+ "message_detail": "Why u no have logo?"
+ }`)
+ }))
+ defer ts.Close()
+
+ duo := buildAuthApi(ts.URL)
+
+ res, err := duo.Logo()
+ if err != nil {
+ t.Error("Failed TestCheck: " + err.Error())
+ }
+ if res.Stat != "FAIL" {
+ t.Error("Expected FAIL, but got " + res.Stat)
+ }
+ if res.Code == nil || *res.Code != 40002 {
+ t.Error("Unexpected response code.")
+ }
+ if res.Message == nil || *res.Message != "Logo not found" {
+ t.Error("Unexpected message.")
+ }
+ if res.Message_Detail == nil || *res.Message_Detail != "Why u no have logo?" {
+ t.Error("Unexpected message detail.")
+ }
+}
+
+// Test a successful enroll request / response.
+func TestEnroll(t *testing.T) {
+ ts := httptest.NewTLSServer(
+ http.HandlerFunc(
+ func (w http.ResponseWriter, r *http.Request) {
+ if r.FormValue("username") != "49c6c3097adb386048c84354d82ea63d" {
+ t.Error("TestEnroll failed to set 'username' query parameter:" +
+ r.RequestURI)
+ }
+ if r.FormValue("valid_secs") != "10" {
+ t.Error("TestEnroll failed to set 'valid_secs' query parameter: " +
+ r.RequestURI)
+ }
+ fmt.Fprintln(w, `
+ {
+ "stat": "OK",
+ "response": {
+ "activation_barcode": "https://api-eval.duosecurity.com/frame/qr?value=8LIRa5danrICkhHtkLxi-cKLu2DWzDYCmBwBHY2YzW5ZYnYaRxA",
+ "activation_code": "duo://8LIRa5danrICkhHtkLxi-cKLu2DWzDYCmBwBHY2YzW5ZYnYaRxA",
+ "expiration": 1357020061,
+ "user_id": "DU94SWSN4ADHHJHF2HXT",
+ "username": "49c6c3097adb386048c84354d82ea63d"
+ }
+ }`)}))
+ defer ts.Close()
+
+ duo := buildAuthApi(ts.URL)
+
+ result, err := duo.Enroll(EnrollUsername("49c6c3097adb386048c84354d82ea63d"), EnrollValidSeconds(10))
+ if err != nil {
+ t.Error("Failed TestEnroll: " + err.Error())
+ }
+ if result.Stat != "OK" {
+ t.Error("Expected OK, but got " + result.Stat)
+ }
+ if result.Response.Activation_Barcode != "https://api-eval.duosecurity.com/frame/qr?value=8LIRa5danrICkhHtkLxi-cKLu2DWzDYCmBwBHY2YzW5ZYnYaRxA" {
+ t.Error("Unexpected activation_barcode: " + result.Response.Activation_Barcode)
+ }
+ if result.Response.Activation_Code != "duo://8LIRa5danrICkhHtkLxi-cKLu2DWzDYCmBwBHY2YzW5ZYnYaRxA" {
+ t.Error("Unexpected activation code: " + result.Response.Activation_Code)
+ }
+ if result.Response.Expiration != 1357020061 {
+ t.Errorf("Unexpected expiration time: %d", result.Response.Expiration)
+ }
+ if result.Response.User_Id != "DU94SWSN4ADHHJHF2HXT" {
+ t.Error("Unexpected user id: " + result.Response.User_Id)
+ }
+ if result.Response.Username != "49c6c3097adb386048c84354d82ea63d" {
+ t.Error("Unexpected username: " + result.Response.Username)
+ }
+}
+
+// Test a successful enroll status request / response.
+func TestEnrollStatus(t *testing.T) {
+ ts := httptest.NewTLSServer(
+ http.HandlerFunc(
+ func (w http.ResponseWriter, r *http.Request) {
+ if r.FormValue("user_id") != "49c6c3097adb386048c84354d82ea63d" {
+ t.Error("TestEnrollStatus failed to set 'user_id' query parameter:" +
+ r.RequestURI)
+ }
+ if r.FormValue("activation_code") != "10" {
+ t.Error("TestEnrollStatus failed to set 'activation_code' query parameter: " +
+ r.RequestURI)
+ }
+ fmt.Fprintln(w, `
+ {
+ "stat": "OK",
+ "response": "success"
+ }`)}))
+ defer ts.Close()
+
+ duo := buildAuthApi(ts.URL)
+
+ result, err := duo.EnrollStatus("49c6c3097adb386048c84354d82ea63d", "10")
+ if err != nil {
+ t.Error("Failed TestEnrollStatus: " + err.Error())
+ }
+ if result.Stat != "OK" {
+ t.Error("Expected OK, but got " + result.Stat)
+ }
+ if result.Response != "success" {
+ t.Error("Unexpected response: " + result.Response)
+ }
+}
+
+// Test a successful preauth with a user ID. The client doesn't enforce API requirements,
+// such as supplying only one of user ID or username, but we'll cover the username
+// variant in another test anyway.
+func TestPreauthUserId(t *testing.T) {
+ ts := httptest.NewTLSServer(
+ http.HandlerFunc(
+ func (w http.ResponseWriter, r *http.Request) {
+ if r.FormValue("ipaddr") != "127.0.0.1" {
+ t.Error("TestPreauth failed to set 'ipaddr' query parameter:" +
+ r.RequestURI)
+ }
+ if r.FormValue("user_id") != "10" {
+ t.Error("TestEnrollStatus failed to set 'user_id' query parameter: " +
+ r.RequestURI)
+ }
+ if r.FormValue("trusted_device_token") != "l33t" {
+ t.Error("TestEnrollStatus failed to set 'trusted_device_token' query parameter: " +
+ r.RequestURI)
+ }
+ fmt.Fprintln(w, `
+ {
+ "stat": "OK",
+ "response": {
+ "result": "auth",
+ "status_msg": "Account is active",
+ "devices": [
+ {
+ "device": "DPFZRS9FB0D46QFTM891",
+ "type": "phone",
+ "number": "XXX-XXX-0100",
+ "name": "",
+ "capabilities": [
+ "push",
+ "sms",
+ "phone"
+ ]
+ },
+ {
+ "device": "DHEKH0JJIYC1LX3AZWO4",
+ "type": "token",
+ "name": "0"
+ }
+ ]
+ }
+ }`)}))
+ defer ts.Close()
+
+ duo := buildAuthApi(ts.URL)
+
+ res, err := duo.Preauth(PreauthUserId("10"), PreauthIpAddr("127.0.0.1"), PreauthTrustedToken("l33t"))
+ if err != nil {
+ t.Error("Failed TestPreauthUserId: " + err.Error())
+ }
+ if res.Stat != "OK" {
+ t.Error("Unexpected stat: " + res.Stat)
+ }
+ if res.Response.Result != "auth" {
+ t.Error("Unexpected response result: " + res.Response.Result)
+ }
+ if res.Response.Status_Msg != "Account is active" {
+ t.Error("Unexpected status message: " + res.Response.Status_Msg)
+ }
+ if len(res.Response.Devices) != 2 {
+ t.Errorf("Unexpected devices length: %d", len(res.Response.Devices))
+ }
+ if res.Response.Devices[0].Device != "DPFZRS9FB0D46QFTM891" {
+ t.Error("Unexpected [0] device name: " + res.Response.Devices[0].Device)
+ }
+ if res.Response.Devices[0].Type != "phone" {
+ t.Error("Unexpected [0] device type: " + res.Response.Devices[0].Type)
+ }
+ if res.Response.Devices[0].Number != "XXX-XXX-0100" {
+ t.Error("Unexpected [0] device number: " + res.Response.Devices[0].Number)
+ }
+ if res.Response.Devices[0].Name != "" {
+ t.Error("Unexpected [0] devices name :" + res.Response.Devices[0].Name)
+ }
+ if len(res.Response.Devices[0].Capabilities) != 3 {
+ t.Errorf("Unexpected [0] device capabilities length: %d", len(res.Response.Devices[0].Capabilities))
+ }
+ if res.Response.Devices[0].Capabilities[0] != "push" {
+ t.Error("Unexpected [0] device capability: " + res.Response.Devices[0].Capabilities[0])
+ }
+ if res.Response.Devices[0].Capabilities[1] != "sms" {
+ t.Error("Unexpected [0] device capability: " + res.Response.Devices[0].Capabilities[1])
+ }
+ if res.Response.Devices[0].Capabilities[2] != "phone" {
+ t.Error("Unexpected [0] device capability: " + res.Response.Devices[0].Capabilities[2])
+ }
+ if res.Response.Devices[1].Device != "DHEKH0JJIYC1LX3AZWO4" {
+ t.Error("Unexpected [1] device name: " + res.Response.Devices[1].Device)
+ }
+ if res.Response.Devices[1].Type != "token" {
+ t.Error("Unexpected [1] device type: " + res.Response.Devices[1].Type)
+ }
+ if res.Response.Devices[1].Name != "0" {
+ t.Error("Unexpected [1] devices name :" + res.Response.Devices[1].Name)
+ }
+ if len(res.Response.Devices[1].Capabilities) != 0 {
+ t.Errorf("Unexpected [1] device capabilities length: %d", len(res.Response.Devices[1].Capabilities))
+ }
+}
+
+// Test preauth enroll with username, and an enroll response.
+func TestPreauthEnroll(t *testing.T) {
+ ts := httptest.NewTLSServer(
+ http.HandlerFunc(
+ func (w http.ResponseWriter, r *http.Request) {
+ if r.FormValue("username") != "10" {
+ t.Error("TestEnrollStatus failed to set 'username' query parameter: " +
+ r.RequestURI)
+ }
+ fmt.Fprintln(w, `
+ {
+ "stat": "OK",
+ "response": {
+ "enroll_portal_url": "https://api-3945ef22.duosecurity.com/portal?48bac5d9393fb2c2",
+ "result": "enroll",
+ "status_msg": "Enroll an authentication device to proceed"
+ }
+ }`)}))
+ defer ts.Close()
+
+ duo := buildAuthApi(ts.URL)
+
+ res, err := duo.Preauth(PreauthUsername("10"))
+ if err != nil {
+ t.Error("Failed TestPreauthEnroll: " + err.Error())
+ }
+ if res.Stat != "OK" {
+ t.Error("Unexpected stat: " + res.Stat)
+ }
+ if res.Response.Enroll_Portal_Url != "https://api-3945ef22.duosecurity.com/portal?48bac5d9393fb2c2" {
+ t.Error("Unexpected enroll portal URL: " + res.Response.Enroll_Portal_Url)
+ }
+ if res.Response.Result != "enroll" {
+ t.Error("Unexpected response result: " + res.Response.Result)
+ }
+ if res.Response.Status_Msg != "Enroll an authentication device to proceed" {
+ t.Error("Unexpected status msg: " + res.Response.Status_Msg)
+ }
+}
+
+// Test an authentication request / response. This won't work against the Duo
+// server, because the request parameters included are illegal. But we can
+// verify that the go code sets the query parameters correctly.
+func TestAuth(t *testing.T) {
+ ts := httptest.NewTLSServer(
+ http.HandlerFunc(
+ func (w http.ResponseWriter, r *http.Request) {
+ expected := map[string]string {
+ "username" : "username value",
+ "user_id" : "user_id value",
+ "factor" : "auto",
+ "ipaddr" : "40.40.40.10",
+ "async" : "1",
+ "device" : "primary",
+ "type" : "request",
+ "display_username" : "display username",
+
+ }
+ for key, value := range expected {
+ if r.FormValue(key) != value {
+ t.Errorf("TestAuth failed to set '%s' query parameter: " +
+ r.RequestURI, key)
+ }
+ }
+ fmt.Fprintln(w, `
+ {
+ "stat": "OK",
+ "response": {
+ "result": "allow",
+ "status": "allow",
+ "status_msg": "Success. Logging you in..."
+ }
+ }`)}))
+ defer ts.Close()
+
+ duo := buildAuthApi(ts.URL)
+
+ res, err := duo.Auth("auto",
+ AuthUserId("user_id value"),
+ AuthUsername("username value"),
+ AuthIpAddr("40.40.40.10"),
+ AuthAsync(),
+ AuthDevice("primary"),
+ AuthType("request"),
+ AuthDisplayUsername("display username"),
+ )
+ if err != nil {
+ t.Error("Failed TestAuth: " + err.Error())
+ }
+ if res.Stat != "OK" {
+ t.Error("Unexpected stat: " + res.Stat)
+ }
+ if res.Response.Result != "allow" {
+ t.Error("Unexpected response result: " + res.Response.Result)
+ }
+ if res.Response.Status != "allow" {
+ t.Error("Unexpected response status: " + res.Response.Status)
+ }
+ if res.Response.Status_Msg != "Success. Logging you in..." {
+ t.Error("Unexpected response status msg: " + res.Response.Status_Msg)
+ }
+}
+
+// Test AuthStatus request / response.
+func TestAuthStatus(t *testing.T) {
+ ts := httptest.NewTLSServer(
+ http.HandlerFunc(
+ func (w http.ResponseWriter, r *http.Request) {
+ expected := map[string]string {
+ "txid" : "4",
+ }
+ for key, value := range expected {
+ if r.FormValue(key) != value {
+ t.Errorf("TestAuthStatus failed to set '%s' query parameter: " +
+ r.RequestURI, key)
+ }
+ }
+ fmt.Fprintln(w, `
+ {
+ "stat": "OK",
+ "response": {
+ "result": "waiting",
+ "status": "pushed",
+ "status_msg": "Pushed a login request to your phone..."
+ }
+ }`)}))
+ defer ts.Close()
+
+ duo := buildAuthApi(ts.URL)
+
+ res, err := duo.AuthStatus("4")
+ if err != nil {
+ t.Error("Failed TestAuthStatus: " + err.Error())
+ }
+
+ if res.Stat != "OK" {
+ t.Error("Unexpected stat: " + res.Stat)
+ }
+ if res.Response.Result != "waiting" {
+ t.Error("Unexpected response result: " + res.Response.Result)
+ }
+ if res.Response.Status != "pushed" {
+ t.Error("Unexpected response status: " + res.Response.Status)
+ }
+ if res.Response.Status_Msg != "Pushed a login request to your phone..." {
+ t.Error("Unexpected response status msg: " + res.Response.Status_Msg)
+ }
+}
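
These tests stub the Duo endpoints with httptest; against a live integration the same calls are issued with real credentials. A minimal sketch of a preauth-then-auth flow using only the constructors and option setters exercised above; the credentials, hostname, username and the "auto" device value are illustrative placeholders:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/duosecurity/duo_api_golang"
	"github.com/duosecurity/duo_api_golang/authapi"
)

func main() {
	// Placeholder credentials and host; SetTimeout is the option the tests use.
	duo := authapi.NewAuthApi(*duoapi.NewDuoApi(
		"INTEGRATION_KEY",
		"SECRET_KEY",
		"api-XXXXXXXX.duosecurity.com",
		"example-client",
		duoapi.SetTimeout(10*time.Second),
	))

	// Preauth reports whether the user can authenticate ("auth") or still
	// needs to enroll ("enroll"), as TestPreauthUserId and TestPreauthEnroll show.
	pre, err := duo.Preauth(authapi.PreauthUsername("jsmith"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pre.Stat, pre.Response.Result, pre.Response.Status_Msg)

	if pre.Response.Result == "auth" {
		// Factor "auto" lets Duo choose the method; the device value here is an
		// assumption, not something the tests above pin down.
		res, err := duo.Auth("auto", authapi.AuthUsername("jsmith"), authapi.AuthDevice("auto"))
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(res.Response.Result, res.Response.Status_Msg)
	}
}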
diff --git a/Godeps/_workspace/src/github.com/duosecurity/duo_api_golang/duo_test.go b/Godeps/_workspace/src/github.com/duosecurity/duo_api_golang/duo_test.go
new file mode 100644
index 000000000..01278457e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/duosecurity/duo_api_golang/duo_test.go
@@ -0,0 +1,156 @@
+package duoapi
+
+import (
+ "net/url"
+ "strings"
+ "testing"
+)
+
+func TestCanonicalize(t *testing.T) {
+ values := url.Values{}
+ values.Set("username", "H ell?o")
+ values.Set("password", "H-._~i")
+ values.Add("password", "A(!'*)")
+ params_str := canonicalize("post",
+ "API-XXX.duosecurity.COM",
+ "/auth/v2/ping",
+ values,
+ "5")
+ params := strings.Split(params_str, "\n")
+ if len(params) != 5 {
+ t.Error("Expected 5 parameters, but got " + string(len(params)))
+ }
+ if params[1] != string("POST") {
+ t.Error("Expected POST, but got " + params[1])
+ }
+ if params[2] != string("api-xxx.duosecurity.com") {
+ t.Error("Expected api-xxx.duosecurity.com, but got " + params[2])
+ }
+ if params[3] != string("/auth/v2/ping") {
+ t.Error("Expected /auth/v2/ping, but got " + params[3])
+ }
+ if params[4] != string("password=A%28%21%27%2A%29&password=H-._~i&username=H%20ell%3Fo") {
+ t.Error("Expected sorted escaped params, but got " + params[4])
+ }
+}
+
+func encodeAndValidate(t *testing.T, input url.Values, output string) {
+ values := url.Values{}
+ for key, val := range input {
+ values.Set(key, val[0])
+ }
+ params_str := canonicalize("post",
+ "API-XXX.duosecurity.com",
+ "/auth/v2/ping",
+ values,
+ "5")
+ params := strings.Split(params_str, "\n")
+ if params[4] != output {
+ t.Error("Mismatch\n" + output + "\n" + params[4])
+ }
+
+}
+
+func TestSimple(t *testing.T) {
+ values := url.Values{}
+ values.Set("realname", "First Last")
+ values.Set("username", "root")
+
+ encodeAndValidate(t, values, "realname=First%20Last&username=root")
+}
+
+func TestZero(t *testing.T) {
+ values := url.Values{}
+ encodeAndValidate(t, values, "")
+}
+
+func TestOne(t *testing.T) {
+ values := url.Values{}
+ values.Set("realname", "First Last")
+ encodeAndValidate(t, values, "realname=First%20Last")
+}
+
+func TestPrintableAsciiCharaceters(t *testing.T) {
+ values := url.Values{}
+ values.Set("digits", "0123456789")
+ values.Set("letters", "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+ values.Set("punctuation", "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~")
+ values.Set("whitespace", "\t\n\x0b\x0c\r ")
+ encodeAndValidate(t, values, "digits=0123456789&letters=abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ&punctuation=%21%22%23%24%25%26%27%28%29%2A%2B%2C-.%2F%3A%3B%3C%3D%3E%3F%40%5B%5C%5D%5E_%60%7B%7C%7D~&whitespace=%09%0A%0B%0C%0D%20")
+}
+
+func TestSortOrderWithCommonPrefix(t *testing.T) {
+ values := url.Values{}
+ values.Set("foo", "1")
+ values.Set("foo_bar", "2")
+ encodeAndValidate(t, values, "foo=1&foo_bar=2")
+}
+
+func TestUnicodeFuzzValues(t *testing.T) {
+ values := url.Values{}
+ values.Set("bar", "⠕ꪣ㟏䮷㛩찅暎腢슽ꇱ")
+ values.Set("baz", "ෳ蒽噩馅뢤갺篧潩鍊뤜")
+ values.Set("foo", "퓎훖礸僀訠輕ﴋ耤岳왕")
+ values.Set("qux", "讗졆-芎茚쳊ꋔ谾뢲馾")
+ encodeAndValidate(t, values, "bar=%E2%A0%95%EA%AA%A3%E3%9F%8F%E4%AE%B7%E3%9B%A9%EC%B0%85%E6%9A%8E%E8%85%A2%EC%8A%BD%EA%87%B1&baz=%E0%B7%B3%E8%92%BD%E5%99%A9%E9%A6%85%EB%A2%A4%EA%B0%BA%E7%AF%A7%E6%BD%A9%E9%8D%8A%EB%A4%9C&foo=%ED%93%8E%ED%9B%96%E7%A4%B8%E5%83%80%E8%A8%A0%E8%BC%95%EF%B4%8B%E8%80%A4%E5%B2%B3%EC%99%95&qux=%E8%AE%97%EC%A1%86-%E8%8A%8E%E8%8C%9A%EC%B3%8A%EA%8B%94%E8%B0%BE%EB%A2%B2%E9%A6%BE")
+}
+
+func TestUnicodeFuzzKeysAndValues(t *testing.T) {
+ values := url.Values{}
+ values.Set("䚚⡻㗐軳朧倪ࠐ킑È셰",
+ "ཅ᩶㐚敌숿鬉ꯢ荃ᬧ惐")
+ values.Set("瑉繋쳻姿﹟获귌逌쿑砓",
+ "趷倢鋓䋯⁽蜰곾嘗ॆ丰")
+ values.Set("瑰錔逜麮䃘䈁苘豰ᴱꁂ",
+ "៙ந鍘꫟ꐪ䢾ﮖ濩럿㋳")
+ values.Set("싅Ⱍ☠㘗隳F蘅⃨갡头",
+ "ﮩ䆪붃萋☕㹮攭ꢵ핫U")
+ encodeAndValidate(t, values, "%E4%9A%9A%E2%A1%BB%E3%97%90%E8%BB%B3%E6%9C%A7%E5%80%AA%E0%A0%90%ED%82%91%C3%88%EC%85%B0=%E0%BD%85%E1%A9%B6%E3%90%9A%E6%95%8C%EC%88%BF%E9%AC%89%EA%AF%A2%E8%8D%83%E1%AC%A7%E6%83%90&%E7%91%89%E7%B9%8B%EC%B3%BB%E5%A7%BF%EF%B9%9F%E8%8E%B7%EA%B7%8C%E9%80%8C%EC%BF%91%E7%A0%93=%E8%B6%B7%E5%80%A2%E9%8B%93%E4%8B%AF%E2%81%BD%E8%9C%B0%EA%B3%BE%E5%98%97%E0%A5%86%E4%B8%B0&%E7%91%B0%E9%8C%94%E9%80%9C%E9%BA%AE%E4%83%98%E4%88%81%E8%8B%98%E8%B1%B0%E1%B4%B1%EA%81%82=%E1%9F%99%E0%AE%A8%E9%8D%98%EA%AB%9F%EA%90%AA%E4%A2%BE%EF%AE%96%E6%BF%A9%EB%9F%BF%E3%8B%B3&%EC%8B%85%E2%B0%9D%E2%98%A0%E3%98%97%E9%9A%B3F%E8%98%85%E2%83%A8%EA%B0%A1%E5%A4%B4=%EF%AE%A9%E4%86%AA%EB%B6%83%E8%90%8B%E2%98%95%E3%B9%AE%E6%94%AD%EA%A2%B5%ED%95%ABU")
+}
+
+func TestSign(t *testing.T) {
+ values := url.Values{}
+ values.Set("realname", "First Last")
+ values.Set("username", "root")
+ res := sign("DIWJ8X6AEYOR5OMC6TQ1",
+ "Zh5eGmUq9zpfQnyUIu5OL9iWoMMv5ZNmk3zLJ4Ep",
+ "POST",
+ "api-XXXXXXXX.duosecurity.com",
+ "/accounts/v1/account/list",
+ "Tue, 21 Aug 2012 17:29:18 -0000",
+ values)
+ if res != "Basic RElXSjhYNkFFWU9SNU9NQzZUUTE6MmQ5N2Q2MTY2MzE5Nzgx" +
+ "YjVhM2EwN2FmMzlkMzY2ZjQ5MTIzNGVkYw==" {
+ t.Error("Signature did not produce output documented at " +
+ "https://www.duosecurity.com/docs/authapi :(")
+ }
+}
+
+func TestV2Canonicalize(t *testing.T) {
+ values := url.Values{}
+ values.Set("䚚⡻㗐軳朧倪ࠐ킑È셰",
+ "ཅ᩶㐚敌숿鬉ꯢ荃ᬧ惐")
+ values.Set("瑉繋쳻姿﹟获귌逌쿑砓",
+ "趷倢鋓䋯⁽蜰곾嘗ॆ丰")
+ values.Set("瑰錔逜麮䃘䈁苘豰ᴱꁂ",
+ "៙ந鍘꫟ꐪ䢾ﮖ濩럿㋳")
+ values.Set("싅Ⱍ☠㘗隳F蘅⃨갡头",
+ "ﮩ䆪붃萋☕㹮攭ꢵ핫U")
+ canon := canonicalize(
+ "PoSt",
+ "foO.BAr52.cOm",
+ "/Foo/BaR2/qux",
+ values,
+ "Fri, 07 Dec 2012 17:18:00 -0000")
+ expected := "Fri, 07 Dec 2012 17:18:00 -0000\nPOST\nfoo.bar52.com\n/Foo/BaR2/qux\n%E4%9A%9A%E2%A1%BB%E3%97%90%E8%BB%B3%E6%9C%A7%E5%80%AA%E0%A0%90%ED%82%91%C3%88%EC%85%B0=%E0%BD%85%E1%A9%B6%E3%90%9A%E6%95%8C%EC%88%BF%E9%AC%89%EA%AF%A2%E8%8D%83%E1%AC%A7%E6%83%90&%E7%91%89%E7%B9%8B%EC%B3%BB%E5%A7%BF%EF%B9%9F%E8%8E%B7%EA%B7%8C%E9%80%8C%EC%BF%91%E7%A0%93=%E8%B6%B7%E5%80%A2%E9%8B%93%E4%8B%AF%E2%81%BD%E8%9C%B0%EA%B3%BE%E5%98%97%E0%A5%86%E4%B8%B0&%E7%91%B0%E9%8C%94%E9%80%9C%E9%BA%AE%E4%83%98%E4%88%81%E8%8B%98%E8%B1%B0%E1%B4%B1%EA%81%82=%E1%9F%99%E0%AE%A8%E9%8D%98%EA%AB%9F%EA%90%AA%E4%A2%BE%EF%AE%96%E6%BF%A9%EB%9F%BF%E3%8B%B3&%EC%8B%85%E2%B0%9D%E2%98%A0%E3%98%97%E9%9A%B3F%E8%98%85%E2%83%A8%EA%B0%A1%E5%A4%B4=%EF%AE%A9%E4%86%AA%EB%B6%83%E8%90%8B%E2%98%95%E3%B9%AE%E6%94%AD%EA%A2%B5%ED%95%ABU"
+ if canon != expected {
+ t.Error("Mismatch!\n" + expected + "\n" + canon)
+ }
+}
+
+func TestNewDuo(t *testing.T) {
+ duo := NewDuoApi("ABC", "123", "api-XXXXXXX.duosecurity.com", "go-client")
+ if duo == nil {
+ t.Fatal("Failed to create a new Duo Api")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/fatih/structs/field_test.go b/Godeps/_workspace/src/github.com/fatih/structs/field_test.go
new file mode 100644
index 000000000..379ceff76
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fatih/structs/field_test.go
@@ -0,0 +1,324 @@
+package structs
+
+import (
+ "reflect"
+ "testing"
+)
+
+// A test struct that defines all cases
+type Foo struct {
+ A string
+ B int `structs:"y"`
+ C bool `json:"c"`
+ d string // not exported
+ E *Baz
+ x string `xml:"x"` // not exported, with tag
+ Y []string
+ Z map[string]interface{}
+ *Bar // embedded
+}
+
+type Baz struct {
+ A string
+ B int
+}
+
+type Bar struct {
+ E string
+ F int
+ g []string
+}
+
+func newStruct() *Struct {
+ b := &Bar{
+ E: "example",
+ F: 2,
+ g: []string{"zeynep", "fatih"},
+ }
+
+ // B and x are not initialized for testing
+ f := &Foo{
+ A: "gopher",
+ C: true,
+ d: "small",
+ E: nil,
+ Y: []string{"example"},
+ Z: nil,
+ }
+ f.Bar = b
+
+ return New(f)
+}
+
+func TestField_Set(t *testing.T) {
+ s := newStruct()
+
+ f := s.Field("A")
+ err := f.Set("fatih")
+ if err != nil {
+ t.Error(err)
+ }
+
+ if f.Value().(string) != "fatih" {
+ t.Errorf("Setted value is wrong: %s want: %s", f.Value().(string), "fatih")
+ }
+
+ f = s.Field("Y")
+ err = f.Set([]string{"override", "with", "this"})
+ if err != nil {
+ t.Error(err)
+ }
+
+ sliceLen := len(f.Value().([]string))
+ if sliceLen != 3 {
+ t.Errorf("Setted values slice length is wrong: %d, want: %d", sliceLen, 3)
+ }
+
+ f = s.Field("C")
+ err = f.Set(false)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if f.Value().(bool) {
+ t.Errorf("Setted value is wrong: %t want: %t", f.Value().(bool), false)
+ }
+
+ // let's pass a different type
+ f = s.Field("A")
+ err = f.Set(123) // Field A is of type string, but we are going to pass an integer
+ if err == nil {
+ t.Error("Setting a field's value with a different type than the field's type should return an error")
+ }
+
+ // old value should still be there :)
+ if f.Value().(string) != "fatih" {
+ t.Errorf("Set value is wrong: %s want: %s", f.Value().(string), "fatih")
+ }
+
+ // let's access an unexported field, which should give an error
+ f = s.Field("d")
+ err = f.Set("large")
+ if err != errNotExported {
+ t.Error(err)
+ }
+
+ // let's set a pointer to struct
+ b := &Bar{
+ E: "gopher",
+ F: 2,
+ }
+
+ f = s.Field("Bar")
+ err = f.Set(b)
+ if err != nil {
+ t.Error(err)
+ }
+
+ baz := &Baz{
+ A: "helloWorld",
+ B: 42,
+ }
+
+ f = s.Field("E")
+ err = f.Set(baz)
+ if err != nil {
+ t.Error(err)
+ }
+
+ ba := s.Field("E").Value().(*Baz)
+
+ if ba.A != "helloWorld" {
+ t.Errorf("could not set baz. Got: %s Want: helloWorld", ba.A)
+ }
+}
+
+func TestField(t *testing.T) {
+ s := newStruct()
+
+ defer func() {
+ err := recover()
+ if err == nil {
+ t.Error("Retrieveing a non existing field from the struct should panic")
+ }
+ }()
+
+ _ = s.Field("no-field")
+}
+
+func TestField_Kind(t *testing.T) {
+ s := newStruct()
+
+ f := s.Field("A")
+ if f.Kind() != reflect.String {
+ t.Errorf("Field A has wrong kind: %s want: %s", f.Kind(), reflect.String)
+ }
+
+ f = s.Field("B")
+ if f.Kind() != reflect.Int {
+ t.Errorf("Field B has wrong kind: %s want: %s", f.Kind(), reflect.Int)
+ }
+
+ // unexported
+ f = s.Field("d")
+ if f.Kind() != reflect.String {
+ t.Errorf("Field d has wrong kind: %s want: %s", f.Kind(), reflect.String)
+ }
+}
+
+func TestField_Tag(t *testing.T) {
+ s := newStruct()
+
+ v := s.Field("B").Tag("json")
+ if v != "" {
+ t.Errorf("Field's tag value of a non existing tag should return empty, got: %s", v)
+ }
+
+ v = s.Field("C").Tag("json")
+ if v != "c" {
+ t.Errorf("Field's tag value of the existing field C should return 'c', got: %s", v)
+ }
+
+ v = s.Field("d").Tag("json")
+ if v != "" {
+ t.Errorf("Field's tag value of a non exported field should return empty, got: %s", v)
+ }
+
+ v = s.Field("x").Tag("xml")
+ if v != "x" {
+ t.Errorf("Field's tag value of a non exported field with a tag should return 'x', got: %s", v)
+ }
+
+ v = s.Field("A").Tag("json")
+ if v != "" {
+ t.Errorf("Field's tag value of a existing field without a tag should return empty, got: %s", v)
+ }
+}
+
+func TestField_Value(t *testing.T) {
+ s := newStruct()
+
+ v := s.Field("A").Value()
+ val, ok := v.(string)
+ if !ok {
+ t.Errorf("Field's value of a A should be string")
+ }
+
+ if val != "gopher" {
+ t.Errorf("Field's value of a existing tag should return 'gopher', got: %s", val)
+ }
+
+ defer func() {
+ err := recover()
+ if err == nil {
+ t.Error("Value of a non exported field from the field should panic")
+ }
+ }()
+
+ // should panic
+ _ = s.Field("d").Value()
+}
+
+func TestField_IsEmbedded(t *testing.T) {
+ s := newStruct()
+
+ if !s.Field("Bar").IsEmbedded() {
+ t.Errorf("Fields 'Bar' field is an embedded field")
+ }
+
+ if s.Field("d").IsEmbedded() {
+ t.Errorf("Fields 'd' field is not an embedded field")
+ }
+}
+
+func TestField_IsExported(t *testing.T) {
+ s := newStruct()
+
+ if !s.Field("Bar").IsExported() {
+ t.Errorf("Fields 'Bar' field is an exported field")
+ }
+
+ if !s.Field("A").IsExported() {
+ t.Errorf("Fields 'A' field is an exported field")
+ }
+
+ if s.Field("d").IsExported() {
+ t.Errorf("Fields 'd' field is not an exported field")
+ }
+}
+
+func TestField_IsZero(t *testing.T) {
+ s := newStruct()
+
+ if s.Field("A").IsZero() {
+ t.Errorf("Fields 'A' field is an initialized field")
+ }
+
+ if !s.Field("B").IsZero() {
+ t.Errorf("Fields 'B' field is not an initialized field")
+ }
+}
+
+func TestField_Name(t *testing.T) {
+ s := newStruct()
+
+ if s.Field("A").Name() != "A" {
+ t.Errorf("Fields 'A' field should have the name 'A'")
+ }
+}
+
+func TestField_Field(t *testing.T) {
+ s := newStruct()
+
+ e := s.Field("Bar").Field("E")
+
+ val, ok := e.Value().(string)
+ if !ok {
+ t.Error("The value of the field 'e' inside 'Bar' struct should be string")
+ }
+
+ if val != "example" {
+ t.Errorf("The value of 'e' should be 'example, got: %s", val)
+ }
+
+ defer func() {
+ err := recover()
+ if err == nil {
+ t.Error("Field of a non existing nested struct should panic")
+ }
+ }()
+
+ _ = s.Field("Bar").Field("e")
+}
+
+func TestField_Fields(t *testing.T) {
+ s := newStruct()
+ fields := s.Field("Bar").Fields()
+
+ if len(fields) != 3 {
+ t.Errorf("We expect 3 fields in embedded struct, was: %d", len(fields))
+ }
+}
+
+func TestField_FieldOk(t *testing.T) {
+ s := newStruct()
+
+ b, ok := s.FieldOk("Bar")
+ if !ok {
+ t.Error("The field 'Bar' should exists.")
+ }
+
+ e, ok := b.FieldOk("E")
+ if !ok {
+ t.Error("The field 'E' should exists.")
+ }
+
+ val, ok := e.Value().(string)
+ if !ok {
+ t.Error("The value of the field 'e' inside 'Bar' struct should be string")
+ }
+
+ if val != "example" {
+ t.Errorf("The value of 'e' should be 'example, got: %s", val)
+ }
+}
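
field_test.go covers typed access to individual struct members through Field, Set, Kind, IsZero and FieldOk. A minimal sketch of that API from calling code; the Server type and its values are illustrative:

package main

import (
	"fmt"

	"github.com/fatih/structs"
)

type Server struct {
	Name    string
	Enabled bool
}

func main() {
	srv := &Server{Name: "etcd-0"}
	s := structs.New(srv)

	// Field gives typed access to one member; Set writes through to the
	// underlying value, as TestField_Set demonstrates.
	f := s.Field("Enabled")
	if err := f.Set(true); err != nil {
		panic(err)
	}
	fmt.Println(srv.Enabled, f.Kind(), f.IsZero())

	// Field panics for unknown names (see TestField); FieldOk reports
	// existence without panicking.
	if _, ok := s.FieldOk("Missing"); !ok {
		fmt.Println("no such field")
	}
}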
diff --git a/Godeps/_workspace/src/github.com/fatih/structs/structs_example_test.go b/Godeps/_workspace/src/github.com/fatih/structs/structs_example_test.go
new file mode 100644
index 000000000..32bb82937
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fatih/structs/structs_example_test.go
@@ -0,0 +1,351 @@
+package structs
+
+import (
+ "fmt"
+ "time"
+)
+
+func ExampleNew() {
+ type Server struct {
+ Name string
+ ID int32
+ Enabled bool
+ }
+
+ server := &Server{
+ Name: "Arslan",
+ ID: 123456,
+ Enabled: true,
+ }
+
+ s := New(server)
+
+ fmt.Printf("Name : %v\n", s.Name())
+ fmt.Printf("Values : %v\n", s.Values())
+ fmt.Printf("Value of ID : %v\n", s.Field("ID").Value())
+ // Output:
+ // Name : Server
+ // Values : [Arslan 123456 true]
+ // Value of ID : 123456
+
+}
+
+func ExampleMap() {
+ type Server struct {
+ Name string
+ ID int32
+ Enabled bool
+ }
+
+ s := &Server{
+ Name: "Arslan",
+ ID: 123456,
+ Enabled: true,
+ }
+
+ m := Map(s)
+
+ fmt.Printf("%#v\n", m["Name"])
+ fmt.Printf("%#v\n", m["ID"])
+ fmt.Printf("%#v\n", m["Enabled"])
+ // Output:
+ // "Arslan"
+ // 123456
+ // true
+
+}
+
+func ExampleMap_tags() {
+ // Custom tags can change the map keys instead of using the field names
+ type Server struct {
+ Name string `structs:"server_name"`
+ ID int32 `structs:"server_id"`
+ Enabled bool `structs:"enabled"`
+ }
+
+ s := &Server{
+ Name: "Zeynep",
+ ID: 789012,
+ }
+
+ m := Map(s)
+
+ // access them by the custom tags defined above
+ fmt.Printf("%#v\n", m["server_name"])
+ fmt.Printf("%#v\n", m["server_id"])
+ fmt.Printf("%#v\n", m["enabled"])
+ // Output:
+ // "Zeynep"
+ // 789012
+ // false
+
+}
+
+func ExampleMap_nested() {
+ // By default, fields with struct types are processed too. We can stop
+ // processing them via the "omitnested" tag option.
+ type Server struct {
+ Name string `structs:"server_name"`
+ ID int32 `structs:"server_id"`
+ Time time.Time `structs:"time,omitnested"` // do not convert to map[string]interface{}
+ }
+
+ const shortForm = "2006-Jan-02"
+ t, _ := time.Parse("2006-Jan-02", "2013-Feb-03")
+
+ s := &Server{
+ Name: "Zeynep",
+ ID: 789012,
+ Time: t,
+ }
+
+ m := Map(s)
+
+ // access them by the custom tags defined above
+ fmt.Printf("%v\n", m["server_name"])
+ fmt.Printf("%v\n", m["server_id"])
+ fmt.Printf("%v\n", m["time"].(time.Time))
+ // Output:
+ // Zeynep
+ // 789012
+ // 2013-02-03 00:00:00 +0000 UTC
+}
+
+func ExampleMap_omitEmpty() {
+ // By default, fields with zero values are processed too. We
+ // can stop processing them via the "omitempty" tag option.
+ type Server struct {
+ Name string `structs:",omitempty"`
+ ID int32 `structs:"server_id,omitempty"`
+ Location string
+ }
+
+ // Only add location
+ s := &Server{
+ Location: "Tokyo",
+ }
+
+ m := Map(s)
+
+ // map contains only the Location field
+ fmt.Printf("%v\n", m)
+ // Output:
+ // map[Location:Tokyo]
+}
+
+func ExampleValues() {
+ type Server struct {
+ Name string
+ ID int32
+ Enabled bool
+ }
+
+ s := &Server{
+ Name: "Fatih",
+ ID: 135790,
+ Enabled: false,
+ }
+
+ m := Values(s)
+
+ fmt.Printf("Values: %+v\n", m)
+ // Output:
+ // Values: [Fatih 135790 false]
+}
+
+func ExampleValues_omitEmpty() {
+ // By default, fields with zero values are processed too. We
+ // can stop processing them via the "omitempty" tag option.
+ type Server struct {
+ Name string `structs:",omitempty"`
+ ID int32 `structs:"server_id,omitempty"`
+ Location string
+ }
+
+ // Only add location
+ s := &Server{
+ Location: "Ankara",
+ }
+
+ m := Values(s)
+
+ // values contains only the Location field
+ fmt.Printf("Values: %+v\n", m)
+ // Output:
+ // Values: [Ankara]
+}
+
+func ExampleValues_tags() {
+ type Location struct {
+ City string
+ Country string
+ }
+
+ type Server struct {
+ Name string
+ ID int32
+ Enabled bool
+ Location Location `structs:"-"` // values from location are not included anymore
+ }
+
+ s := &Server{
+ Name: "Fatih",
+ ID: 135790,
+ Enabled: false,
+ Location: Location{City: "Ankara", Country: "Turkey"},
+ }
+
+ // Let's get all values from the struct s. Note that we don't include values
+ // from the Location field
+ m := Values(s)
+
+ fmt.Printf("Values: %+v\n", m)
+ // Output:
+ // Values: [Fatih 135790 false]
+}
+
+func ExampleFields() {
+ type Access struct {
+ Name string
+ LastAccessed time.Time
+ Number int
+ }
+
+ s := &Access{
+ Name: "Fatih",
+ LastAccessed: time.Now(),
+ Number: 1234567,
+ }
+
+ fields := Fields(s)
+
+ for i, field := range fields {
+ fmt.Printf("[%d] %+v\n", i, field.Name())
+ }
+
+ // Output:
+ // [0] Name
+ // [1] LastAccessed
+ // [2] Number
+}
+
+func ExampleFields_nested() {
+ type Person struct {
+ Name string
+ Number int
+ }
+
+ type Access struct {
+ Person Person
+ HasPermission bool
+ LastAccessed time.Time
+ }
+
+ s := &Access{
+ Person: Person{Name: "fatih", Number: 1234567},
+ LastAccessed: time.Now(),
+ HasPermission: true,
+ }
+
+ // Let's get all fields from the struct s.
+ fields := Fields(s)
+
+ for _, field := range fields {
+ if field.Name() == "Person" {
+ fmt.Printf("Access.Person.Name: %+v\n", field.Field("Name").Value())
+ }
+ }
+
+ // Output:
+ // Access.Person.Name: fatih
+}
+
+func ExampleField() {
+ type Person struct {
+ Name string
+ Number int
+ }
+
+ type Access struct {
+ Person Person
+ HasPermission bool
+ LastAccessed time.Time
+ }
+
+ access := &Access{
+ Person: Person{Name: "fatih", Number: 1234567},
+ LastAccessed: time.Now(),
+ HasPermission: true,
+ }
+
+ // Create a new Struct type
+ s := New(access)
+
+ // Get the Field type for "Person" field
+ p := s.Field("Person")
+
+ // Get the underlying "Name field" and print the value of it
+ name := p.Field("Name")
+
+ fmt.Printf("Value of Person.Access.Name: %+v\n", name.Value())
+
+ // Output:
+ // Value of Person.Access.Name: fatih
+
+}
+
+func ExampleIsZero() {
+ type Server struct {
+ Name string
+ ID int32
+ Enabled bool
+ }
+
+ // Nothing is initialized
+ a := &Server{}
+ isZeroA := IsZero(a)
+
+ // Name and Enabled are initialized, but not ID
+ b := &Server{
+ Name: "Golang",
+ Enabled: true,
+ }
+ isZeroB := IsZero(b)
+
+ fmt.Printf("%#v\n", isZeroA)
+ fmt.Printf("%#v\n", isZeroB)
+ // Output:
+ // true
+ // false
+}
+
+func ExampleHasZero() {
+ // Let's define an Access struct. Note that the "Enabled" field is not
+ // going to be checked because we added the "structs" tag to the field.
+ type Access struct {
+ Name string
+ LastAccessed time.Time
+ Number int
+ Enabled bool `structs:"-"`
+ }
+
+ // Name and Number are not initialized.
+ a := &Access{
+ LastAccessed: time.Now(),
+ }
+ hasZeroA := HasZero(a)
+
+ // Name and Number are initialized.
+ b := &Access{
+ Name: "Fatih",
+ LastAccessed: time.Now(),
+ Number: 12345,
+ }
+ hasZeroB := HasZero(b)
+
+ fmt.Printf("%#v\n", hasZeroA)
+ fmt.Printf("%#v\n", hasZeroB)
+ // Output:
+ // true
+ // false
+}
diff --git a/Godeps/_workspace/src/github.com/fatih/structs/structs_test.go b/Godeps/_workspace/src/github.com/fatih/structs/structs_test.go
new file mode 100644
index 000000000..14e3de72f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fatih/structs/structs_test.go
@@ -0,0 +1,898 @@
+package structs
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestMapNonStruct(t *testing.T) {
+ foo := []string{"foo"}
+
+ defer func() {
+ err := recover()
+ if err == nil {
+ t.Error("Passing a non struct into Map should panic")
+ }
+ }()
+
+ // this should panic. We are going to recover and test it
+ _ = Map(foo)
+}
+
+func TestStructIndexes(t *testing.T) {
+ type C struct {
+ something int
+ Props map[string]interface{}
+ }
+
+ defer func() {
+ err := recover()
+ if err != nil {
+ fmt.Printf("err %+v\n", err)
+ t.Error("Using mixed indexes should not panic")
+ }
+ }()
+
+ // They should not panic
+ _ = Map(&C{})
+ _ = Fields(&C{})
+ _ = Values(&C{})
+ _ = IsZero(&C{})
+ _ = HasZero(&C{})
+}
+
+func TestMap(t *testing.T) {
+ var T = struct {
+ A string
+ B int
+ C bool
+ }{
+ A: "a-value",
+ B: 2,
+ C: true,
+ }
+
+ a := Map(T)
+
+ if typ := reflect.TypeOf(a).Kind(); typ != reflect.Map {
+ t.Errorf("Map should return a map type, got: %v", typ)
+ }
+
+ // we have three fields
+ if len(a) != 3 {
+ t.Errorf("Map should return a map of len 3, got: %d", len(a))
+ }
+
+ inMap := func(val interface{}) bool {
+ for _, v := range a {
+ if reflect.DeepEqual(v, val) {
+ return true
+ }
+ }
+
+ return false
+ }
+
+ for _, val := range []interface{}{"a-value", 2, true} {
+ if !inMap(val) {
+ t.Errorf("Map should have the value %v", val)
+ }
+ }
+
+}
+
+func TestMap_Tag(t *testing.T) {
+ var T = struct {
+ A string `structs:"x"`
+ B int `structs:"y"`
+ C bool `structs:"z"`
+ }{
+ A: "a-value",
+ B: 2,
+ C: true,
+ }
+
+ a := Map(T)
+
+ inMap := func(key interface{}) bool {
+ for k := range a {
+ if reflect.DeepEqual(k, key) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, key := range []string{"x", "y", "z"} {
+ if !inMap(key) {
+ t.Errorf("Map should have the key %v", key)
+ }
+ }
+
+}
+
+func TestMap_CustomTag(t *testing.T) {
+ var T = struct {
+ A string `json:"x"`
+ B int `json:"y"`
+ C bool `json:"z"`
+ D struct {
+ E string `json:"jkl"`
+ } `json:"nested"`
+ }{
+ A: "a-value",
+ B: 2,
+ C: true,
+ }
+ T.D.E = "e-value"
+
+ s := New(T)
+ s.TagName = "json"
+
+ a := s.Map()
+
+ inMap := func(key interface{}) bool {
+ for k := range a {
+ if reflect.DeepEqual(k, key) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, key := range []string{"x", "y", "z"} {
+ if !inMap(key) {
+ t.Errorf("Map should have the key %v", key)
+ }
+ }
+
+ nested, ok := a["nested"].(map[string]interface{})
+ if !ok {
+ t.Fatalf("Map should contain the D field that is tagged as 'nested'")
+ }
+
+ e, ok := nested["jkl"].(string)
+ if !ok {
+ t.Fatalf("Map should contain the D.E field that is tagged as 'jkl'")
+ }
+
+ if e != "e-value" {
+ t.Errorf("D.E field should be equal to 'e-value', got: '%v'", e)
+ }
+
+}
+
+func TestMap_MultipleCustomTag(t *testing.T) {
+ var A = struct {
+ X string `aa:"ax"`
+ }{"a_value"}
+
+ aStruct := New(A)
+ aStruct.TagName = "aa"
+
+ var B = struct {
+ X string `bb:"bx"`
+ }{"b_value"}
+
+ bStruct := New(B)
+ bStruct.TagName = "bb"
+
+ a, b := aStruct.Map(), bStruct.Map()
+ if !reflect.DeepEqual(a, map[string]interface{}{"ax": "a_value"}) {
+ t.Error("Map should have field ax with value a_value")
+ }
+
+ if !reflect.DeepEqual(b, map[string]interface{}{"bx": "b_value"}) {
+ t.Error("Map should have field bx with value b_value")
+ }
+}
+
+func TestMap_OmitEmpty(t *testing.T) {
+ type A struct {
+ Name string
+ Value string `structs:",omitempty"`
+ Time time.Time `structs:",omitempty"`
+ }
+ a := A{}
+
+ m := Map(a)
+
+ _, ok := m["Value"].(map[string]interface{})
+ if ok {
+ t.Error("Map should not contain the Value field that is tagged as omitempty")
+ }
+
+ _, ok = m["Time"].(map[string]interface{})
+ if ok {
+ t.Error("Map should not contain the Time field that is tagged as omitempty")
+ }
+}
+
+func TestMap_OmitNested(t *testing.T) {
+ type A struct {
+ Name string
+ Value string
+ Time time.Time `structs:",omitnested"`
+ }
+ a := A{Time: time.Now()}
+
+ type B struct {
+ Desc string
+ A A
+ }
+ b := &B{A: a}
+
+ m := Map(b)
+
+ in, ok := m["A"].(map[string]interface{})
+ if !ok {
+ t.Error("Map nested structs is not available in the map")
+ }
+
+ // should not happen
+ if _, ok := in["Time"].(map[string]interface{}); ok {
+ t.Error("Map nested struct should omit recursiving parsing of Time")
+ }
+
+ if _, ok := in["Time"].(time.Time); !ok {
+ t.Error("Map nested struct should stop parsing of Time at is current value")
+ }
+}
+
+func TestMap_Nested(t *testing.T) {
+ type A struct {
+ Name string
+ }
+ a := &A{Name: "example"}
+
+ type B struct {
+ A *A
+ }
+ b := &B{A: a}
+
+ m := Map(b)
+
+ if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
+ t.Errorf("Map should return a map type, got: %v", typ)
+ }
+
+ in, ok := m["A"].(map[string]interface{})
+ if !ok {
+ t.Error("Map nested structs is not available in the map")
+ }
+
+ if name := in["Name"].(string); name != "example" {
+ t.Errorf("Map nested struct's name field should give example, got: %s", name)
+ }
+}
+
+func TestMap_Anonymous(t *testing.T) {
+ type A struct {
+ Name string
+ }
+ a := &A{Name: "example"}
+
+ type B struct {
+ *A
+ }
+ b := &B{}
+ b.A = a
+
+ m := Map(b)
+
+ if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
+ t.Errorf("Map should return a map type, got: %v", typ)
+ }
+
+ in, ok := m["A"].(map[string]interface{})
+ if !ok {
+ t.Error("Embedded structs is not available in the map")
+ }
+
+ if name := in["Name"].(string); name != "example" {
+ t.Errorf("Embedded A struct's Name field should give example, got: %s", name)
+ }
+}
+
+func TestStruct(t *testing.T) {
+ var T = struct{}{}
+
+ if !IsStruct(T) {
+ t.Errorf("T should be a struct, got: %T", T)
+ }
+
+ if !IsStruct(&T) {
+ t.Errorf("T should be a struct, got: %T", T)
+ }
+
+}
+
+func TestValues(t *testing.T) {
+ var T = struct {
+ A string
+ B int
+ C bool
+ }{
+ A: "a-value",
+ B: 2,
+ C: true,
+ }
+
+ s := Values(T)
+
+ if typ := reflect.TypeOf(s).Kind(); typ != reflect.Slice {
+ t.Errorf("Values should return a slice type, got: %v", typ)
+ }
+
+ inSlice := func(val interface{}) bool {
+ for _, v := range s {
+ if reflect.DeepEqual(v, val) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, val := range []interface{}{"a-value", 2, true} {
+ if !inSlice(val) {
+ t.Errorf("Values should have the value %v", val)
+ }
+ }
+}
+
+func TestValues_OmitEmpty(t *testing.T) {
+ type A struct {
+ Name string
+ Value int `structs:",omitempty"`
+ }
+
+ a := A{Name: "example"}
+ s := Values(a)
+
+ if len(s) != 1 {
+ t.Errorf("Values of omitted empty fields should be not counted")
+ }
+
+ if s[0].(string) != "example" {
+ t.Errorf("Values of omitted empty fields should left the value example")
+ }
+}
+
+func TestValues_OmitNested(t *testing.T) {
+ type A struct {
+ Name string
+ Value int
+ }
+
+ a := A{
+ Name: "example",
+ Value: 123,
+ }
+
+ type B struct {
+ A A `structs:",omitnested"`
+ C int
+ }
+ b := &B{A: a, C: 123}
+
+ s := Values(b)
+
+ if len(s) != 2 {
+ t.Errorf("Values of omitted nested struct should be not counted")
+ }
+
+ inSlice := func(val interface{}) bool {
+ for _, v := range s {
+ if reflect.DeepEqual(v, val) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, val := range []interface{}{123, a} {
+ if !inSlice(val) {
+ t.Errorf("Values should have the value %v", val)
+ }
+ }
+}
+
+func TestValues_Nested(t *testing.T) {
+ type A struct {
+ Name string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A A
+ C int
+ }
+ b := &B{A: a, C: 123}
+
+ s := Values(b)
+
+ inSlice := func(val interface{}) bool {
+ for _, v := range s {
+ if reflect.DeepEqual(v, val) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, val := range []interface{}{"example", 123} {
+ if !inSlice(val) {
+ t.Errorf("Values should have the value %v", val)
+ }
+ }
+}
+
+func TestValues_Anonymous(t *testing.T) {
+ type A struct {
+ Name string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A
+ C int
+ }
+ b := &B{C: 123}
+ b.A = a
+
+ s := Values(b)
+
+ inSlice := func(val interface{}) bool {
+ for _, v := range s {
+ if reflect.DeepEqual(v, val) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, val := range []interface{}{"example", 123} {
+ if !inSlice(val) {
+ t.Errorf("Values should have the value %v", val)
+ }
+ }
+}
+
+func TestNames(t *testing.T) {
+ var T = struct {
+ A string
+ B int
+ C bool
+ }{
+ A: "a-value",
+ B: 2,
+ C: true,
+ }
+
+ s := Names(T)
+
+ if len(s) != 3 {
+ t.Errorf("Names should return a slice of len 3, got: %d", len(s))
+ }
+
+ inSlice := func(val string) bool {
+ for _, v := range s {
+ if reflect.DeepEqual(v, val) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, val := range []string{"A", "B", "C"} {
+ if !inSlice(val) {
+ t.Errorf("Names should have the value %v", val)
+ }
+ }
+}
+
+func TestFields(t *testing.T) {
+ var T = struct {
+ A string
+ B int
+ C bool
+ }{
+ A: "a-value",
+ B: 2,
+ C: true,
+ }
+
+ s := Fields(T)
+
+ if len(s) != 3 {
+ t.Errorf("Fields should return a slice of len 3, got: %d", len(s))
+ }
+
+ inSlice := func(val string) bool {
+ for _, v := range s {
+ if reflect.DeepEqual(v.Name(), val) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, val := range []string{"A", "B", "C"} {
+ if !inSlice(val) {
+ t.Errorf("Fields should have the value %v", val)
+ }
+ }
+}
+
+func TestFields_OmitNested(t *testing.T) {
+ type A struct {
+ Name string
+ Enabled bool
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A A
+ C int
+ Value string `structs:"-"`
+ Number int
+ }
+ b := &B{A: a, C: 123}
+
+ s := Fields(b)
+
+ if len(s) != 3 {
+ t.Errorf("Fields should omit nested struct. Expecting 2 got: %d", len(s))
+ }
+
+ inSlice := func(val interface{}) bool {
+ for _, v := range s {
+ if reflect.DeepEqual(v.Name(), val) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, val := range []interface{}{"A", "C"} {
+ if !inSlice(val) {
+ t.Errorf("Fields should have the value %v", val)
+ }
+ }
+}
+
+func TestFields_Anonymous(t *testing.T) {
+ type A struct {
+ Name string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A
+ C int
+ }
+ b := &B{C: 123}
+ b.A = a
+
+ s := Fields(b)
+
+ inSlice := func(val interface{}) bool {
+ for _, v := range s {
+ if reflect.DeepEqual(v.Name(), val) {
+ return true
+ }
+ }
+ return false
+ }
+
+ for _, val := range []interface{}{"A", "C"} {
+ if !inSlice(val) {
+ t.Errorf("Fields should have the value %v", val)
+ }
+ }
+}
+
+func TestIsZero(t *testing.T) {
+ var T = struct {
+ A string
+ B int
+ C bool `structs:"-"`
+ D []string
+ }{}
+
+ ok := IsZero(T)
+ if !ok {
+ t.Error("IsZero should return true because none of the fields are initialized.")
+ }
+
+ var X = struct {
+ A string
+ F *bool
+ }{
+ A: "a-value",
+ }
+
+ ok = IsZero(X)
+ if ok {
+ t.Error("IsZero should return false because A is initialized")
+ }
+
+ var Y = struct {
+ A string
+ B int
+ }{
+ A: "a-value",
+ B: 123,
+ }
+
+ ok = IsZero(Y)
+ if ok {
+ t.Error("IsZero should return false because A and B is initialized")
+ }
+}
+
+func TestIsZero_OmitNested(t *testing.T) {
+ type A struct {
+ Name string
+ D string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A A `structs:",omitnested"`
+ C int
+ }
+ b := &B{A: a, C: 123}
+
+ ok := IsZero(b)
+ if ok {
+ t.Error("IsZero should return false because A, B and C are initialized")
+ }
+
+ aZero := A{}
+ bZero := &B{A: aZero}
+
+ ok = IsZero(bZero)
+ if !ok {
+ t.Error("IsZero should return true because neither A nor B is initialized")
+ }
+
+}
+
+func TestIsZero_Nested(t *testing.T) {
+ type A struct {
+ Name string
+ D string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A A
+ C int
+ }
+ b := &B{A: a, C: 123}
+
+ ok := IsZero(b)
+ if ok {
+ t.Error("IsZero should return false because A and C are initialized")
+ }
+
+ aZero := A{}
+ bZero := &B{A: aZero}
+
+ ok = IsZero(bZero)
+ if !ok {
+ t.Error("IsZero should return true because neither A nor C is initialized")
+ }
+
+}
+
+func TestIsZero_Anonymous(t *testing.T) {
+ type A struct {
+ Name string
+ D string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A
+ C int
+ }
+ b := &B{C: 123}
+ b.A = a
+
+ ok := IsZero(b)
+ if ok {
+ t.Error("IsZero should return false because A and C are initialized")
+ }
+
+ aZero := A{}
+ bZero := &B{}
+ bZero.A = aZero
+
+ ok = IsZero(bZero)
+ if !ok {
+ t.Error("IsZero should return true because neither A nor C is initialized")
+ }
+}
+
+func TestHasZero(t *testing.T) {
+ var T = struct {
+ A string
+ B int
+ C bool `structs:"-"`
+ D []string
+ }{
+ A: "a-value",
+ B: 2,
+ }
+
+ ok := HasZero(T)
+ if !ok {
+ t.Error("HasZero should return true because D is not initialized.")
+ }
+
+ var X = struct {
+ A string
+ F *bool
+ }{
+ A: "a-value",
+ }
+
+ ok = HasZero(X)
+ if !ok {
+ t.Error("HasZero should return true because F is not initialized")
+ }
+
+ var Y = struct {
+ A string
+ B int
+ }{
+ A: "a-value",
+ B: 123,
+ }
+
+ ok = HasZero(Y)
+ if ok {
+ t.Error("HasZero should return false because A and B are initialized")
+ }
+}
+
+func TestHasZero_OmitNested(t *testing.T) {
+ type A struct {
+ Name string
+ D string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A A `structs:",omitnested"`
+ C int
+ }
+ b := &B{A: a, C: 123}
+
+ // Because the field A inside B is tagged as omitnested, HasZero stops
+ // iterating deeper and never looks at A.D, so it should return false here.
+ ok := HasZero(b)
+ if ok {
+ t.Error("HasZero should return false because A and C are initialized")
+ }
+}
+
+func TestHasZero_Nested(t *testing.T) {
+ type A struct {
+ Name string
+ D string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A A
+ C int
+ }
+ b := &B{A: a, C: 123}
+
+ ok := HasZero(b)
+ if !ok {
+ t.Error("HasZero should return true because D is not initialized")
+ }
+}
+
+func TestHasZero_Anonymous(t *testing.T) {
+ type A struct {
+ Name string
+ D string
+ }
+ a := A{Name: "example"}
+
+ type B struct {
+ A
+ C int
+ }
+ b := &B{C: 123}
+ b.A = a
+
+ ok := HasZero(b)
+ if !ok {
+ t.Error("HasZero should return true because D is not initialized")
+ }
+}
+
+func TestName(t *testing.T) {
+ type Foo struct {
+ A string
+ B bool
+ }
+ f := &Foo{}
+
+ n := Name(f)
+ if n != "Foo" {
+ t.Errorf("Name should return Foo, got: %s", n)
+ }
+
+ unnamed := struct{ Name string }{Name: "Cihangir"}
+ m := Name(unnamed)
+ if m != "" {
+ t.Errorf("Name should return empty string for unnamed struct, got: %s", m)
+ }
+
+ defer func() {
+ err := recover()
+ if err == nil {
+ t.Error("Name should panic if a non struct is passed")
+ }
+ }()
+
+ Name([]string{})
+}
+
+func TestNestedNilPointer(t *testing.T) {
+ type Collar struct {
+ Engraving string
+ }
+
+ type Dog struct {
+ Name string
+ Collar *Collar
+ }
+
+ type Person struct {
+ Name string
+ Dog *Dog
+ }
+
+ person := &Person{
+ Name: "John",
+ }
+
+ personWithDog := &Person{
+ Name: "Ron",
+ Dog: &Dog{
+ Name: "Rover",
+ },
+ }
+
+ personWithDogWithCollar := &Person{
+ Name: "Kon",
+ Dog: &Dog{
+ Name: "Ruffles",
+ Collar: &Collar{
+ Engraving: "If lost, call Kon",
+ },
+ },
+ }
+
+ defer func() {
+ err := recover()
+ if err != nil {
+ fmt.Printf("err %+v\n", err)
+ t.Error("Internal nil pointer should not panic")
+ }
+ }()
+
+ _ = Map(person)                  // previously panicked on the nil Dog pointer
+ _ = Map(personWithDog)           // previously panicked on the nil Collar pointer
+ _ = Map(personWithDogWithCollar) // fully populated, never panicked
+}
diff --git a/Godeps/_workspace/src/github.com/fatih/structs/tags_test.go b/Godeps/_workspace/src/github.com/fatih/structs/tags_test.go
new file mode 100644
index 000000000..5d12724f1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fatih/structs/tags_test.go
@@ -0,0 +1,46 @@
+package structs
+
+import "testing"
+
+func TestParseTag_Name(t *testing.T) {
+ tags := []struct {
+ tag string
+ has bool
+ }{
+ {"", false},
+ {"name", true},
+ {"name,opt", true},
+ {"name , opt, opt2", false}, // has a single whitespace
+ {", opt, opt2", false},
+ }
+
+ for _, tag := range tags {
+ name, _ := parseTag(tag.tag)
+
+ if (name != "name") && tag.has {
+ t.Errorf("Parse tag should return name: %#v", tag)
+ }
+ }
+}
+
+func TestParseTag_Opts(t *testing.T) {
+ tags := []struct {
+ opts string
+ has bool
+ }{
+ {"name", false},
+ {"name,opt", true},
+ {"name , opt, opt2", false}, // has a single whitespace
+ {",opt, opt2", true},
+ {", opt3, opt4", false},
+ }
+
+ // search for "opt"
+ for _, tag := range tags {
+ _, opts := parseTag(tag.opts)
+
+ if opts.Has("opt") != tag.has {
+ t.Errorf("Tag opts should have opt: %#v", tag)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/go-ldap/ldap/conn.go b/Godeps/_workspace/src/github.com/go-ldap/ldap/conn.go
index a08eae0a5..ef7446158 100644
--- a/Godeps/_workspace/src/github.com/go-ldap/ldap/conn.go
+++ b/Godeps/_workspace/src/github.com/go-ldap/ldap/conn.go
@@ -177,7 +177,7 @@ func (l *Conn) StartTLS(config *tls.Config) error {
ber.PrintPacket(packet)
}
- if packet.Children[1].Children[0].Value.(int64) == LDAPResultSuccess {
+ if resultCode, message := getLDAPResultCode(packet); resultCode == LDAPResultSuccess {
conn := tls.Client(l.conn, config)
if err := conn.Handshake(); err != nil {
@@ -188,11 +188,7 @@ func (l *Conn) StartTLS(config *tls.Config) error {
l.isTLS = true
l.conn = conn
} else {
- // https://tools.ietf.org/html/rfc4511#section-4.1.9
- // Children[1].Children[2] is the diagnosticMessage which is guaranteed to exist.
- return NewError(
- uint8(packet.Children[1].Children[0].Value.(int64)),
- fmt.Errorf("ldap: cannot StartTLS (%s)", packet.Children[1].Children[2].Value.(string)))
+ return NewError(resultCode, fmt.Errorf("ldap: cannot StartTLS (%s)", message))
}
go l.reader()
diff --git a/Godeps/_workspace/src/github.com/go-ldap/ldap/dn_test.go b/Godeps/_workspace/src/github.com/go-ldap/ldap/dn_test.go
new file mode 100644
index 000000000..39817c427
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-ldap/ldap/dn_test.go
@@ -0,0 +1,70 @@
+package ldap_test
+
+import (
+ "reflect"
+ "testing"
+
+ "gopkg.in/ldap.v2"
+)
+
+func TestSuccessfulDNParsing(t *testing.T) {
+ testcases := map[string]ldap.DN{
+ "": ldap.DN{[]*ldap.RelativeDN{}},
+ "cn=Jim\\2C \\22Hasse Hö\\22 Hansson!,dc=dummy,dc=com": ldap.DN{[]*ldap.RelativeDN{
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{"cn", "Jim, \"Hasse Hö\" Hansson!"}}},
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{"dc", "dummy"}}},
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{"dc", "com"}}}}},
+ "UID=jsmith,DC=example,DC=net": ldap.DN{[]*ldap.RelativeDN{
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{"UID", "jsmith"}}},
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{"DC", "example"}}},
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{"DC", "net"}}}}},
+ "OU=Sales+CN=J. Smith,DC=example,DC=net": ldap.DN{[]*ldap.RelativeDN{
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{
+ &ldap.AttributeTypeAndValue{"OU", "Sales"},
+ &ldap.AttributeTypeAndValue{"CN", "J. Smith"}}},
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{"DC", "example"}}},
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{"DC", "net"}}}}},
+ "1.3.6.1.4.1.1466.0=#04024869": ldap.DN{[]*ldap.RelativeDN{
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{"1.3.6.1.4.1.1466.0", "Hi"}}}}},
+ "1.3.6.1.4.1.1466.0=#04024869,DC=net": ldap.DN{[]*ldap.RelativeDN{
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{"1.3.6.1.4.1.1466.0", "Hi"}}},
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{"DC", "net"}}}}},
+ "CN=Lu\\C4\\8Di\\C4\\87": ldap.DN{[]*ldap.RelativeDN{
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{"CN", "Lučić"}}}}},
+ }
+
+ for test, answer := range testcases {
+ dn, err := ldap.ParseDN(test)
+ if err != nil {
+ t.Errorf(err.Error())
+ continue
+ }
+ if !reflect.DeepEqual(dn, &answer) {
+ t.Errorf("Parsed DN %s is not equal to the expected structure", test)
+ for _, rdn := range dn.RDNs {
+ for _, attribs := range rdn.Attributes {
+ t.Logf("#%v\n", attribs)
+ }
+ }
+ }
+ }
+}
+
+func TestErrorDNParsing(t *testing.T) {
+ testcases := map[string]string{
+ "*": "DN ended with incomplete type, value pair",
+ "cn=Jim\\0Test": "Failed to decode escaped character: encoding/hex: invalid byte: U+0054 'T'",
+ "cn=Jim\\0": "Got corrupted escaped character",
+ "DC=example,=net": "DN ended with incomplete type, value pair",
+ "1=#0402486": "Failed to decode BER encoding: encoding/hex: odd length hex string",
+ }
+
+ for test, answer := range testcases {
+ _, err := ldap.ParseDN(test)
+ if err == nil {
+ t.Errorf("Expected %s to fail parsing but succeeded\n", test)
+ } else if err.Error() != answer {
+ t.Errorf("Unexpected error on %s:\n%s\nvs.\n%s\n", test, answer, err.Error())
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/go-ldap/ldap/error.go b/Godeps/_workspace/src/github.com/go-ldap/ldap/error.go
new file mode 100644
index 000000000..2dbc30ac0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-ldap/ldap/error.go
@@ -0,0 +1,137 @@
+package ldap
+
+import (
+ "fmt"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// LDAP Result Codes
+const (
+ LDAPResultSuccess = 0
+ LDAPResultOperationsError = 1
+ LDAPResultProtocolError = 2
+ LDAPResultTimeLimitExceeded = 3
+ LDAPResultSizeLimitExceeded = 4
+ LDAPResultCompareFalse = 5
+ LDAPResultCompareTrue = 6
+ LDAPResultAuthMethodNotSupported = 7
+ LDAPResultStrongAuthRequired = 8
+ LDAPResultReferral = 10
+ LDAPResultAdminLimitExceeded = 11
+ LDAPResultUnavailableCriticalExtension = 12
+ LDAPResultConfidentialityRequired = 13
+ LDAPResultSaslBindInProgress = 14
+ LDAPResultNoSuchAttribute = 16
+ LDAPResultUndefinedAttributeType = 17
+ LDAPResultInappropriateMatching = 18
+ LDAPResultConstraintViolation = 19
+ LDAPResultAttributeOrValueExists = 20
+ LDAPResultInvalidAttributeSyntax = 21
+ LDAPResultNoSuchObject = 32
+ LDAPResultAliasProblem = 33
+ LDAPResultInvalidDNSyntax = 34
+ LDAPResultAliasDereferencingProblem = 36
+ LDAPResultInappropriateAuthentication = 48
+ LDAPResultInvalidCredentials = 49
+ LDAPResultInsufficientAccessRights = 50
+ LDAPResultBusy = 51
+ LDAPResultUnavailable = 52
+ LDAPResultUnwillingToPerform = 53
+ LDAPResultLoopDetect = 54
+ LDAPResultNamingViolation = 64
+ LDAPResultObjectClassViolation = 65
+ LDAPResultNotAllowedOnNonLeaf = 66
+ LDAPResultNotAllowedOnRDN = 67
+ LDAPResultEntryAlreadyExists = 68
+ LDAPResultObjectClassModsProhibited = 69
+ LDAPResultAffectsMultipleDSAs = 71
+ LDAPResultOther = 80
+
+ ErrorNetwork = 200
+ ErrorFilterCompile = 201
+ ErrorFilterDecompile = 202
+ ErrorDebugging = 203
+ ErrorUnexpectedMessage = 204
+ ErrorUnexpectedResponse = 205
+)
+
+var LDAPResultCodeMap = map[uint8]string{
+ LDAPResultSuccess: "Success",
+ LDAPResultOperationsError: "Operations Error",
+ LDAPResultProtocolError: "Protocol Error",
+ LDAPResultTimeLimitExceeded: "Time Limit Exceeded",
+ LDAPResultSizeLimitExceeded: "Size Limit Exceeded",
+ LDAPResultCompareFalse: "Compare False",
+ LDAPResultCompareTrue: "Compare True",
+ LDAPResultAuthMethodNotSupported: "Auth Method Not Supported",
+ LDAPResultStrongAuthRequired: "Strong Auth Required",
+ LDAPResultReferral: "Referral",
+ LDAPResultAdminLimitExceeded: "Admin Limit Exceeded",
+ LDAPResultUnavailableCriticalExtension: "Unavailable Critical Extension",
+ LDAPResultConfidentialityRequired: "Confidentiality Required",
+ LDAPResultSaslBindInProgress: "Sasl Bind In Progress",
+ LDAPResultNoSuchAttribute: "No Such Attribute",
+ LDAPResultUndefinedAttributeType: "Undefined Attribute Type",
+ LDAPResultInappropriateMatching: "Inappropriate Matching",
+ LDAPResultConstraintViolation: "Constraint Violation",
+ LDAPResultAttributeOrValueExists: "Attribute Or Value Exists",
+ LDAPResultInvalidAttributeSyntax: "Invalid Attribute Syntax",
+ LDAPResultNoSuchObject: "No Such Object",
+ LDAPResultAliasProblem: "Alias Problem",
+ LDAPResultInvalidDNSyntax: "Invalid DN Syntax",
+ LDAPResultAliasDereferencingProblem: "Alias Dereferencing Problem",
+ LDAPResultInappropriateAuthentication: "Inappropriate Authentication",
+ LDAPResultInvalidCredentials: "Invalid Credentials",
+ LDAPResultInsufficientAccessRights: "Insufficient Access Rights",
+ LDAPResultBusy: "Busy",
+ LDAPResultUnavailable: "Unavailable",
+ LDAPResultUnwillingToPerform: "Unwilling To Perform",
+ LDAPResultLoopDetect: "Loop Detect",
+ LDAPResultNamingViolation: "Naming Violation",
+ LDAPResultObjectClassViolation: "Object Class Violation",
+ LDAPResultNotAllowedOnNonLeaf: "Not Allowed On Non Leaf",
+ LDAPResultNotAllowedOnRDN: "Not Allowed On RDN",
+ LDAPResultEntryAlreadyExists: "Entry Already Exists",
+ LDAPResultObjectClassModsProhibited: "Object Class Mods Prohibited",
+ LDAPResultAffectsMultipleDSAs: "Affects Multiple DSAs",
+ LDAPResultOther: "Other",
+}
+
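+// getLDAPResultCode extracts the numeric result code and diagnostic message
+// from an LDAP response packet, falling back to ErrorNetwork when the packet
+// does not have the expected structure.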
+func getLDAPResultCode(packet *ber.Packet) (code uint8, description string) {
+ if len(packet.Children) >= 2 {
+ response := packet.Children[1]
+ if response.ClassType == ber.ClassApplication && response.TagType == ber.TypeConstructed && len(response.Children) >= 3 {
+ // Children[1].Children[2] is the diagnosticMessage which is guaranteed to exist as seen here: https://tools.ietf.org/html/rfc4511#section-4.1.9
+ return uint8(response.Children[0].Value.(int64)), response.Children[2].Value.(string)
+ }
+ }
+
+ return ErrorNetwork, "Invalid packet format"
+}
+
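+// Error wraps an LDAP result code together with the underlying error.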
+type Error struct {
+ Err error
+ ResultCode uint8
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("LDAP Result Code %d %q: %s", e.ResultCode, LDAPResultCodeMap[e.ResultCode], e.Err.Error())
+}
+
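+// NewError creates a new *Error from an LDAP result code and an error.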
+func NewError(resultCode uint8, err error) error {
+ return &Error{ResultCode: resultCode, Err: err}
+}
+
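+// IsErrorWithCode reports whether err is an *Error carrying the desired LDAP
+// result code.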
+func IsErrorWithCode(err error, desiredResultCode uint8) bool {
+ if err == nil {
+ return false
+ }
+
+ serverError, ok := err.(*Error)
+ if !ok {
+ return false
+ }
+
+ return serverError.ResultCode == desiredResultCode
+}
diff --git a/Godeps/_workspace/src/github.com/go-ldap/ldap/example_test.go b/Godeps/_workspace/src/github.com/go-ldap/ldap/example_test.go
new file mode 100644
index 000000000..b018a9664
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-ldap/ldap/example_test.go
@@ -0,0 +1,305 @@
+package ldap_test
+
+import (
+ "crypto/tls"
+ "fmt"
+ "log"
+
+ "gopkg.in/ldap.v2"
+)
+
+// ExampleConn_Bind demonstrates how to bind a connection to an LDAP user,
+// allowing access to restricted attributes that user has access to
+func ExampleConn_Bind() {
+ l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", "ldap.example.com", 389))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer l.Close()
+
+ err = l.Bind("cn=read-only-admin,dc=example,dc=com", "password")
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+// ExampleConn_Search demonstrates how to use the search interface
+func ExampleConn_Search() {
+ l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", "ldap.example.com", 389))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer l.Close()
+
+ searchRequest := ldap.NewSearchRequest(
+ "dc=example,dc=com", // The base dn to search
+ ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
+ "(&(objectClass=organizationalPerson))", // The filter to apply
+ []string{"dn", "cn"}, // A list of attributes to retrieve
+ nil,
+ )
+
+ sr, err := l.Search(searchRequest)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ for _, entry := range sr.Entries {
+ fmt.Printf("%s: %v\n", entry.DN, entry.GetAttributeValue("cn"))
+ }
+}
+
+// ExampleConn_StartTLS demonstrates how to upgrade an existing connection with StartTLS
+func ExampleConn_StartTLS() {
+ l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", "ldap.example.com", 389))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer l.Close()
+
+ // Reconnect with TLS
+ err = l.StartTLS(&tls.Config{InsecureSkipVerify: true})
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Operations via l are now encrypted
+}
+
+// ExampleConn_Compare demonstrates how to compare an attribute with a value
+func ExampleConn_Compare() {
+ l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", "ldap.example.com", 389))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer l.Close()
+
+ matched, err := l.Compare("cn=user,dc=example,dc=com", "uid", "someuserid")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Println(matched)
+}
+
+func ExampleConn_PasswordModify_admin() {
+ l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", "ldap.example.com", 389))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer l.Close()
+
+ err = l.Bind("cn=admin,dc=example,dc=com", "password")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ passwordModifyRequest := ldap.NewPasswordModifyRequest("cn=user,dc=example,dc=com", "", "NewPassword")
+ _, err = l.PasswordModify(passwordModifyRequest)
+
+ if err != nil {
+ log.Fatalf("Password could not be changed: %s", err.Error())
+ }
+}
+
+func ExampleConn_PasswordModify_generatedPassword() {
+ l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", "ldap.example.com", 389))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer l.Close()
+
+ err = l.Bind("cn=user,dc=example,dc=com", "password")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ passwordModifyRequest := ldap.NewPasswordModifyRequest("", "OldPassword", "")
+ passwordModifyResponse, err := l.PasswordModify(passwordModifyRequest)
+ if err != nil {
+ log.Fatalf("Password could not be changed: %s", err.Error())
+ }
+
+ generatedPassword := passwordModifyResponse.GeneratedPassword
+ log.Printf("Generated password: %s\n", generatedPassword)
+}
+
+func ExampleConn_PasswordModify_setNewPassword() {
+ l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", "ldap.example.com", 389))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer l.Close()
+
+ err = l.Bind("cn=user,dc=example,dc=com", "password")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ passwordModifyRequest := ldap.NewPasswordModifyRequest("", "OldPassword", "NewPassword")
+ _, err = l.PasswordModify(passwordModifyRequest)
+
+ if err != nil {
+ log.Fatalf("Password could not be changed: %s", err.Error())
+ }
+}
+
+func ExampleConn_Modify() {
+ l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", "ldap.example.com", 389))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer l.Close()
+
+ // Add a description, and replace the mail attributes
+ modify := ldap.NewModifyRequest("cn=user,dc=example,dc=com")
+ modify.Add("description", []string{"An example user"})
+ modify.Replace("mail", []string{"user@example.org"})
+
+ err = l.Modify(modify)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+// Example_userAuthentication shows how a typical application can verify a login attempt
+func Example_userAuthentication() {
+ // The username and password we want to check
+ username := "someuser"
+ password := "userpassword"
+
+ bindusername := "readonly"
+ bindpassword := "password"
+
+ l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", "ldap.example.com", 389))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer l.Close()
+
+ // Reconnect with TLS
+ err = l.StartTLS(&tls.Config{InsecureSkipVerify: true})
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // First bind with a read only user
+ err = l.Bind(bindusername, bindpassword)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Search for the given username
+ searchRequest := ldap.NewSearchRequest(
+ "dc=example,dc=com",
+ ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
+ fmt.Sprintf("(&(objectClass=organizationalPerson)(uid=%s))", username),
+ []string{"dn"},
+ nil,
+ )
+
+ sr, err := l.Search(searchRequest)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ if len(sr.Entries) != 1 {
+ log.Fatal("User does not exist or too many entries returned")
+ }
+
+ userdn := sr.Entries[0].DN
+
+ // Bind as the user to verify their password
+ err = l.Bind(userdn, password)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Rebind as the read only user for any further queries
+ err = l.Bind(bindusername, bindpassword)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func Example_beherappolicy() {
+ l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", "ldap.example.com", 389))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer l.Close()
+
+ controls := []ldap.Control{}
+ controls = append(controls, ldap.NewControlBeheraPasswordPolicy())
+ bindRequest := ldap.NewSimpleBindRequest("cn=admin,dc=example,dc=com", "password", controls)
+
+ r, err := l.SimpleBind(bindRequest)
+ ppolicyControl := ldap.FindControl(r.Controls, ldap.ControlTypeBeheraPasswordPolicy)
+
+ var ppolicy *ldap.ControlBeheraPasswordPolicy
+ if ppolicyControl != nil {
+ ppolicy = ppolicyControl.(*ldap.ControlBeheraPasswordPolicy)
+ } else {
+ log.Printf("ppolicyControl response not available.\n")
+ }
+ if err != nil {
+ errStr := "ERROR: Cannot bind: " + err.Error()
+ if ppolicy != nil && ppolicy.Error >= 0 {
+ errStr += ":" + ppolicy.ErrorString
+ }
+ log.Print(errStr)
+ } else {
+ logStr := "Login Ok"
+ if ppolicy != nil {
+ if ppolicy.Expire >= 0 {
+ logStr += fmt.Sprintf(". Password expires in %d seconds\n", ppolicy.Expire)
+ } else if ppolicy.Grace >= 0 {
+ logStr += fmt.Sprintf(". Password expired, %d grace logins remain\n", ppolicy.Grace)
+ }
+ }
+ log.Print(logStr)
+ }
+}
+
+func Example_vchuppolicy() {
+ l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", "ldap.example.com", 389))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer l.Close()
+ l.Debug = true
+
+ bindRequest := ldap.NewSimpleBindRequest("cn=admin,dc=example,dc=com", "password", nil)
+
+ r, err := l.SimpleBind(bindRequest)
+
+ passwordMustChangeControl := ldap.FindControl(r.Controls, ldap.ControlTypeVChuPasswordMustChange)
+ var passwordMustChange *ldap.ControlVChuPasswordMustChange
+ if passwordMustChangeControl != nil {
+ passwordMustChange = passwordMustChangeControl.(*ldap.ControlVChuPasswordMustChange)
+ }
+
+ if passwordMustChange != nil && passwordMustChange.MustChange {
+ log.Printf("Password Must be changed.\n")
+ }
+
+ passwordWarningControl := ldap.FindControl(r.Controls, ldap.ControlTypeVChuPasswordWarning)
+
+ var passwordWarning *ldap.ControlVChuPasswordWarning
+ if passwordWarningControl != nil {
+ passwordWarning = passwordWarningControl.(*ldap.ControlVChuPasswordWarning)
+ } else {
+ log.Printf("passwordWarning control response not available.\n")
+ }
+ if err != nil {
+ log.Print("ERROR: Cannot bind: " + err.Error())
+ } else {
+ logStr := "Login Ok"
+ if passwordWarning != nil {
+ if passwordWarning.Expire >= 0 {
+ logStr += fmt.Sprintf(". Password expires in %d seconds\n", passwordWarning.Expire)
+ }
+ }
+ log.Print(logStr)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/go-ldap/ldap/filter_test.go b/Godeps/_workspace/src/github.com/go-ldap/ldap/filter_test.go
new file mode 100644
index 000000000..121c5bf8f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-ldap/ldap/filter_test.go
@@ -0,0 +1,189 @@
+package ldap_test
+
+import (
+ "strings"
+ "testing"
+
+ "gopkg.in/asn1-ber.v1"
+ "gopkg.in/ldap.v2"
+)
+
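+// compileTest describes one filter test case: the filter string to compile,
+// the expected decompiled filter, the expected BER tag and, for failing
+// cases, a substring of the expected error.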
+type compileTest struct {
+ filterStr string
+
+ expectedFilter string
+ expectedType int
+ expectedErr string
+}
+
+var testFilters = []compileTest{
+ compileTest{
+ filterStr: "(&(sn=Miller)(givenName=Bob))",
+ expectedFilter: "(&(sn=Miller)(givenName=Bob))",
+ expectedType: ldap.FilterAnd,
+ },
+ compileTest{
+ filterStr: "(|(sn=Miller)(givenName=Bob))",
+ expectedFilter: "(|(sn=Miller)(givenName=Bob))",
+ expectedType: ldap.FilterOr,
+ },
+ compileTest{
+ filterStr: "(!(sn=Miller))",
+ expectedFilter: "(!(sn=Miller))",
+ expectedType: ldap.FilterNot,
+ },
+ compileTest{
+ filterStr: "(sn=Miller)",
+ expectedFilter: "(sn=Miller)",
+ expectedType: ldap.FilterEqualityMatch,
+ },
+ compileTest{
+ filterStr: "(sn=Mill*)",
+ expectedFilter: "(sn=Mill*)",
+ expectedType: ldap.FilterSubstrings,
+ },
+ compileTest{
+ filterStr: "(sn=*Mill)",
+ expectedFilter: "(sn=*Mill)",
+ expectedType: ldap.FilterSubstrings,
+ },
+ compileTest{
+ filterStr: "(sn=*Mill*)",
+ expectedFilter: "(sn=*Mill*)",
+ expectedType: ldap.FilterSubstrings,
+ },
+ compileTest{
+ filterStr: "(sn=*i*le*)",
+ expectedFilter: "(sn=*i*le*)",
+ expectedType: ldap.FilterSubstrings,
+ },
+ compileTest{
+ filterStr: "(sn=Mi*l*r)",
+ expectedFilter: "(sn=Mi*l*r)",
+ expectedType: ldap.FilterSubstrings,
+ },
+ compileTest{
+ filterStr: "(sn=Mi*le*)",
+ expectedFilter: "(sn=Mi*le*)",
+ expectedType: ldap.FilterSubstrings,
+ },
+ compileTest{
+ filterStr: "(sn=*i*ler)",
+ expectedFilter: "(sn=*i*ler)",
+ expectedType: ldap.FilterSubstrings,
+ },
+ compileTest{
+ filterStr: "(sn>=Miller)",
+ expectedFilter: "(sn>=Miller)",
+ expectedType: ldap.FilterGreaterOrEqual,
+ },
+ compileTest{
+ filterStr: "(sn<=Miller)",
+ expectedFilter: "(sn<=Miller)",
+ expectedType: ldap.FilterLessOrEqual,
+ },
+ compileTest{
+ filterStr: "(sn=*)",
+ expectedFilter: "(sn=*)",
+ expectedType: ldap.FilterPresent,
+ },
+ compileTest{
+ filterStr: "(sn~=Miller)",
+ expectedFilter: "(sn~=Miller)",
+ expectedType: ldap.FilterApproxMatch,
+ },
+ compileTest{
+ filterStr: `(objectGUID='\fc\fe\a3\ab\f9\90N\aaGm\d5I~\d12)`,
+ expectedFilter: `(objectGUID='\fc\fe\a3\ab\f9\90N\aaGm\d5I~\d12)`,
+ expectedType: ldap.FilterEqualityMatch,
+ },
+ compileTest{
+ filterStr: `(objectGUID=абвгдеёжзийклмнопрстуфхцчшщъыьэюя)`,
+ expectedFilter: `(objectGUID=\c3\90\c2\b0\c3\90\c2\b1\c3\90\c2\b2\c3\90\c2\b3\c3\90\c2\b4\c3\90\c2\b5\c3\91\c2\91\c3\90\c2\b6\c3\90\c2\b7\c3\90\c2\b8\c3\90\c2\b9\c3\90\c2\ba\c3\90\c2\bb\c3\90\c2\bc\c3\90\c2\bd\c3\90\c2\be\c3\90\c2\bf\c3\91\c2\80\c3\91\c2\81\c3\91\c2\82\c3\91\c2\83\c3\91\c2\84\c3\91\c2\85\c3\91\c2\86\c3\91\c2\87\c3\91\c2\88\c3\91\c2\89\c3\91\c2\8a\c3\91\c2\8b\c3\91\c2\8c\c3\91\c2\8d\c3\91\c2\8e\c3\91\c2\8f)`,
+ expectedType: ldap.FilterEqualityMatch,
+ },
+ compileTest{
+ filterStr: `(objectGUID=함수목록)`,
+ expectedFilter: `(objectGUID=\c3\ad\c2\95\c2\a8\c3\ac\c2\88\c2\98\c3\ab\c2\aa\c2\a9\c3\ab\c2\a1\c2\9d)`,
+ expectedType: ldap.FilterEqualityMatch,
+ },
+ compileTest{
+ filterStr: `(objectGUID=`,
+ expectedFilter: ``,
+ expectedType: 0,
+ expectedErr: "unexpected end of filter",
+ },
+ compileTest{
+ filterStr: `(objectGUID=함수목록`,
+ expectedFilter: ``,
+ expectedType: 0,
+ expectedErr: "unexpected end of filter",
+ },
+ // compileTest{ filterStr: "()", filterType: FilterExtensibleMatch },
+}
+
+var testInvalidFilters = []string{
+ `(objectGUID=\zz)`,
+ `(objectGUID=\a)`,
+}
+
+func TestFilter(t *testing.T) {
+ // Test Compiler and Decompiler
+ for _, i := range testFilters {
+ filter, err := ldap.CompileFilter(i.filterStr)
+ if err != nil {
+ if i.expectedErr == "" || !strings.Contains(err.Error(), i.expectedErr) {
+ t.Errorf("Problem compiling '%s' - '%v' (expected error to contain '%v')", i.filterStr, err, i.expectedErr)
+ }
+ } else if filter.Tag != ber.Tag(i.expectedType) {
+ t.Errorf("%q Expected %q got %q", i.filterStr, ldap.FilterMap[uint64(i.expectedType)], ldap.FilterMap[uint64(filter.Tag)])
+ } else {
+ o, err := ldap.DecompileFilter(filter)
+ if err != nil {
+ t.Errorf("Problem compiling %s - %s", i.filterStr, err.Error())
+ } else if i.expectedFilter != o {
+ t.Errorf("%q expected, got %q", i.expectedFilter, o)
+ }
+ }
+ }
+}
+
+func TestInvalidFilter(t *testing.T) {
+ for _, filterStr := range testInvalidFilters {
+ if _, err := ldap.CompileFilter(filterStr); err == nil {
+ t.Errorf("Problem compiling %s - expected err", filterStr)
+ }
+ }
+}
+
+func BenchmarkFilterCompile(b *testing.B) {
+ b.StopTimer()
+ filters := make([]string, len(testFilters))
+
+ // Test Compiler and Decompiler
+ for idx, i := range testFilters {
+ filters[idx] = i.filterStr
+ }
+
+ maxIdx := len(filters)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ ldap.CompileFilter(filters[i%maxIdx])
+ }
+}
+
+func BenchmarkFilterDecompile(b *testing.B) {
+ b.StopTimer()
+ filters := make([]*ber.Packet, len(testFilters))
+
+ // Test Compiler and Decompiler
+ for idx, i := range testFilters {
+ filters[idx], _ = ldap.CompileFilter(i.filterStr)
+ }
+
+ maxIdx := len(filters)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ ldap.DecompileFilter(filters[i%maxIdx])
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/go-ldap/ldap/ldap.go b/Godeps/_workspace/src/github.com/go-ldap/ldap/ldap.go
index e91972ff4..1620aaea6 100644
--- a/Godeps/_workspace/src/github.com/go-ldap/ldap/ldap.go
+++ b/Godeps/_workspace/src/github.com/go-ldap/ldap/ldap.go
@@ -6,7 +6,6 @@ package ldap
import (
"errors"
- "fmt"
"io/ioutil"
"os"
@@ -60,98 +59,6 @@ var ApplicationMap = map[uint8]string{
ApplicationExtendedResponse: "Extended Response",
}
-// LDAP Result Codes
-const (
- LDAPResultSuccess = 0
- LDAPResultOperationsError = 1
- LDAPResultProtocolError = 2
- LDAPResultTimeLimitExceeded = 3
- LDAPResultSizeLimitExceeded = 4
- LDAPResultCompareFalse = 5
- LDAPResultCompareTrue = 6
- LDAPResultAuthMethodNotSupported = 7
- LDAPResultStrongAuthRequired = 8
- LDAPResultReferral = 10
- LDAPResultAdminLimitExceeded = 11
- LDAPResultUnavailableCriticalExtension = 12
- LDAPResultConfidentialityRequired = 13
- LDAPResultSaslBindInProgress = 14
- LDAPResultNoSuchAttribute = 16
- LDAPResultUndefinedAttributeType = 17
- LDAPResultInappropriateMatching = 18
- LDAPResultConstraintViolation = 19
- LDAPResultAttributeOrValueExists = 20
- LDAPResultInvalidAttributeSyntax = 21
- LDAPResultNoSuchObject = 32
- LDAPResultAliasProblem = 33
- LDAPResultInvalidDNSyntax = 34
- LDAPResultAliasDereferencingProblem = 36
- LDAPResultInappropriateAuthentication = 48
- LDAPResultInvalidCredentials = 49
- LDAPResultInsufficientAccessRights = 50
- LDAPResultBusy = 51
- LDAPResultUnavailable = 52
- LDAPResultUnwillingToPerform = 53
- LDAPResultLoopDetect = 54
- LDAPResultNamingViolation = 64
- LDAPResultObjectClassViolation = 65
- LDAPResultNotAllowedOnNonLeaf = 66
- LDAPResultNotAllowedOnRDN = 67
- LDAPResultEntryAlreadyExists = 68
- LDAPResultObjectClassModsProhibited = 69
- LDAPResultAffectsMultipleDSAs = 71
- LDAPResultOther = 80
-
- ErrorNetwork = 200
- ErrorFilterCompile = 201
- ErrorFilterDecompile = 202
- ErrorDebugging = 203
- ErrorUnexpectedMessage = 204
- ErrorUnexpectedResponse = 205
-)
-
-var LDAPResultCodeMap = map[uint8]string{
- LDAPResultSuccess: "Success",
- LDAPResultOperationsError: "Operations Error",
- LDAPResultProtocolError: "Protocol Error",
- LDAPResultTimeLimitExceeded: "Time Limit Exceeded",
- LDAPResultSizeLimitExceeded: "Size Limit Exceeded",
- LDAPResultCompareFalse: "Compare False",
- LDAPResultCompareTrue: "Compare True",
- LDAPResultAuthMethodNotSupported: "Auth Method Not Supported",
- LDAPResultStrongAuthRequired: "Strong Auth Required",
- LDAPResultReferral: "Referral",
- LDAPResultAdminLimitExceeded: "Admin Limit Exceeded",
- LDAPResultUnavailableCriticalExtension: "Unavailable Critical Extension",
- LDAPResultConfidentialityRequired: "Confidentiality Required",
- LDAPResultSaslBindInProgress: "Sasl Bind In Progress",
- LDAPResultNoSuchAttribute: "No Such Attribute",
- LDAPResultUndefinedAttributeType: "Undefined Attribute Type",
- LDAPResultInappropriateMatching: "Inappropriate Matching",
- LDAPResultConstraintViolation: "Constraint Violation",
- LDAPResultAttributeOrValueExists: "Attribute Or Value Exists",
- LDAPResultInvalidAttributeSyntax: "Invalid Attribute Syntax",
- LDAPResultNoSuchObject: "No Such Object",
- LDAPResultAliasProblem: "Alias Problem",
- LDAPResultInvalidDNSyntax: "Invalid DN Syntax",
- LDAPResultAliasDereferencingProblem: "Alias Dereferencing Problem",
- LDAPResultInappropriateAuthentication: "Inappropriate Authentication",
- LDAPResultInvalidCredentials: "Invalid Credentials",
- LDAPResultInsufficientAccessRights: "Insufficient Access Rights",
- LDAPResultBusy: "Busy",
- LDAPResultUnavailable: "Unavailable",
- LDAPResultUnwillingToPerform: "Unwilling To Perform",
- LDAPResultLoopDetect: "Loop Detect",
- LDAPResultNamingViolation: "Naming Violation",
- LDAPResultObjectClassViolation: "Object Class Violation",
- LDAPResultNotAllowedOnNonLeaf: "Not Allowed On Non Leaf",
- LDAPResultNotAllowedOnRDN: "Not Allowed On RDN",
- LDAPResultEntryAlreadyExists: "Entry Already Exists",
- LDAPResultObjectClassModsProhibited: "Object Class Mods Prohibited",
- LDAPResultAffectsMultipleDSAs: "Affects Multiple DSAs",
- LDAPResultOther: "Other",
-}
-
// Ldap Behera Password Policy Draft 10 (https://tools.ietf.org/html/draft-behera-ldap-password-policy-10)
const (
BeheraPasswordExpired = 0
@@ -318,8 +225,8 @@ func addRequestDescriptions(packet *ber.Packet) {
}
func addDefaultLDAPResponseDescriptions(packet *ber.Packet) {
- resultCode := packet.Children[1].Children[0].Value.(int64)
- packet.Children[1].Children[0].Description = "Result Code (" + LDAPResultCodeMap[uint8(resultCode)] + ")"
+ resultCode, _ := getLDAPResultCode(packet)
+ packet.Children[1].Children[0].Description = "Result Code (" + LDAPResultCodeMap[resultCode] + ")"
packet.Children[1].Children[1].Description = "Matched DN"
packet.Children[1].Children[2].Description = "Error Message"
if len(packet.Children[1].Children) > 3 {
@@ -343,30 +250,6 @@ func DebugBinaryFile(fileName string) error {
return nil
}
-type Error struct {
- Err error
- ResultCode uint8
-}
-
-func (e *Error) Error() string {
- return fmt.Sprintf("LDAP Result Code %d %q: %s", e.ResultCode, LDAPResultCodeMap[e.ResultCode], e.Err.Error())
-}
-
-func NewError(resultCode uint8, err error) error {
- return &Error{ResultCode: resultCode, Err: err}
-}
-
-func getLDAPResultCode(packet *ber.Packet) (code uint8, description string) {
- if len(packet.Children) >= 2 {
- response := packet.Children[1]
- if response.ClassType == ber.ClassApplication && response.TagType == ber.TypeConstructed && len(response.Children) >= 3 {
- return uint8(response.Children[0].Value.(int64)), response.Children[2].Value.(string)
- }
- }
-
- return ErrorNetwork, "Invalid packet format"
-}
-
var hex = "0123456789abcdef"
func mustEscape(c byte) bool {
diff --git a/Godeps/_workspace/src/github.com/go-ldap/ldap/ldap_test.go b/Godeps/_workspace/src/github.com/go-ldap/ldap/ldap_test.go
new file mode 100644
index 000000000..63292747f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-ldap/ldap/ldap_test.go
@@ -0,0 +1,249 @@
+package ldap_test
+
+import (
+ "crypto/tls"
+ "fmt"
+ "testing"
+
+ "gopkg.in/ldap.v2"
+)
+
+var ldapServer = "ldap.itd.umich.edu"
+var ldapPort = uint16(389)
+var ldapTLSPort = uint16(636)
+var baseDN = "dc=umich,dc=edu"
+var filter = []string{
+ "(cn=cis-fac)",
+ "(&(owner=*)(cn=cis-fac))",
+ "(&(objectclass=rfc822mailgroup)(cn=*Computer*))",
+ "(&(objectclass=rfc822mailgroup)(cn=*Mathematics*))"}
+var attributes = []string{
+ "cn",
+ "description"}
+
+func TestDial(t *testing.T) {
+ fmt.Printf("TestDial: starting...\n")
+ l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", ldapServer, ldapPort))
+ if err != nil {
+ t.Errorf(err.Error())
+ return
+ }
+ defer l.Close()
+ fmt.Printf("TestDial: finished...\n")
+}
+
+func TestDialTLS(t *testing.T) {
+ fmt.Printf("TestDialTLS: starting...\n")
+ l, err := ldap.DialTLS("tcp", fmt.Sprintf("%s:%d", ldapServer, ldapTLSPort), &tls.Config{InsecureSkipVerify: true})
+ if err != nil {
+ t.Errorf(err.Error())
+ return
+ }
+ defer l.Close()
+ fmt.Printf("TestDialTLS: finished...\n")
+}
+
+func TestStartTLS(t *testing.T) {
+ fmt.Printf("TestStartTLS: starting...\n")
+ l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", ldapServer, ldapPort))
+ if err != nil {
+ t.Errorf(err.Error())
+ return
+ }
+ err = l.StartTLS(&tls.Config{InsecureSkipVerify: true})
+ if err != nil {
+ t.Errorf(err.Error())
+ return
+ }
+ fmt.Printf("TestStartTLS: finished...\n")
+}
+
+func TestSearch(t *testing.T) {
+ fmt.Printf("TestSearch: starting...\n")
+ l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", ldapServer, ldapPort))
+ if err != nil {
+ t.Errorf(err.Error())
+ return
+ }
+ defer l.Close()
+
+ searchRequest := ldap.NewSearchRequest(
+ baseDN,
+ ldap.ScopeWholeSubtree, ldap.DerefAlways, 0, 0, false,
+ filter[0],
+ attributes,
+ nil)
+
+ sr, err := l.Search(searchRequest)
+ if err != nil {
+ t.Errorf(err.Error())
+ return
+ }
+
+ fmt.Printf("TestSearch: %s -> num of entries = %d\n", searchRequest.Filter, len(sr.Entries))
+}
+
+func TestSearchStartTLS(t *testing.T) {
+ fmt.Printf("TestSearchStartTLS: starting...\n")
+ l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", ldapServer, ldapPort))
+ if err != nil {
+ t.Errorf(err.Error())
+ return
+ }
+ defer l.Close()
+
+ searchRequest := ldap.NewSearchRequest(
+ baseDN,
+ ldap.ScopeWholeSubtree, ldap.DerefAlways, 0, 0, false,
+ filter[0],
+ attributes,
+ nil)
+
+ sr, err := l.Search(searchRequest)
+ if err != nil {
+ t.Errorf(err.Error())
+ return
+ }
+
+ fmt.Printf("TestSearchStartTLS: %s -> num of entries = %d\n", searchRequest.Filter, len(sr.Entries))
+
+ fmt.Printf("TestSearchStartTLS: upgrading with startTLS\n")
+ err = l.StartTLS(&tls.Config{InsecureSkipVerify: true})
+ if err != nil {
+ t.Errorf(err.Error())
+ return
+ }
+
+ sr, err = l.Search(searchRequest)
+ if err != nil {
+ t.Errorf(err.Error())
+ return
+ }
+
+ fmt.Printf("TestSearchStartTLS: %s -> num of entries = %d\n", searchRequest.Filter, len(sr.Entries))
+}
+
+func TestSearchWithPaging(t *testing.T) {
+ fmt.Printf("TestSearchWithPaging: starting...\n")
+ l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", ldapServer, ldapPort))
+ if err != nil {
+ t.Errorf(err.Error())
+ return
+ }
+ defer l.Close()
+
+ err = l.Bind("", "")
+ if err != nil {
+ t.Errorf(err.Error())
+ return
+ }
+
+ searchRequest := ldap.NewSearchRequest(
+ baseDN,
+ ldap.ScopeWholeSubtree, ldap.DerefAlways, 0, 0, false,
+ filter[2],
+ attributes,
+ nil)
+ sr, err := l.SearchWithPaging(searchRequest, 5)
+ if err != nil {
+ t.Errorf(err.Error())
+ return
+ }
+
+ fmt.Printf("TestSearchWithPaging: %s -> num of entries = %d\n", searchRequest.Filter, len(sr.Entries))
+}
+
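+// searchGoroutine performs a single search using the i-th test filter and
+// sends the result (or nil on error) on the results channel.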
+func searchGoroutine(t *testing.T, l *ldap.Conn, results chan *ldap.SearchResult, i int) {
+ searchRequest := ldap.NewSearchRequest(
+ baseDN,
+ ldap.ScopeWholeSubtree, ldap.DerefAlways, 0, 0, false,
+ filter[i],
+ attributes,
+ nil)
+ sr, err := l.Search(searchRequest)
+ if err != nil {
+ t.Errorf(err.Error())
+ results <- nil
+ return
+ }
+ results <- sr
+}
+
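+// testMultiGoroutineSearch runs the test filters concurrently over a single
+// connection, optionally dialing over TLS or upgrading with StartTLS first.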
+func testMultiGoroutineSearch(t *testing.T, TLS bool, startTLS bool) {
+ fmt.Printf("TestMultiGoroutineSearch: starting...\n")
+ var l *ldap.Conn
+ var err error
+ if TLS {
+ l, err = ldap.DialTLS("tcp", fmt.Sprintf("%s:%d", ldapServer, ldapTLSPort), &tls.Config{InsecureSkipVerify: true})
+ if err != nil {
+ t.Errorf(err.Error())
+ return
+ }
+ defer l.Close()
+ } else {
+ l, err = ldap.Dial("tcp", fmt.Sprintf("%s:%d", ldapServer, ldapPort))
+ if err != nil {
+ t.Errorf(err.Error())
+ return
+ }
+ if startTLS {
+ fmt.Printf("TestMultiGoroutineSearch: using StartTLS...\n")
+ err := l.StartTLS(&tls.Config{InsecureSkipVerify: true})
+ if err != nil {
+ t.Errorf(err.Error())
+ return
+ }
+
+ }
+ }
+
+ results := make([]chan *ldap.SearchResult, len(filter))
+ for i := range filter {
+ results[i] = make(chan *ldap.SearchResult)
+ go searchGoroutine(t, l, results[i], i)
+ }
+ for i := range filter {
+ sr := <-results[i]
+ if sr == nil {
+ t.Errorf("Did not receive results from goroutine for %q", filter[i])
+ } else {
+ fmt.Printf("TestMultiGoroutineSearch(%d): %s -> num of entries = %d\n", i, filter[i], len(sr.Entries))
+ }
+ }
+}
+
+func TestMultiGoroutineSearch(t *testing.T) {
+ testMultiGoroutineSearch(t, false, false)
+ testMultiGoroutineSearch(t, true, true)
+ testMultiGoroutineSearch(t, false, true)
+}
+
+func TestEscapeFilter(t *testing.T) {
+ if got, want := ldap.EscapeFilter("a\x00b(c)d*e\\f"), `a\00b\28c\29d\2ae\5cf`; got != want {
+ t.Errorf("Got %s, expected %s", got, want)
+ }
+ if got, want := ldap.EscapeFilter("Lučić"), `Lu\c4\8di\c4\87`; got != want {
+ t.Errorf("Got %s, expected %s", got, want)
+ }
+}
+
+func TestCompare(t *testing.T) {
+ fmt.Printf("TestCompare: starting...\n")
+ l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", ldapServer, ldapPort))
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+ defer l.Close()
+
+ dn := "cn=math mich,ou=User Groups,ou=Groups,dc=umich,dc=edu"
+ attribute := "cn"
+ value := "math mich"
+
+ sr, err := l.Compare(dn, attribute, value)
+ if err != nil {
+ t.Errorf(err.Error())
+ return
+ }
+
+ fmt.Printf("TestCompare: -> %v\n", sr)
+}
diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/benchmark_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/benchmark_test.go
new file mode 100644
index 000000000..fb8a2f5f3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/benchmark_test.go
@@ -0,0 +1,246 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "database/sql"
+ "database/sql/driver"
+ "math"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
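+// TB wraps *testing.B so the check helpers below can abort a benchmark on any
+// unexpected error.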
+type TB testing.B
+
+func (tb *TB) check(err error) {
+ if err != nil {
+ tb.Fatal(err)
+ }
+}
+
+func (tb *TB) checkDB(db *sql.DB, err error) *sql.DB {
+ tb.check(err)
+ return db
+}
+
+func (tb *TB) checkRows(rows *sql.Rows, err error) *sql.Rows {
+ tb.check(err)
+ return rows
+}
+
+func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt {
+ tb.check(err)
+ return stmt
+}
+
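+// initDB opens a connection using the test DSN and runs the given setup
+// queries, logging MySQL warnings and failing the benchmark on other errors.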
+func initDB(b *testing.B, queries ...string) *sql.DB {
+ tb := (*TB)(b)
+ db := tb.checkDB(sql.Open("mysql", dsn))
+ for _, query := range queries {
+ if _, err := db.Exec(query); err != nil {
+ if w, ok := err.(MySQLWarnings); ok {
+ b.Logf("Warning on %q: %v", query, w)
+ } else {
+ b.Fatalf("Error on %q: %v", query, err)
+ }
+ }
+ }
+ return db
+}
+
+const concurrencyLevel = 10
+
+func BenchmarkQuery(b *testing.B) {
+ tb := (*TB)(b)
+ b.StopTimer()
+ b.ReportAllocs()
+ db := initDB(b,
+ "DROP TABLE IF EXISTS foo",
+ "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
+ `INSERT INTO foo VALUES (1, "one")`,
+ `INSERT INTO foo VALUES (2, "two")`,
+ )
+ db.SetMaxIdleConns(concurrencyLevel)
+ defer db.Close()
+
+ stmt := tb.checkStmt(db.Prepare("SELECT val FROM foo WHERE id=?"))
+ defer stmt.Close()
+
+ remain := int64(b.N)
+ var wg sync.WaitGroup
+ wg.Add(concurrencyLevel)
+ defer wg.Wait()
+ b.StartTimer()
+
+ for i := 0; i < concurrencyLevel; i++ {
+ go func() {
+ for {
+ if atomic.AddInt64(&remain, -1) < 0 {
+ wg.Done()
+ return
+ }
+
+ var got string
+ tb.check(stmt.QueryRow(1).Scan(&got))
+ if got != "one" {
+ b.Errorf("query = %q; want one", got)
+ wg.Done()
+ return
+ }
+ }
+ }()
+ }
+}
+
+func BenchmarkExec(b *testing.B) {
+ tb := (*TB)(b)
+ b.StopTimer()
+ b.ReportAllocs()
+ db := tb.checkDB(sql.Open("mysql", dsn))
+ db.SetMaxIdleConns(concurrencyLevel)
+ defer db.Close()
+
+ stmt := tb.checkStmt(db.Prepare("DO 1"))
+ defer stmt.Close()
+
+ remain := int64(b.N)
+ var wg sync.WaitGroup
+ wg.Add(concurrencyLevel)
+ defer wg.Wait()
+ b.StartTimer()
+
+ for i := 0; i < concurrencyLevel; i++ {
+ go func() {
+ for {
+ if atomic.AddInt64(&remain, -1) < 0 {
+ wg.Done()
+ return
+ }
+
+ if _, err := stmt.Exec(); err != nil {
+ b.Fatal(err.Error())
+ }
+ }
+ }()
+ }
+}
+
+// shared sample data for the roundtrip benchmarks; read back from the server, but no db writes
+var roundtripSample []byte
+
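+// initRoundtripBenchmarks lazily builds the 16 MiB shared sample and returns
+// it together with the minimum and maximum payload lengths used by the
+// roundtrip benchmarks.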
+func initRoundtripBenchmarks() ([]byte, int, int) {
+ if roundtripSample == nil {
+ roundtripSample = []byte(strings.Repeat("0123456789abcdef", 1024*1024))
+ }
+ return roundtripSample, 16, len(roundtripSample)
+}
+
+func BenchmarkRoundtripTxt(b *testing.B) {
+ b.StopTimer()
+ sample, min, max := initRoundtripBenchmarks()
+ sampleString := string(sample)
+ b.ReportAllocs()
+ tb := (*TB)(b)
+ db := tb.checkDB(sql.Open("mysql", dsn))
+ defer db.Close()
+ b.StartTimer()
+ var result string
+ for i := 0; i < b.N; i++ {
+ length := min + i
+ if length > max {
+ length = max
+ }
+ test := sampleString[0:length]
+ rows := tb.checkRows(db.Query(`SELECT "` + test + `"`))
+ if !rows.Next() {
+ rows.Close()
+ b.Fatalf("crashed")
+ }
+ err := rows.Scan(&result)
+ if err != nil {
+ rows.Close()
+ b.Fatalf("crashed")
+ }
+ if result != test {
+ rows.Close()
+ b.Errorf("mismatch")
+ }
+ rows.Close()
+ }
+}
+
+func BenchmarkRoundtripBin(b *testing.B) {
+ b.StopTimer()
+ sample, min, max := initRoundtripBenchmarks()
+ b.ReportAllocs()
+ tb := (*TB)(b)
+ db := tb.checkDB(sql.Open("mysql", dsn))
+ defer db.Close()
+ stmt := tb.checkStmt(db.Prepare("SELECT ?"))
+ defer stmt.Close()
+ b.StartTimer()
+ var result sql.RawBytes
+ for i := 0; i < b.N; i++ {
+ length := min + i
+ if length > max {
+ length = max
+ }
+ test := sample[0:length]
+ rows := tb.checkRows(stmt.Query(test))
+ if !rows.Next() {
+ rows.Close()
+ b.Fatalf("crashed")
+ }
+ err := rows.Scan(&result)
+ if err != nil {
+ rows.Close()
+ b.Fatalf("crashed")
+ }
+ if !bytes.Equal(result, test) {
+ rows.Close()
+ b.Errorf("mismatch")
+ }
+ rows.Close()
+ }
+}
+
+func BenchmarkInterpolation(b *testing.B) {
+ mc := &mysqlConn{
+ cfg: &config{
+ interpolateParams: true,
+ loc: time.UTC,
+ },
+ maxPacketAllowed: maxPacketSize,
+ maxWriteSize: maxPacketSize - 1,
+ buf: newBuffer(nil),
+ }
+
+ args := []driver.Value{
+ int64(42424242),
+ float64(math.Pi),
+ false,
+ time.Unix(1423411542, 807015000),
+ []byte("bytes containing special chars ' \" \a \x00"),
+ "string containing special chars ' \" \a \x00",
+ }
+ q := "SELECT ?, ?, ?, ?, ?, ?"
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := mc.interpolateParams(q, args)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver_test.go
new file mode 100644
index 000000000..f9da416ec
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver_test.go
@@ -0,0 +1,1681 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "crypto/tls"
+ "database/sql"
+ "database/sql/driver"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/url"
+ "os"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+var (
+ user string
+ pass string
+ prot string
+ addr string
+ dbname string
+ dsn string
+ netAddr string
+ available bool
+)
+
+var (
+ tDate = time.Date(2012, 6, 14, 0, 0, 0, 0, time.UTC)
+ sDate = "2012-06-14"
+ tDateTime = time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)
+ sDateTime = "2011-11-20 21:27:37"
+ tDate0 = time.Time{}
+ sDate0 = "0000-00-00"
+ sDateTime0 = "0000-00-00 00:00:00"
+)
+
+// See https://github.com/go-sql-driver/mysql/wiki/Testing
+func init() {
+ // get environment variables
+ env := func(key, defaultValue string) string {
+ if value := os.Getenv(key); value != "" {
+ return value
+ }
+ return defaultValue
+ }
+ user = env("MYSQL_TEST_USER", "root")
+ pass = env("MYSQL_TEST_PASS", "")
+ prot = env("MYSQL_TEST_PROT", "tcp")
+ addr = env("MYSQL_TEST_ADDR", "localhost:3306")
+ dbname = env("MYSQL_TEST_DBNAME", "gotest")
+ netAddr = fmt.Sprintf("%s(%s)", prot, addr)
+ dsn = fmt.Sprintf("%s:%s@%s/%s?timeout=30s&strict=true", user, pass, netAddr, dbname)
+ c, err := net.Dial(prot, addr)
+ if err == nil {
+ available = true
+ c.Close()
+ }
+}
+
+type DBTest struct {
+ *testing.T
+ db *sql.DB
+}
+
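+// runTests runs each test against a connection opened with the given DSN and,
+// unless client-side interpolation is rejected for the configured collation,
+// repeats it against a second connection with interpolateParams=true.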
+func runTests(t *testing.T, dsn string, tests ...func(dbt *DBTest)) {
+ if !available {
+ t.Skipf("MySQL-Server not running on %s", netAddr)
+ }
+
+ db, err := sql.Open("mysql", dsn)
+ if err != nil {
+ t.Fatalf("Error connecting: %s", err.Error())
+ }
+ defer db.Close()
+
+ db.Exec("DROP TABLE IF EXISTS test")
+
+ dsn2 := dsn + "&interpolateParams=true"
+ var db2 *sql.DB
+ if _, err := parseDSN(dsn2); err != errInvalidDSNUnsafeCollation {
+ db2, err = sql.Open("mysql", dsn2)
+ if err != nil {
+ t.Fatalf("Error connecting: %s", err.Error())
+ }
+ defer db2.Close()
+ }
+
+ dbt := &DBTest{t, db}
+ dbt2 := &DBTest{t, db2}
+ for _, test := range tests {
+ test(dbt)
+ dbt.db.Exec("DROP TABLE IF EXISTS test")
+ if db2 != nil {
+ test(dbt2)
+ dbt2.db.Exec("DROP TABLE IF EXISTS test")
+ }
+ }
+}
+
+func (dbt *DBTest) fail(method, query string, err error) {
+ if len(query) > 300 {
+ query = "[query too large to print]"
+ }
+ dbt.Fatalf("Error on %s %s: %s", method, query, err.Error())
+}
+
+func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result) {
+ res, err := dbt.db.Exec(query, args...)
+ if err != nil {
+ dbt.fail("Exec", query, err)
+ }
+ return res
+}
+
+func (dbt *DBTest) mustQuery(query string, args ...interface{}) (rows *sql.Rows) {
+ rows, err := dbt.db.Query(query, args...)
+ if err != nil {
+ dbt.fail("Query", query, err)
+ }
+ return rows
+}
+
+func TestEmptyQuery(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ // just a comment, no query
+ rows := dbt.mustQuery("--")
+ // will hang before #255
+ if rows.Next() {
+ dbt.Errorf("Next on rows must be false")
+ }
+ })
+}
+
+func TestCRUD(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ // Create Table
+ dbt.mustExec("CREATE TABLE test (value BOOL)")
+
+ // Test for unexpected data
+ var out bool
+ rows := dbt.mustQuery("SELECT * FROM test")
+ if rows.Next() {
+ dbt.Error("unexpected data in empty table")
+ }
+
+ // Create Data
+ res := dbt.mustExec("INSERT INTO test VALUES (1)")
+ count, err := res.RowsAffected()
+ if err != nil {
+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+ }
+ if count != 1 {
+ dbt.Fatalf("Expected 1 affected row, got %d", count)
+ }
+
+ id, err := res.LastInsertId()
+ if err != nil {
+ dbt.Fatalf("res.LastInsertId() returned error: %s", err.Error())
+ }
+ if id != 0 {
+ dbt.Fatalf("Expected InsertID 0, got %d", id)
+ }
+
+ // Read
+ rows = dbt.mustQuery("SELECT value FROM test")
+ if rows.Next() {
+ rows.Scan(&out)
+ if true != out {
+ dbt.Errorf("true != %t", out)
+ }
+
+ if rows.Next() {
+ dbt.Error("unexpected data")
+ }
+ } else {
+ dbt.Error("no data")
+ }
+
+ // Update
+ res = dbt.mustExec("UPDATE test SET value = ? WHERE value = ?", false, true)
+ count, err = res.RowsAffected()
+ if err != nil {
+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+ }
+ if count != 1 {
+ dbt.Fatalf("Expected 1 affected row, got %d", count)
+ }
+
+ // Check Update
+ rows = dbt.mustQuery("SELECT value FROM test")
+ if rows.Next() {
+ rows.Scan(&out)
+ if false != out {
+ dbt.Errorf("false != %t", out)
+ }
+
+ if rows.Next() {
+ dbt.Error("unexpected data")
+ }
+ } else {
+ dbt.Error("no data")
+ }
+
+ // Delete
+ res = dbt.mustExec("DELETE FROM test WHERE value = ?", false)
+ count, err = res.RowsAffected()
+ if err != nil {
+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+ }
+ if count != 1 {
+ dbt.Fatalf("Expected 1 affected row, got %d", count)
+ }
+
+ // Check for unexpected rows
+ res = dbt.mustExec("DELETE FROM test")
+ count, err = res.RowsAffected()
+ if err != nil {
+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+ }
+ if count != 0 {
+ dbt.Fatalf("Expected 0 affected rows, got %d", count)
+ }
+ })
+}
+
+func TestInt(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ types := [5]string{"TINYINT", "SMALLINT", "MEDIUMINT", "INT", "BIGINT"}
+ in := int64(42)
+ var out int64
+ var rows *sql.Rows
+
+ // SIGNED
+ for _, v := range types {
+ dbt.mustExec("CREATE TABLE test (value " + v + ")")
+
+ dbt.mustExec("INSERT INTO test VALUES (?)", in)
+
+ rows = dbt.mustQuery("SELECT value FROM test")
+ if rows.Next() {
+ rows.Scan(&out)
+ if in != out {
+ dbt.Errorf("%s: %d != %d", v, in, out)
+ }
+ } else {
+ dbt.Errorf("%s: no data", v)
+ }
+
+ dbt.mustExec("DROP TABLE IF EXISTS test")
+ }
+
+ // UNSIGNED ZEROFILL
+ for _, v := range types {
+ dbt.mustExec("CREATE TABLE test (value " + v + " ZEROFILL)")
+
+ dbt.mustExec("INSERT INTO test VALUES (?)", in)
+
+ rows = dbt.mustQuery("SELECT value FROM test")
+ if rows.Next() {
+ rows.Scan(&out)
+ if in != out {
+ dbt.Errorf("%s ZEROFILL: %d != %d", v, in, out)
+ }
+ } else {
+ dbt.Errorf("%s ZEROFILL: no data", v)
+ }
+
+ dbt.mustExec("DROP TABLE IF EXISTS test")
+ }
+ })
+}
+
+func TestFloat(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ types := [2]string{"FLOAT", "DOUBLE"}
+ in := float32(42.23)
+ var out float32
+ var rows *sql.Rows
+ for _, v := range types {
+ dbt.mustExec("CREATE TABLE test (value " + v + ")")
+ dbt.mustExec("INSERT INTO test VALUES (?)", in)
+ rows = dbt.mustQuery("SELECT value FROM test")
+ if rows.Next() {
+ rows.Scan(&out)
+ if in != out {
+ dbt.Errorf("%s: %g != %g", v, in, out)
+ }
+ } else {
+ dbt.Errorf("%s: no data", v)
+ }
+ dbt.mustExec("DROP TABLE IF EXISTS test")
+ }
+ })
+}
+
+func TestString(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ types := [6]string{"CHAR(255)", "VARCHAR(255)", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT"}
+ in := "κόσμε üöäßñóùéàâÿœ'îë Árvíztűrő いろはにほへとちりぬるを イロハニホヘト דג סקרן чащах น่าฟังเอย"
+ var out string
+ var rows *sql.Rows
+
+ for _, v := range types {
+ dbt.mustExec("CREATE TABLE test (value " + v + ") CHARACTER SET utf8")
+
+ dbt.mustExec("INSERT INTO test VALUES (?)", in)
+
+ rows = dbt.mustQuery("SELECT value FROM test")
+ if rows.Next() {
+ rows.Scan(&out)
+ if in != out {
+ dbt.Errorf("%s: %s != %s", v, in, out)
+ }
+ } else {
+ dbt.Errorf("%s: no data", v)
+ }
+
+ dbt.mustExec("DROP TABLE IF EXISTS test")
+ }
+
+ // BLOB
+ dbt.mustExec("CREATE TABLE test (id int, value BLOB) CHARACTER SET utf8")
+
+ id := 2
+ in = "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " +
+ "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " +
+ "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " +
+ "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. " +
+ "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " +
+ "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " +
+ "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " +
+ "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet."
+ dbt.mustExec("INSERT INTO test VALUES (?, ?)", id, in)
+
+ err := dbt.db.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&out)
+ if err != nil {
+ dbt.Fatalf("Error on BLOB-Query: %s", err.Error())
+ } else if out != in {
+ dbt.Errorf("BLOB: %s != %s", in, out)
+ }
+ })
+}
+
+type timeTests struct {
+ dbtype string
+ tlayout string
+ tests []timeTest
+}
+
+type timeTest struct {
+ s string // leading "!": do not use t as value in queries
+ t time.Time
+}
+
+type timeMode byte
+
+func (t timeMode) String() string {
+ switch t {
+ case binaryString:
+ return "binary:string"
+ case binaryTime:
+ return "binary:time.Time"
+ case textString:
+ return "text:string"
+ }
+ panic("unsupported timeMode")
+}
+
+func (t timeMode) Binary() bool {
+ switch t {
+ case binaryString, binaryTime:
+ return true
+ }
+ return false
+}
+
+const (
+ binaryString timeMode = iota
+ binaryTime
+ textString
+)
+
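+// genQuery builds a "SELECT cast(... as <dbtype>)" statement, using a
+// placeholder when the binary protocol is exercised and an inline string
+// literal otherwise.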
+func (t timeTest) genQuery(dbtype string, mode timeMode) string {
+ var inner string
+ if mode.Binary() {
+ inner = "?"
+ } else {
+ inner = `"%s"`
+ }
+ return `SELECT cast(` + inner + ` as ` + dbtype + `)`
+}
+
+func (t timeTest) run(dbt *DBTest, dbtype, tlayout string, mode timeMode) {
+ var rows *sql.Rows
+ query := t.genQuery(dbtype, mode)
+ switch mode {
+ case binaryString:
+ rows = dbt.mustQuery(query, t.s)
+ case binaryTime:
+ rows = dbt.mustQuery(query, t.t)
+ case textString:
+ query = fmt.Sprintf(query, t.s)
+ rows = dbt.mustQuery(query)
+ default:
+ panic("unsupported mode")
+ }
+ defer rows.Close()
+ var err error
+ if !rows.Next() {
+ err = rows.Err()
+ if err == nil {
+ err = fmt.Errorf("no data")
+ }
+ dbt.Errorf("%s [%s]: %s", dbtype, mode, err)
+ return
+ }
+ var dst interface{}
+ err = rows.Scan(&dst)
+ if err != nil {
+ dbt.Errorf("%s [%s]: %s", dbtype, mode, err)
+ return
+ }
+ switch val := dst.(type) {
+ case []uint8:
+ str := string(val)
+ if str == t.s {
+ return
+ }
+ if mode.Binary() && dbtype == "DATETIME" && len(str) == 26 && str[:19] == t.s {
+ // a fix mainly for TravisCI:
+ // accept full microsecond resolution in result for DATETIME columns
+ // where the binary protocol was used
+ return
+ }
+ dbt.Errorf("%s [%s] to string: expected %q, got %q",
+ dbtype, mode,
+ t.s, str,
+ )
+ case time.Time:
+ if val == t.t {
+ return
+ }
+ dbt.Errorf("%s [%s] to string: expected %q, got %q",
+ dbtype, mode,
+ t.s, val.Format(tlayout),
+ )
+ default:
+ fmt.Printf("%#v\n", []interface{}{dbtype, tlayout, mode, t.s, t.t})
+ dbt.Errorf("%s [%s]: unhandled type %T (is '%v')",
+ dbtype, mode,
+ val, val,
+ )
+ }
+}
+
+func TestDateTime(t *testing.T) {
+ afterTime := func(t time.Time, d string) time.Time {
+ dur, err := time.ParseDuration(d)
+ if err != nil {
+ panic(err)
+ }
+ return t.Add(dur)
+ }
+ // NOTE: MySQL rounds DATETIME(x) up - but that's not included in the tests
+ format := "2006-01-02 15:04:05.999999"
+ t0 := time.Time{}
+ tstr0 := "0000-00-00 00:00:00.000000"
+ testcases := []timeTests{
+ {"DATE", format[:10], []timeTest{
+ {t: time.Date(2011, 11, 20, 0, 0, 0, 0, time.UTC)},
+ {t: t0, s: tstr0[:10]},
+ }},
+ {"DATETIME", format[:19], []timeTest{
+ {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)},
+ {t: t0, s: tstr0[:19]},
+ }},
+ {"DATETIME(0)", format[:21], []timeTest{
+ {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)},
+ {t: t0, s: tstr0[:19]},
+ }},
+ {"DATETIME(1)", format[:21], []timeTest{
+ {t: time.Date(2011, 11, 20, 21, 27, 37, 100000000, time.UTC)},
+ {t: t0, s: tstr0[:21]},
+ }},
+ {"DATETIME(6)", format, []timeTest{
+ {t: time.Date(2011, 11, 20, 21, 27, 37, 123456000, time.UTC)},
+ {t: t0, s: tstr0},
+ }},
+ {"TIME", format[11:19], []timeTest{
+ {t: afterTime(t0, "12345s")},
+ {s: "!-12:34:56"},
+ {s: "!-838:59:59"},
+ {s: "!838:59:59"},
+ {t: t0, s: tstr0[11:19]},
+ }},
+ {"TIME(0)", format[11:19], []timeTest{
+ {t: afterTime(t0, "12345s")},
+ {s: "!-12:34:56"},
+ {s: "!-838:59:59"},
+ {s: "!838:59:59"},
+ {t: t0, s: tstr0[11:19]},
+ }},
+ {"TIME(1)", format[11:21], []timeTest{
+ {t: afterTime(t0, "12345600ms")},
+ {s: "!-12:34:56.7"},
+ {s: "!-838:59:58.9"},
+ {s: "!838:59:58.9"},
+ {t: t0, s: tstr0[11:21]},
+ }},
+ {"TIME(6)", format[11:], []timeTest{
+ {t: afterTime(t0, "1234567890123000ns")},
+ {s: "!-12:34:56.789012"},
+ {s: "!-838:59:58.999999"},
+ {s: "!838:59:58.999999"},
+ {t: t0, s: tstr0[11:]},
+ }},
+ }
+ dsns := []string{
+ dsn + "&parseTime=true",
+ dsn + "&parseTime=false",
+ }
+ for _, testdsn := range dsns {
+ runTests(t, testdsn, func(dbt *DBTest) {
+ microsecsSupported := false
+ zeroDateSupported := false
+ var rows *sql.Rows
+ var err error
+ rows, err = dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`)
+ if err == nil {
+ rows.Scan(µsecsSupported)
+ rows.Close()
+ }
+ rows, err = dbt.db.Query(`SELECT cast("0000-00-00" as DATE) = "0000-00-00"`)
+ if err == nil {
+ rows.Scan(&zeroDateSupported)
+ rows.Close()
+ }
+ for _, setups := range testcases {
+ if t := setups.dbtype; !microsecsSupported && t[len(t)-1:] == ")" {
+ // skip fractional second tests if unsupported by server
+ continue
+ }
+ for _, setup := range setups.tests {
+ allowBinTime := true
+ if setup.s == "" {
+						// fill time string wherever Go can reliably produce it
+ setup.s = setup.t.Format(setups.tlayout)
+ } else if setup.s[0] == '!' {
+ // skip tests using setup.t as source in queries
+ allowBinTime = false
+ // fix setup.s - remove the "!"
+ setup.s = setup.s[1:]
+ }
+ if !zeroDateSupported && setup.s == tstr0[:len(setup.s)] {
+ // skip disallowed 0000-00-00 date
+ continue
+ }
+ setup.run(dbt, setups.dbtype, setups.tlayout, textString)
+ setup.run(dbt, setups.dbtype, setups.tlayout, binaryString)
+ if allowBinTime {
+ setup.run(dbt, setups.dbtype, setups.tlayout, binaryTime)
+ }
+ }
+ }
+ })
+ }
+}
+
+func TestTimestampMicros(t *testing.T) {
+ format := "2006-01-02 15:04:05.999999"
+ f0 := format[:19]
+ f1 := format[:21]
+ f6 := format[:26]
+ runTests(t, dsn, func(dbt *DBTest) {
+ // check if microseconds are supported.
+ // Do not use timestamp(x) for that check - before 5.5.6, x would mean display width
+ // and not precision.
+		// See the last paragraph at http://dev.mysql.com/doc/refman/5.6/en/fractional-seconds.html
+ microsecsSupported := false
+ if rows, err := dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`); err == nil {
+ rows.Scan(µsecsSupported)
+ rows.Close()
+ }
+ if !microsecsSupported {
+ // skip test
+ return
+ }
+ _, err := dbt.db.Exec(`
+ CREATE TABLE test (
+ value0 TIMESTAMP NOT NULL DEFAULT '` + f0 + `',
+ value1 TIMESTAMP(1) NOT NULL DEFAULT '` + f1 + `',
+ value6 TIMESTAMP(6) NOT NULL DEFAULT '` + f6 + `'
+ )`,
+ )
+ if err != nil {
+ dbt.Error(err)
+ }
+ defer dbt.mustExec("DROP TABLE IF EXISTS test")
+ dbt.mustExec("INSERT INTO test SET value0=?, value1=?, value6=?", f0, f1, f6)
+ var res0, res1, res6 string
+ rows := dbt.mustQuery("SELECT * FROM test")
+ if !rows.Next() {
+ dbt.Errorf("test contained no selectable values")
+ }
+ err = rows.Scan(&res0, &res1, &res6)
+ if err != nil {
+ dbt.Error(err)
+ }
+ if res0 != f0 {
+ dbt.Errorf("expected %q, got %q", f0, res0)
+ }
+ if res1 != f1 {
+ dbt.Errorf("expected %q, got %q", f1, res1)
+ }
+ if res6 != f6 {
+ dbt.Errorf("expected %q, got %q", f6, res6)
+ }
+ })
+}
+
+func TestNULL(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ nullStmt, err := dbt.db.Prepare("SELECT NULL")
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ defer nullStmt.Close()
+
+ nonNullStmt, err := dbt.db.Prepare("SELECT 1")
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ defer nonNullStmt.Close()
+
+ // NullBool
+ var nb sql.NullBool
+ // Invalid
+ if err = nullStmt.QueryRow().Scan(&nb); err != nil {
+ dbt.Fatal(err)
+ }
+ if nb.Valid {
+ dbt.Error("Valid NullBool which should be invalid")
+ }
+ // Valid
+ if err = nonNullStmt.QueryRow().Scan(&nb); err != nil {
+ dbt.Fatal(err)
+ }
+ if !nb.Valid {
+ dbt.Error("Invalid NullBool which should be valid")
+ } else if nb.Bool != true {
+ dbt.Errorf("Unexpected NullBool value: %t (should be true)", nb.Bool)
+ }
+
+ // NullFloat64
+ var nf sql.NullFloat64
+ // Invalid
+ if err = nullStmt.QueryRow().Scan(&nf); err != nil {
+ dbt.Fatal(err)
+ }
+ if nf.Valid {
+ dbt.Error("Valid NullFloat64 which should be invalid")
+ }
+ // Valid
+ if err = nonNullStmt.QueryRow().Scan(&nf); err != nil {
+ dbt.Fatal(err)
+ }
+ if !nf.Valid {
+ dbt.Error("Invalid NullFloat64 which should be valid")
+ } else if nf.Float64 != float64(1) {
+ dbt.Errorf("Unexpected NullFloat64 value: %f (should be 1.0)", nf.Float64)
+ }
+
+ // NullInt64
+ var ni sql.NullInt64
+ // Invalid
+ if err = nullStmt.QueryRow().Scan(&ni); err != nil {
+ dbt.Fatal(err)
+ }
+ if ni.Valid {
+ dbt.Error("Valid NullInt64 which should be invalid")
+ }
+ // Valid
+ if err = nonNullStmt.QueryRow().Scan(&ni); err != nil {
+ dbt.Fatal(err)
+ }
+ if !ni.Valid {
+ dbt.Error("Invalid NullInt64 which should be valid")
+ } else if ni.Int64 != int64(1) {
+ dbt.Errorf("Unexpected NullInt64 value: %d (should be 1)", ni.Int64)
+ }
+
+ // NullString
+ var ns sql.NullString
+ // Invalid
+ if err = nullStmt.QueryRow().Scan(&ns); err != nil {
+ dbt.Fatal(err)
+ }
+ if ns.Valid {
+ dbt.Error("Valid NullString which should be invalid")
+ }
+ // Valid
+ if err = nonNullStmt.QueryRow().Scan(&ns); err != nil {
+ dbt.Fatal(err)
+ }
+ if !ns.Valid {
+ dbt.Error("Invalid NullString which should be valid")
+ } else if ns.String != `1` {
+ dbt.Error("Unexpected NullString value:" + ns.String + " (should be `1`)")
+ }
+
+ // nil-bytes
+ var b []byte
+ // Read nil
+ if err = nullStmt.QueryRow().Scan(&b); err != nil {
+ dbt.Fatal(err)
+ }
+ if b != nil {
+			dbt.Error("Non-nil []byte which should be nil")
+ }
+ // Read non-nil
+ if err = nonNullStmt.QueryRow().Scan(&b); err != nil {
+ dbt.Fatal(err)
+ }
+ if b == nil {
+			dbt.Error("Nil []byte which should be non-nil")
+ }
+ // Insert nil
+ b = nil
+ success := false
+ if err = dbt.db.QueryRow("SELECT ? IS NULL", b).Scan(&success); err != nil {
+ dbt.Fatal(err)
+ }
+ if !success {
+ dbt.Error("Inserting []byte(nil) as NULL failed")
+ }
+ // Check input==output with input==nil
+ b = nil
+ if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil {
+ dbt.Fatal(err)
+ }
+ if b != nil {
+ dbt.Error("Non-nil echo from nil input")
+ }
+ // Check input==output with input!=nil
+ b = []byte("")
+ if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil {
+ dbt.Fatal(err)
+ }
+ if b == nil {
+ dbt.Error("nil echo from non-nil input")
+ }
+
+ // Insert NULL
+		dbt.mustExec("CREATE TABLE test (dummy1 int, value int, dummy2 int)")
+
+ dbt.mustExec("INSERT INTO test VALUES (?, ?, ?)", 1, nil, 2)
+
+ var out interface{}
+ rows := dbt.mustQuery("SELECT * FROM test")
+ if rows.Next() {
+ rows.Scan(&out)
+ if out != nil {
+ dbt.Errorf("%v != nil", out)
+ }
+ } else {
+ dbt.Error("no data")
+ }
+ })
+}
+
+func TestUint64(t *testing.T) {
+ const (
+ u0 = uint64(0)
+ uall = ^u0
+ uhigh = uall >> 1
+ utop = ^uhigh
+ s0 = int64(0)
+ sall = ^s0
+ shigh = int64(uhigh)
+ stop = ^shigh
+ )
+ runTests(t, dsn, func(dbt *DBTest) {
+ stmt, err := dbt.db.Prepare(`SELECT ?, ?, ? ,?, ?, ?, ?, ?`)
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ defer stmt.Close()
+ row := stmt.QueryRow(
+ u0, uhigh, utop, uall,
+ s0, shigh, stop, sall,
+ )
+
+ var ua, ub, uc, ud uint64
+ var sa, sb, sc, sd int64
+
+ err = row.Scan(&ua, &ub, &uc, &ud, &sa, &sb, &sc, &sd)
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ switch {
+ case ua != u0,
+ ub != uhigh,
+ uc != utop,
+ ud != uall,
+ sa != s0,
+ sb != shigh,
+ sc != stop,
+ sd != sall:
+ dbt.Fatal("Unexpected result value")
+ }
+ })
+}
+
+func TestLongData(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ var maxAllowedPacketSize int
+ err := dbt.db.QueryRow("select @@max_allowed_packet").Scan(&maxAllowedPacketSize)
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ maxAllowedPacketSize--
+
+ // don't get too ambitious
+ if maxAllowedPacketSize > 1<<25 {
+ maxAllowedPacketSize = 1 << 25
+ }
+
+ dbt.mustExec("CREATE TABLE test (value LONGBLOB)")
+
+ in := strings.Repeat(`a`, maxAllowedPacketSize+1)
+ var out string
+ var rows *sql.Rows
+
+ // Long text data
+		const nonDataQueryLen = 28 // length of the query without the value
+ inS := in[:maxAllowedPacketSize-nonDataQueryLen]
+ dbt.mustExec("INSERT INTO test VALUES('" + inS + "')")
+ rows = dbt.mustQuery("SELECT value FROM test")
+ if rows.Next() {
+ rows.Scan(&out)
+ if inS != out {
+ dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(inS), len(out))
+ }
+ if rows.Next() {
+				dbt.Error("LONGBLOB: unexpected row")
+ }
+ } else {
+ dbt.Fatalf("LONGBLOB: no data")
+ }
+
+ // Empty table
+ dbt.mustExec("TRUNCATE TABLE test")
+
+ // Long binary data
+ dbt.mustExec("INSERT INTO test VALUES(?)", in)
+ rows = dbt.mustQuery("SELECT value FROM test WHERE 1=?", 1)
+ if rows.Next() {
+ rows.Scan(&out)
+ if in != out {
+ dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(in), len(out))
+ }
+ if rows.Next() {
+				dbt.Error("LONGBLOB: unexpected row")
+ }
+ } else {
+ if err = rows.Err(); err != nil {
+ dbt.Fatalf("LONGBLOB: no data (err: %s)", err.Error())
+ } else {
+ dbt.Fatal("LONGBLOB: no data (err: )")
+ }
+ }
+ })
+}
+
+func TestLoadData(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ verifyLoadDataResult := func() {
+ rows, err := dbt.db.Query("SELECT * FROM test")
+ if err != nil {
+ dbt.Fatal(err.Error())
+ }
+
+ i := 0
+ values := [4]string{
+ "a string",
+ "a string containing a \t",
+ "a string containing a \n",
+ "a string containing both \t\n",
+ }
+
+ var id int
+ var value string
+
+ for rows.Next() {
+ i++
+ err = rows.Scan(&id, &value)
+ if err != nil {
+ dbt.Fatal(err.Error())
+ }
+ if i != id {
+ dbt.Fatalf("%d != %d", i, id)
+ }
+ if values[i-1] != value {
+ dbt.Fatalf("%q != %q", values[i-1], value)
+ }
+ }
+ err = rows.Err()
+ if err != nil {
+ dbt.Fatal(err.Error())
+ }
+
+ if i != 4 {
+ dbt.Fatalf("Rows count mismatch. Got %d, want 4", i)
+ }
+ }
+		file, err := ioutil.TempFile("", "gotest")
+		if err != nil {
+			dbt.Fatal(err)
+		}
+		defer os.Remove(file.Name())
+ file.WriteString("1\ta string\n2\ta string containing a \\t\n3\ta string containing a \\n\n4\ta string containing both \\t\\n\n")
+ file.Close()
+
+ dbt.db.Exec("DROP TABLE IF EXISTS test")
+ dbt.mustExec("CREATE TABLE test (id INT NOT NULL PRIMARY KEY, value TEXT NOT NULL) CHARACTER SET utf8")
+
+ // Local File
+ RegisterLocalFile(file.Name())
+ dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name()))
+ verifyLoadDataResult()
+ // negative test
+ _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'doesnotexist' INTO TABLE test")
+ if err == nil {
+ dbt.Fatal("Load non-existent file didn't fail")
+ } else if err.Error() != "Local File 'doesnotexist' is not registered. Use the DSN parameter 'allowAllFiles=true' to allow all files" {
+ dbt.Fatal(err.Error())
+ }
+
+ // Empty table
+ dbt.mustExec("TRUNCATE TABLE test")
+
+ // Reader
+ RegisterReaderHandler("test", func() io.Reader {
+ file, err = os.Open(file.Name())
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ return file
+ })
+ dbt.mustExec("LOAD DATA LOCAL INFILE 'Reader::test' INTO TABLE test")
+ verifyLoadDataResult()
+ // negative test
+ _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'Reader::doesnotexist' INTO TABLE test")
+ if err == nil {
+ dbt.Fatal("Load non-existent Reader didn't fail")
+ } else if err.Error() != "Reader 'doesnotexist' is not registered" {
+ dbt.Fatal(err.Error())
+ }
+ })
+}
+
+func TestFoundRows(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)")
+ dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
+
+ res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0")
+ count, err := res.RowsAffected()
+ if err != nil {
+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+ }
+ if count != 2 {
+ dbt.Fatalf("Expected 2 affected rows, got %d", count)
+ }
+ res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1")
+ count, err = res.RowsAffected()
+ if err != nil {
+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+ }
+ if count != 2 {
+ dbt.Fatalf("Expected 2 affected rows, got %d", count)
+ }
+ })
+ runTests(t, dsn+"&clientFoundRows=true", func(dbt *DBTest) {
+ dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)")
+ dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
+
+ res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0")
+ count, err := res.RowsAffected()
+ if err != nil {
+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+ }
+ if count != 2 {
+ dbt.Fatalf("Expected 2 matched rows, got %d", count)
+ }
+ res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1")
+ count, err = res.RowsAffected()
+ if err != nil {
+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+ }
+ if count != 3 {
+ dbt.Fatalf("Expected 3 matched rows, got %d", count)
+ }
+ })
+}
+
+func TestStrict(t *testing.T) {
+ // ALLOW_INVALID_DATES to get rid of stricter modes - we want to test for warnings, not errors
+ relaxedDsn := dsn + "&sql_mode=ALLOW_INVALID_DATES"
+ // make sure the MySQL version is recent enough with a separate connection
+ // before running the test
+ conn, err := MySQLDriver{}.Open(relaxedDsn)
+ if conn != nil {
+ conn.Close()
+ }
+ if me, ok := err.(*MySQLError); ok && me.Number == 1231 {
+ // Error 1231: Variable 'sql_mode' can't be set to the value of 'ALLOW_INVALID_DATES'
+ // => skip test, MySQL server version is too old
+ return
+ }
+ runTests(t, relaxedDsn, func(dbt *DBTest) {
+ dbt.mustExec("CREATE TABLE test (a TINYINT NOT NULL, b CHAR(4))")
+
+ var queries = [...]struct {
+ in string
+ codes []string
+ }{
+ {"DROP TABLE IF EXISTS no_such_table", []string{"1051"}},
+ {"INSERT INTO test VALUES(10,'mysql'),(NULL,'test'),(300,'Open Source')", []string{"1265", "1048", "1264", "1265"}},
+ }
+ var err error
+
+ var checkWarnings = func(err error, mode string, idx int) {
+ if err == nil {
+ dbt.Errorf("Expected STRICT error on query [%s] %s", mode, queries[idx].in)
+ }
+
+ if warnings, ok := err.(MySQLWarnings); ok {
+ var codes = make([]string, len(warnings))
+ for i := range warnings {
+ codes[i] = warnings[i].Code
+ }
+ if len(codes) != len(queries[idx].codes) {
+ dbt.Errorf("Unexpected STRICT error count on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes)
+ }
+
+ for i := range warnings {
+ if codes[i] != queries[idx].codes[i] {
+ dbt.Errorf("Unexpected STRICT error codes on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes)
+ return
+ }
+ }
+
+ } else {
+ dbt.Errorf("Unexpected error on query [%s] %s: %s", mode, queries[idx].in, err.Error())
+ }
+ }
+
+ // text protocol
+ for i := range queries {
+ _, err = dbt.db.Exec(queries[i].in)
+ checkWarnings(err, "text", i)
+ }
+
+ var stmt *sql.Stmt
+
+ // binary protocol
+ for i := range queries {
+ stmt, err = dbt.db.Prepare(queries[i].in)
+ if err != nil {
+ dbt.Errorf("Error on preparing query %s: %s", queries[i].in, err.Error())
+ }
+
+ _, err = stmt.Exec()
+ checkWarnings(err, "binary", i)
+
+ err = stmt.Close()
+ if err != nil {
+ dbt.Errorf("Error on closing stmt for query %s: %s", queries[i].in, err.Error())
+ }
+ }
+ })
+}
+
+func TestTLS(t *testing.T) {
+ tlsTest := func(dbt *DBTest) {
+ if err := dbt.db.Ping(); err != nil {
+ if err == ErrNoTLS {
+ dbt.Skip("Server does not support TLS")
+ } else {
+ dbt.Fatalf("Error on Ping: %s", err.Error())
+ }
+ }
+
+ rows := dbt.mustQuery("SHOW STATUS LIKE 'Ssl_cipher'")
+
+ var variable, value *sql.RawBytes
+ for rows.Next() {
+ if err := rows.Scan(&variable, &value); err != nil {
+ dbt.Fatal(err.Error())
+ }
+
+ if value == nil {
+ dbt.Fatal("No Cipher")
+ }
+ }
+ }
+
+ runTests(t, dsn+"&tls=skip-verify", tlsTest)
+
+ // Verify that registering / using a custom cfg works
+ RegisterTLSConfig("custom-skip-verify", &tls.Config{
+ InsecureSkipVerify: true,
+ })
+ runTests(t, dsn+"&tls=custom-skip-verify", tlsTest)
+}
+
+func TestReuseClosedConnection(t *testing.T) {
+	// this test does not use database/sql, it uses the driver directly
+ if !available {
+ t.Skipf("MySQL-Server not running on %s", netAddr)
+ }
+
+ md := &MySQLDriver{}
+ conn, err := md.Open(dsn)
+ if err != nil {
+ t.Fatalf("Error connecting: %s", err.Error())
+ }
+ stmt, err := conn.Prepare("DO 1")
+ if err != nil {
+ t.Fatalf("Error preparing statement: %s", err.Error())
+ }
+ _, err = stmt.Exec(nil)
+ if err != nil {
+ t.Fatalf("Error executing statement: %s", err.Error())
+ }
+ err = conn.Close()
+ if err != nil {
+ t.Fatalf("Error closing connection: %s", err.Error())
+ }
+
+ defer func() {
+ if err := recover(); err != nil {
+ t.Errorf("Panic after reusing a closed connection: %v", err)
+ }
+ }()
+ _, err = stmt.Exec(nil)
+ if err != nil && err != driver.ErrBadConn {
+ t.Errorf("Unexpected error '%s', expected '%s'",
+ err.Error(), driver.ErrBadConn.Error())
+ }
+}
+
+func TestCharset(t *testing.T) {
+ if !available {
+ t.Skipf("MySQL-Server not running on %s", netAddr)
+ }
+
+ mustSetCharset := func(charsetParam, expected string) {
+ runTests(t, dsn+"&"+charsetParam, func(dbt *DBTest) {
+ rows := dbt.mustQuery("SELECT @@character_set_connection")
+ defer rows.Close()
+
+ if !rows.Next() {
+ dbt.Fatalf("Error getting connection charset: %s", rows.Err())
+ }
+
+ var got string
+ rows.Scan(&got)
+
+ if got != expected {
+ dbt.Fatalf("Expected connection charset %s but got %s", expected, got)
+ }
+ })
+ }
+
+ // non utf8 test
+ mustSetCharset("charset=ascii", "ascii")
+
+ // when the first charset is invalid, use the second
+ mustSetCharset("charset=none,utf8", "utf8")
+
+ // when the first charset is valid, use it
+ mustSetCharset("charset=ascii,utf8", "ascii")
+ mustSetCharset("charset=utf8,ascii", "utf8")
+}
+
+func TestFailingCharset(t *testing.T) {
+ runTests(t, dsn+"&charset=none", func(dbt *DBTest) {
+ // run query to really establish connection...
+ _, err := dbt.db.Exec("SELECT 1")
+ if err == nil {
+ dbt.db.Close()
+ t.Fatalf("Connection must not succeed without a valid charset")
+ }
+ })
+}
+
+func TestCollation(t *testing.T) {
+ if !available {
+ t.Skipf("MySQL-Server not running on %s", netAddr)
+ }
+
+ defaultCollation := "utf8_general_ci"
+ testCollations := []string{
+ "", // do not set
+ defaultCollation, // driver default
+ "latin1_general_ci",
+ "binary",
+ "utf8_unicode_ci",
+ "cp1257_bin",
+ }
+
+ for _, collation := range testCollations {
+ var expected, tdsn string
+ if collation != "" {
+ tdsn = dsn + "&collation=" + collation
+ expected = collation
+ } else {
+ tdsn = dsn
+ expected = defaultCollation
+ }
+
+ runTests(t, tdsn, func(dbt *DBTest) {
+ var got string
+ if err := dbt.db.QueryRow("SELECT @@collation_connection").Scan(&got); err != nil {
+ dbt.Fatal(err)
+ }
+
+ if got != expected {
+ dbt.Fatalf("Expected connection collation %s but got %s", expected, got)
+ }
+ })
+ }
+}
+
+func TestColumnsWithAlias(t *testing.T) {
+ runTests(t, dsn+"&columnsWithAlias=true", func(dbt *DBTest) {
+ rows := dbt.mustQuery("SELECT 1 AS A")
+ defer rows.Close()
+ cols, _ := rows.Columns()
+ if len(cols) != 1 {
+ t.Fatalf("expected 1 column, got %d", len(cols))
+ }
+ if cols[0] != "A" {
+ t.Fatalf("expected column name \"A\", got \"%s\"", cols[0])
+ }
+ rows.Close()
+
+ rows = dbt.mustQuery("SELECT * FROM (SELECT 1 AS one) AS A")
+ cols, _ = rows.Columns()
+ if len(cols) != 1 {
+ t.Fatalf("expected 1 column, got %d", len(cols))
+ }
+ if cols[0] != "A.one" {
+ t.Fatalf("expected column name \"A.one\", got \"%s\"", cols[0])
+ }
+ })
+}
+
+func TestRawBytesResultExceedsBuffer(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ // defaultBufSize from buffer.go
+ expected := strings.Repeat("abc", defaultBufSize)
+
+ rows := dbt.mustQuery("SELECT '" + expected + "'")
+ defer rows.Close()
+ if !rows.Next() {
+ dbt.Error("expected result, got none")
+ }
+ var result sql.RawBytes
+ rows.Scan(&result)
+ if expected != string(result) {
+ dbt.Error("result did not match expected value")
+ }
+ })
+}
+
+func TestTimezoneConversion(t *testing.T) {
+ zones := []string{"UTC", "US/Central", "US/Pacific", "Local"}
+
+ // Regression test for timezone handling
+ tzTest := func(dbt *DBTest) {
+
+ // Create table
+ dbt.mustExec("CREATE TABLE test (ts TIMESTAMP)")
+
+ // Insert local time into database (should be converted)
+ usCentral, _ := time.LoadLocation("US/Central")
+ reftime := time.Date(2014, 05, 30, 18, 03, 17, 0, time.UTC).In(usCentral)
+ dbt.mustExec("INSERT INTO test VALUE (?)", reftime)
+
+ // Retrieve time from DB
+ rows := dbt.mustQuery("SELECT ts FROM test")
+ if !rows.Next() {
+ dbt.Fatal("Didn't get any rows out")
+ }
+
+ var dbTime time.Time
+ err := rows.Scan(&dbTime)
+ if err != nil {
+ dbt.Fatal("Err", err)
+ }
+
+ // Check that dates match
+ if reftime.Unix() != dbTime.Unix() {
+ dbt.Errorf("Times don't match.\n")
+ dbt.Errorf(" Now(%v)=%v\n", usCentral, reftime)
+ dbt.Errorf(" Now(UTC)=%v\n", dbTime)
+ }
+ }
+
+ for _, tz := range zones {
+ runTests(t, dsn+"&parseTime=true&loc="+url.QueryEscape(tz), tzTest)
+ }
+}
+
+// Special cases
+
+func TestRowsClose(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ rows, err := dbt.db.Query("SELECT 1")
+ if err != nil {
+ dbt.Fatal(err)
+ }
+
+ err = rows.Close()
+ if err != nil {
+ dbt.Fatal(err)
+ }
+
+ if rows.Next() {
+ dbt.Fatal("Unexpected row after rows.Close()")
+ }
+
+ err = rows.Err()
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ })
+}
+
+// dangling statements
+// http://code.google.com/p/go/issues/detail?id=3865
+func TestCloseStmtBeforeRows(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ stmt, err := dbt.db.Prepare("SELECT 1")
+ if err != nil {
+ dbt.Fatal(err)
+ }
+
+ rows, err := stmt.Query()
+ if err != nil {
+ stmt.Close()
+ dbt.Fatal(err)
+ }
+ defer rows.Close()
+
+ err = stmt.Close()
+ if err != nil {
+ dbt.Fatal(err)
+ }
+
+ if !rows.Next() {
+ dbt.Fatal("Getting row failed")
+ } else {
+ err = rows.Err()
+ if err != nil {
+ dbt.Fatal(err)
+ }
+
+ var out bool
+ err = rows.Scan(&out)
+ if err != nil {
+ dbt.Fatalf("Error on rows.Scan(): %s", err.Error())
+ }
+ if out != true {
+ dbt.Errorf("true != %t", out)
+ }
+ }
+ })
+}
+
+// It is valid to have multiple Rows for the same Stmt
+// http://code.google.com/p/go/issues/detail?id=3734
+func TestStmtMultiRows(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ stmt, err := dbt.db.Prepare("SELECT 1 UNION SELECT 0")
+ if err != nil {
+ dbt.Fatal(err)
+ }
+
+ rows1, err := stmt.Query()
+ if err != nil {
+ stmt.Close()
+ dbt.Fatal(err)
+ }
+ defer rows1.Close()
+
+ rows2, err := stmt.Query()
+ if err != nil {
+ stmt.Close()
+ dbt.Fatal(err)
+ }
+ defer rows2.Close()
+
+ var out bool
+
+ // 1
+ if !rows1.Next() {
+ dbt.Fatal("1st rows1.Next failed")
+ } else {
+ err = rows1.Err()
+ if err != nil {
+ dbt.Fatal(err)
+ }
+
+ err = rows1.Scan(&out)
+ if err != nil {
+ dbt.Fatalf("Error on rows.Scan(): %s", err.Error())
+ }
+ if out != true {
+ dbt.Errorf("true != %t", out)
+ }
+ }
+
+ if !rows2.Next() {
+ dbt.Fatal("1st rows2.Next failed")
+ } else {
+ err = rows2.Err()
+ if err != nil {
+ dbt.Fatal(err)
+ }
+
+ err = rows2.Scan(&out)
+ if err != nil {
+ dbt.Fatalf("Error on rows.Scan(): %s", err.Error())
+ }
+ if out != true {
+ dbt.Errorf("true != %t", out)
+ }
+ }
+
+ // 2
+ if !rows1.Next() {
+ dbt.Fatal("2nd rows1.Next failed")
+ } else {
+ err = rows1.Err()
+ if err != nil {
+ dbt.Fatal(err)
+ }
+
+ err = rows1.Scan(&out)
+ if err != nil {
+ dbt.Fatalf("Error on rows.Scan(): %s", err.Error())
+ }
+ if out != false {
+ dbt.Errorf("false != %t", out)
+ }
+
+ if rows1.Next() {
+ dbt.Fatal("Unexpected row on rows1")
+ }
+ err = rows1.Close()
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ }
+
+ if !rows2.Next() {
+ dbt.Fatal("2nd rows2.Next failed")
+ } else {
+ err = rows2.Err()
+ if err != nil {
+ dbt.Fatal(err)
+ }
+
+ err = rows2.Scan(&out)
+ if err != nil {
+ dbt.Fatalf("Error on rows.Scan(): %s", err.Error())
+ }
+ if out != false {
+ dbt.Errorf("false != %t", out)
+ }
+
+ if rows2.Next() {
+ dbt.Fatal("Unexpected row on rows2")
+ }
+ err = rows2.Close()
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ }
+ })
+}
+
+// Regression test for
+// * more than 32 NULL parameters (issue 209)
+// * more parameters than fit into the buffer (issue 201)
+func TestPreparedManyCols(t *testing.T) {
+ const numParams = defaultBufSize
+ runTests(t, dsn, func(dbt *DBTest) {
+ query := "SELECT ?" + strings.Repeat(",?", numParams-1)
+ stmt, err := dbt.db.Prepare(query)
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ defer stmt.Close()
+		// create more parameters than fit into the buffer;
+		// they will all be passed as nil values
+ params := make([]interface{}, numParams)
+ rows, err := stmt.Query(params...)
+ if err != nil {
+ stmt.Close()
+ dbt.Fatal(err)
+ }
+ defer rows.Close()
+ })
+}
+
+func TestConcurrent(t *testing.T) {
+ if enabled, _ := readBool(os.Getenv("MYSQL_TEST_CONCURRENT")); !enabled {
+ t.Skip("MYSQL_TEST_CONCURRENT env var not set")
+ }
+
+ runTests(t, dsn, func(dbt *DBTest) {
+ var max int
+ err := dbt.db.QueryRow("SELECT @@max_connections").Scan(&max)
+ if err != nil {
+ dbt.Fatalf("%s", err.Error())
+ }
+ dbt.Logf("Testing up to %d concurrent connections \r\n", max)
+
+ var remaining, succeeded int32 = int32(max), 0
+
+ var wg sync.WaitGroup
+ wg.Add(max)
+
+ var fatalError string
+ var once sync.Once
+ fatalf := func(s string, vals ...interface{}) {
+ once.Do(func() {
+ fatalError = fmt.Sprintf(s, vals...)
+ })
+ }
+
+ for i := 0; i < max; i++ {
+ go func(id int) {
+ defer wg.Done()
+
+ tx, err := dbt.db.Begin()
+ atomic.AddInt32(&remaining, -1)
+
+ if err != nil {
+ if err.Error() != "Error 1040: Too many connections" {
+ fatalf("Error on Conn %d: %s", id, err.Error())
+ }
+ return
+ }
+
+ // keep the connection busy until all connections are open
+ for remaining > 0 {
+ if _, err = tx.Exec("DO 1"); err != nil {
+ fatalf("Error on Conn %d: %s", id, err.Error())
+ return
+ }
+ }
+
+ if err = tx.Commit(); err != nil {
+ fatalf("Error on Conn %d: %s", id, err.Error())
+ return
+ }
+
+ // everything went fine with this connection
+ atomic.AddInt32(&succeeded, 1)
+ }(i)
+ }
+
+		// wait until all connections are open
+ wg.Wait()
+
+ if fatalError != "" {
+ dbt.Fatal(fatalError)
+ }
+
+ dbt.Logf("Reached %d concurrent connections\r\n", succeeded)
+ })
+}
+
+// Tests custom dial functions
+func TestCustomDial(t *testing.T) {
+ if !available {
+ t.Skipf("MySQL-Server not running on %s", netAddr)
+ }
+
+	// our custom dial function, which just wraps net.Dial here
+ RegisterDial("mydial", func(addr string) (net.Conn, error) {
+ return net.Dial(prot, addr)
+ })
+
+ db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s&strict=true", user, pass, addr, dbname))
+ if err != nil {
+ t.Fatalf("Error connecting: %s", err.Error())
+ }
+ defer db.Close()
+
+ if _, err = db.Exec("DO 1"); err != nil {
+ t.Fatalf("Connection failed: %s", err.Error())
+ }
+}
+
+func TestSqlInjection(t *testing.T) {
+ createTest := func(arg string) func(dbt *DBTest) {
+ return func(dbt *DBTest) {
+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ dbt.mustExec("INSERT INTO test VALUES (?)", 1)
+
+ var v int
+			// NULL can't be equal to anything; the injection attempt tries to make the query return a row anyway
+ // This test verifies that escapeQuotes and escapeBackslash are working properly
+ err := dbt.db.QueryRow("SELECT v FROM test WHERE NULL = ?", arg).Scan(&v)
+ if err == sql.ErrNoRows {
+ return // success, sql injection failed
+ } else if err == nil {
+ dbt.Errorf("Sql injection successful with arg: %s", arg)
+ } else {
+ dbt.Errorf("Error running query with arg: %s; err: %s", arg, err.Error())
+ }
+ }
+ }
+
+ dsns := []string{
+ dsn,
+ dsn + "&sql_mode=NO_BACKSLASH_ESCAPES",
+ }
+ for _, testdsn := range dsns {
+ runTests(t, testdsn, createTest("1 OR 1=1"))
+ runTests(t, testdsn, createTest("' OR '1'='1"))
+ }
+}
+
+// Test if inserted data is correctly retrieved after being escaped
+func TestInsertRetrieveEscapedData(t *testing.T) {
+ testData := func(dbt *DBTest) {
+ dbt.mustExec("CREATE TABLE test (v VARCHAR(255))")
+
+ // All sequences that are escaped by escapeQuotes and escapeBackslash
+ v := "foo \x00\n\r\x1a\"'\\"
+ dbt.mustExec("INSERT INTO test VALUES (?)", v)
+
+ var out string
+ err := dbt.db.QueryRow("SELECT v FROM test").Scan(&out)
+ if err != nil {
+ dbt.Fatalf("%s", err.Error())
+ }
+
+ if out != v {
+ dbt.Errorf("%q != %q", out, v)
+ }
+ }
+
+ dsns := []string{
+ dsn,
+ dsn + "&sql_mode=NO_BACKSLASH_ESCAPES",
+ }
+ for _, testdsn := range dsns {
+ runTests(t, testdsn, testData)
+ }
+}
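
The driver tests above lean on the parseTime and loc DSN parameters (see TestDateTime and TestTimezoneConversion). As a hedged, minimal sketch of that behaviour outside the test harness - the DSN, address and database name below are placeholders, not the tests' dsn variable - scanning a DATETIME straight into time.Time looks roughly like this:

package main

import (
	"database/sql"
	"fmt"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// parseTime=true makes the driver return DATE/DATETIME columns as time.Time;
	// loc picks the time.Location used for that conversion.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/test?parseTime=true&loc=UTC")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var ts time.Time
	if err := db.QueryRow(`SELECT CAST("2011-11-20 21:27:37" AS DATETIME)`).Scan(&ts); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ts.Format("2006-01-02 15:04:05"))
}
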
diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors_test.go
new file mode 100644
index 000000000..96f9126d6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors_test.go
@@ -0,0 +1,42 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "log"
+ "testing"
+)
+
+func TestErrorsSetLogger(t *testing.T) {
+ previous := errLog
+ defer func() {
+ errLog = previous
+ }()
+
+ // set up logger
+ const expected = "prefix: test\n"
+ buffer := bytes.NewBuffer(make([]byte, 0, 64))
+ logger := log.New(buffer, "prefix: ", 0)
+
+ // print
+ SetLogger(logger)
+ errLog.Print("test")
+
+ // check result
+ if actual := buffer.String(); actual != expected {
+ t.Errorf("expected %q, got %q", expected, actual)
+ }
+}
+
+func TestErrorsStrictIgnoreNotes(t *testing.T) {
+ runTests(t, dsn+"&sql_notes=false", func(dbt *DBTest) {
+ dbt.mustExec("DROP TABLE IF EXISTS does_not_exist")
+ })
+}
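
TestErrorsSetLogger above swaps the package-level errLog through the exported SetLogger hook. A minimal usage sketch of that hook (assumed to be this driver version's public mysql.SetLogger, which rejects a nil logger):

package main

import (
	"log"
	"os"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Route the driver's internal error messages to stderr with a custom prefix.
	logger := log.New(os.Stderr, "[mysql] ", log.LstdFlags)
	if err := mysql.SetLogger(logger); err != nil {
		log.Fatal(err)
	}
}
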
diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go
index 14395bf9a..76cb7c84e 100644
--- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go
+++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go
@@ -282,7 +282,10 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
}
// Filler [23 bytes] (all 0x00)
- pos := 13 + 23
+ pos := 13
+ for ; pos < 13+23; pos++ {
+ data[pos] = 0
+ }
// User [null terminated string]
if len(mc.cfg.user) > 0 {
diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils_test.go
new file mode 100644
index 000000000..79fbdd1eb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils_test.go
@@ -0,0 +1,346 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/binary"
+ "fmt"
+ "testing"
+ "time"
+)
+
+var testDSNs = []struct {
+ in string
+ out string
+ loc *time.Location
+}{
+ {"username:password@protocol(address)/dbname?param=value", "&{user:username passwd:password net:protocol addr:address dbname:dbname params:map[param:value] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC},
+ {"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true", "&{user:username passwd:password net:protocol addr:address dbname:dbname params:map[param:value] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:true interpolateParams:false}", time.UTC},
+ {"user@unix(/path/to/socket)/dbname?charset=utf8", "&{user:user passwd: net:unix addr:/path/to/socket dbname:dbname params:map[charset:utf8] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC},
+ {"user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true", "&{user:user passwd:password net:tcp addr:localhost:5555 dbname:dbname params:map[charset:utf8] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC},
+ {"user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify", "&{user:user passwd:password net:tcp addr:localhost:5555 dbname:dbname params:map[charset:utf8mb4,utf8] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC},
+ {"user:password@/dbname?loc=UTC&timeout=30s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci", "&{user:user passwd:password net:tcp addr:127.0.0.1:3306 dbname:dbname params:map[] loc:%p tls: timeout:30000000000 collation:224 allowAllFiles:true allowOldPasswords:true allowCleartextPasswords:false clientFoundRows:true columnsWithAlias:false interpolateParams:false}", time.UTC},
+ {"user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local", "&{user:user passwd:p@ss(word) net:tcp addr:[de:ad:be:ef::ca:fe]:80 dbname:dbname params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.Local},
+ {"/dbname", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname:dbname params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC},
+ {"@/", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC},
+ {"/", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC},
+ {"", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC},
+ {"user:p@/ssword@/", "&{user:user passwd:p@/ssword net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC},
+ {"unix/?arg=%2Fsome%2Fpath.ext", "&{user: passwd: net:unix addr:/tmp/mysql.sock dbname: params:map[arg:/some/path.ext] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC},
+}
+
+func TestDSNParser(t *testing.T) {
+ var cfg *config
+ var err error
+ var res string
+
+ for i, tst := range testDSNs {
+ cfg, err = parseDSN(tst.in)
+ if err != nil {
+ t.Error(err.Error())
+ }
+
+ // pointer not static
+ cfg.tls = nil
+
+ res = fmt.Sprintf("%+v", cfg)
+ if res != fmt.Sprintf(tst.out, tst.loc) {
+ t.Errorf("%d. parseDSN(%q) => %q, want %q", i, tst.in, res, fmt.Sprintf(tst.out, tst.loc))
+ }
+ }
+}
+
+func TestDSNParserInvalid(t *testing.T) {
+ var invalidDSNs = []string{
+ "@net(addr/", // no closing brace
+ "@tcp(/", // no closing brace
+ "tcp(/", // no closing brace
+ "(/", // no closing brace
+ "net(addr)//", // unescaped
+ "user:pass@tcp(1.2.3.4:3306)", // no trailing slash
+ //"/dbname?arg=/some/unescaped/path",
+ }
+
+ for i, tst := range invalidDSNs {
+ if _, err := parseDSN(tst); err == nil {
+ t.Errorf("invalid DSN #%d. (%s) didn't error!", i, tst)
+ }
+ }
+}
+
+func TestDSNWithCustomTLS(t *testing.T) {
+ baseDSN := "user:password@tcp(localhost:5555)/dbname?tls="
+ tlsCfg := tls.Config{}
+
+ RegisterTLSConfig("utils_test", &tlsCfg)
+
+ // Custom TLS is missing
+ tst := baseDSN + "invalid_tls"
+ cfg, err := parseDSN(tst)
+ if err == nil {
+ t.Errorf("Invalid custom TLS in DSN (%s) but did not error. Got config: %#v", tst, cfg)
+ }
+
+ tst = baseDSN + "utils_test"
+
+ // Custom TLS with a server name
+ name := "foohost"
+ tlsCfg.ServerName = name
+ cfg, err = parseDSN(tst)
+
+ if err != nil {
+ t.Error(err.Error())
+ } else if cfg.tls.ServerName != name {
+ t.Errorf("Did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, tst)
+ }
+
+ // Custom TLS without a server name
+ name = "localhost"
+ tlsCfg.ServerName = ""
+ cfg, err = parseDSN(tst)
+
+ if err != nil {
+ t.Error(err.Error())
+ } else if cfg.tls.ServerName != name {
+ t.Errorf("Did not get the correct ServerName (%s) parsing DSN (%s).", name, tst)
+ }
+
+ DeregisterTLSConfig("utils_test")
+}
+
+func TestDSNUnsafeCollation(t *testing.T) {
+ _, err := parseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=true")
+ if err != errInvalidDSNUnsafeCollation {
+		t.Errorf("Expected %v, Got %v", errInvalidDSNUnsafeCollation, err)
+ }
+
+ _, err = parseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=false")
+ if err != nil {
+		t.Errorf("Expected %v, Got %v", nil, err)
+ }
+
+ _, err = parseDSN("/dbname?collation=gbk_chinese_ci")
+ if err != nil {
+		t.Errorf("Expected %v, Got %v", nil, err)
+ }
+
+ _, err = parseDSN("/dbname?collation=ascii_bin&interpolateParams=true")
+ if err != nil {
+		t.Errorf("Expected %v, Got %v", nil, err)
+ }
+
+ _, err = parseDSN("/dbname?collation=latin1_german1_ci&interpolateParams=true")
+ if err != nil {
+		t.Errorf("Expected %v, Got %v", nil, err)
+ }
+
+ _, err = parseDSN("/dbname?collation=utf8_general_ci&interpolateParams=true")
+ if err != nil {
+		t.Errorf("Expected %v, Got %v", nil, err)
+ }
+
+ _, err = parseDSN("/dbname?collation=utf8mb4_general_ci&interpolateParams=true")
+ if err != nil {
+		t.Errorf("Expected %v, Got %v", nil, err)
+ }
+}
+
+func BenchmarkParseDSN(b *testing.B) {
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ for _, tst := range testDSNs {
+ if _, err := parseDSN(tst.in); err != nil {
+ b.Error(err.Error())
+ }
+ }
+ }
+}
+
+func TestScanNullTime(t *testing.T) {
+ var scanTests = []struct {
+ in interface{}
+ error bool
+ valid bool
+ time time.Time
+ }{
+ {tDate, false, true, tDate},
+ {sDate, false, true, tDate},
+ {[]byte(sDate), false, true, tDate},
+ {tDateTime, false, true, tDateTime},
+ {sDateTime, false, true, tDateTime},
+ {[]byte(sDateTime), false, true, tDateTime},
+ {tDate0, false, true, tDate0},
+ {sDate0, false, true, tDate0},
+ {[]byte(sDate0), false, true, tDate0},
+ {sDateTime0, false, true, tDate0},
+ {[]byte(sDateTime0), false, true, tDate0},
+ {"", true, false, tDate0},
+ {"1234", true, false, tDate0},
+ {0, true, false, tDate0},
+ }
+
+ var nt = NullTime{}
+ var err error
+
+ for _, tst := range scanTests {
+ err = nt.Scan(tst.in)
+ if (err != nil) != tst.error {
+ t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil))
+ }
+ if nt.Valid != tst.valid {
+ t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid)
+ }
+ if nt.Time != tst.time {
+ t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time)
+ }
+ }
+}
+
+func TestLengthEncodedInteger(t *testing.T) {
+ var integerTests = []struct {
+ num uint64
+ encoded []byte
+ }{
+ {0x0000000000000000, []byte{0x00}},
+ {0x0000000000000012, []byte{0x12}},
+ {0x00000000000000fa, []byte{0xfa}},
+ {0x0000000000000100, []byte{0xfc, 0x00, 0x01}},
+ {0x0000000000001234, []byte{0xfc, 0x34, 0x12}},
+ {0x000000000000ffff, []byte{0xfc, 0xff, 0xff}},
+ {0x0000000000010000, []byte{0xfd, 0x00, 0x00, 0x01}},
+ {0x0000000000123456, []byte{0xfd, 0x56, 0x34, 0x12}},
+ {0x0000000000ffffff, []byte{0xfd, 0xff, 0xff, 0xff}},
+ {0x0000000001000000, []byte{0xfe, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}},
+ {0x123456789abcdef0, []byte{0xfe, 0xf0, 0xde, 0xbc, 0x9a, 0x78, 0x56, 0x34, 0x12}},
+ {0xffffffffffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+ }
+
+ for _, tst := range integerTests {
+ num, isNull, numLen := readLengthEncodedInteger(tst.encoded)
+ if isNull {
+ t.Errorf("%x: expected %d, got NULL", tst.encoded, tst.num)
+ }
+ if num != tst.num {
+ t.Errorf("%x: expected %d, got %d", tst.encoded, tst.num, num)
+ }
+ if numLen != len(tst.encoded) {
+ t.Errorf("%x: expected size %d, got %d", tst.encoded, len(tst.encoded), numLen)
+ }
+ encoded := appendLengthEncodedInteger(nil, num)
+ if !bytes.Equal(encoded, tst.encoded) {
+ t.Errorf("%v: expected %x, got %x", num, tst.encoded, encoded)
+ }
+ }
+}
+
+func TestOldPass(t *testing.T) {
+ scramble := []byte{9, 8, 7, 6, 5, 4, 3, 2}
+ vectors := []struct {
+ pass string
+ out string
+ }{
+ {" pass", "47575c5a435b4251"},
+ {"pass ", "47575c5a435b4251"},
+ {"123\t456", "575c47505b5b5559"},
+ {"C0mpl!ca ted#PASS123", "5d5d554849584a45"},
+ }
+ for _, tuple := range vectors {
+ ours := scrambleOldPassword(scramble, []byte(tuple.pass))
+ if tuple.out != fmt.Sprintf("%x", ours) {
+ t.Errorf("Failed old password %q", tuple.pass)
+ }
+ }
+}
+
+func TestFormatBinaryDateTime(t *testing.T) {
+ rawDate := [11]byte{}
+ binary.LittleEndian.PutUint16(rawDate[:2], 1978) // years
+ rawDate[2] = 12 // months
+ rawDate[3] = 30 // days
+ rawDate[4] = 15 // hours
+ rawDate[5] = 46 // minutes
+ rawDate[6] = 23 // seconds
+ binary.LittleEndian.PutUint32(rawDate[7:], 987654) // microseconds
+ expect := func(expected string, inlen, outlen uint8) {
+ actual, _ := formatBinaryDateTime(rawDate[:inlen], outlen, false)
+ bytes, ok := actual.([]byte)
+ if !ok {
+ t.Errorf("formatBinaryDateTime must return []byte, was %T", actual)
+ }
+ if string(bytes) != expected {
+ t.Errorf(
+ "expected %q, got %q for length in %d, out %d",
+				expected, bytes, inlen, outlen,
+ )
+ }
+ }
+ expect("0000-00-00", 0, 10)
+ expect("0000-00-00 00:00:00", 0, 19)
+ expect("1978-12-30", 4, 10)
+ expect("1978-12-30 15:46:23", 7, 19)
+ expect("1978-12-30 15:46:23.987654", 11, 26)
+}
+
+func TestEscapeBackslash(t *testing.T) {
+ expect := func(expected, value string) {
+ actual := string(escapeBytesBackslash([]byte{}, []byte(value)))
+ if actual != expected {
+ t.Errorf(
+ "expected %s, got %s",
+ expected, actual,
+ )
+ }
+
+ actual = string(escapeStringBackslash([]byte{}, value))
+ if actual != expected {
+ t.Errorf(
+ "expected %s, got %s",
+ expected, actual,
+ )
+ }
+ }
+
+ expect("foo\\0bar", "foo\x00bar")
+ expect("foo\\nbar", "foo\nbar")
+ expect("foo\\rbar", "foo\rbar")
+ expect("foo\\Zbar", "foo\x1abar")
+ expect("foo\\\"bar", "foo\"bar")
+ expect("foo\\\\bar", "foo\\bar")
+ expect("foo\\'bar", "foo'bar")
+}
+
+func TestEscapeQuotes(t *testing.T) {
+ expect := func(expected, value string) {
+ actual := string(escapeBytesQuotes([]byte{}, []byte(value)))
+ if actual != expected {
+ t.Errorf(
+ "expected %s, got %s",
+ expected, actual,
+ )
+ }
+
+ actual = string(escapeStringQuotes([]byte{}, value))
+ if actual != expected {
+ t.Errorf(
+ "expected %s, got %s",
+ expected, actual,
+ )
+ }
+ }
+
+ expect("foo\x00bar", "foo\x00bar") // not affected
+ expect("foo\nbar", "foo\nbar") // not affected
+ expect("foo\rbar", "foo\rbar") // not affected
+ expect("foo\x1abar", "foo\x1abar") // not affected
+ expect("foo''bar", "foo'bar") // affected
+ expect("foo\"bar", "foo\"bar") // not affected
+}
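
TestLengthEncodedInteger above pins the MySQL wire format for length-encoded integers. The standalone sketch below (an illustration, not the driver's readLengthEncodedInteger/appendLengthEncodedInteger) reproduces the encoding rule behind that table: one byte for values up to 0xfa, then a 0xfc/0xfd/0xfe prefix followed by 2, 3 or 8 little-endian bytes.

package main

import (
	"encoding/binary"
	"fmt"
)

// appendLenEncInt appends n in MySQL's length-encoded integer format.
func appendLenEncInt(b []byte, n uint64) []byte {
	switch {
	case n <= 0xfa: // single byte
		return append(b, byte(n))
	case n <= 0xffff: // 0xfc prefix + 2 bytes
		return append(b, 0xfc, byte(n), byte(n>>8))
	case n <= 0xffffff: // 0xfd prefix + 3 bytes
		return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
	default: // 0xfe prefix + 8 bytes
		var buf [8]byte
		binary.LittleEndian.PutUint64(buf[:], n)
		return append(append(b, 0xfe), buf[:]...)
	}
}

func main() {
	fmt.Printf("%x\n", appendLenEncInt(nil, 0x1234))   // fc3412, as in the test table
	fmt.Printf("%x\n", appendLenEncInt(nil, 0x123456)) // fd563412
}
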
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/AUTHORS b/Godeps/_workspace/src/github.com/gocql/gocql/AUTHORS
index 03dc16564..9f821548c 100644
--- a/Godeps/_workspace/src/github.com/gocql/gocql/AUTHORS
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/AUTHORS
@@ -56,3 +56,5 @@ John Weldon
Adrien Bustany
Andrey Smirnov
Adam Weiner
+Daniel Cannon
+Johnny Bergström
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/cass1batch_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/cass1batch_test.go
new file mode 100644
index 000000000..e33b0c580
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/cass1batch_test.go
@@ -0,0 +1,59 @@
+// +build all integration
+
+package gocql
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestProto1BatchInsert(t *testing.T) {
+ session := createSession(t)
+ if err := createTable(session, "CREATE TABLE gocql_test.large (id int primary key)"); err != nil {
+ t.Fatal(err)
+ }
+ defer session.Close()
+
+ begin := "BEGIN BATCH"
+ end := "APPLY BATCH"
+ query := "INSERT INTO large (id) VALUES (?)"
+ fullQuery := strings.Join([]string{begin, query, end}, "\n")
+ args := []interface{}{5}
+ if err := session.Query(fullQuery, args...).Consistency(Quorum).Exec(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestShouldPrepareFunction(t *testing.T) {
+ var shouldPrepareTests = []struct {
+ Stmt string
+ Result bool
+ }{
+ {`
+ BEGIN BATCH
+ INSERT INTO users (userID, password)
+ VALUES ('smith', 'secret')
+ APPLY BATCH
+ ;
+ `, true},
+ {`INSERT INTO users (userID, password, name) VALUES ('user2', 'ch@ngem3b', 'second user')`, true},
+ {`BEGIN COUNTER BATCH UPDATE stats SET views = views + 1 WHERE pageid = 1 APPLY BATCH`, true},
+ {`delete name from users where userID = 'smith';`, true},
+ {` UPDATE users SET password = 'secret' WHERE userID = 'smith' `, true},
+ {`CREATE TABLE users (
+ user_name varchar PRIMARY KEY,
+ password varchar,
+ gender varchar,
+ session_token varchar,
+ state varchar,
+ birth_year bigint
+ );`, false},
+ }
+
+ for _, test := range shouldPrepareTests {
+ q := &Query{stmt: test.Stmt}
+ if got := q.shouldPrepare(); got != test.Result {
+ t.Fatalf("%q: got %v, expected %v\n", test.Stmt, got, test.Result)
+ }
+ }
+}
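
TestProto1BatchInsert above concatenates BEGIN BATCH / APPLY BATCH by hand because protocol v1 has no dedicated batch frames. With a newer protocol version the same insert can go through gocql's Batch API instead; a hedged sketch, with a placeholder cluster address and the gocql_test keyspace the tests create:

package main

import (
	"log"

	"github.com/gocql/gocql"
)

func main() {
	cluster := gocql.NewCluster("127.0.0.1")
	cluster.Keyspace = "gocql_test"
	session, err := cluster.CreateSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	// Equivalent of the hand-built "BEGIN BATCH ... APPLY BATCH" string above.
	batch := session.NewBatch(gocql.LoggedBatch)
	batch.Query("INSERT INTO large (id) VALUES (?)", 5)
	if err := session.ExecuteBatch(batch); err != nil {
		log.Fatal(err)
	}
}
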
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/cassandra_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/cassandra_test.go
new file mode 100644
index 000000000..49bfb72e0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/cassandra_test.go
@@ -0,0 +1,2431 @@
+// +build all integration
+
+package gocql
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "math/big"
+ "net"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+ "unicode"
+
+ "gopkg.in/inf.v0"
+)
+
+var (
+ flagCluster = flag.String("cluster", "127.0.0.1", "a comma-separated list of host:port tuples")
+	flagProto        = flag.Int("proto", 2, "protocol version")
+ flagCQL = flag.String("cql", "3.0.0", "CQL version")
+ flagRF = flag.Int("rf", 1, "replication factor for test keyspace")
+ clusterSize = flag.Int("clusterSize", 1, "the expected size of the cluster")
+ flagRetry = flag.Int("retries", 5, "number of times to retry queries")
+	flagAutoWait     = flag.Duration("autowait", 1000*time.Millisecond, "time to wait for autodiscovery to fill the hosts pool")
+ flagRunSslTest = flag.Bool("runssl", false, "Set to true to run ssl test")
+ flagRunAuthTest = flag.Bool("runauth", false, "Set to true to run authentication test")
+ flagCompressTest = flag.String("compressor", "", "compressor to use")
+ flagTimeout = flag.Duration("gocql.timeout", 5*time.Second, "sets the connection `timeout` for all operations")
+ clusterHosts []string
+)
+
+func init() {
+ flag.Parse()
+ clusterHosts = strings.Split(*flagCluster, ",")
+ log.SetFlags(log.Lshortfile | log.LstdFlags)
+}
+
+func addSslOptions(cluster *ClusterConfig) *ClusterConfig {
+ if *flagRunSslTest {
+ cluster.SslOpts = &SslOptions{
+ CertPath: "testdata/pki/gocql.crt",
+ KeyPath: "testdata/pki/gocql.key",
+ CaPath: "testdata/pki/ca.crt",
+ EnableHostVerification: false,
+ }
+ }
+ return cluster
+}
+
+var initOnce sync.Once
+
+func createTable(s *Session, table string) error {
+ if err := s.control.query(table).Close(); err != nil {
+ return err
+ }
+
+ return s.control.awaitSchemaAgreement()
+}
+
+func createCluster() *ClusterConfig {
+ cluster := NewCluster(clusterHosts...)
+ cluster.ProtoVersion = *flagProto
+ cluster.CQLVersion = *flagCQL
+ cluster.Timeout = *flagTimeout
+ cluster.Consistency = Quorum
+ if *flagRetry > 0 {
+ cluster.RetryPolicy = &SimpleRetryPolicy{NumRetries: *flagRetry}
+ }
+
+ switch *flagCompressTest {
+ case "snappy":
+ cluster.Compressor = &SnappyCompressor{}
+ case "":
+ default:
+ panic("invalid compressor: " + *flagCompressTest)
+ }
+
+ cluster = addSslOptions(cluster)
+ return cluster
+}
+
+func createKeyspace(tb testing.TB, cluster *ClusterConfig, keyspace string) {
+ c := *cluster
+ c.Keyspace = "system"
+ c.Timeout = 20 * time.Second
+ session, err := c.CreateSession()
+ if err != nil {
+ tb.Fatal("createSession:", err)
+ }
+
+ err = session.control.query(`DROP KEYSPACE IF EXISTS ` + keyspace).Close()
+ if err != nil {
+ tb.Fatal(err)
+ }
+
+ if err = session.control.awaitSchemaAgreement(); err != nil {
+ tb.Fatal(err)
+ }
+
+ err = session.control.query(fmt.Sprintf(`CREATE KEYSPACE %s
+ WITH replication = {
+ 'class' : 'SimpleStrategy',
+ 'replication_factor' : %d
+ }`, keyspace, *flagRF)).Close()
+
+ if err != nil {
+ tb.Fatal(err)
+ }
+
+	// the schema version might be out of date between 2 nodes, so wait for the
+ // cluster to settle.
+ // TODO(zariel): use events here to know when the cluster has resolved to the
+ // new schema version
+ if err = session.control.awaitSchemaAgreement(); err != nil {
+ tb.Fatal(err)
+ }
+}
+
+func createSessionFromCluster(cluster *ClusterConfig, tb testing.TB) *Session {
+ // Drop and re-create the keyspace once. Different tests should use their own
+ // individual tables, but can assume that the table does not exist before.
+ initOnce.Do(func() {
+ createKeyspace(tb, cluster, "gocql_test")
+ })
+
+ cluster.Keyspace = "gocql_test"
+ session, err := cluster.CreateSession()
+ if err != nil {
+ tb.Fatal("createSession:", err)
+ }
+
+ return session
+}
+
+func createSession(tb testing.TB) *Session {
+ cluster := createCluster()
+ return createSessionFromCluster(cluster, tb)
+}
+
+// TestAuthentication verifies that gocql will work with a host configured to only accept authenticated connections
+func TestAuthentication(t *testing.T) {
+
+ if *flagProto < 2 {
+ t.Skip("Authentication is not supported with protocol < 2")
+ }
+
+ if !*flagRunAuthTest {
+ t.Skip("Authentication is not configured in the target cluster")
+ }
+
+ cluster := createCluster()
+
+ cluster.Authenticator = PasswordAuthenticator{
+ Username: "cassandra",
+ Password: "cassandra",
+ }
+
+ session, err := cluster.CreateSession()
+
+ if err != nil {
+ t.Fatalf("Authentication error: %s", err)
+ }
+
+ session.Close()
+}
+
+//TestRingDiscovery makes sure that you can autodiscover other cluster members when you seed a cluster config with just one node
+func TestRingDiscovery(t *testing.T) {
+ cluster := createCluster()
+ cluster.Hosts = clusterHosts[:1]
+ cluster.DiscoverHosts = true
+
+ session := createSessionFromCluster(cluster, t)
+ defer session.Close()
+
+ if *clusterSize > 1 {
+ // wait for autodiscovery to update the pool with the list of known hosts
+ time.Sleep(*flagAutoWait)
+ }
+
+ session.pool.mu.RLock()
+ size := len(session.pool.hostConnPools)
+ session.pool.mu.RUnlock()
+
+ if *clusterSize != size {
+ t.Fatalf("Expected a cluster size of %d, but actual size was %d", *clusterSize, size)
+ }
+}
+
+func TestEmptyHosts(t *testing.T) {
+ cluster := createCluster()
+ cluster.Hosts = nil
+ if session, err := cluster.CreateSession(); err == nil {
+ session.Close()
+ t.Error("expected err, got nil")
+ }
+}
+
+//TestUseStatementError checks to make sure the correct error is returned when the user tries to execute a use statement.
+func TestUseStatementError(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ if err := session.Query("USE gocql_test").Exec(); err != nil {
+ if err != ErrUseStmt {
+ t.Fatalf("expected ErrUseStmt, got " + err.Error())
+ }
+ } else {
+ t.Fatal("expected err, got nil.")
+ }
+}
+
+//TestInvalidKeyspace checks that an invalid keyspace will return promptly and without a flood of connections
+func TestInvalidKeyspace(t *testing.T) {
+ cluster := createCluster()
+ cluster.Keyspace = "invalidKeyspace"
+ session, err := cluster.CreateSession()
+ if err != nil {
+ if err != ErrNoConnectionsStarted {
+			t.Fatalf("Expected ErrNoConnectionsStarted but got %v", err)
+ }
+ } else {
+ session.Close() //Clean up the session
+ t.Fatal("expected err, got nil.")
+ }
+}
+
+func TestTracing(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, `CREATE TABLE gocql_test.trace (id int primary key)`); err != nil {
+ t.Fatal("create:", err)
+ }
+
+ buf := &bytes.Buffer{}
+ trace := NewTraceWriter(session, buf)
+
+ if err := session.Query(`INSERT INTO trace (id) VALUES (?)`, 42).Trace(trace).Exec(); err != nil {
+ t.Fatal("insert:", err)
+ } else if buf.Len() == 0 {
+ t.Fatal("insert: failed to obtain any tracing")
+ }
+ buf.Reset()
+
+ var value int
+ if err := session.Query(`SELECT id FROM trace WHERE id = ?`, 42).Trace(trace).Scan(&value); err != nil {
+ t.Fatal("select:", err)
+ } else if value != 42 {
+ t.Fatalf("value: expected %d, got %d", 42, value)
+ } else if buf.Len() == 0 {
+ t.Fatal("select: failed to obtain any tracing")
+ }
+}
+
+func TestPaging(t *testing.T) {
+ if *flagProto == 1 {
+ t.Skip("Paging not supported. Please use Cassandra >= 2.0")
+ }
+
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, "CREATE TABLE gocql_test.paging (id int primary key)"); err != nil {
+ t.Fatal("create table:", err)
+ }
+ for i := 0; i < 100; i++ {
+ if err := session.Query("INSERT INTO paging (id) VALUES (?)", i).Exec(); err != nil {
+ t.Fatal("insert:", err)
+ }
+ }
+
+ iter := session.Query("SELECT id FROM paging").PageSize(10).Iter()
+ var id int
+ count := 0
+ for iter.Scan(&id) {
+ count++
+ }
+ if err := iter.Close(); err != nil {
+ t.Fatal("close:", err)
+ }
+ if count != 100 {
+ t.Fatalf("expected %d, got %d", 100, count)
+ }
+}
+
+func TestCAS(t *testing.T) {
+ if *flagProto == 1 {
+ t.Skip("lightweight transactions not supported. Please use Cassandra >= 2.0")
+ }
+
+ session := createSession(t)
+ defer session.Close()
+ session.cfg.SerialConsistency = LocalSerial
+
+ if err := createTable(session, `CREATE TABLE gocql_test.cas_table (
+ title varchar,
+ revid timeuuid,
+ last_modified timestamp,
+ PRIMARY KEY (title, revid)
+ )`); err != nil {
+ t.Fatal("create:", err)
+ }
+
+ title, revid, modified := "baz", TimeUUID(), time.Now()
+ var titleCAS string
+ var revidCAS UUID
+ var modifiedCAS time.Time
+
+ if applied, err := session.Query(`INSERT INTO cas_table (title, revid, last_modified)
+ VALUES (?, ?, ?) IF NOT EXISTS`,
+ title, revid, modified).ScanCAS(&titleCAS, &revidCAS, &modifiedCAS); err != nil {
+ t.Fatal("insert:", err)
+ } else if !applied {
+ t.Fatal("insert should have been applied")
+ }
+
+ if applied, err := session.Query(`INSERT INTO cas_table (title, revid, last_modified)
+ VALUES (?, ?, ?) IF NOT EXISTS`,
+ title, revid, modified).ScanCAS(&titleCAS, &revidCAS, &modifiedCAS); err != nil {
+ t.Fatal("insert:", err)
+ } else if applied {
+ t.Fatal("insert should not have been applied")
+ } else if title != titleCAS || revid != revidCAS {
+ t.Fatalf("expected %s/%v/%v but got %s/%v/%v", title, revid, modified, titleCAS, revidCAS, modifiedCAS)
+ }
+
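+ // A conditional DELETE whose IF clause does not match must not be applied; Cassandra returns the current last_modified value, which ScanCAS writes into modifiedCAS.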
+ tenSecondsLater := modified.Add(10 * time.Second)
+
+ if applied, err := session.Query(`DELETE FROM cas_table WHERE title = ? and revid = ? IF last_modified = ?`,
+ title, revid, tenSecondsLater).ScanCAS(&modifiedCAS); err != nil {
+ t.Fatal("delete:", err)
+ } else if applied {
+ t.Fatal("delete should have not been applied")
+ }
+
+ if modifiedCAS.Unix() != tenSecondsLater.Add(-10*time.Second).Unix() {
+ t.Fatalf("expected modified CAS to be %v, but got %v", tenSecondsLater.Add(-10*time.Second).UTC(), modifiedCAS.UTC())
+ }
+
+ if _, err := session.Query(`DELETE FROM cas_table WHERE title = ? and revid = ? IF last_modified = ?`,
+ title, revid, tenSecondsLater).ScanCAS(); err.Error() != "count mismatch" {
+ t.Fatalf("delete: was expecting count mismatch error but got %s", err)
+ }
+
+ if applied, err := session.Query(`DELETE FROM cas_table WHERE title = ? and revid = ? IF last_modified = ?`,
+ title, revid, modified).ScanCAS(&modifiedCAS); err != nil {
+ t.Fatal("delete:", err)
+ } else if !applied {
+ t.Fatal("delete should have been applied")
+ }
+
+ if err := session.Query(`TRUNCATE cas_table`).Exec(); err != nil {
+ t.Fatal("truncate:", err)
+ }
+
+ successBatch := session.NewBatch(LoggedBatch)
+ successBatch.Query("INSERT INTO cas_table (title, revid, last_modified) VALUES (?, ?, ?) IF NOT EXISTS", title, revid, modified)
+ if applied, _, err := session.ExecuteBatchCAS(successBatch, &titleCAS, &revidCAS, &modifiedCAS); err != nil {
+ t.Fatal("insert:", err)
+ } else if !applied {
+ t.Fatal("insert should have been applied")
+ }
+
+ successBatch = session.NewBatch(LoggedBatch)
+ successBatch.Query("INSERT INTO cas_table (title, revid, last_modified) VALUES (?, ?, ?) IF NOT EXISTS", title+"_foo", revid, modified)
+ casMap := make(map[string]interface{})
+ if applied, _, err := session.MapExecuteBatchCAS(successBatch, casMap); err != nil {
+ t.Fatal("insert:", err)
+ } else if !applied {
+ t.Fatal("insert should have been applied")
+ }
+
+ failBatch := session.NewBatch(LoggedBatch)
+ failBatch.Query("INSERT INTO cas_table (title, revid, last_modified) VALUES (?, ?, ?) IF NOT EXISTS", title, revid, modified)
+ if applied, _, err := session.ExecuteBatchCAS(failBatch, &titleCAS, &revidCAS, &modifiedCAS); err != nil {
+ t.Fatal("insert:", err)
+ } else if applied {
+ t.Fatal("insert shouldn't have been applied")
+ }
+
+ insertBatch := session.NewBatch(LoggedBatch)
+ insertBatch.Query("INSERT INTO cas_table (title, revid, last_modified) VALUES ('_foo', 2c3af400-73a4-11e5-9381-29463d90c3f0, DATEOF(NOW()))")
+ insertBatch.Query("INSERT INTO cas_table (title, revid, last_modified) VALUES ('_foo', 3e4ad2f1-73a4-11e5-9381-29463d90c3f0, DATEOF(NOW()))")
+ if err := session.ExecuteBatch(insertBatch); err != nil {
+ t.Fatal("insert:", err)
+ }
+
+ failBatch = session.NewBatch(LoggedBatch)
+ failBatch.Query("UPDATE cas_table SET last_modified = DATEOF(NOW()) WHERE title='_foo' AND revid=2c3af400-73a4-11e5-9381-29463d90c3f0 IF last_modified=DATEOF(NOW());")
+ failBatch.Query("UPDATE cas_table SET last_modified = DATEOF(NOW()) WHERE title='_foo' AND revid=3e4ad2f1-73a4-11e5-9381-29463d90c3f0 IF last_modified=DATEOF(NOW());")
+ if applied, iter, err := session.ExecuteBatchCAS(failBatch, &titleCAS, &revidCAS, &modifiedCAS); err != nil {
+ t.Fatal("insert:", err)
+ } else if applied {
+ t.Fatal("insert shouldn't have been applied")
+ } else {
+ if scan := iter.Scan(&applied, &titleCAS, &revidCAS, &modifiedCAS); scan && applied {
+ t.Fatal("insert shouldn't have been applied")
+ } else if !scan {
+ t.Fatal("should have scanned another row")
+ }
+ if err := iter.Close(); err != nil {
+ t.Fatal("scan:", err)
+ }
+ }
+}
+
+func TestMapScanCAS(t *testing.T) {
+ if *flagProto == 1 {
+ t.Skip("lightweight transactions not supported. Please use Cassandra >= 2.0")
+ }
+
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, `CREATE TABLE gocql_test.cas_table2 (
+ title varchar,
+ revid timeuuid,
+ last_modified timestamp,
+ deleted boolean,
+ PRIMARY KEY (title, revid)
+ )`); err != nil {
+ t.Fatal("create:", err)
+ }
+
+ title, revid, modified, deleted := "baz", TimeUUID(), time.Now(), false
+ mapCAS := map[string]interface{}{}
+
+ if applied, err := session.Query(`INSERT INTO cas_table2 (title, revid, last_modified, deleted)
+ VALUES (?, ?, ?, ?) IF NOT EXISTS`,
+ title, revid, modified, deleted).MapScanCAS(mapCAS); err != nil {
+ t.Fatal("insert:", err)
+ } else if !applied {
+ t.Fatal("insert should have been applied")
+ }
+
+ mapCAS = map[string]interface{}{}
+ if applied, err := session.Query(`INSERT INTO cas_table2 (title, revid, last_modified, deleted)
+ VALUES (?, ?, ?, ?) IF NOT EXISTS`,
+ title, revid, modified, deleted).MapScanCAS(mapCAS); err != nil {
+ t.Fatal("insert:", err)
+ } else if applied {
+ t.Fatal("insert should not have been applied")
+ } else if title != mapCAS["title"] || revid != mapCAS["revid"] || deleted != mapCAS["deleted"] {
+ t.Fatalf("expected %s/%v/%v/%v but got %s/%v/%v/%v", title, revid, modified, false, mapCAS["title"], mapCAS["revid"], mapCAS["last_modified"], mapCAS["deleted"])
+ }
+
+}
+
+func TestBatch(t *testing.T) {
+ if *flagProto == 1 {
+ t.Skip("atomic batches not supported. Please use Cassandra >= 2.0")
+ }
+
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, `CREATE TABLE gocql_test.batch_table (id int primary key)`); err != nil {
+ t.Fatal("create table:", err)
+ }
+
+ batch := NewBatch(LoggedBatch)
+ for i := 0; i < 100; i++ {
+ batch.Query(`INSERT INTO batch_table (id) VALUES (?)`, i)
+ }
+
+ if err := session.ExecuteBatch(batch); err != nil {
+ t.Fatal("execute batch:", err)
+ }
+
+ count := 0
+ if err := session.Query(`SELECT COUNT(*) FROM batch_table`).Scan(&count); err != nil {
+ t.Fatal("select count:", err)
+ } else if count != 100 {
+ t.Fatalf("count: expected %d, got %d\n", 100, count)
+ }
+}
+
+func TestUnpreparedBatch(t *testing.T) {
+ if *flagProto == 1 {
+ t.Skip("atomic batches not supported. Please use Cassandra >= 2.0")
+ }
+
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, `CREATE TABLE gocql_test.batch_unprepared (id int primary key, c counter)`); err != nil {
+ t.Fatal("create table:", err)
+ }
+
+ var batch *Batch
+ if *flagProto == 2 {
+ batch = NewBatch(CounterBatch)
+ } else {
+ batch = NewBatch(UnloggedBatch)
+ }
+
+ for i := 0; i < 100; i++ {
+ batch.Query(`UPDATE batch_unprepared SET c = c + 1 WHERE id = 1`)
+ }
+
+ if err := session.ExecuteBatch(batch); err != nil {
+ t.Fatal("execute batch:", err)
+ }
+
+ count := 0
+ if err := session.Query(`SELECT COUNT(*) FROM batch_unprepared`).Scan(&count); err != nil {
+ t.Fatal("select count:", err)
+ } else if count != 1 {
+ t.Fatalf("count: expected %d, got %d\n", 1, count)
+ }
+
+ if err := session.Query(`SELECT c FROM batch_unprepared`).Scan(&count); err != nil {
+ t.Fatal("select count:", err)
+ } else if count != 100 {
+ t.Fatalf("count: expected %d, got %d\n", 100, count)
+ }
+}
+
+// TestBatchLimit tests gocql to make sure batch operations larger than the maximum
+// statement limit are not submitted to a cassandra node.
+func TestBatchLimit(t *testing.T) {
+ if *flagProto == 1 {
+ t.Skip("atomic batches not supported. Please use Cassandra >= 2.0")
+ }
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, `CREATE TABLE gocql_test.batch_table2 (id int primary key)`); err != nil {
+ t.Fatal("create table:", err)
+ }
+
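+ // 65537 statements exceeds the maximum a single batch may carry (the native protocol encodes the statement count in 16 bits), so ExecuteBatch should fail with ErrTooManyStmts without contacting the node.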
+ batch := NewBatch(LoggedBatch)
+ for i := 0; i < 65537; i++ {
+ batch.Query(`INSERT INTO batch_table2 (id) VALUES (?)`, i)
+ }
+ if err := session.ExecuteBatch(batch); err != ErrTooManyStmts {
+ t.Fatal("gocql attempted to execute a batch larger than the supported limit of statements.")
+ }
+
+}
+
+func TestWhereIn(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, `CREATE TABLE gocql_test.where_in_table (id int, cluster int, primary key (id,cluster))`); err != nil {
+ t.Fatal("create table:", err)
+ }
+
+ if err := session.Query("INSERT INTO where_in_table (id, cluster) VALUES (?,?)", 100, 200).Exec(); err != nil {
+ t.Fatal("insert:", err)
+ }
+
+ iter := session.Query("SELECT * FROM where_in_table WHERE id = ? AND cluster IN (?)", 100, 200).Iter()
+ var id, cluster int
+ count := 0
+ for iter.Scan(&id, &cluster) {
+ count++
+ }
+
+ if id != 100 || cluster != 200 {
+ t.Fatalf("Was expecting id and cluster to be (100,200) but were (%d,%d)", id, cluster)
+ }
+}
+
+// TestTooManyQueryArgs tests to make sure the library correctly handles the application level bug
+// whereby too many query arguments are passed to a query
+func TestTooManyQueryArgs(t *testing.T) {
+ if *flagProto == 1 {
+ t.Skip("atomic batches not supported. Please use Cassandra >= 2.0")
+ }
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, `CREATE TABLE gocql_test.too_many_query_args (id int primary key, value int)`); err != nil {
+ t.Fatal("create table:", err)
+ }
+
+ _, err := session.Query(`SELECT * FROM too_many_query_args WHERE id = ?`, 1, 2).Iter().SliceMap()
+
+ if err == nil {
+ t.Fatal("'`SELECT * FROM too_many_query_args WHERE id = ?`, 1, 2' should return an ErrQueryArgLength")
+ }
+
+ if err != ErrQueryArgLength {
+ t.Fatalf("'`SELECT * FROM too_many_query_args WHERE id = ?`, 1, 2' should return an ErrQueryArgLength, but returned: %s", err)
+ }
+
+ batch := session.NewBatch(UnloggedBatch)
+ batch.Query("INSERT INTO too_many_query_args (id, value) VALUES (?, ?)", 1, 2, 3)
+ err = session.ExecuteBatch(batch)
+
+ if err == nil {
+ t.Fatal("'`INSERT INTO too_many_query_args (id, value) VALUES (?, ?)`, 1, 2, 3' should return an ErrQueryArgLength")
+ }
+
+ if err != ErrQueryArgLength {
+ t.Fatalf("'`INSERT INTO too_many_query_args (id, value) VALUES (?, ?)`, 1, 2, 3' should return an ErrQueryArgLength, but returned: %s", err)
+ }
+
+}
+
+// TestNotEnoughQueryArgs tests to make sure the library correctly handles the application level bug
+// whereby not enough query arguments are passed to a query
+func TestNotEnoughQueryArgs(t *testing.T) {
+ if *flagProto == 1 {
+ t.Skip("atomic batches not supported. Please use Cassandra >= 2.0")
+ }
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, `CREATE TABLE gocql_test.not_enough_query_args (id int, cluster int, value int, primary key (id, cluster))`); err != nil {
+ t.Fatal("create table:", err)
+ }
+
+ _, err := session.Query(`SELECT * FROM not_enough_query_args WHERE id = ? and cluster = ?`, 1).Iter().SliceMap()
+
+ if err == nil {
+ t.Fatal("'`SELECT * FROM not_enough_query_args WHERE id = ? and cluster = ?`, 1' should return an ErrQueryArgLength")
+ }
+
+ if err != ErrQueryArgLength {
+ t.Fatalf("'`SELECT * FROM not_enough_query_args WHERE id = ? and cluster = ?`, 1' should return an ErrQueryArgLength, but returned: %s", err)
+ }
+
+ batch := session.NewBatch(UnloggedBatch)
+ batch.Query("INSERT INTO not_enough_query_args (id, cluster, value) VALUES (?, ?, ?)", 1, 2)
+ err = session.ExecuteBatch(batch)
+
+ if err == nil {
+ t.Fatal("'`INSERT INTO not_enough_query_args (id, cluster, value) VALUES (?, ?, ?)`, 1, 2' should return an ErrQueryArgLength")
+ }
+
+ if err != ErrQueryArgLength {
+ t.Fatalf("'`INSERT INTO not_enough_query_args (id, cluster, value) VALUES (?, ?, ?)`, 1, 2' should return an ErrQueryArgLength, but returned: %s", err)
+ }
+}
+
+// TestCreateSessionTimeout tests to make sure the CreateSession function times out correctly
+// and prevents an infinite loop of connection retries.
+func TestCreateSessionTimeout(t *testing.T) {
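+ // Watchdog goroutine: if CreateSession blocks for more than 2 seconds, the startup timeout is not working.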
+ go func() {
+ <-time.After(2 * time.Second)
+ t.Error("no startup timeout")
+ }()
+
+ cluster := createCluster()
+ cluster.Hosts = []string{"127.0.0.1:1"}
+ session, err := cluster.CreateSession()
+ if err == nil {
+ session.Close()
+ t.Fatal("expected ErrNoConnectionsStarted, but no error was returned.")
+ }
+
+ if err != ErrNoConnectionsStarted {
+ t.Fatalf("expected ErrNoConnectionsStarted, but received %v", err)
+ }
+}
+
+type FullName struct {
+ FirstName string
+ LastName string
+}
+
+func (n FullName) MarshalCQL(info TypeInfo) ([]byte, error) {
+ return []byte(n.FirstName + " " + n.LastName), nil
+}
+
+func (n *FullName) UnmarshalCQL(info TypeInfo, data []byte) error {
+ t := strings.SplitN(string(data), " ", 2)
+ n.FirstName, n.LastName = t[0], t[1]
+ return nil
+}
+
+func TestMapScanWithRefMap(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+ if err := createTable(session, `CREATE TABLE gocql_test.scan_map_ref_table (
+ testtext text PRIMARY KEY,
+ testfullname text,
+ testint int,
+ )`); err != nil {
+ t.Fatal("create table:", err)
+ }
+ m := make(map[string]interface{})
+ m["testtext"] = "testtext"
+ m["testfullname"] = FullName{"John", "Doe"}
+ m["testint"] = 100
+
+ if err := session.Query(`INSERT INTO scan_map_ref_table (testtext, testfullname, testint) values (?,?,?)`, m["testtext"], m["testfullname"], m["testint"]).Exec(); err != nil {
+ t.Fatal("insert:", err)
+ }
+
+ var testText string
+ var testFullName FullName
+ ret := map[string]interface{}{
+ "testtext": &testText,
+ "testfullname": &testFullName,
+ // testint is not set here.
+ }
+ iter := session.Query(`SELECT * FROM scan_map_ref_table`).Iter()
+ if ok := iter.MapScan(ret); !ok {
+ t.Fatal("select:", iter.Close())
+ } else {
+ if ret["testtext"] != "testtext" {
+ t.Fatal("returned testtext did not match")
+ }
+ f := ret["testfullname"].(FullName)
+ if f.FirstName != "John" || f.LastName != "Doe" {
+ t.Fatal("returned testfullname did not match")
+ }
+ if ret["testint"] != 100 {
+ t.Fatal("returned testint did not match")
+ }
+ }
+
+}
+
+func TestSliceMap(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+ if err := createTable(session, `CREATE TABLE gocql_test.slice_map_table (
+ testuuid timeuuid PRIMARY KEY,
+ testtimestamp timestamp,
+ testvarchar varchar,
+ testbigint bigint,
+ testblob blob,
+ testbool boolean,
+ testfloat float,
+ testdouble double,
+ testint int,
+ testdecimal decimal,
+ testlist list<text>,
+ testset set<int>,
+ testmap map<varchar, text>,
+ testvarint varint,
+ testinet inet
+ )`); err != nil {
+ t.Fatal("create table:", err)
+ }
+ m := make(map[string]interface{})
+
+ bigInt := new(big.Int)
+ if _, ok := bigInt.SetString("830169365738487321165427203929228", 10); !ok {
+ t.Fatal("Failed setting bigint by string")
+ }
+
+ m["testuuid"] = TimeUUID()
+ m["testvarchar"] = "Test VarChar"
+ m["testbigint"] = time.Now().Unix()
+ m["testtimestamp"] = time.Now().Truncate(time.Millisecond).UTC()
+ m["testblob"] = []byte("test blob")
+ m["testbool"] = true
+ m["testfloat"] = float32(4.564)
+ m["testdouble"] = float64(4.815162342)
+ m["testint"] = 2343
+ m["testdecimal"] = inf.NewDec(100, 0)
+ m["testlist"] = []string{"quux", "foo", "bar", "baz", "quux"}
+ m["testset"] = []int{1, 2, 3, 4, 5, 6, 7, 8, 9}
+ m["testmap"] = map[string]string{"field1": "val1", "field2": "val2", "field3": "val3"}
+ m["testvarint"] = bigInt
+ m["testinet"] = "213.212.2.19"
+ sliceMap := []map[string]interface{}{m}
+ if err := session.Query(`INSERT INTO slice_map_table (testuuid, testtimestamp, testvarchar, testbigint, testblob, testbool, testfloat, testdouble, testint, testdecimal, testlist, testset, testmap, testvarint, testinet) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+ m["testuuid"], m["testtimestamp"], m["testvarchar"], m["testbigint"], m["testblob"], m["testbool"], m["testfloat"], m["testdouble"], m["testint"], m["testdecimal"], m["testlist"], m["testset"], m["testmap"], m["testvarint"], m["testinet"]).Exec(); err != nil {
+ t.Fatal("insert:", err)
+ }
+ if returned, retErr := session.Query(`SELECT * FROM slice_map_table`).Iter().SliceMap(); retErr != nil {
+ t.Fatal("select:", retErr)
+ } else {
+ matchSliceMap(t, sliceMap, returned[0])
+ }
+
+ // Test for Iter.MapScan()
+ {
+ testMap := make(map[string]interface{})
+ if !session.Query(`SELECT * FROM slice_map_table`).Iter().MapScan(testMap) {
+ t.Fatal("MapScan failed to work with one row")
+ }
+ matchSliceMap(t, sliceMap, testMap)
+ }
+
+ // Test for Query.MapScan()
+ {
+ testMap := make(map[string]interface{})
+ if session.Query(`SELECT * FROM slice_map_table`).MapScan(testMap) != nil {
+ t.Fatal("MapScan failed to work with one row")
+ }
+ matchSliceMap(t, sliceMap, testMap)
+ }
+}
+func matchSliceMap(t *testing.T, sliceMap []map[string]interface{}, testMap map[string]interface{}) {
+ if sliceMap[0]["testuuid"] != testMap["testuuid"] {
+ t.Fatal("returned testuuid did not match")
+ }
+ if sliceMap[0]["testtimestamp"] != testMap["testtimestamp"] {
+ t.Fatal("returned testtimestamp did not match")
+ }
+ if sliceMap[0]["testvarchar"] != testMap["testvarchar"] {
+ t.Fatal("returned testvarchar did not match")
+ }
+ if sliceMap[0]["testbigint"] != testMap["testbigint"] {
+ t.Fatal("returned testbigint did not match")
+ }
+ if !reflect.DeepEqual(sliceMap[0]["testblob"], testMap["testblob"]) {
+ t.Fatal("returned testblob did not match")
+ }
+ if sliceMap[0]["testbool"] != testMap["testbool"] {
+ t.Fatal("returned testbool did not match")
+ }
+ if sliceMap[0]["testfloat"] != testMap["testfloat"] {
+ t.Fatal("returned testfloat did not match")
+ }
+ if sliceMap[0]["testdouble"] != testMap["testdouble"] {
+ t.Fatal("returned testdouble did not match")
+ }
+ if sliceMap[0]["testinet"] != testMap["testinet"] {
+ t.Fatal("returned testinet did not match")
+ }
+
+ expectedDecimal := sliceMap[0]["testdecimal"].(*inf.Dec)
+ returnedDecimal := testMap["testdecimal"].(*inf.Dec)
+
+ if expectedDecimal.Cmp(returnedDecimal) != 0 {
+ t.Fatal("returned testdecimal did not match")
+ }
+
+ if !reflect.DeepEqual(sliceMap[0]["testlist"], testMap["testlist"]) {
+ t.Fatal("returned testlist did not match")
+ }
+ if !reflect.DeepEqual(sliceMap[0]["testset"], testMap["testset"]) {
+ t.Fatal("returned testset did not match")
+ }
+ if !reflect.DeepEqual(sliceMap[0]["testmap"], testMap["testmap"]) {
+ t.Fatal("returned testmap did not match")
+ }
+ if sliceMap[0]["testint"] != testMap["testint"] {
+ t.Fatal("returned testint did not match")
+ }
+}
+
+func TestScanWithNilArguments(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, `CREATE TABLE gocql_test.scan_with_nil_arguments (
+ foo varchar,
+ bar int,
+ PRIMARY KEY (foo, bar)
+ )`); err != nil {
+ t.Fatal("create:", err)
+ }
+ for i := 1; i <= 20; i++ {
+ if err := session.Query("INSERT INTO scan_with_nil_arguments (foo, bar) VALUES (?, ?)",
+ "squares", i*i).Exec(); err != nil {
+ t.Fatal("insert:", err)
+ }
+ }
+
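+ // A nil scan destination skips that column, so only bar is summed: 1^2 + 2^2 + ... + 20^2 = 2870.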
+ iter := session.Query("SELECT * FROM scan_with_nil_arguments WHERE foo = ?", "squares").Iter()
+ var n int
+ count := 0
+ for iter.Scan(nil, &n) {
+ count += n
+ }
+ if err := iter.Close(); err != nil {
+ t.Fatal("close:", err)
+ }
+ if count != 2870 {
+ t.Fatalf("expected %d, got %d", 2870, count)
+ }
+}
+
+func TestScanCASWithNilArguments(t *testing.T) {
+ if *flagProto == 1 {
+ t.Skip("lightweight transactions not supported. Please use Cassandra >= 2.0")
+ }
+
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, `CREATE TABLE gocql_test.scan_cas_with_nil_arguments (
+ foo varchar,
+ bar varchar,
+ PRIMARY KEY (foo, bar)
+ )`); err != nil {
+ t.Fatal("create:", err)
+ }
+
+ foo := "baz"
+ var cas string
+
+ if applied, err := session.Query(`INSERT INTO scan_cas_with_nil_arguments (foo, bar)
+ VALUES (?, ?) IF NOT EXISTS`,
+ foo, foo).ScanCAS(nil, nil); err != nil {
+ t.Fatal("insert:", err)
+ } else if !applied {
+ t.Fatal("insert should have been applied")
+ }
+
+ if applied, err := session.Query(`INSERT INTO scan_cas_with_nil_arguments (foo, bar)
+ VALUES (?, ?) IF NOT EXISTS`,
+ foo, foo).ScanCAS(&cas, nil); err != nil {
+ t.Fatal("insert:", err)
+ } else if applied {
+ t.Fatal("insert should not have been applied")
+ } else if foo != cas {
+ t.Fatalf("expected %v but got %v", foo, cas)
+ }
+
+ if applied, err := session.Query(`INSERT INTO scan_cas_with_nil_arguments (foo, bar)
+ VALUES (?, ?) IF NOT EXISTS`,
+ foo, foo).ScanCAS(nil, &cas); err != nil {
+ t.Fatal("insert:", err)
+ } else if applied {
+ t.Fatal("insert should not have been applied")
+ } else if foo != cas {
+ t.Fatalf("expected %v but got %v", foo, cas)
+ }
+}
+
+func TestRebindQueryInfo(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, "CREATE TABLE gocql_test.rebind_query (id int, value text, PRIMARY KEY (id))"); err != nil {
+ t.Fatalf("failed to create table with error '%v'", err)
+ }
+
+ if err := session.Query("INSERT INTO rebind_query (id, value) VALUES (?, ?)", 23, "quux").Exec(); err != nil {
+ t.Fatalf("insert into rebind_query failed, err '%v'", err)
+ }
+
+ if err := session.Query("INSERT INTO rebind_query (id, value) VALUES (?, ?)", 24, "w00t").Exec(); err != nil {
+ t.Fatalf("insert into rebind_query failed, err '%v'", err)
+ }
+
+ q := session.Query("SELECT value FROM rebind_query WHERE ID = ?")
+ q.Bind(23)
+
+ iter := q.Iter()
+ var value string
+ for iter.Scan(&value) {
+ }
+
+ if value != "quux" {
+ t.Fatalf("expected %v but got %v", "quux", value)
+ }
+
+ q.Bind(24)
+ iter = q.Iter()
+
+ for iter.Scan(&value) {
+ }
+
+ if value != "w00t" {
+ t.Fatalf("expected %v but got %v", "w00t", value)
+ }
+}
+
+//TestStaticQueryInfo makes sure that the application can manually bind query parameters using the simplest possible static binding strategy
+func TestStaticQueryInfo(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, "CREATE TABLE gocql_test.static_query_info (id int, value text, PRIMARY KEY (id))"); err != nil {
+ t.Fatalf("failed to create table with error '%v'", err)
+ }
+
+ if err := session.Query("INSERT INTO static_query_info (id, value) VALUES (?, ?)", 113, "foo").Exec(); err != nil {
+ t.Fatalf("insert into static_query_info failed, err '%v'", err)
+ }
+
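+ // The binder callback supplies the query values at execution time; here it always binds the id 113 inserted above.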
+ autobinder := func(q *QueryInfo) ([]interface{}, error) {
+ values := make([]interface{}, 1)
+ values[0] = 113
+ return values, nil
+ }
+
+ qry := session.Bind("SELECT id, value FROM static_query_info WHERE id = ?", autobinder)
+
+ if err := qry.Exec(); err != nil {
+ t.Fatalf("expose query info failed, error '%v'", err)
+ }
+
+ iter := qry.Iter()
+
+ var id int
+ var value string
+
+ iter.Scan(&id, &value)
+
+ if err := iter.Close(); err != nil {
+ t.Fatalf("query with exposed info failed, err '%v'", err)
+ }
+
+ if value != "foo" {
+ t.Fatalf("Expected value %s, but got %s", "foo", value)
+ }
+
+}
+
+type ClusteredKeyValue struct {
+ Id int
+ Cluster int
+ Value string
+}
+
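+// Bind maps each query argument described in QueryInfo to the struct field whose name matches the column name (initial letter upcased), returning pointers so the values are read when the query executes.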
+func (kv *ClusteredKeyValue) Bind(q *QueryInfo) ([]interface{}, error) {
+ values := make([]interface{}, len(q.Args))
+
+ for i, info := range q.Args {
+ fieldName := upcaseInitial(info.Name)
+ value := reflect.ValueOf(kv)
+ field := reflect.Indirect(value).FieldByName(fieldName)
+ values[i] = field.Addr().Interface()
+ }
+
+ return values, nil
+}
+
+func upcaseInitial(str string) string {
+ for i, v := range str {
+ return string(unicode.ToUpper(v)) + str[i+1:]
+ }
+ return ""
+}
+
+//TestBoundQueryInfo makes sure that the application can manually bind query parameters using the query meta data supplied at runtime
+func TestBoundQueryInfo(t *testing.T) {
+
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, "CREATE TABLE gocql_test.clustered_query_info (id int, cluster int, value text, PRIMARY KEY (id, cluster))"); err != nil {
+ t.Fatalf("failed to create table with error '%v'", err)
+ }
+
+ write := &ClusteredKeyValue{Id: 200, Cluster: 300, Value: "baz"}
+
+ insert := session.Bind("INSERT INTO clustered_query_info (id, cluster, value) VALUES (?, ?,?)", write.Bind)
+
+ if err := insert.Exec(); err != nil {
+ t.Fatalf("insert into clustered_query_info failed, err '%v'", err)
+ }
+
+ read := &ClusteredKeyValue{Id: 200, Cluster: 300}
+
+ qry := session.Bind("SELECT id, cluster, value FROM clustered_query_info WHERE id = ? and cluster = ?", read.Bind)
+
+ iter := qry.Iter()
+
+ var id, cluster int
+ var value string
+
+ iter.Scan(&id, &cluster, &value)
+
+ if err := iter.Close(); err != nil {
+ t.Fatalf("query with clustered_query_info info failed, err '%v'", err)
+ }
+
+ if value != "baz" {
+ t.Fatalf("Expected value %s, but got %s", "baz", value)
+ }
+
+}
+
+//TestBatchQueryInfo makes sure that the application can manually bind query parameters when executing in a batch
+func TestBatchQueryInfo(t *testing.T) {
+
+ if *flagProto == 1 {
+ t.Skip("atomic batches not supported. Please use Cassandra >= 2.0")
+ }
+
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, "CREATE TABLE gocql_test.batch_query_info (id int, cluster int, value text, PRIMARY KEY (id, cluster))"); err != nil {
+ t.Fatalf("failed to create table with error '%v'", err)
+ }
+
+ write := func(q *QueryInfo) ([]interface{}, error) {
+ values := make([]interface{}, 3)
+ values[0] = 4000
+ values[1] = 5000
+ values[2] = "bar"
+ return values, nil
+ }
+
+ batch := session.NewBatch(LoggedBatch)
+ batch.Bind("INSERT INTO batch_query_info (id, cluster, value) VALUES (?, ?,?)", write)
+
+ if err := session.ExecuteBatch(batch); err != nil {
+ t.Fatalf("batch insert into batch_query_info failed, err '%v'", err)
+ }
+
+ read := func(q *QueryInfo) ([]interface{}, error) {
+ values := make([]interface{}, 2)
+ values[0] = 4000
+ values[1] = 5000
+ return values, nil
+ }
+
+ qry := session.Bind("SELECT id, cluster, value FROM batch_query_info WHERE id = ? and cluster = ?", read)
+
+ iter := qry.Iter()
+
+ var id, cluster int
+ var value string
+
+ iter.Scan(&id, &cluster, &value)
+
+ if err := iter.Close(); err != nil {
+ t.Fatalf("query with batch_query_info info failed, err '%v'", err)
+ }
+
+ if value != "bar" {
+ t.Fatalf("Expected value %s, but got %s", "bar", value)
+ }
+}
+
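+// injectInvalidPreparedStatement seeds the prepared statement cache with a bogus statement id for the given table, so the next execution is rejected by the server and the driver is forced to re-prepare.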
+func injectInvalidPreparedStatement(t *testing.T, session *Session, table string) (string, *Conn) {
+ if err := createTable(session, `CREATE TABLE gocql_test.`+table+` (
+ foo varchar,
+ bar int,
+ PRIMARY KEY (foo, bar)
+ )`); err != nil {
+ t.Fatal("create:", err)
+ }
+ stmt := "INSERT INTO " + table + " (foo, bar) VALUES (?, 7)"
+ _, conn := session.pool.Pick(nil)
+ flight := new(inflightPrepare)
+ stmtsLRU.Lock()
+ stmtsLRU.lru.Add(conn.addr+stmt, flight)
+ stmtsLRU.Unlock()
+ flight.info = QueryInfo{
+ Id: []byte{'f', 'o', 'o', 'b', 'a', 'r'},
+ Args: []ColumnInfo{
+ {
+ Keyspace: "gocql_test",
+ Table: table,
+ Name: "foo",
+ TypeInfo: NativeType{
+ typ: TypeVarchar,
+ },
+ },
+ },
+ }
+ return stmt, conn
+}
+
+func TestMissingSchemaPrepare(t *testing.T) {
+ s := createSession(t)
+ _, conn := s.pool.Pick(nil)
+ defer s.Close()
+
+ insertQry := &Query{stmt: "INSERT INTO invalidschemaprep (val) VALUES (?)", values: []interface{}{5}, cons: s.cons,
+ session: s, pageSize: s.pageSize, trace: s.trace,
+ prefetch: s.prefetch, rt: s.cfg.RetryPolicy}
+
+ if err := conn.executeQuery(insertQry).err; err == nil {
+ t.Fatal("expected error, but got nil.")
+ }
+
+ if err := createTable(s, "CREATE TABLE gocql_test.invalidschemaprep (val int, PRIMARY KEY (val))"); err != nil {
+ t.Fatal("create table:", err)
+ }
+
+ if err := conn.executeQuery(insertQry).err; err != nil {
+ t.Fatal(err) // unconfigured columnfamily
+ }
+}
+
+func TestReprepareStatement(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+ stmt, conn := injectInvalidPreparedStatement(t, session, "test_reprepare_statement")
+ query := session.Query(stmt, "bar")
+ if err := conn.executeQuery(query).Close(); err != nil {
+ t.Fatalf("Failed to execute query for reprepare statement: %v", err)
+ }
+}
+
+func TestReprepareBatch(t *testing.T) {
+ if *flagProto == 1 {
+ t.Skip("atomic batches not supported. Please use Cassandra >= 2.0")
+ }
+ session := createSession(t)
+ defer session.Close()
+ stmt, conn := injectInvalidPreparedStatement(t, session, "test_reprepare_statement_batch")
+ batch := session.NewBatch(UnloggedBatch)
+ batch.Query(stmt, "bar")
+ if _, err := conn.executeBatch(batch); err != nil {
+ t.Fatalf("Failed to execute query for reprepare statement: %v", err)
+ }
+
+}
+
+func TestQueryInfo(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ _, conn := session.pool.Pick(nil)
+ info, err := conn.prepareStatement("SELECT release_version, host_id FROM system.local WHERE key = ?", nil)
+
+ if err != nil {
+ t.Fatalf("Failed to execute query for preparing statement: %v", err)
+ }
+
+ if x := len(info.Args); x != 1 {
+ t.Fatalf("expected metadata for %d query argument, but got %d\n", 1, x)
+ }
+
+ if *flagProto > 1 {
+ if x := len(info.Rval); x != 2 {
+ t.Fatalf("expected metadata for %d result columns, but got %d\n", 2, x)
+ }
+ }
+}
+
+//TestPreparedCacheEviction will make sure that the cache size is maintained
+func TestPreparedCacheEviction(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
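+ // Cap the prepared statement cache at 4 entries; the statements prepared below (two selects, an update, a delete and an insert) exceed that, so the oldest select should be evicted.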
+ stmtsLRU.Lock()
+ stmtsLRU.Max(4)
+ stmtsLRU.Unlock()
+
+ if err := createTable(session, "CREATE TABLE gocql_test.prepcachetest (id int,mod int,PRIMARY KEY (id))"); err != nil {
+ t.Fatalf("failed to create table with error '%v'", err)
+ }
+ //Fill the table
+ for i := 0; i < 2; i++ {
+ if err := session.Query("INSERT INTO prepcachetest (id,mod) VALUES (?, ?)", i, 10000%(i+1)).Exec(); err != nil {
+ t.Fatalf("insert into prepcachetest failed, err '%v'", err)
+ }
+ }
+ //Populate the prepared statement cache with select statements
+ var id, mod int
+ for i := 0; i < 2; i++ {
+ err := session.Query("SELECT id,mod FROM prepcachetest WHERE id = "+strconv.FormatInt(int64(i), 10)).Scan(&id, &mod)
+ if err != nil {
+ t.Fatalf("select from prepcachetest failed, error '%v'", err)
+ }
+ }
+
+ //generate an update statement to test they are prepared
+ err := session.Query("UPDATE prepcachetest SET mod = ? WHERE id = ?", 1, 11).Exec()
+ if err != nil {
+ t.Fatalf("update prepcachetest failed, error '%v'", err)
+ }
+
+ //generate a delete statement to test they are prepared
+ err = session.Query("DELETE FROM prepcachetest WHERE id = ?", 1).Exec()
+ if err != nil {
+ t.Fatalf("delete from prepcachetest failed, error '%v'", err)
+ }
+
+ //generate an insert statement to test they are prepared
+ err = session.Query("INSERT INTO prepcachetest (id,mod) VALUES (?, ?)", 3, 11).Exec()
+ if err != nil {
+ t.Fatalf("insert into prepcachetest failed, error '%v'", err)
+ }
+
+ stmtsLRU.Lock()
+
+ //Make sure the cache size is maintained
+ if stmtsLRU.lru.Len() != stmtsLRU.lru.MaxEntries {
+ t.Fatalf("expected cache size of %v, got %v", stmtsLRU.lru.MaxEntries, stmtsLRU.lru.Len())
+ }
+
+ //Walk through all the configured hosts and test cache retention and eviction
+ var selFound, insFound, updFound, delFound, selEvict bool
+ for i := range session.cfg.Hosts {
+ _, ok := stmtsLRU.lru.Get(session.cfg.Hosts[i] + ":9042gocql_testSELECT id,mod FROM prepcachetest WHERE id = 1")
+ selFound = selFound || ok
+
+ _, ok = stmtsLRU.lru.Get(session.cfg.Hosts[i] + ":9042gocql_testINSERT INTO prepcachetest (id,mod) VALUES (?, ?)")
+ insFound = insFound || ok
+
+ _, ok = stmtsLRU.lru.Get(session.cfg.Hosts[i] + ":9042gocql_testUPDATE prepcachetest SET mod = ? WHERE id = ?")
+ updFound = updFound || ok
+
+ _, ok = stmtsLRU.lru.Get(session.cfg.Hosts[i] + ":9042gocql_testDELETE FROM prepcachetest WHERE id = ?")
+ delFound = delFound || ok
+
+ _, ok = stmtsLRU.lru.Get(session.cfg.Hosts[i] + ":9042gocql_testSELECT id,mod FROM prepcachetest WHERE id = 0")
+ selEvict = selEvict || !ok
+ }
+
+ stmtsLRU.Unlock()
+
+ if !selEvict {
+ t.Fatalf("expected first select statement to be purged, but statement was found in the cache.")
+ }
+ if !selFound {
+ t.Fatalf("expected second select statement to be cached, but statement was purged or not prepared.")
+ }
+ if !insFound {
+ t.Fatalf("expected insert statement to be cached, but statement was purged or not prepared.")
+ }
+ if !updFound {
+ t.Fatalf("expected update statement to be cached, but statement was purged or not prepared.")
+ }
+ if !delFound {
+ t.Error("expected delete statement to be cached, but statement was purged or not prepared.")
+ }
+}
+
+func TestPreparedCacheKey(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ // create a second keyspace
+ cluster2 := createCluster()
+ createKeyspace(t, cluster2, "gocql_test2")
+ cluster2.Keyspace = "gocql_test2"
+ session2, err := cluster2.CreateSession()
+ if err != nil {
+ t.Fatal("create session:", err)
+ }
+ defer session2.Close()
+
+ // both keyspaces have a table named "test_stmt_cache_key"
+ if err := createTable(session, "CREATE TABLE gocql_test.test_stmt_cache_key (id varchar primary key, field varchar)"); err != nil {
+ t.Fatal("create table:", err)
+ }
+ if err := createTable(session2, "CREATE TABLE gocql_test2.test_stmt_cache_key (id varchar primary key, field varchar)"); err != nil {
+ t.Fatal("create table:", err)
+ }
+
+ // both tables have a single row with the same partition key but different column value
+ if err = session.Query(`INSERT INTO test_stmt_cache_key (id, field) VALUES (?, ?)`, "key", "one").Exec(); err != nil {
+ t.Fatal("insert:", err)
+ }
+ if err = session2.Query(`INSERT INTO test_stmt_cache_key (id, field) VALUES (?, ?)`, "key", "two").Exec(); err != nil {
+ t.Fatal("insert:", err)
+ }
+
+ // should be able to see different values in each keyspace
+ var value string
+ if err = session.Query("SELECT field FROM test_stmt_cache_key WHERE id = ?", "key").Scan(&value); err != nil {
+ t.Fatal("select:", err)
+ }
+ if value != "one" {
+ t.Errorf("Expected one, got %s", value)
+ }
+
+ if err = session2.Query("SELECT field FROM test_stmt_cache_key WHERE id = ?", "key").Scan(&value); err != nil {
+ t.Fatal("select:", err)
+ }
+ if value != "two" {
+ t.Errorf("Expected two, got %s", value)
+ }
+}
+
+//TestMarshalFloat64Ptr tests to see that a pointer to a float64 is marshalled correctly.
+func TestMarshalFloat64Ptr(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, "CREATE TABLE gocql_test.float_test (id double, test double, primary key (id))"); err != nil {
+ t.Fatal("create table:", err)
+ }
+ testNum := float64(7500)
+ if err := session.Query(`INSERT INTO float_test (id,test) VALUES (?,?)`, float64(7500.00), &testNum).Exec(); err != nil {
+ t.Fatal("insert float64:", err)
+ }
+}
+
+//TestMarshalInet tests to see that string and net.IP values are marshalled and unmarshalled correctly for the inet type.
+func TestMarshalInet(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, "CREATE TABLE gocql_test.inet_test (ip inet, name text, primary key (ip))"); err != nil {
+ t.Fatal("create table:", err)
+ }
+ stringIp := "123.34.45.56"
+ if err := session.Query(`INSERT INTO inet_test (ip,name) VALUES (?,?)`, stringIp, "Test IP 1").Exec(); err != nil {
+ t.Fatal("insert string inet:", err)
+ }
+ var stringResult string
+ if err := session.Query("SELECT ip FROM inet_test").Scan(&stringResult); err != nil {
+ t.Fatalf("select for string from inet_test 1 failed: %v", err)
+ }
+ if stringResult != stringIp {
+ t.Errorf("Expected %s, was %s", stringIp, stringResult)
+ }
+
+ var ipResult net.IP
+ if err := session.Query("SELECT ip FROM inet_test").Scan(&ipResult); err != nil {
+ t.Fatalf("select for net.IP from inet_test 1 failed: %v", err)
+ }
+ if ipResult.String() != stringIp {
+ t.Errorf("Expected %s, was %s", stringIp, ipResult.String())
+ }
+
+ if err := session.Query(`DELETE FROM inet_test WHERE ip = ?`, stringIp).Exec(); err != nil {
+ t.Fatal("delete inet table:", err)
+ }
+
+ netIp := net.ParseIP("222.43.54.65")
+ if err := session.Query(`INSERT INTO inet_test (ip,name) VALUES (?,?)`, netIp, "Test IP 2").Exec(); err != nil {
+ t.Fatal("insert netIp inet:", err)
+ }
+
+ if err := session.Query("SELECT ip FROM inet_test").Scan(&stringResult); err != nil {
+ t.Fatalf("select for string from inet_test 2 failed: %v", err)
+ }
+ if stringResult != netIp.String() {
+ t.Errorf("Expected %s, was %s", netIp.String(), stringResult)
+ }
+ if err := session.Query("SELECT ip FROM inet_test").Scan(&ipResult); err != nil {
+ t.Fatalf("select for net.IP from inet_test 2 failed: %v", err)
+ }
+ if ipResult.String() != netIp.String() {
+ t.Errorf("Expected %s, was %s", netIp.String(), ipResult.String())
+ }
+
+}
+
+func TestVarint(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, "CREATE TABLE gocql_test.varint_test (id varchar, test varint, test2 varint, primary key (id))"); err != nil {
+ t.Fatalf("failed to create table with error '%v'", err)
+ }
+
+ if err := session.Query(`INSERT INTO varint_test (id, test) VALUES (?, ?)`, "id", 0).Exec(); err != nil {
+ t.Fatalf("insert varint: %v", err)
+ }
+
+ var result int
+ if err := session.Query("SELECT test FROM varint_test").Scan(&result); err != nil {
+ t.Fatalf("select from varint_test failed: %v", err)
+ }
+
+ if result != 0 {
+ t.Errorf("Expected 0, was %d", result)
+ }
+
+ if err := session.Query(`INSERT INTO varint_test (id, test) VALUES (?, ?)`, "id", -1).Exec(); err != nil {
+ t.Fatalf("insert varint: %v", err)
+ }
+
+ if err := session.Query("SELECT test FROM varint_test").Scan(&result); err != nil {
+ t.Fatalf("select from varint_test failed: %v", err)
+ }
+
+ if result != -1 {
+ t.Errorf("Expected -1, was %d", result)
+ }
+
+ if err := session.Query(`INSERT INTO varint_test (id, test) VALUES (?, ?)`, "id", int64(math.MaxInt32)+1).Exec(); err != nil {
+ t.Fatalf("insert varint: %v", err)
+ }
+
+ var result64 int64
+ if err := session.Query("SELECT test FROM varint_test").Scan(&result64); err != nil {
+ t.Fatalf("select from varint_test failed: %v", err)
+ }
+
+ if result64 != int64(math.MaxInt32)+1 {
+ t.Errorf("Expected %d, was %d", int64(math.MaxInt32)+1, result64)
+ }
+
+ biggie := new(big.Int)
+ biggie.SetString("36893488147419103232", 10) // > 2**64
+ if err := session.Query(`INSERT INTO varint_test (id, test) VALUES (?, ?)`, "id", biggie).Exec(); err != nil {
+ t.Fatalf("insert varint: %v", err)
+ }
+
+ resultBig := new(big.Int)
+ if err := session.Query("SELECT test FROM varint_test").Scan(resultBig); err != nil {
+ t.Fatalf("select from varint_test failed: %v", err)
+ }
+
+ if resultBig.String() != biggie.String() {
+ t.Errorf("Expected %s, was %s", biggie.String(), resultBig.String())
+ }
+
+ err := session.Query("SELECT test FROM varint_test").Scan(&result64)
+ if err == nil || strings.Index(err.Error(), "out of range") == -1 {
+ t.Errorf("expected out of range error since value is too big for int64")
+ }
+
+ // value not set in cassandra, leave bind variable empty
+ resultBig = new(big.Int)
+ if err := session.Query("SELECT test2 FROM varint_test").Scan(resultBig); err != nil {
+ t.Fatalf("select from varint_test failed: %v", err)
+ }
+
+ if resultBig.Int64() != 0 {
+ t.Errorf("Expected 0, was %s", resultBig.String())
+ }
+
+ // can use double pointer to explicitly detect value is not set in cassandra
+ if err := session.Query("SELECT test2 FROM varint_test").Scan(&resultBig); err != nil {
+ t.Fatalf("select from varint_test failed: %v", err)
+ }
+
+ if resultBig != nil {
+ t.Errorf("Expected %v, was %v", nil, *resultBig)
+ }
+}
+
+//TestQueryStats confirms that the stats are returning valid data. Accuracy may be questionable.
+func TestQueryStats(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+ qry := session.Query("SELECT * FROM system.peers")
+ if err := qry.Exec(); err != nil {
+ t.Fatalf("query failed. %v", err)
+ } else {
+ if qry.Attempts() < 1 {
+ t.Fatal("expected at least 1 attempt, but got 0")
+ }
+ if qry.Latency() <= 0 {
+ t.Fatalf("expected latency to be greater than 0, but got %v instead.", qry.Latency())
+ }
+ }
+}
+
+//TestBatchStats confirms that the stats are returning valid data. Accuracy may be questionable.
+func TestBatchStats(t *testing.T) {
+ if *flagProto == 1 {
+ t.Skip("atomic batches not supported. Please use Cassandra >= 2.0")
+ }
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, "CREATE TABLE gocql_test.batchStats (id int, PRIMARY KEY (id))"); err != nil {
+ t.Fatalf("failed to create table with error '%v'", err)
+ }
+
+ b := session.NewBatch(LoggedBatch)
+ b.Query("INSERT INTO batchStats (id) VALUES (?)", 1)
+ b.Query("INSERT INTO batchStats (id) VALUES (?)", 2)
+
+ if err := session.ExecuteBatch(b); err != nil {
+ t.Fatalf("query failed. %v", err)
+ } else {
+ if b.Attempts() < 1 {
+ t.Fatal("expected at least 1 attempt, but got 0")
+ }
+ if b.Latency() <= 0 {
+ t.Fatalf("expected latency to be greater than 0, but got %v instead.", b.Latency())
+ }
+ }
+}
+
+//TestNilInQuery tests to see that a nil value passed to a query is handled by Cassandra
+//TODO validate the nil value by reading back the nil. Need to fix Unmarshalling.
+func TestNilInQuery(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, "CREATE TABLE gocql_test.testNilInsert (id int, count int, PRIMARY KEY (id))"); err != nil {
+ t.Fatalf("failed to create table with error '%v'", err)
+ }
+ if err := session.Query("INSERT INTO testNilInsert (id,count) VALUES (?,?)", 1, nil).Exec(); err != nil {
+ t.Fatalf("failed to insert with err: %v", err)
+ }
+
+ var id int
+
+ if err := session.Query("SELECT id FROM testNilInsert").Scan(&id); err != nil {
+ t.Fatalf("failed to select with err: %v", err)
+ } else if id != 1 {
+ t.Fatalf("expected id to be 1, got %v", id)
+ }
+}
+
+// Don't initialize time.Time bind variable if cassandra timestamp column is empty
+func TestEmptyTimestamp(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, "CREATE TABLE gocql_test.test_empty_timestamp (id int, time timestamp, num int, PRIMARY KEY (id))"); err != nil {
+ t.Fatalf("failed to create table with error '%v'", err)
+ }
+
+ if err := session.Query("INSERT INTO test_empty_timestamp (id, num) VALUES (?,?)", 1, 561).Exec(); err != nil {
+ t.Fatalf("failed to insert with err: %v", err)
+ }
+
+ var timeVal time.Time
+
+ if err := session.Query("SELECT time FROM test_empty_timestamp where id = ?", 1).Scan(&timeVal); err != nil {
+ t.Fatalf("failed to select with err: %v", err)
+ }
+
+ if !timeVal.IsZero() {
+ t.Errorf("time.Time bind variable should still be empty (was %s)", timeVal)
+ }
+}
+
+// Integration test of just querying for data from the system.schema_keyspace table
+func TestGetKeyspaceMetadata(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ keyspaceMetadata, err := getKeyspaceMetadata(session, "gocql_test")
+ if err != nil {
+ t.Fatalf("failed to query the keyspace metadata with err: %v", err)
+ }
+ if keyspaceMetadata == nil {
+ t.Fatal("failed to query the keyspace metadata, nil returned")
+ }
+ if keyspaceMetadata.Name != "gocql_test" {
+ t.Errorf("Expected keyspace name to be 'gocql_test' but was '%s'", keyspaceMetadata.Name)
+ }
+ if keyspaceMetadata.StrategyClass != "org.apache.cassandra.locator.SimpleStrategy" {
+ t.Errorf("Expected replication strategy class to be 'org.apache.cassandra.locator.SimpleStrategy' but was '%s'", keyspaceMetadata.StrategyClass)
+ }
+ if keyspaceMetadata.StrategyOptions == nil {
+ t.Error("Expected replication strategy options map but was nil")
+ }
+ rfStr, ok := keyspaceMetadata.StrategyOptions["replication_factor"]
+ if !ok {
+ t.Fatalf("Expected strategy option 'replication_factor' but was not found in %v", keyspaceMetadata.StrategyOptions)
+ }
+ rfInt, err := strconv.Atoi(rfStr.(string))
+ if err != nil {
+ t.Fatalf("Error converting string to int with err: %v", err)
+ }
+ if rfInt != *flagRF {
+ t.Errorf("Expected replication factor to be %d but was %d", *flagRF, rfInt)
+ }
+}
+
+// Integration test of just querying for data from the system.schema_columnfamilies table
+func TestGetTableMetadata(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, "CREATE TABLE gocql_test.test_table_metadata (first_id int, second_id int, third_id int, PRIMARY KEY (first_id, second_id))"); err != nil {
+ t.Fatalf("failed to create table with error '%v'", err)
+ }
+
+ tables, err := getTableMetadata(session, "gocql_test")
+ if err != nil {
+ t.Fatalf("failed to query the table metadata with err: %v", err)
+ }
+ if tables == nil {
+ t.Fatal("failed to query the table metadata, nil returned")
+ }
+
+ var testTable *TableMetadata
+
+ // verify all tables have minimum expected data
+ for i := range tables {
+ table := &tables[i]
+
+ if table.Name == "" {
+ t.Errorf("Expected table name to be set, but it was empty: index=%d metadata=%+v", i, table)
+ }
+ if table.Keyspace != "gocql_test" {
+ t.Errorf("Expected keyspace for '%s' table metadata to be 'gocql_test' but was '%s'", table.Name, table.Keyspace)
+ }
+ if table.KeyValidator == "" {
+ t.Errorf("Expected key validator to be set for table %s", table.Name)
+ }
+ if table.Comparator == "" {
+ t.Errorf("Expected comparator to be set for table %s", table.Name)
+ }
+ if table.DefaultValidator == "" {
+ t.Errorf("Expected default validator to be set for table %s", table.Name)
+ }
+
+ // these fields are not set until the metadata is compiled
+ if table.PartitionKey != nil {
+ t.Errorf("Did not expect partition key for table %s", table.Name)
+ }
+ if table.ClusteringColumns != nil {
+ t.Errorf("Did not expect clustering columns for table %s", table.Name)
+ }
+ if table.Columns != nil {
+ t.Errorf("Did not expect columns for table %s", table.Name)
+ }
+
+ // for the next part of the test after this loop, find the metadata for the test table
+ if table.Name == "test_table_metadata" {
+ testTable = table
+ }
+ }
+
+ // verify actual values on the test tables
+ if testTable == nil {
+ t.Fatal("Expected table metadata for name 'test_table_metadata'")
+ }
+ if testTable.KeyValidator != "org.apache.cassandra.db.marshal.Int32Type" {
+ t.Errorf("Expected test_table_metadata key validator to be 'org.apache.cassandra.db.marshal.Int32Type' but was '%s'", testTable.KeyValidator)
+ }
+ if testTable.Comparator != "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.UTF8Type)" {
+ t.Errorf("Expected test_table_metadata comparator to be 'org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.UTF8Type)' but was '%s'", testTable.Comparator)
+ }
+ if testTable.DefaultValidator != "org.apache.cassandra.db.marshal.BytesType" {
+ t.Errorf("Expected test_table_metadata default validator to be 'org.apache.cassandra.db.marshal.BytesType' but was '%s'", testTable.DefaultValidator)
+ }
+ if *flagProto < protoVersion4 {
+ expectedKeyAliases := []string{"first_id"}
+ if !reflect.DeepEqual(testTable.KeyAliases, expectedKeyAliases) {
+ t.Errorf("Expected key aliases %v but was %v", expectedKeyAliases, testTable.KeyAliases)
+ }
+ expectedColumnAliases := []string{"second_id"}
+ if !reflect.DeepEqual(testTable.ColumnAliases, expectedColumnAliases) {
+ t.Errorf("Expected column aliases %v but was %v", expectedColumnAliases, testTable.ColumnAliases)
+ }
+ }
+ if testTable.ValueAlias != "" {
+ t.Errorf("Expected value alias '' but was '%s'", testTable.ValueAlias)
+ }
+}
+
+// Integration test of just querying for data from the system.schema_columns table
+func TestGetColumnMetadata(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, "CREATE TABLE gocql_test.test_column_metadata (first_id int, second_id int, third_id int, PRIMARY KEY (first_id, second_id))"); err != nil {
+ t.Fatalf("failed to create table with error '%v'", err)
+ }
+
+ if err := session.Query("CREATE INDEX index_column_metadata ON test_column_metadata ( third_id )").Exec(); err != nil {
+ t.Fatalf("failed to create index with err: %v", err)
+ }
+
+ columns, err := getColumnMetadata(session, "gocql_test")
+ if err != nil {
+ t.Fatalf("failed to query column metadata with err: %v", err)
+ }
+ if columns == nil {
+ t.Fatal("failed to query column metadata, nil returned")
+ }
+
+ testColumns := map[string]*ColumnMetadata{}
+
+ // verify actual values on the test columns
+ for i := range columns {
+ column := &columns[i]
+
+ if column.Name == "" {
+ t.Errorf("Expected column name to be set, but it was empty: index=%d metadata=%+v", i, column)
+ }
+ if column.Table == "" {
+ t.Errorf("Expected column %s table name to be set, but it was empty", column.Name)
+ }
+ if column.Keyspace != "gocql_test" {
+ t.Errorf("Expected column %s keyspace name to be 'gocql_test', but it was '%s'", column.Name, column.Keyspace)
+ }
+ if column.Kind == "" {
+ t.Errorf("Expected column %s kind to be set, but it was empty", column.Name)
+ }
+ if session.cfg.ProtoVersion == 1 && column.Kind != "regular" {
+ t.Errorf("Expected column %s kind to be set to 'regular' for proto V1 but it was '%s'", column.Name, column.Kind)
+ }
+ if column.Validator == "" {
+ t.Errorf("Expected column %s validator to be set, but it was empty", column.Name)
+ }
+
+ // find the test table columns for the next step after this loop
+ if column.Table == "test_column_metadata" {
+ testColumns[column.Name] = column
+ }
+ }
+
+ if *flagProto == 1 {
+ // V1 proto only returns "regular columns"
+ if len(testColumns) != 1 {
+ t.Errorf("Expected 1 test column but there were %d", len(testColumns))
+ }
+ thirdID, found := testColumns["third_id"]
+ if !found {
+ t.Fatalf("Expected to find column 'third_id' metadata but there was only %v", testColumns)
+ }
+
+ if thirdID.Kind != REGULAR {
+ t.Errorf("Expected %s column kind to be '%s' but it was '%s'", thirdID.Name, REGULAR, thirdID.Kind)
+ }
+
+ if thirdID.Index.Name != "index_column_metadata" {
+ t.Errorf("Expected %s column index name to be 'index_column_metadata' but it was '%s'", thirdID.Name, thirdID.Index.Name)
+ }
+ } else {
+ if len(testColumns) != 3 {
+ t.Errorf("Expected 3 test columns but there were %d", len(testColumns))
+ }
+ firstID, found := testColumns["first_id"]
+ if !found {
+ t.Fatalf("Expected to find column 'first_id' metadata but there was only %v", testColumns)
+ }
+ secondID, found := testColumns["second_id"]
+ if !found {
+ t.Fatalf("Expected to find column 'second_id' metadata but there was only %v", testColumns)
+ }
+ thirdID, found := testColumns["third_id"]
+ if !found {
+ t.Fatalf("Expected to find column 'third_id' metadata but there was only %v", testColumns)
+ }
+
+ if firstID.Kind != PARTITION_KEY {
+ t.Errorf("Expected %s column kind to be '%s' but it was '%s'", firstID.Name, PARTITION_KEY, firstID.Kind)
+ }
+ if secondID.Kind != CLUSTERING_KEY {
+ t.Errorf("Expected %s column kind to be '%s' but it was '%s'", secondID.Name, CLUSTERING_KEY, secondID.Kind)
+ }
+ if thirdID.Kind != REGULAR {
+ t.Errorf("Expected %s column kind to be '%s' but it was '%s'", thirdID.Name, REGULAR, thirdID.Kind)
+ }
+
+ if thirdID.Index.Name != "index_column_metadata" {
+ t.Errorf("Expected %s column index name to be 'index_column_metadata' but it was '%s'", thirdID.Name, thirdID.Index.Name)
+ }
+ }
+}
+
+// Integration test of querying and composition the keyspace metadata
+func TestKeyspaceMetadata(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, "CREATE TABLE gocql_test.test_metadata (first_id int, second_id int, third_id int, PRIMARY KEY (first_id, second_id))"); err != nil {
+ t.Fatalf("failed to create table with error '%v'", err)
+ }
+
+ if err := session.Query("CREATE INDEX index_metadata ON test_metadata ( third_id )").Exec(); err != nil {
+ t.Fatalf("failed to create index with err: %v", err)
+ }
+
+ keyspaceMetadata, err := session.KeyspaceMetadata("gocql_test")
+ if err != nil {
+ t.Fatalf("failed to query keyspace metadata with err: %v", err)
+ }
+ if keyspaceMetadata == nil {
+ t.Fatal("expected the keyspace metadata to not be nil, but it was nil")
+ }
+ if keyspaceMetadata.Name != session.cfg.Keyspace {
+ t.Fatalf("Expected the keyspace name to be %s but was %s", session.cfg.Keyspace, keyspaceMetadata.Name)
+ }
+ if len(keyspaceMetadata.Tables) == 0 {
+ t.Errorf("Expected tables but there were none")
+ }
+
+ tableMetadata, found := keyspaceMetadata.Tables["test_metadata"]
+ if !found {
+ t.Fatalf("failed to find the test_metadata table metadata")
+ }
+
+ if len(tableMetadata.PartitionKey) != 1 {
+ t.Errorf("expected partition key length of 1, but was %d", len(tableMetadata.PartitionKey))
+ }
+ for i, column := range tableMetadata.PartitionKey {
+ if column == nil {
+ t.Errorf("partition key column metadata at index %d was nil", i)
+ }
+ }
+ if tableMetadata.PartitionKey[0].Name != "first_id" {
+ t.Errorf("Expected the first partition key column to be 'first_id' but was '%s'", tableMetadata.PartitionKey[0].Name)
+ }
+ if len(tableMetadata.ClusteringColumns) != 1 {
+ t.Fatalf("expected clustering columns length of 1, but was %d", len(tableMetadata.ClusteringColumns))
+ }
+ for i, column := range tableMetadata.ClusteringColumns {
+ if column == nil {
+ t.Fatalf("clustering column metadata at index %d was nil", i)
+ }
+ }
+ if tableMetadata.ClusteringColumns[0].Name != "second_id" {
+ t.Errorf("Expected the first clustering column to be 'second_id' but was '%s'", tableMetadata.ClusteringColumns[0].Name)
+ }
+ thirdColumn, found := tableMetadata.Columns["third_id"]
+ if !found {
+ t.Fatalf("Expected a column definition for 'third_id'")
+ }
+ if thirdColumn.Index.Name != "index_metadata" {
+ t.Errorf("Expected column index named 'index_metadata' but was '%s'", thirdColumn.Index.Name)
+ }
+}
+
+// Integration test of the routing key calculation
+func TestRoutingKey(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, "CREATE TABLE gocql_test.test_single_routing_key (first_id int, second_id int, PRIMARY KEY (first_id, second_id))"); err != nil {
+ t.Fatalf("failed to create table with error '%v'", err)
+ }
+ if err := createTable(session, "CREATE TABLE gocql_test.test_composite_routing_key (first_id int, second_id int, PRIMARY KEY ((first_id, second_id)))"); err != nil {
+ t.Fatalf("failed to create table with error '%v'", err)
+ }
+
+ routingKeyInfo, err := session.routingKeyInfo("SELECT * FROM test_single_routing_key WHERE second_id=? AND first_id=?")
+ if err != nil {
+ t.Fatalf("failed to get routing key info due to error: %v", err)
+ }
+ if routingKeyInfo == nil {
+ t.Fatal("Expected routing key info, but was nil")
+ }
+ if len(routingKeyInfo.indexes) != 1 {
+ t.Fatalf("Expected routing key indexes length to be 1 but was %d", len(routingKeyInfo.indexes))
+ }
+ if routingKeyInfo.indexes[0] != 1 {
+ t.Errorf("Expected routing key index[0] to be 1 but was %d", routingKeyInfo.indexes[0])
+ }
+ if len(routingKeyInfo.types) != 1 {
+ t.Fatalf("Expected routing key types length to be 1 but was %d", len(routingKeyInfo.types))
+ }
+ if routingKeyInfo.types[0] == nil {
+ t.Fatal("Expected routing key types[0] to be non-nil")
+ }
+ if routingKeyInfo.types[0].Type() != TypeInt {
+ t.Fatalf("Expected routing key types[0].Type to be %v but was %v", TypeInt, routingKeyInfo.types[0].Type())
+ }
+
+ // verify the cache is working
+ routingKeyInfo, err = session.routingKeyInfo("SELECT * FROM test_single_routing_key WHERE second_id=? AND first_id=?")
+ if err != nil {
+ t.Fatalf("failed to get routing key info due to error: %v", err)
+ }
+ if len(routingKeyInfo.indexes) != 1 {
+ t.Fatalf("Expected routing key indexes length to be 1 but was %d", len(routingKeyInfo.indexes))
+ }
+ if routingKeyInfo.indexes[0] != 1 {
+ t.Errorf("Expected routing key index[0] to be 1 but was %d", routingKeyInfo.indexes[0])
+ }
+ if len(routingKeyInfo.types) != 1 {
+ t.Fatalf("Expected routing key types length to be 1 but was %d", len(routingKeyInfo.types))
+ }
+ if routingKeyInfo.types[0] == nil {
+ t.Fatal("Expected routing key types[0] to be non-nil")
+ }
+ if routingKeyInfo.types[0].Type() != TypeInt {
+ t.Fatalf("Expected routing key types[0] to be %v but was %v", TypeInt, routingKeyInfo.types[0].Type())
+ }
+ cacheSize := session.routingKeyInfoCache.lru.Len()
+ if cacheSize != 1 {
+ t.Errorf("Expected cache size to be 1 but was %d", cacheSize)
+ }
+
+ query := session.Query("SELECT * FROM test_single_routing_key WHERE second_id=? AND first_id=?", 1, 2)
+ routingKey, err := query.GetRoutingKey()
+ if err != nil {
+ t.Fatalf("Failed to get routing key due to error: %v", err)
+ }
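+	// With a single-column partition key the routing key is simply the serialized
+	// partition key value: first_id is bound to 2, a 4-byte big-endian int.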
+ expectedRoutingKey := []byte{0, 0, 0, 2}
+ if !reflect.DeepEqual(expectedRoutingKey, routingKey) {
+ t.Errorf("Expected routing key %v but was %v", expectedRoutingKey, routingKey)
+ }
+
+ routingKeyInfo, err = session.routingKeyInfo("SELECT * FROM test_composite_routing_key WHERE second_id=? AND first_id=?")
+ if err != nil {
+ t.Fatalf("failed to get routing key info due to error: %v", err)
+ }
+ if routingKeyInfo == nil {
+ t.Fatal("Expected routing key info, but was nil")
+ }
+ if len(routingKeyInfo.indexes) != 2 {
+ t.Fatalf("Expected routing key indexes length to be 2 but was %d", len(routingKeyInfo.indexes))
+ }
+ if routingKeyInfo.indexes[0] != 1 {
+ t.Errorf("Expected routing key index[0] to be 1 but was %d", routingKeyInfo.indexes[0])
+ }
+ if routingKeyInfo.indexes[1] != 0 {
+ t.Errorf("Expected routing key index[1] to be 0 but was %d", routingKeyInfo.indexes[1])
+ }
+ if len(routingKeyInfo.types) != 2 {
+		t.Fatalf("Expected routing key types length to be 2 but was %d", len(routingKeyInfo.types))
+ }
+ if routingKeyInfo.types[0] == nil {
+ t.Fatal("Expected routing key types[0] to be non-nil")
+ }
+ if routingKeyInfo.types[0].Type() != TypeInt {
+ t.Fatalf("Expected routing key types[0] to be %v but was %v", TypeInt, routingKeyInfo.types[0].Type())
+ }
+ if routingKeyInfo.types[1] == nil {
+ t.Fatal("Expected routing key types[1] to be non-nil")
+ }
+ if routingKeyInfo.types[1].Type() != TypeInt {
+		t.Fatalf("Expected routing key types[1] to be %v but was %v", TypeInt, routingKeyInfo.types[1].Type())
+ }
+
+ query = session.Query("SELECT * FROM test_composite_routing_key WHERE second_id=? AND first_id=?", 1, 2)
+ routingKey, err = query.GetRoutingKey()
+ if err != nil {
+ t.Fatalf("Failed to get routing key due to error: %v", err)
+ }
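+	// A composite partition key is encoded as a sequence of components, each as a
+	// 2-byte big-endian length, the serialized value, and a trailing 0x00 byte;
+	// per the bytes below, first_id=2 and second_id=1, both 4-byte ints.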
+ expectedRoutingKey = []byte{0, 4, 0, 0, 0, 2, 0, 0, 4, 0, 0, 0, 1, 0}
+ if !reflect.DeepEqual(expectedRoutingKey, routingKey) {
+ t.Errorf("Expected routing key %v but was %v", expectedRoutingKey, routingKey)
+ }
+
+ // verify the cache is working
+ cacheSize = session.routingKeyInfoCache.lru.Len()
+ if cacheSize != 2 {
+ t.Errorf("Expected cache size to be 2 but was %d", cacheSize)
+ }
+}
+
+// Integration test of the token-aware policy-based connection pool
+func TestTokenAwareConnPool(t *testing.T) {
+ cluster := createCluster()
+ cluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy())
+ cluster.DiscoverHosts = true
+
+ session := createSessionFromCluster(cluster, t)
+ defer session.Close()
+
+ if session.pool.Size() != cluster.NumConns*len(cluster.Hosts) {
+ t.Errorf("Expected pool size %d but was %d", cluster.NumConns*len(cluster.Hosts), session.pool.Size())
+ }
+
+ if err := createTable(session, "CREATE TABLE gocql_test.test_token_aware (id int, data text, PRIMARY KEY (id))"); err != nil {
+ t.Fatalf("failed to create test_token_aware table with err: %v", err)
+ }
+ query := session.Query("INSERT INTO test_token_aware (id, data) VALUES (?,?)", 42, "8 * 6 =")
+ if err := query.Exec(); err != nil {
+ t.Fatalf("failed to insert with err: %v", err)
+ }
+
+ query = session.Query("SELECT data FROM test_token_aware where id = ?", 42).Consistency(One)
+ var data string
+ if err := query.Scan(&data); err != nil {
+ t.Error(err)
+ }
+
+ // TODO add verification that the query went to the correct host
+}
+
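+// frameWriterFunc adapts an ordinary function to the frameWriter interface,
+// in the same spirit as http.HandlerFunc.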
+type frameWriterFunc func(framer *framer, streamID int) error
+
+func (f frameWriterFunc) writeFrame(framer *framer, streamID int) error {
+ return f(framer, streamID)
+}
+
+func TestStream0(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ var conn *Conn
+ for i := 0; i < 5; i++ {
+ if conn != nil {
+ break
+ }
+
+ _, conn = session.pool.Pick(nil)
+ }
+
+ if conn == nil {
+ t.Fatal("no connections available in the pool")
+ }
+
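+	// Deliberately write out a malformed frame (the version byte is clobbered
+	// with 0xFF below) so that, per the assertions further down, the server is
+	// expected to answer with an error on stream 0.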
+ writer := frameWriterFunc(func(f *framer, streamID int) error {
+ if streamID == 0 {
+ t.Fatal("should not use stream 0 for requests")
+ }
+ f.writeHeader(0, opError, streamID)
+ f.writeString("i am a bad frame")
+ f.wbuf[0] = 0xFF
+ return f.finishWrite()
+ })
+
+ const expErr = "gocql: error on stream 0:"
+ // need to write out an invalid frame, which we need a connection to do
+ frame, err := conn.exec(writer, nil)
+ if err == nil {
+ t.Fatal("expected to get an error on stream 0")
+ } else if !strings.HasPrefix(err.Error(), expErr) {
+ t.Fatalf("expected to get error prefix %q got %q", expErr, err.Error())
+ } else if frame != nil {
+ t.Fatalf("expected to get nil frame got %+v", frame)
+ }
+}
+
+func TestNegativeStream(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ var conn *Conn
+ for i := 0; i < 5; i++ {
+ if conn != nil {
+ break
+ }
+
+ _, conn = session.pool.Pick(nil)
+ }
+
+ if conn == nil {
+ t.Fatal("no connections available in the pool")
+ }
+
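+	// Force an invalid (negative) stream id into the frame header; the
+	// connection is expected to surface this as an error from exec.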
+ const stream = -50
+ writer := frameWriterFunc(func(f *framer, streamID int) error {
+ f.writeHeader(0, opOptions, stream)
+ return f.finishWrite()
+ })
+
+ frame, err := conn.exec(writer, nil)
+ if err == nil {
+ t.Fatalf("expected to get an error on stream %d", stream)
+ } else if frame != nil {
+ t.Fatalf("expected to get nil frame got %+v", frame)
+ }
+}
+
+func TestManualQueryPaging(t *testing.T) {
+ const rowsToInsert = 5
+
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, "CREATE TABLE gocql_test.testManualPaging (id int, count int, PRIMARY KEY (id))"); err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < rowsToInsert; i++ {
+ err := session.Query("INSERT INTO testManualPaging(id, count) VALUES(?, ?)", i, i*i).Exec()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ // disable auto paging, 1 page per iteration
+ query := session.Query("SELECT id, count FROM testManualPaging").PageState(nil).PageSize(2)
+ var id, count, fetched int
+
+ iter := query.Iter()
+	// NOTE: this isn't very indicative of how it should be used; the idea is that
+	// the page state is returned to some client who will send it back to manually
+	// page through the results.
+ for {
+ for iter.Scan(&id, &count) {
+ if count != (id * id) {
+ t.Fatalf("got wrong value from iteration: got %d expected %d", count, id*id)
+ }
+
+ fetched++
+ }
+
+ if len(iter.PageState()) > 0 {
+ // more pages
+ iter = query.PageState(iter.PageState()).Iter()
+ } else {
+ break
+ }
+ }
+
+ if err := iter.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ if fetched != rowsToInsert {
+		t.Fatalf("expected to fetch %d rows got %d", rowsToInsert, fetched)
+ }
+}
+
+func TestLexicalUUIDType(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, `CREATE TABLE gocql_test.test_lexical_uuid (
+ key varchar,
+ column1 'org.apache.cassandra.db.marshal.LexicalUUIDType',
+ value int,
+ PRIMARY KEY (key, column1)
+ )`); err != nil {
+ t.Fatal("create:", err)
+ }
+
+ key := TimeUUID().String()
+ column1 := TimeUUID()
+
+ err := session.Query("INSERT INTO test_lexical_uuid(key, column1, value) VALUES(?, ?, ?)", key, column1, 55).Exec()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var gotUUID UUID
+ if err := session.Query("SELECT column1 from test_lexical_uuid where key = ? AND column1 = ?", key, column1).Scan(&gotUUID); err != nil {
+ t.Fatal(err)
+ }
+
+ if gotUUID != column1 {
+ t.Errorf("got %s, expected %s", gotUUID, column1)
+ }
+}
+
+// Issue 475
+func TestSessionBindRoutingKey(t *testing.T) {
+ cluster := createCluster()
+ cluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy())
+
+ session := createSessionFromCluster(cluster, t)
+ defer session.Close()
+
+ if err := createTable(session, `CREATE TABLE gocql_test.test_bind_routing_key (
+ key varchar,
+ value int,
+ PRIMARY KEY (key)
+ )`); err != nil {
+
+ t.Fatal(err)
+ }
+
+ const (
+ key = "routing-key"
+ value = 5
+ )
+
+ fn := func(info *QueryInfo) ([]interface{}, error) {
+ return []interface{}{key, value}, nil
+ }
+
+ q := session.Bind("INSERT INTO test_bind_routing_key(key, value) VALUES(?, ?)", fn)
+ if err := q.Exec(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestJSONSupport(t *testing.T) {
+ if *flagProto < 4 {
+ t.Skip("skipping JSON support on proto < 4")
+ }
+
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, `CREATE TABLE gocql_test.test_json (
+ id text PRIMARY KEY,
+ age int,
+ state text
+ )`); err != nil {
+
+ t.Fatal(err)
+ }
+
+ err := session.Query("INSERT INTO test_json JSON ?", `{"id": "user123", "age": 42, "state": "TX"}`).Exec()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var (
+ id string
+ age int
+ state string
+ )
+
+ err = session.Query("SELECT id, age, state FROM test_json WHERE id = ?", "user123").Scan(&id, &age, &state)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if id != "user123" {
+ t.Errorf("got id %q expected %q", id, "user123")
+ }
+ if age != 42 {
+ t.Errorf("got age %d expected %d", age, 42)
+ }
+ if state != "TX" {
+ t.Errorf("got state %q expected %q", state, "TX")
+ }
+}
+
+func TestUDF(t *testing.T) {
+ if *flagProto < 4 {
+ t.Skip("skipping UDF support on proto < 4")
+ }
+
+ session := createSession(t)
+ defer session.Close()
+
+	const query = `CREATE OR REPLACE FUNCTION uniq(state set<text>, val text)
+	  CALLED ON NULL INPUT RETURNS set<text> LANGUAGE java
+	  AS 'state.add(val); return state;'`
+
+ err := session.Query(query).Exec()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestDiscoverViaProxy(t *testing.T) {
+	// This (complicated) test verifies that when the driver is given an initial host
+	// that is in fact a proxy, it discovers the rest of the ring behind the proxy
+	// and does not store the proxy's address as a host in its connection pool.
+ // See https://github.com/gocql/gocql/issues/481
+ proxy, err := net.Listen("tcp", ":0")
+ if err != nil {
+ t.Fatalf("unable to create proxy listener: %v", err)
+ }
+
+ var (
+ wg sync.WaitGroup
+ mu sync.Mutex
+ proxyConns []net.Conn
+ closed bool
+ )
+
+ go func(wg *sync.WaitGroup) {
+ cassandraAddr := JoinHostPort(clusterHosts[0], 9042)
+
+ cassandra := func() (net.Conn, error) {
+ return net.Dial("tcp", cassandraAddr)
+ }
+
+ proxyFn := func(wg *sync.WaitGroup, from, to net.Conn) {
+ defer wg.Done()
+
+ _, err := io.Copy(to, from)
+ if err != nil {
+ mu.Lock()
+ if !closed {
+ t.Error(err)
+ }
+ mu.Unlock()
+ }
+ }
+
+		// handle dials cassandra and then proxies requests and responses. It waits
+ // for both the read and write side of the TCP connection to close before
+ // returning.
+ handle := func(conn net.Conn) error {
+ defer conn.Close()
+
+ cass, err := cassandra()
+ if err != nil {
+ return err
+ }
+
+ mu.Lock()
+ proxyConns = append(proxyConns, cass)
+ mu.Unlock()
+
+ defer cass.Close()
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go proxyFn(&wg, conn, cass)
+
+ wg.Add(1)
+ go proxyFn(&wg, cass, conn)
+
+ wg.Wait()
+
+ return nil
+ }
+
+ for {
+			// the proxy just accepts connections and then forwards them to cassandra;
+			// it runs until it is closed.
+ conn, err := proxy.Accept()
+ if err != nil {
+ mu.Lock()
+ if !closed {
+ t.Error(err)
+ }
+ mu.Unlock()
+ return
+ }
+
+ mu.Lock()
+ proxyConns = append(proxyConns, conn)
+ mu.Unlock()
+
+ wg.Add(1)
+ go func(conn net.Conn) {
+ defer wg.Done()
+
+ if err := handle(conn); err != nil {
+ t.Error(err)
+ return
+ }
+ }(conn)
+ }
+ }(&wg)
+
+ defer wg.Wait()
+
+ proxyAddr := proxy.Addr().String()
+
+ cluster := createCluster()
+ cluster.DiscoverHosts = true
+ cluster.NumConns = 1
+ cluster.Discovery.Sleep = 100 * time.Millisecond
+ // initial host is the proxy address
+ cluster.Hosts = []string{proxyAddr}
+
+ session := createSessionFromCluster(cluster, t)
+ defer session.Close()
+
+ if !session.hostSource.localHasRpcAddr {
+ t.Skip("Target cluster does not have rpc_address in system.local.")
+ goto close
+ }
+
+	// we shouldn't need this, but sleep briefly to be safe
+ time.Sleep(1 * time.Second)
+
+ session.pool.mu.RLock()
+ for _, host := range clusterHosts {
+ if _, ok := session.pool.hostConnPools[host]; !ok {
+ t.Errorf("missing host in pool after discovery: %q", host)
+ }
+ }
+ session.pool.mu.RUnlock()
+
+close:
+ if err := proxy.Close(); err != nil {
+ t.Log(err)
+ }
+
+ mu.Lock()
+ closed = true
+ for _, conn := range proxyConns {
+ if err := conn.Close(); err != nil {
+ t.Log(err)
+ }
+ }
+ mu.Unlock()
+}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/compressor_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/compressor_test.go
new file mode 100644
index 000000000..cbf16a468
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/compressor_test.go
@@ -0,0 +1,40 @@
+// +build all unit
+
+package gocql
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/golang/snappy"
+)
+
+func TestSnappyCompressor(t *testing.T) {
+ c := SnappyCompressor{}
+ if c.Name() != "snappy" {
+ t.Fatalf("expected name to be 'snappy', got %v", c.Name())
+ }
+
+ str := "My Test String"
+ //Test Encoding
+ expected := snappy.Encode(nil, []byte(str))
+ if res, err := c.Encode([]byte(str)); err != nil {
+ t.Fatalf("failed to encode '%v' with error %v", str, err)
+	} else if !bytes.Equal(expected, res) {
+ t.Fatal("failed to match the expected encoded value with the result encoded value.")
+ }
+
+ val, err := c.Encode([]byte(str))
+ if err != nil {
+ t.Fatalf("failed to encode '%v' with error '%v'", str, err)
+ }
+
+ //Test Decoding
+ if expected, err := snappy.Decode(nil, val); err != nil {
+ t.Fatalf("failed to decode '%v' with error %v", val, err)
+ } else if res, err := c.Decode(val); err != nil {
+ t.Fatalf("failed to decode '%v' with error %v", val, err)
+	} else if !bytes.Equal(expected, res) {
+ t.Fatal("failed to match the expected decoded value with the result decoded value.")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/conn_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/conn_test.go
new file mode 100644
index 000000000..709f7b3e9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/conn_test.go
@@ -0,0 +1,767 @@
+// Copyright (c) 2012 The gocql Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// +build all unit
+
+package gocql
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+const (
+ defaultProto = protoVersion2
+)
+
+func TestJoinHostPort(t *testing.T) {
+ tests := map[string]string{
+ "127.0.0.1:0": JoinHostPort("127.0.0.1", 0),
+ "127.0.0.1:1": JoinHostPort("127.0.0.1:1", 9142),
+ "[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:0": JoinHostPort("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 0),
+ "[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:1": JoinHostPort("[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:1", 9142),
+ }
+ for k, v := range tests {
+ if k != v {
+ t.Fatalf("expected '%v', got '%v'", k, v)
+ }
+ }
+}
+
+func TestSimple(t *testing.T) {
+ srv := NewTestServer(t, defaultProto)
+ defer srv.Stop()
+
+ cluster := NewCluster(srv.Address)
+ cluster.ProtoVersion = int(defaultProto)
+ db, err := cluster.CreateSession()
+ if err != nil {
+ t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
+ }
+
+ if err := db.Query("void").Exec(); err != nil {
+ t.Fatalf("0x%x: %v", defaultProto, err)
+ }
+}
+
+func TestSSLSimple(t *testing.T) {
+ srv := NewSSLTestServer(t, defaultProto)
+ defer srv.Stop()
+
+ db, err := createTestSslCluster(srv.Address, defaultProto, true).CreateSession()
+ if err != nil {
+ t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
+ }
+
+ if err := db.Query("void").Exec(); err != nil {
+ t.Fatalf("0x%x: %v", defaultProto, err)
+ }
+}
+
+func TestSSLSimpleNoClientCert(t *testing.T) {
+ srv := NewSSLTestServer(t, defaultProto)
+ defer srv.Stop()
+
+ db, err := createTestSslCluster(srv.Address, defaultProto, false).CreateSession()
+ if err != nil {
+ t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
+ }
+
+ if err := db.Query("void").Exec(); err != nil {
+ t.Fatalf("0x%x: %v", defaultProto, err)
+ }
+}
+
+func createTestSslCluster(hosts string, proto uint8, useClientCert bool) *ClusterConfig {
+ cluster := NewCluster(hosts)
+ sslOpts := &SslOptions{
+ CaPath: "testdata/pki/ca.crt",
+ EnableHostVerification: false,
+ }
+ if useClientCert {
+ sslOpts.CertPath = "testdata/pki/gocql.crt"
+ sslOpts.KeyPath = "testdata/pki/gocql.key"
+ }
+ cluster.SslOpts = sslOpts
+ cluster.ProtoVersion = int(proto)
+ return cluster
+}
+
+func TestClosed(t *testing.T) {
+ t.Skip("Skipping the execution of TestClosed for now to try to concentrate on more important test failures on Travis")
+
+ srv := NewTestServer(t, defaultProto)
+ defer srv.Stop()
+
+ cluster := NewCluster(srv.Address)
+ cluster.ProtoVersion = int(defaultProto)
+
+	session, err := cluster.CreateSession()
+	if err != nil {
+		t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
+	}
+	defer session.Close()
+
+ if err := session.Query("void").Exec(); err != ErrSessionClosed {
+ t.Fatalf("0x%x: expected %#v, got %#v", defaultProto, ErrSessionClosed, err)
+ }
+}
+
+func newTestSession(addr string, proto uint8) (*Session, error) {
+ cluster := NewCluster(addr)
+ cluster.ProtoVersion = int(proto)
+ return cluster.CreateSession()
+}
+
+func TestTimeout(t *testing.T) {
+
+ srv := NewTestServer(t, defaultProto)
+ defer srv.Stop()
+
+ db, err := newTestSession(srv.Address, defaultProto)
+ if err != nil {
+ t.Fatalf("NewCluster: %v", err)
+ }
+ defer db.Close()
+
+ go func() {
+ <-time.After(2 * time.Second)
+ t.Errorf("no timeout")
+ }()
+
+ if err := db.Query("kill").Exec(); err == nil {
+ t.Errorf("expected error")
+ }
+}
+
+// TestQueryRetry will test to make sure that gocql will execute
+// the exact amount of retry queries designated by the user.
+func TestQueryRetry(t *testing.T) {
+ srv := NewTestServer(t, defaultProto)
+ defer srv.Stop()
+
+ db, err := newTestSession(srv.Address, defaultProto)
+ if err != nil {
+ t.Fatalf("NewCluster: %v", err)
+ }
+ defer db.Close()
+
+ go func() {
+ <-time.After(5 * time.Second)
+		t.Errorf("no timeout")
+ }()
+ rt := &SimpleRetryPolicy{NumRetries: 1}
+
+ qry := db.Query("kill").RetryPolicy(rt)
+ if err := qry.Exec(); err == nil {
+ t.Fatalf("expected error")
+ }
+
+ requests := atomic.LoadInt64(&srv.nKillReq)
+ attempts := qry.Attempts()
+ if requests != int64(attempts) {
+		t.Fatalf("expected requests %v to match query attempts %v", requests, attempts)
+ }
+
+ //Minus 1 from the requests variable since there is the initial query attempt
+ if requests-1 != int64(rt.NumRetries) {
+ t.Fatalf("failed to retry the query %v time(s). Query executed %v times", rt.NumRetries, requests-1)
+ }
+}
+
+func TestConnClosing(t *testing.T) {
+	t.Skip("Skipping until test can be run reliably")
+
+ srv := NewTestServer(t, protoVersion2)
+ defer srv.Stop()
+
+ db, err := NewCluster(srv.Address).CreateSession()
+ if err != nil {
+ t.Fatalf("NewCluster: %v", err)
+ }
+ defer db.Close()
+
+ numConns := db.cfg.NumConns
+ count := db.cfg.NumStreams * numConns
+
+ wg := &sync.WaitGroup{}
+ wg.Add(count)
+ for i := 0; i < count; i++ {
+ go func(wg *sync.WaitGroup) {
+ wg.Done()
+ db.Query("kill").Exec()
+ }(wg)
+ }
+
+ wg.Wait()
+
+ time.Sleep(1 * time.Second) //Sleep so the fillPool can complete.
+ conns := db.pool.Size()
+
+ if conns != numConns {
+ t.Errorf("Expected to have %d connections but have %d", numConns, conns)
+ }
+}
+
+func TestStreams_Protocol1(t *testing.T) {
+ srv := NewTestServer(t, protoVersion1)
+ defer srv.Stop()
+
+ // TODO: these are more like session tests and should instead operate
+ // on a single Conn
+ cluster := NewCluster(srv.Address)
+ cluster.NumConns = 1
+ cluster.ProtoVersion = 1
+
+ db, err := cluster.CreateSession()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer db.Close()
+
+ var wg sync.WaitGroup
+ for i := 1; i < db.cfg.NumStreams; i++ {
+		// here we're just validating that if we send NumStreams requests we get
+		// a response for every stream and that the lengths for the queries are set
+		// correctly.
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ if err := db.Query("void").Exec(); err != nil {
+ t.Error(err)
+ }
+ }()
+ }
+ wg.Wait()
+}
+
+func TestStreams_Protocol2(t *testing.T) {
+ srv := NewTestServer(t, protoVersion2)
+ defer srv.Stop()
+
+ // TODO: these are more like session tests and should instead operate
+ // on a single Conn
+ cluster := NewCluster(srv.Address)
+ cluster.NumConns = 1
+ cluster.ProtoVersion = 2
+
+ db, err := cluster.CreateSession()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer db.Close()
+
+ for i := 1; i < db.cfg.NumStreams; i++ {
+ // the test server processes each conn synchronously
+		// here we're just validating that if we send NumStreams requests we get
+		// a response for every stream and that the lengths for the queries are set
+		// correctly.
+ if err = db.Query("void").Exec(); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestStreams_Protocol3(t *testing.T) {
+ srv := NewTestServer(t, protoVersion3)
+ defer srv.Stop()
+
+ // TODO: these are more like session tests and should instead operate
+ // on a single Conn
+ cluster := NewCluster(srv.Address)
+ cluster.NumConns = 1
+ cluster.ProtoVersion = 3
+
+ db, err := cluster.CreateSession()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer db.Close()
+
+ for i := 1; i < db.cfg.NumStreams; i++ {
+ // the test server processes each conn synchronously
+		// here we're just validating that if we send NumStreams requests we get
+		// a response for every stream and that the lengths for the queries are set
+		// correctly.
+ if err = db.Query("void").Exec(); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkProtocolV3(b *testing.B) {
+ srv := NewTestServer(b, protoVersion3)
+ defer srv.Stop()
+
+ // TODO: these are more like session tests and should instead operate
+ // on a single Conn
+ cluster := NewCluster(srv.Address)
+ cluster.NumConns = 1
+ cluster.ProtoVersion = 3
+
+ db, err := cluster.CreateSession()
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer db.Close()
+
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ if err = db.Query("void").Exec(); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestRoundRobinConnPoolRoundRobin(t *testing.T) {
+ // create 5 test servers
+ servers := make([]*TestServer, 5)
+ addrs := make([]string, len(servers))
+ for n := 0; n < len(servers); n++ {
+ servers[n] = NewTestServer(t, defaultProto)
+ addrs[n] = servers[n].Address
+ defer servers[n].Stop()
+ }
+
+ // create a new cluster using the policy-based round robin conn pool
+ cluster := NewCluster(addrs...)
+ cluster.PoolConfig.HostSelectionPolicy = RoundRobinHostPolicy()
+ cluster.PoolConfig.ConnSelectionPolicy = RoundRobinConnPolicy()
+ cluster.disableControlConn = true
+
+ db, err := cluster.CreateSession()
+ if err != nil {
+ t.Fatalf("failed to create a new session: %v", err)
+ }
+
+ // Sleep to allow the pool to fill
+ time.Sleep(100 * time.Millisecond)
+
+ // run concurrent queries against the pool, server usage should
+ // be even
+ var wg sync.WaitGroup
+ wg.Add(5)
+ for n := 0; n < 5; n++ {
+ go func() {
+ defer wg.Done()
+
+ for j := 0; j < 5; j++ {
+ if err := db.Query("void").Exec(); err != nil {
+ t.Errorf("Query failed with error: %v", err)
+ return
+ }
+ }
+ }()
+ }
+ wg.Wait()
+
+ db.Close()
+
+ // wait for the pool to drain
+ time.Sleep(100 * time.Millisecond)
+ size := db.pool.Size()
+ if size != 0 {
+ t.Errorf("connection pool did not drain, still contains %d connections", size)
+ }
+
+ // verify that server usage is even
+ diff := 0
+ for n := 1; n < len(servers); n++ {
+ d := 0
+ if servers[n].nreq > servers[n-1].nreq {
+ d = int(servers[n].nreq - servers[n-1].nreq)
+ } else {
+ d = int(servers[n-1].nreq - servers[n].nreq)
+ }
+ if d > diff {
+ diff = d
+ }
+ }
+
+ if diff > 0 {
+ t.Fatalf("expected 0 difference in usage but was %d", diff)
+ }
+}
+
+// This tests that the policy connection pool handles SSL correctly
+func TestPolicyConnPoolSSL(t *testing.T) {
+ srv := NewSSLTestServer(t, defaultProto)
+ defer srv.Stop()
+
+ cluster := createTestSslCluster(srv.Address, defaultProto, true)
+ cluster.PoolConfig.HostSelectionPolicy = RoundRobinHostPolicy()
+ cluster.PoolConfig.ConnSelectionPolicy = RoundRobinConnPolicy()
+
+ db, err := cluster.CreateSession()
+ if err != nil {
+		if db != nil {
+			db.Close()
+		}
+ t.Fatalf("failed to create new session: %v", err)
+ }
+
+ if err := db.Query("void").Exec(); err != nil {
+ t.Fatalf("query failed due to error: %v", err)
+ }
+ db.Close()
+
+ // wait for the pool to drain
+ time.Sleep(100 * time.Millisecond)
+ size := db.pool.Size()
+ if size != 0 {
+ t.Fatalf("connection pool did not drain, still contains %d connections", size)
+ }
+}
+
+func TestQueryTimeout(t *testing.T) {
+ srv := NewTestServer(t, defaultProto)
+ defer srv.Stop()
+
+ cluster := NewCluster(srv.Address)
+ // Set the timeout arbitrarily low so that the query hits the timeout in a
+ // timely manner.
+ cluster.Timeout = 1 * time.Millisecond
+
+ db, err := cluster.CreateSession()
+ if err != nil {
+ t.Fatalf("NewCluster: %v", err)
+ }
+ defer db.Close()
+
+ ch := make(chan error, 1)
+
+ go func() {
+ err := db.Query("timeout").Exec()
+ if err != nil {
+ ch <- err
+ return
+ }
+ t.Errorf("err was nil, expected to get a timeout after %v", db.cfg.Timeout)
+ }()
+
+ select {
+ case err := <-ch:
+ if err != ErrTimeoutNoResponse {
+ t.Fatalf("expected to get %v for timeout got %v", ErrTimeoutNoResponse, err)
+ }
+ case <-time.After(10*time.Millisecond + db.cfg.Timeout):
+ // ensure that the query goroutines have been scheduled
+ t.Fatalf("query did not timeout after %v", db.cfg.Timeout)
+ }
+}
+
+func TestQueryTimeoutReuseStream(t *testing.T) {
+ srv := NewTestServer(t, defaultProto)
+ defer srv.Stop()
+
+ cluster := NewCluster(srv.Address)
+ // Set the timeout arbitrarily low so that the query hits the timeout in a
+ // timely manner.
+ cluster.Timeout = 1 * time.Millisecond
+ cluster.NumConns = 1
+ cluster.NumStreams = 1
+
+ db, err := cluster.CreateSession()
+ if err != nil {
+ t.Fatalf("NewCluster: %v", err)
+ }
+ defer db.Close()
+
+ db.Query("slow").Exec()
+
+ err = db.Query("void").Exec()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestQueryTimeoutClose(t *testing.T) {
+ srv := NewTestServer(t, defaultProto)
+ defer srv.Stop()
+
+ cluster := NewCluster(srv.Address)
+ // Set the timeout arbitrarily low so that the query hits the timeout in a
+ // timely manner.
+ cluster.Timeout = 1000 * time.Millisecond
+ cluster.NumConns = 1
+ cluster.NumStreams = 1
+
+ db, err := cluster.CreateSession()
+ if err != nil {
+ t.Fatalf("NewCluster: %v", err)
+ }
+
+ ch := make(chan error)
+ go func() {
+ err := db.Query("timeout").Exec()
+ ch <- err
+ }()
+	// ensure that the above goroutine gets scheduled
+ time.Sleep(50 * time.Millisecond)
+
+ db.Close()
+ select {
+ case err = <-ch:
+ case <-time.After(1 * time.Second):
+		t.Fatal("timed out waiting to get a response once cluster is closed")
+ }
+
+ if err != ErrConnectionClosed {
+ t.Fatalf("expected to get %v got %v", ErrConnectionClosed, err)
+ }
+}
+
+func TestExecPanic(t *testing.T) {
+ t.Skip("test can cause unrelated failures, skipping until it can be fixed.")
+ srv := NewTestServer(t, defaultProto)
+ defer srv.Stop()
+
+ cluster := NewCluster(srv.Address)
+ // Set the timeout arbitrarily low so that the query hits the timeout in a
+ // timely manner.
+ cluster.Timeout = 5 * time.Millisecond
+ cluster.NumConns = 1
+ // cluster.NumStreams = 1
+
+ db, err := cluster.CreateSession()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer db.Close()
+
+ streams := db.cfg.NumStreams
+
+ wg := &sync.WaitGroup{}
+ wg.Add(streams)
+ for i := 0; i < streams; i++ {
+ go func() {
+ defer wg.Done()
+ q := db.Query("void")
+ for {
+ if err := q.Exec(); err != nil {
+ return
+ }
+ }
+ }()
+ }
+
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
+ for i := 0; i < int(TimeoutLimit); i++ {
+ db.Query("timeout").Exec()
+ }
+ }()
+
+ wg.Wait()
+
+ time.Sleep(500 * time.Millisecond)
+}
+
+func NewTestServer(t testing.TB, protocol uint8) *TestServer {
+ laddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ listen, err := net.ListenTCP("tcp", laddr)
+ if err != nil {
+ t.Fatal(err)
+ }
+
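+	// protocol v3 and later use a 9-byte frame header (16-bit stream ids);
+	// v1 and v2 use an 8-byte header.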
+ headerSize := 8
+ if protocol > protoVersion2 {
+ headerSize = 9
+ }
+
+ srv := &TestServer{
+ Address: listen.Addr().String(),
+ listen: listen,
+ t: t,
+ protocol: protocol,
+ headerSize: headerSize,
+ quit: make(chan struct{}),
+ }
+
+ go srv.serve()
+
+ return srv
+}
+
+func NewSSLTestServer(t testing.TB, protocol uint8) *TestServer {
+	pem, err := ioutil.ReadFile("testdata/pki/ca.crt")
+	if err != nil {
+		t.Fatalf("could not read ca cert: %v", err)
+	}
+	certPool := x509.NewCertPool()
+	if !certPool.AppendCertsFromPEM(pem) {
+		t.Fatalf("Failed parsing or appending certs")
+	}
+ mycert, err := tls.LoadX509KeyPair("testdata/pki/cassandra.crt", "testdata/pki/cassandra.key")
+	if err != nil {
+		t.Fatalf("could not load cert: %v", err)
+	}
+ config := &tls.Config{
+ Certificates: []tls.Certificate{mycert},
+ RootCAs: certPool,
+ }
+ listen, err := tls.Listen("tcp", "127.0.0.1:0", config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ headerSize := 8
+ if protocol > protoVersion2 {
+ headerSize = 9
+ }
+
+ srv := &TestServer{
+ Address: listen.Addr().String(),
+ listen: listen,
+ t: t,
+ protocol: protocol,
+ headerSize: headerSize,
+ quit: make(chan struct{}),
+ }
+ go srv.serve()
+ return srv
+}
+
+type TestServer struct {
+ Address string
+ t testing.TB
+ nreq uint64
+ listen net.Listener
+ nKillReq int64
+ compressor Compressor
+
+ protocol byte
+ headerSize int
+
+ quit chan struct{}
+}
+
+func (srv *TestServer) serve() {
+ defer srv.listen.Close()
+ for {
+ conn, err := srv.listen.Accept()
+ if err != nil {
+ break
+ }
+ go func(conn net.Conn) {
+ defer conn.Close()
+ for {
+ framer, err := srv.readFrame(conn)
+ if err != nil {
+ if err == io.EOF {
+ return
+ }
+
+ srv.t.Error(err)
+ return
+ }
+
+ atomic.AddUint64(&srv.nreq, 1)
+
+ go srv.process(framer)
+ }
+ }(conn)
+ }
+}
+
+func (srv *TestServer) Stop() {
+ srv.listen.Close()
+ close(srv.quit)
+}
+
+func (srv *TestServer) process(f *framer) {
+ head := f.header
+ if head == nil {
+ srv.t.Error("process frame with a nil header")
+ return
+ }
+
+ switch head.op {
+ case opStartup:
+ f.writeHeader(0, opReady, head.stream)
+ case opOptions:
+ f.writeHeader(0, opSupported, head.stream)
+ f.writeShort(0)
+ case opQuery:
+ query := f.readLongString()
+ first := query
+ if n := strings.Index(query, " "); n > 0 {
+ first = first[:n]
+ }
+ switch strings.ToLower(first) {
+ case "kill":
+ atomic.AddInt64(&srv.nKillReq, 1)
+ f.writeHeader(0, opError, head.stream)
+ f.writeInt(0x1001)
+ f.writeString("query killed")
+ case "use":
+ f.writeInt(resultKindKeyspace)
+ f.writeString(strings.TrimSpace(query[3:]))
+ case "void":
+ f.writeHeader(0, opResult, head.stream)
+ f.writeInt(resultKindVoid)
+ case "timeout":
+ <-srv.quit
+ return
+ case "slow":
+ go func() {
+ f.writeHeader(0, opResult, head.stream)
+ f.writeInt(resultKindVoid)
+ f.wbuf[0] = srv.protocol | 0x80
+ select {
+ case <-srv.quit:
+ case <-time.After(50 * time.Millisecond):
+ f.finishWrite()
+ }
+ }()
+ return
+ default:
+ f.writeHeader(0, opResult, head.stream)
+ f.writeInt(resultKindVoid)
+ }
+ default:
+ f.writeHeader(0, opError, head.stream)
+ f.writeInt(0)
+ f.writeString("not supported")
+ }
+
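+	// flag the frame as a response by setting the high bit of the version byte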
+ f.wbuf[0] = srv.protocol | 0x80
+
+ if err := f.finishWrite(); err != nil {
+ srv.t.Error(err)
+ }
+}
+
+func (srv *TestServer) readFrame(conn net.Conn) (*framer, error) {
+ buf := make([]byte, srv.headerSize)
+ head, err := readHeader(conn, buf)
+ if err != nil {
+ return nil, err
+ }
+ framer := newFramer(conn, conn, nil, srv.protocol)
+
+ err = framer.readFrame(&head)
+ if err != nil {
+ return nil, err
+ }
+
+ // should be a request frame
+ if head.version.response() {
+ return nil, fmt.Errorf("expected to read a request frame got version: %v", head.version)
+ } else if head.version.version() != srv.protocol {
+ return nil, fmt.Errorf("expected to read protocol version 0x%x got 0x%x", srv.protocol, head.version.version())
+ }
+
+ return framer, nil
+}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/connectionpool.go b/Godeps/_workspace/src/github.com/gocql/gocql/connectionpool.go
index 455f8c8f7..0e96a9328 100644
--- a/Godeps/_workspace/src/github.com/gocql/gocql/connectionpool.go
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/connectionpool.go
@@ -171,11 +171,11 @@ func (p *policyConnPool) Size() int {
return count
}
-func (p *policyConnPool) Pick(qry *Query) *Conn {
+func (p *policyConnPool) Pick(qry *Query) (SelectedHost, *Conn) {
nextHost := p.hostPolicy.Pick(qry)
var (
- host *HostInfo
+ host SelectedHost
conn *Conn
)
@@ -185,10 +185,10 @@ func (p *policyConnPool) Pick(qry *Query) *Conn {
if host == nil {
break
}
- conn = p.hostConnPools[host.Peer].Pick(qry)
+ conn = p.hostConnPools[host.Info().Peer].Pick(qry)
}
p.mu.RUnlock()
- return conn
+ return host, conn
}
func (p *policyConnPool) Close() {
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/control.go b/Godeps/_workspace/src/github.com/gocql/gocql/control.go
index f46952458..07a08334d 100644
--- a/Godeps/_workspace/src/github.com/gocql/gocql/control.go
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/control.go
@@ -26,7 +26,6 @@ func createControlConn(session *Session) *controlConn {
}
control.conn.Store((*Conn)(nil))
- control.reconnect()
go control.heartBeat()
return control
@@ -55,14 +54,14 @@ func (c *controlConn) heartBeat() {
}
reconn:
- c.reconnect()
- time.Sleep(5 * time.Second)
+ c.reconnect(true)
+ // time.Sleep(5 * time.Second)
continue
}
}
-func (c *controlConn) reconnect() {
+func (c *controlConn) reconnect(refreshring bool) {
if !atomic.CompareAndSwapUint64(&c.connecting, 0, 1) {
return
}
@@ -84,23 +83,29 @@ func (c *controlConn) reconnect() {
// TODO: should have our own roundrobbin for hosts so that we can try each
// in succession and guantee that we get a different host each time.
- conn := c.session.pool.Pick(nil)
+ host, conn := c.session.pool.Pick(nil)
if conn == nil {
return
}
newConn, err := Connect(conn.addr, conn.cfg, c)
if err != nil {
+ host.Mark(err)
// TODO: add log handler for things like this
return
}
+ host.Mark(nil)
c.conn.Store(newConn)
success = true
if oldConn != nil {
oldConn.Close()
}
+
+ if refreshring {
+ c.session.hostSource.refreshRing()
+ }
}
func (c *controlConn) HandleError(conn *Conn, err error, closed bool) {
@@ -113,7 +118,7 @@ func (c *controlConn) HandleError(conn *Conn, err error, closed bool) {
return
}
- c.reconnect()
+ c.reconnect(true)
}
func (c *controlConn) writeFrame(w frameWriter) (frame, error) {
@@ -146,7 +151,7 @@ func (c *controlConn) query(statement string, values ...interface{}) (iter *Iter
connectAttempts++
- c.reconnect()
+ c.reconnect(false)
continue
}
@@ -212,6 +217,15 @@ func (c *controlConn) awaitSchemaAgreement() (err error) {
// not exported
return errors.New("gocql: cluster schema versions not consistent")
}
+
+func (c *controlConn) addr() string {
+ conn := c.conn.Load().(*Conn)
+ if conn == nil {
+ return ""
+ }
+ return conn.addr
+}
+
func (c *controlConn) close() {
// TODO: handle more gracefully
close(c.quit)
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/errors_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/errors_test.go
new file mode 100644
index 000000000..b774d8a34
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/errors_test.go
@@ -0,0 +1,29 @@
+// +build all integration
+
+package gocql
+
+import (
+ "testing"
+)
+
+func TestErrorsParse(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ if err := createTable(session, `CREATE TABLE gocql_test.errors_parse (id int primary key)`); err != nil {
+ t.Fatal("create:", err)
+ }
+
+ if err := createTable(session, `CREATE TABLE gocql_test.errors_parse (id int primary key)`); err == nil {
+ t.Fatal("Should have gotten already exists error from cassandra server.")
+ } else {
+ switch e := err.(type) {
+ case *RequestErrAlreadyExists:
+ if e.Table != "errors_parse" {
+ t.Fatalf("expected error table to be 'errors_parse' but was %q", e.Table)
+ }
+ default:
+ t.Fatalf("expected to get RequestErrAlreadyExists instead got %T", e)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/frame_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/frame_test.go
new file mode 100644
index 000000000..1ffe338e4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/frame_test.go
@@ -0,0 +1,98 @@
+package gocql
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestFuzzBugs(t *testing.T) {
+	// these inputs were found using go-fuzz (https://github.com/dvyukov/go-fuzz)
+	// and caused panics in the frame parser before the corresponding fixes.
+ tests := [][]byte{
+ []byte("00000\xa0000"),
+ []byte("\x8000\x0e\x00\x00\x00\x000"),
+ []byte("\x8000\x00\x00\x00\x00\t0000000000"),
+ []byte("\xa0\xff\x01\xae\xefqE\xf2\x1a"),
+ []byte("\x8200\b\x00\x00\x00c\x00\x00\x00\x02000\x01\x00\x00\x00\x03" +
+ "\x00\n0000000000\x00\x14000000" +
+ "00000000000000\x00\x020000" +
+ "\x00\a000000000\x00\x050000000" +
+ "\xff0000000000000000000" +
+ "0000000"),
+ []byte("\x82\xe600\x00\x00\x00\x000"),
+ []byte("\x8200\b\x00\x00\x00\b0\x00\x00\x00\x040000"),
+ []byte("\x8200\x00\x00\x00\x00\x100\x00\x00\x12\x00\x00\x0000000" +
+ "00000"),
+ []byte("\x83000\b\x00\x00\x00\x14\x00\x00\x00\x020000000" +
+ "000000000"),
+ []byte("\x83000\b\x00\x00\x000\x00\x00\x00\x04\x00\x1000000" +
+ "00000000000000e00000" +
+ "000\x800000000000000000" +
+ "0000000000000"),
+ }
+
+ for i, test := range tests {
+ t.Logf("test %d input: %q", i, test)
+
+ var bw bytes.Buffer
+
+ r := bytes.NewReader(test)
+ head, err := readHeader(r, make([]byte, 9))
+ if err != nil {
+ continue
+ }
+
+ framer := newFramer(r, &bw, nil, byte(head.version))
+ err = framer.readFrame(&head)
+ if err != nil {
+ continue
+ }
+
+ _, err = framer.parseFrame()
+ if err != nil {
+ continue
+ }
+
+ t.Errorf("(%d) expected to fail for input %q", i, test)
+ }
+}
+
+func TestFrameWriteTooLong(t *testing.T) {
+ w := &bytes.Buffer{}
+ framer := newFramer(nil, w, nil, 2)
+
+ framer.writeHeader(0, opStartup, 1)
+ framer.writeBytes(make([]byte, maxFrameSize+1))
+ err := framer.finishWrite()
+ if err != ErrFrameTooBig {
+ t.Fatalf("expected to get %v got %v", ErrFrameTooBig, err)
+ }
+}
+
+func TestFrameReadTooLong(t *testing.T) {
+ r := &bytes.Buffer{}
+ r.Write(make([]byte, maxFrameSize+1))
+ // write a new header right after this frame to verify that we can read it
+ r.Write([]byte{0x02, 0x00, 0x00, byte(opReady), 0x00, 0x00, 0x00, 0x00})
+
+ framer := newFramer(r, nil, nil, 2)
+
+ head := frameHeader{
+ version: 2,
+ op: opReady,
+ length: r.Len() - 8,
+ }
+
+ err := framer.readFrame(&head)
+ if err != ErrFrameTooBig {
+ t.Fatalf("expected to get %v got %v", ErrFrameTooBig, err)
+ }
+
+ head, err = readHeader(r, make([]byte, 8))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if head.op != opReady {
+ t.Fatalf("expected to get header %v got %v", opReady, head.op)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/framer_bench_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/framer_bench_test.go
new file mode 100644
index 000000000..06dfac45e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/framer_bench_test.go
@@ -0,0 +1,48 @@
+package gocql
+
+import (
+ "compress/gzip"
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+func readGzipData(path string) ([]byte, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ r, err := gzip.NewReader(f)
+ if err != nil {
+ return nil, err
+ }
+ defer r.Close()
+
+ return ioutil.ReadAll(r)
+}
+
+func BenchmarkParseRowsFrame(b *testing.B) {
+ data, err := readGzipData("testdata/frames/bench_parse_result.gz")
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ framer := &framer{
+ header: &frameHeader{
+ version: protoVersion4 | 0x80,
+ op: opResult,
+ length: len(data),
+ },
+ rbuf: data,
+ }
+
+ _, err = framer.parseFrame()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/host_source.go b/Godeps/_workspace/src/github.com/gocql/gocql/host_source.go
index ac40a4d30..27f269c49 100644
--- a/Godeps/_workspace/src/github.com/gocql/gocql/host_source.go
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/host_source.go
@@ -1,8 +1,10 @@
package gocql
import (
+ "fmt"
"log"
"net"
+ "sync"
"time"
)
@@ -14,6 +16,10 @@ type HostInfo struct {
Tokens []string
}
+func (h HostInfo) String() string {
+ return fmt.Sprintf("[hostinfo peer=%q data_centre=%q rack=%q host_id=%q num_tokens=%d]", h.Peer, h.DataCenter, h.Rack, h.HostId, len(h.Tokens))
+}
+
// Polls system.peers at a specific interval to find new hosts
type ringDescriber struct {
dcFilter string
@@ -22,46 +28,82 @@ type ringDescriber struct {
prevPartitioner string
session *Session
closeChan chan bool
+	// indicates that we can use system.local to get the connection's remote address
+ localHasRpcAddr bool
+
+ mu sync.Mutex
+}
+
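+// checkSystemLocal probes whether system.local exposes the rpc_address column
+// (only present in newer Cassandra releases, see localQuery in GetHosts); a
+// syntax error from the probe is treated as "not supported".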
+func checkSystemLocal(control *controlConn) (bool, error) {
+ iter := control.query("SELECT rpc_address FROM system.local")
+ if err := iter.err; err != nil {
+ if errf, ok := err.(*errorFrame); ok {
+ if errf.code == errSyntax {
+ return false, nil
+ }
+ }
+
+ return false, err
+ }
+
+ return true, nil
}
func (r *ringDescriber) GetHosts() (hosts []HostInfo, partitioner string, err error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
// we need conn to be the same because we need to query system.peers and system.local
// on the same node to get the whole cluster
- iter := r.session.control.query("SELECT data_center, rack, host_id, tokens, partitioner FROM system.local")
- if iter == nil {
- return r.prevHosts, r.prevPartitioner, nil
+ const (
+ legacyLocalQuery = "SELECT data_center, rack, host_id, tokens, partitioner FROM system.local"
+ // only supported in 2.2.0, 2.1.6, 2.0.16
+ localQuery = "SELECT rpc_address, data_center, rack, host_id, tokens, partitioner FROM system.local"
+ )
+
+ var localHost HostInfo
+ if r.localHasRpcAddr {
+ iter := r.session.control.query(localQuery)
+ if iter == nil {
+ return r.prevHosts, r.prevPartitioner, nil
+ }
+
+ iter.Scan(&localHost.Peer, &localHost.DataCenter, &localHost.Rack,
+ &localHost.HostId, &localHost.Tokens, &partitioner)
+
+ if err = iter.Close(); err != nil {
+ return nil, "", err
+ }
+ } else {
+ iter := r.session.control.query(legacyLocalQuery)
+ if iter == nil {
+ return r.prevHosts, r.prevPartitioner, nil
+ }
+
+ iter.Scan(&localHost.DataCenter, &localHost.Rack, &localHost.HostId, &localHost.Tokens, &partitioner)
+
+ if err = iter.Close(); err != nil {
+ return nil, "", err
+ }
+
+ addr, _, err := net.SplitHostPort(r.session.control.addr())
+ if err != nil {
+ // this should not happen, ever, as this is the address that was dialed by conn, here
+ // a panic makes sense, please report a bug if it occurs.
+ panic(err)
+ }
+
+ localHost.Peer = addr
}
- conn := r.session.pool.Pick(nil)
- if conn == nil {
+ hosts = []HostInfo{localHost}
+
+ iter := r.session.control.query("SELECT rpc_address, data_center, rack, host_id, tokens FROM system.peers")
+ if iter == nil {
return r.prevHosts, r.prevPartitioner, nil
}
host := HostInfo{}
- iter.Scan(&host.DataCenter, &host.Rack, &host.HostId, &host.Tokens, &partitioner)
-
- if err = iter.Close(); err != nil {
- return nil, "", err
- }
-
- addr, _, err := net.SplitHostPort(conn.Address())
- if err != nil {
- // this should not happen, ever, as this is the address that was dialed by conn, here
- // a panic makes sense, please report a bug if it occurs.
- panic(err)
- }
-
- host.Peer = addr
-
- hosts = []HostInfo{host}
-
- iter = r.session.control.query("SELECT peer, data_center, rack, host_id, tokens FROM system.peers")
- if iter == nil {
- return r.prevHosts, r.prevPartitioner, nil
- }
-
- host = HostInfo{}
for iter.Scan(&host.Peer, &host.DataCenter, &host.Rack, &host.HostId, &host.Tokens) {
if r.matchFilter(&host) {
hosts = append(hosts, host)
@@ -92,28 +134,32 @@ func (r *ringDescriber) matchFilter(host *HostInfo) bool {
return true
}
-func (h *ringDescriber) run(sleep time.Duration) {
+func (r *ringDescriber) refreshRing() {
+	// if we have 0 hosts this will return the previous list of hosts to
+	// attempt to reconnect to the cluster, otherwise we would never find
+	// downed hosts again. We could possibly have an optimisation to only
+	// try to add new hosts if GetHosts didn't error and the hosts didn't change.
+ hosts, partitioner, err := r.GetHosts()
+ if err != nil {
+ log.Println("RingDescriber: unable to get ring topology:", err)
+ return
+ }
+
+ r.session.pool.SetHosts(hosts)
+ r.session.pool.SetPartitioner(partitioner)
+}
+
+func (r *ringDescriber) run(sleep time.Duration) {
if sleep == 0 {
sleep = 30 * time.Second
}
for {
- // if we have 0 hosts this will return the previous list of hosts to
- // attempt to reconnect to the cluster otherwise we would never find
- // downed hosts again, could possibly have an optimisation to only
- // try to add new hosts if GetHosts didnt error and the hosts didnt change.
- hosts, partitioner, err := h.GetHosts()
- if err != nil {
- log.Println("RingDescriber: unable to get ring topology:", err)
- continue
- }
-
- h.session.pool.SetHosts(hosts)
- h.session.pool.SetPartitioner(partitioner)
+ r.refreshRing()
select {
case <-time.After(sleep):
- case <-h.closeChan:
+ case <-r.closeChan:
return
}
}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/lru/lru_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/lru/lru_test.go
new file mode 100644
index 000000000..1a6414b33
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/lru/lru_test.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2015 The gocql authors
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package lru
+
+import (
+ "testing"
+)
+
+type simpleStruct struct {
+ int
+ string
+}
+
+type complexStruct struct {
+ int
+ simpleStruct
+}
+
+var getTests = []struct {
+ name string
+ keyToAdd string
+ keyToGet string
+ expectedOk bool
+}{
+ {"string_hit", "mystring", "mystring", true},
+ {"string_miss", "mystring", "nonsense", false},
+ {"simple_struct_hit", "two", "two", true},
+	{"simple_struct_miss", "two", "noway", false},
+}
+
+func TestGet(t *testing.T) {
+ for _, tt := range getTests {
+ lru := New(0)
+ lru.Add(tt.keyToAdd, 1234)
+ val, ok := lru.Get(tt.keyToGet)
+ if ok != tt.expectedOk {
+ t.Fatalf("%s: cache hit = %v; want %v", tt.name, ok, !ok)
+ } else if ok && val != 1234 {
+ t.Fatalf("%s expected get to return 1234 but got %v", tt.name, val)
+ }
+ }
+}
+
+func TestRemove(t *testing.T) {
+ lru := New(0)
+ lru.Add("mystring", 1234)
+ if val, ok := lru.Get("mystring"); !ok {
+ t.Fatal("TestRemove returned no match")
+ } else if val != 1234 {
+ t.Fatalf("TestRemove failed. Expected %d, got %v", 1234, val)
+ }
+
+ lru.Remove("mystring")
+ if _, ok := lru.Get("mystring"); ok {
+ t.Fatal("TestRemove returned a removed entry")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/marshal_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/marshal_test.go
new file mode 100644
index 000000000..73e3f63e2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/marshal_test.go
@@ -0,0 +1,899 @@
+// +build all unit
+
+package gocql
+
+import (
+ "bytes"
+ "math"
+ "math/big"
+ "net"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "gopkg.in/inf.v0"
+)
+
+var marshalTests = []struct {
+ Info TypeInfo
+ Data []byte
+ Value interface{}
+}{
+ {
+ NativeType{proto: 2, typ: TypeVarchar},
+ []byte("hello world"),
+ []byte("hello world"),
+ },
+ {
+ NativeType{proto: 2, typ: TypeVarchar},
+ []byte("hello world"),
+ "hello world",
+ },
+ {
+ NativeType{proto: 2, typ: TypeVarchar},
+ []byte(nil),
+ []byte(nil),
+ },
+ {
+ NativeType{proto: 2, typ: TypeVarchar},
+ []byte("hello world"),
+ MyString("hello world"),
+ },
+ {
+ NativeType{proto: 2, typ: TypeVarchar},
+ []byte("HELLO WORLD"),
+ CustomString("hello world"),
+ },
+ {
+ NativeType{proto: 2, typ: TypeBlob},
+ []byte("hello\x00"),
+ []byte("hello\x00"),
+ },
+ {
+ NativeType{proto: 2, typ: TypeBlob},
+ []byte(nil),
+ []byte(nil),
+ },
+ {
+ NativeType{proto: 2, typ: TypeTimeUUID},
+ []byte{0x3d, 0xcd, 0x98, 0x0, 0xf3, 0xd9, 0x11, 0xbf, 0x86, 0xd4, 0xb8, 0xe8, 0x56, 0x2c, 0xc, 0xd0},
+ func() UUID {
+ x, _ := UUIDFromBytes([]byte{0x3d, 0xcd, 0x98, 0x0, 0xf3, 0xd9, 0x11, 0xbf, 0x86, 0xd4, 0xb8, 0xe8, 0x56, 0x2c, 0xc, 0xd0})
+ return x
+ }(),
+ },
+ {
+ NativeType{proto: 2, typ: TypeInt},
+ []byte("\x00\x00\x00\x00"),
+ 0,
+ },
+ {
+ NativeType{proto: 2, typ: TypeInt},
+ []byte("\x01\x02\x03\x04"),
+ int(16909060),
+ },
+ {
+ NativeType{proto: 2, typ: TypeInt},
+ []byte("\x80\x00\x00\x00"),
+ int32(math.MinInt32),
+ },
+ {
+ NativeType{proto: 2, typ: TypeInt},
+ []byte("\x7f\xff\xff\xff"),
+ int32(math.MaxInt32),
+ },
+ {
+ NativeType{proto: 2, typ: TypeInt},
+ []byte("\x00\x00\x00\x00"),
+ "0",
+ },
+ {
+ NativeType{proto: 2, typ: TypeInt},
+ []byte("\x01\x02\x03\x04"),
+ "16909060",
+ },
+ {
+ NativeType{proto: 2, typ: TypeInt},
+ []byte("\x80\x00\x00\x00"),
+ "-2147483648", // math.MinInt32
+ },
+ {
+ NativeType{proto: 2, typ: TypeInt},
+ []byte("\x7f\xff\xff\xff"),
+ "2147483647", // math.MaxInt32
+ },
+ {
+ NativeType{proto: 2, typ: TypeBigInt},
+ []byte("\x00\x00\x00\x00\x00\x00\x00\x00"),
+ 0,
+ },
+ {
+ NativeType{proto: 2, typ: TypeBigInt},
+ []byte("\x01\x02\x03\x04\x05\x06\x07\x08"),
+ 72623859790382856,
+ },
+ {
+ NativeType{proto: 2, typ: TypeBigInt},
+ []byte("\x80\x00\x00\x00\x00\x00\x00\x00"),
+ int64(math.MinInt64),
+ },
+ {
+ NativeType{proto: 2, typ: TypeBigInt},
+ []byte("\x7f\xff\xff\xff\xff\xff\xff\xff"),
+ int64(math.MaxInt64),
+ },
+ {
+ NativeType{proto: 2, typ: TypeBigInt},
+ []byte("\x00\x00\x00\x00\x00\x00\x00\x00"),
+ "0",
+ },
+ {
+ NativeType{proto: 2, typ: TypeBigInt},
+ []byte("\x01\x02\x03\x04\x05\x06\x07\x08"),
+ "72623859790382856",
+ },
+ {
+ NativeType{proto: 2, typ: TypeBigInt},
+ []byte("\x80\x00\x00\x00\x00\x00\x00\x00"),
+ "-9223372036854775808", // math.MinInt64
+ },
+ {
+ NativeType{proto: 2, typ: TypeBigInt},
+ []byte("\x7f\xff\xff\xff\xff\xff\xff\xff"),
+ "9223372036854775807", // math.MaxInt64
+ },
+ {
+ NativeType{proto: 2, typ: TypeBoolean},
+ []byte("\x00"),
+ false,
+ },
+ {
+ NativeType{proto: 2, typ: TypeBoolean},
+ []byte("\x01"),
+ true,
+ },
+ {
+ NativeType{proto: 2, typ: TypeFloat},
+ []byte("\x40\x49\x0f\xdb"),
+ float32(3.14159265),
+ },
+ {
+ NativeType{proto: 2, typ: TypeDouble},
+ []byte("\x40\x09\x21\xfb\x53\xc8\xd4\xf1"),
+ float64(3.14159265),
+ },
+ {
+ NativeType{proto: 2, typ: TypeDecimal},
+ []byte("\x00\x00\x00\x00\x00"),
+ inf.NewDec(0, 0),
+ },
+ {
+ NativeType{proto: 2, typ: TypeDecimal},
+ []byte("\x00\x00\x00\x00\x64"),
+ inf.NewDec(100, 0),
+ },
+ {
+ NativeType{proto: 2, typ: TypeDecimal},
+ []byte("\x00\x00\x00\x02\x19"),
+ decimalize("0.25"),
+ },
+ {
+ NativeType{proto: 2, typ: TypeDecimal},
+ []byte("\x00\x00\x00\x13\xD5\a;\x20\x14\xA2\x91"),
+ decimalize("-0.0012095473475870063"), // From the iconara/cql-rb test suite
+ },
+ {
+ NativeType{proto: 2, typ: TypeDecimal},
+ []byte("\x00\x00\x00\x13*\xF8\xC4\xDF\xEB]o"),
+ decimalize("0.0012095473475870063"), // From the iconara/cql-rb test suite
+ },
+ {
+ NativeType{proto: 2, typ: TypeDecimal},
+ []byte("\x00\x00\x00\x12\xF2\xD8\x02\xB6R\x7F\x99\xEE\x98#\x99\xA9V"),
+ decimalize("-1042342234234.123423435647768234"), // From the iconara/cql-rb test suite
+ },
+ {
+ NativeType{proto: 2, typ: TypeDecimal},
+ []byte("\x00\x00\x00\r\nJ\x04\"^\x91\x04\x8a\xb1\x18\xfe"),
+ decimalize("1243878957943.1234124191998"), // From the datastax/python-driver test suite
+ },
+ {
+ NativeType{proto: 2, typ: TypeDecimal},
+ []byte("\x00\x00\x00\x06\xe5\xde]\x98Y"),
+ decimalize("-112233.441191"), // From the datastax/python-driver test suite
+ },
+ {
+ NativeType{proto: 2, typ: TypeDecimal},
+ []byte("\x00\x00\x00\x14\x00\xfa\xce"),
+ decimalize("0.00000000000000064206"), // From the datastax/python-driver test suite
+ },
+ {
+ NativeType{proto: 2, typ: TypeDecimal},
+ []byte("\x00\x00\x00\x14\xff\x052"),
+ decimalize("-0.00000000000000064206"), // From the datastax/python-driver test suite
+ },
+ {
+ NativeType{proto: 2, typ: TypeDecimal},
+ []byte("\xff\xff\xff\x9c\x00\xfa\xce"),
+ inf.NewDec(64206, -100), // From the datastax/python-driver test suite
+ },
+ {
+ NativeType{proto: 2, typ: TypeTimestamp},
+ []byte("\x00\x00\x01\x40\x77\x16\xe1\xb8"),
+ time.Date(2013, time.August, 13, 9, 52, 3, 0, time.UTC),
+ },
+ {
+ NativeType{proto: 2, typ: TypeTimestamp},
+ []byte("\x00\x00\x01\x40\x77\x16\xe1\xb8"),
+ int64(1376387523000),
+ },
+ {
+ CollectionType{
+ NativeType: NativeType{proto: 2, typ: TypeList},
+ Elem: NativeType{proto: 2, typ: TypeInt},
+ },
+ []byte("\x00\x02\x00\x04\x00\x00\x00\x01\x00\x04\x00\x00\x00\x02"),
+ []int{1, 2},
+ },
+ {
+ CollectionType{
+ NativeType: NativeType{proto: 2, typ: TypeList},
+ Elem: NativeType{proto: 2, typ: TypeInt},
+ },
+ []byte("\x00\x02\x00\x04\x00\x00\x00\x01\x00\x04\x00\x00\x00\x02"),
+ [2]int{1, 2},
+ },
+ {
+ CollectionType{
+ NativeType: NativeType{proto: 2, typ: TypeSet},
+ Elem: NativeType{proto: 2, typ: TypeInt},
+ },
+ []byte("\x00\x02\x00\x04\x00\x00\x00\x01\x00\x04\x00\x00\x00\x02"),
+ []int{1, 2},
+ },
+ {
+ CollectionType{
+ NativeType: NativeType{proto: 2, typ: TypeSet},
+ Elem: NativeType{proto: 2, typ: TypeInt},
+ },
+ []byte(nil),
+ []int(nil),
+ },
+ {
+ CollectionType{
+ NativeType: NativeType{proto: 2, typ: TypeMap},
+ Key: NativeType{proto: 2, typ: TypeVarchar},
+ Elem: NativeType{proto: 2, typ: TypeInt},
+ },
+ []byte("\x00\x01\x00\x03foo\x00\x04\x00\x00\x00\x01"),
+ map[string]int{"foo": 1},
+ },
+ {
+ CollectionType{
+ NativeType: NativeType{proto: 2, typ: TypeMap},
+ Key: NativeType{proto: 2, typ: TypeVarchar},
+ Elem: NativeType{proto: 2, typ: TypeInt},
+ },
+ []byte(nil),
+ map[string]int(nil),
+ },
+ {
+ CollectionType{
+ NativeType: NativeType{proto: 2, typ: TypeList},
+ Elem: NativeType{proto: 2, typ: TypeVarchar},
+ },
+ bytes.Join([][]byte{
+ []byte("\x00\x01\xFF\xFF"),
+ bytes.Repeat([]byte("X"), 65535)}, []byte("")),
+ []string{strings.Repeat("X", 65535)},
+ },
+ {
+ CollectionType{
+ NativeType: NativeType{proto: 2, typ: TypeMap},
+ Key: NativeType{proto: 2, typ: TypeVarchar},
+ Elem: NativeType{proto: 2, typ: TypeVarchar},
+ },
+ bytes.Join([][]byte{
+ []byte("\x00\x01\xFF\xFF"),
+ bytes.Repeat([]byte("X"), 65535),
+ []byte("\xFF\xFF"),
+ bytes.Repeat([]byte("Y"), 65535)}, []byte("")),
+ map[string]string{
+ strings.Repeat("X", 65535): strings.Repeat("Y", 65535),
+ },
+ },
+ {
+ NativeType{proto: 2, typ: TypeVarint},
+ []byte("\x00"),
+ 0,
+ },
+ {
+ NativeType{proto: 2, typ: TypeVarint},
+ []byte("\x37\xE2\x3C\xEC"),
+ int32(937573612),
+ },
+ {
+ NativeType{proto: 2, typ: TypeVarint},
+ []byte("\x37\xE2\x3C\xEC"),
+ big.NewInt(937573612),
+ },
+ {
+ NativeType{proto: 2, typ: TypeVarint},
+ []byte("\x03\x9EV \x15\f\x03\x9DK\x18\xCDI\\$?\a["),
+ bigintize("1231312312331283012830129382342342412123"), // From the iconara/cql-rb test suite
+ },
+ {
+ NativeType{proto: 2, typ: TypeVarint},
+ []byte("\xC9v\x8D:\x86"),
+ big.NewInt(-234234234234), // From the iconara/cql-rb test suite
+ },
+ {
+ NativeType{proto: 2, typ: TypeVarint},
+ []byte("f\x1e\xfd\xf2\xe3\xb1\x9f|\x04_\x15"),
+ bigintize("123456789123456789123456789"), // From the datastax/python-driver test suite
+ },
+ {
+ NativeType{proto: 2, typ: TypeInet},
+ []byte("\x7F\x00\x00\x01"),
+ net.ParseIP("127.0.0.1").To4(),
+ },
+ {
+ NativeType{proto: 2, typ: TypeInet},
+ []byte("\xFF\xFF\xFF\xFF"),
+ net.ParseIP("255.255.255.255").To4(),
+ },
+ {
+ NativeType{proto: 2, typ: TypeInet},
+ []byte("\x7F\x00\x00\x01"),
+ "127.0.0.1",
+ },
+ {
+ NativeType{proto: 2, typ: TypeInet},
+ []byte("\xFF\xFF\xFF\xFF"),
+ "255.255.255.255",
+ },
+ {
+ NativeType{proto: 2, typ: TypeInet},
+ []byte("\x21\xDA\x00\xd3\x00\x00\x2f\x3b\x02\xaa\x00\xff\xfe\x28\x9c\x5a"),
+ "21da:d3:0:2f3b:2aa:ff:fe28:9c5a",
+ },
+ {
+ NativeType{proto: 2, typ: TypeInet},
+ []byte("\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x02\xb3\xff\xfe\x1e\x83\x29"),
+ "fe80::202:b3ff:fe1e:8329",
+ },
+ {
+ NativeType{proto: 2, typ: TypeInet},
+ []byte("\x21\xDA\x00\xd3\x00\x00\x2f\x3b\x02\xaa\x00\xff\xfe\x28\x9c\x5a"),
+ net.ParseIP("21da:d3:0:2f3b:2aa:ff:fe28:9c5a"),
+ },
+ {
+ NativeType{proto: 2, typ: TypeInet},
+ []byte("\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x02\xb3\xff\xfe\x1e\x83\x29"),
+ net.ParseIP("fe80::202:b3ff:fe1e:8329"),
+ },
+ {
+ NativeType{proto: 2, typ: TypeInt},
+ []byte(nil),
+ nil,
+ },
+ {
+ NativeType{proto: 2, typ: TypeVarchar},
+ []byte("nullable string"),
+ func() *string {
+ value := "nullable string"
+ return &value
+ }(),
+ },
+ {
+ NativeType{proto: 2, typ: TypeVarchar},
+ []byte{},
+ (*string)(nil),
+ },
+ {
+ NativeType{proto: 2, typ: TypeInt},
+ []byte("\x7f\xff\xff\xff"),
+ func() *int {
+ var value int = math.MaxInt32
+ return &value
+ }(),
+ },
+ {
+ NativeType{proto: 2, typ: TypeInt},
+ []byte(nil),
+ (*int)(nil),
+ },
+ {
+ NativeType{proto: 2, typ: TypeTimeUUID},
+ []byte{0x3d, 0xcd, 0x98, 0x0, 0xf3, 0xd9, 0x11, 0xbf, 0x86, 0xd4, 0xb8, 0xe8, 0x56, 0x2c, 0xc, 0xd0},
+ &UUID{0x3d, 0xcd, 0x98, 0x0, 0xf3, 0xd9, 0x11, 0xbf, 0x86, 0xd4, 0xb8, 0xe8, 0x56, 0x2c, 0xc, 0xd0},
+ },
+ {
+ NativeType{proto: 2, typ: TypeTimeUUID},
+ []byte{},
+ (*UUID)(nil),
+ },
+ {
+ NativeType{proto: 2, typ: TypeTimestamp},
+ []byte("\x00\x00\x01\x40\x77\x16\xe1\xb8"),
+ func() *time.Time {
+ t := time.Date(2013, time.August, 13, 9, 52, 3, 0, time.UTC)
+ return &t
+ }(),
+ },
+ {
+ NativeType{proto: 2, typ: TypeTimestamp},
+ []byte(nil),
+ (*time.Time)(nil),
+ },
+ {
+ NativeType{proto: 2, typ: TypeBoolean},
+ []byte("\x00"),
+ func() *bool {
+ b := false
+ return &b
+ }(),
+ },
+ {
+ NativeType{proto: 2, typ: TypeBoolean},
+ []byte("\x01"),
+ func() *bool {
+ b := true
+ return &b
+ }(),
+ },
+ {
+ NativeType{proto: 2, typ: TypeBoolean},
+ []byte(nil),
+ (*bool)(nil),
+ },
+ {
+ NativeType{proto: 2, typ: TypeFloat},
+ []byte("\x40\x49\x0f\xdb"),
+ func() *float32 {
+ f := float32(3.14159265)
+ return &f
+ }(),
+ },
+ {
+ NativeType{proto: 2, typ: TypeFloat},
+ []byte(nil),
+ (*float32)(nil),
+ },
+ {
+ NativeType{proto: 2, typ: TypeDouble},
+ []byte("\x40\x09\x21\xfb\x53\xc8\xd4\xf1"),
+ func() *float64 {
+ d := float64(3.14159265)
+ return &d
+ }(),
+ },
+ {
+ NativeType{proto: 2, typ: TypeDouble},
+ []byte(nil),
+ (*float64)(nil),
+ },
+ {
+ NativeType{proto: 2, typ: TypeInet},
+ []byte("\x7F\x00\x00\x01"),
+ func() *net.IP {
+ ip := net.ParseIP("127.0.0.1").To4()
+ return &ip
+ }(),
+ },
+ {
+ NativeType{proto: 2, typ: TypeInet},
+ []byte(nil),
+ (*net.IP)(nil),
+ },
+ {
+ CollectionType{
+ NativeType: NativeType{proto: 2, typ: TypeList},
+ Elem: NativeType{proto: 2, typ: TypeInt},
+ },
+ []byte("\x00\x02\x00\x04\x00\x00\x00\x01\x00\x04\x00\x00\x00\x02"),
+ func() *[]int {
+ l := []int{1, 2}
+ return &l
+ }(),
+ },
+ {
+ CollectionType{
+ NativeType: NativeType{proto: 2, typ: TypeList},
+ Elem: NativeType{proto: 2, typ: TypeInt},
+ },
+ []byte(nil),
+ (*[]int)(nil),
+ },
+ {
+ CollectionType{
+ NativeType: NativeType{proto: 2, typ: TypeMap},
+ Key: NativeType{proto: 2, typ: TypeVarchar},
+ Elem: NativeType{proto: 2, typ: TypeInt},
+ },
+ []byte("\x00\x01\x00\x03foo\x00\x04\x00\x00\x00\x01"),
+ func() *map[string]int {
+ m := map[string]int{"foo": 1}
+ return &m
+ }(),
+ },
+ {
+ CollectionType{
+ NativeType: NativeType{proto: 2, typ: TypeMap},
+ Key: NativeType{proto: 2, typ: TypeVarchar},
+ Elem: NativeType{proto: 2, typ: TypeInt},
+ },
+ []byte(nil),
+ (*map[string]int)(nil),
+ },
+ {
+ NativeType{proto: 2, typ: TypeVarchar},
+ []byte("HELLO WORLD"),
+ func() *CustomString {
+ customString := CustomString("hello world")
+ return &customString
+ }(),
+ },
+ {
+ NativeType{proto: 2, typ: TypeVarchar},
+ []byte(nil),
+ (*CustomString)(nil),
+ },
+}
+
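+// decimalize parses a decimal string into an *inf.Dec for use in the test fixtures; parse errors are ignored.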
+func decimalize(s string) *inf.Dec {
+ i, _ := new(inf.Dec).SetString(s)
+ return i
+}
+
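+// bigintize parses a base-10 string into a *big.Int for use in the test fixtures; parse errors are ignored.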
+func bigintize(s string) *big.Int {
+ i, _ := new(big.Int).SetString(s, 10)
+ return i
+}
+
+func TestMarshal(t *testing.T) {
+ for i, test := range marshalTests {
+ data, err := Marshal(test.Info, test.Value)
+ if err != nil {
+ t.Errorf("marshalTest[%d]: %v", i, err)
+ continue
+ }
+ if !bytes.Equal(data, test.Data) {
+ t.Errorf("marshalTest[%d]: expected %q, got %q (%#v)", i, test.Data, data, test.Value)
+ }
+ }
+}
+
+func TestUnmarshal(t *testing.T) {
+ for i, test := range marshalTests {
+ if test.Value != nil {
+ v := reflect.New(reflect.TypeOf(test.Value))
+ err := Unmarshal(test.Info, test.Data, v.Interface())
+ if err != nil {
+ t.Errorf("unmarshalTest[%d]: %v", i, err)
+ continue
+ }
+ if !reflect.DeepEqual(v.Elem().Interface(), test.Value) {
+ t.Errorf("unmarshalTest[%d]: expected %#v, got %#v.", i, test.Value, v.Elem().Interface())
+ }
+ } else {
+			if err := Unmarshal(test.Info, test.Data, test.Value); err == nil {
+				t.Errorf("unmarshalTest[%d]: expected an error for %#v, but got none.", i, test.Value)
+ }
+ }
+ }
+}
+
+func TestMarshalVarint(t *testing.T) {
+ varintTests := []struct {
+ Value interface{}
+ Marshaled []byte
+ Unmarshaled *big.Int
+ }{
+ {
+ Value: int8(0),
+ Marshaled: []byte("\x00"),
+ Unmarshaled: big.NewInt(0),
+ },
+ {
+ Value: uint8(255),
+ Marshaled: []byte("\x00\xFF"),
+ Unmarshaled: big.NewInt(255),
+ },
+ {
+ Value: int8(-1),
+ Marshaled: []byte("\xFF"),
+ Unmarshaled: big.NewInt(-1),
+ },
+ {
+ Value: big.NewInt(math.MaxInt32),
+ Marshaled: []byte("\x7F\xFF\xFF\xFF"),
+ Unmarshaled: big.NewInt(math.MaxInt32),
+ },
+ {
+ Value: big.NewInt(int64(math.MaxInt32) + 1),
+ Marshaled: []byte("\x00\x80\x00\x00\x00"),
+ Unmarshaled: big.NewInt(int64(math.MaxInt32) + 1),
+ },
+ {
+ Value: big.NewInt(math.MinInt32),
+ Marshaled: []byte("\x80\x00\x00\x00"),
+ Unmarshaled: big.NewInt(math.MinInt32),
+ },
+ {
+ Value: big.NewInt(int64(math.MinInt32) - 1),
+ Marshaled: []byte("\xFF\x7F\xFF\xFF\xFF"),
+ Unmarshaled: big.NewInt(int64(math.MinInt32) - 1),
+ },
+ {
+ Value: math.MinInt64,
+ Marshaled: []byte("\x80\x00\x00\x00\x00\x00\x00\x00"),
+ Unmarshaled: big.NewInt(math.MinInt64),
+ },
+ {
+ Value: uint64(math.MaxInt64) + 1,
+ Marshaled: []byte("\x00\x80\x00\x00\x00\x00\x00\x00\x00"),
+ Unmarshaled: bigintize("9223372036854775808"),
+ },
+ {
+ Value: bigintize("2361183241434822606848"), // 2**71
+ Marshaled: []byte("\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00"),
+ Unmarshaled: bigintize("2361183241434822606848"),
+ },
+ {
+ Value: bigintize("-9223372036854775809"), // -2**63 - 1
+ Marshaled: []byte("\xFF\x7F\xFF\xFF\xFF\xFF\xFF\xFF\xFF"),
+ Unmarshaled: bigintize("-9223372036854775809"),
+ },
+ }
+
+ for i, test := range varintTests {
+ data, err := Marshal(NativeType{proto: 2, typ: TypeVarint}, test.Value)
+ if err != nil {
+ t.Errorf("error marshaling varint: %v (test #%d)", err, i)
+ }
+
+ if !bytes.Equal(test.Marshaled, data) {
+ t.Errorf("marshaled varint mismatch: expected %v, got %v (test #%d)", test.Marshaled, data, i)
+ }
+
+ binder := new(big.Int)
+ err = Unmarshal(NativeType{proto: 2, typ: TypeVarint}, test.Marshaled, binder)
+ if err != nil {
+ t.Errorf("error unmarshaling varint: %v (test #%d)", err, i)
+ }
+
+ if test.Unmarshaled.Cmp(binder) != 0 {
+ t.Errorf("unmarshaled varint mismatch: expected %v, got %v (test #%d)", test.Unmarshaled, binder, i)
+ }
+ }
+}
+
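+// equalStringSlice reports whether the two slices have the same length and identical elements in the same order.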
+func equalStringSlice(leftList, rightList []string) bool {
+ if len(leftList) != len(rightList) {
+ return false
+ }
+ for index := range leftList {
+ if rightList[index] != leftList[index] {
+ return false
+ }
+ }
+ return true
+}
+
+func TestMarshalList(t *testing.T) {
+ typeInfo := CollectionType{
+ NativeType: NativeType{proto: 2, typ: TypeList},
+ Elem: NativeType{proto: 2, typ: TypeVarchar},
+ }
+
+ sourceLists := [][]string{
+ []string{"valueA"},
+ []string{"valueA", "valueB"},
+ []string{"valueB"},
+ }
+
+ listDatas := [][]byte{}
+
+ for _, list := range sourceLists {
+ listData, marshalErr := Marshal(typeInfo, list)
+		if marshalErr != nil {
+			t.Errorf("Error marshaling %+v of type %+v: %s", list, typeInfo, marshalErr)
+ }
+ listDatas = append(listDatas, listData)
+ }
+
+ outputLists := [][]string{}
+
+ var outputList []string
+
+ for _, listData := range listDatas {
+		if unmarshalErr := Unmarshal(typeInfo, listData, &outputList); unmarshalErr != nil {
+ t.Error(unmarshalErr)
+ }
+ outputLists = append(outputLists, outputList)
+ }
+
+ for index, sourceList := range sourceLists {
+ outputList := outputLists[index]
+ if !equalStringSlice(sourceList, outputList) {
+ t.Errorf("Lists %+v not equal to lists %+v, but should", sourceList, outputList)
+ }
+ }
+}
+
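+// CustomString implements MarshalCQL and UnmarshalCQL so the tests can verify
+// that custom types round-trip through Marshal and Unmarshal; it upper-cases on
+// marshal and lower-cases on unmarshal.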
+type CustomString string
+
+func (c CustomString) MarshalCQL(info TypeInfo) ([]byte, error) {
+ return []byte(strings.ToUpper(string(c))), nil
+}
+func (c *CustomString) UnmarshalCQL(info TypeInfo, data []byte) error {
+ *c = CustomString(strings.ToLower(string(data)))
+ return nil
+}
+
+type MyString string
+
+type MyInt int
+
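+// typeLookupTest pairs Apache Cassandra marshal class names with the gocql Type
+// each is expected to resolve to.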
+var typeLookupTest = []struct {
+ TypeName string
+ ExpectedType Type
+}{
+ {"AsciiType", TypeAscii},
+ {"LongType", TypeBigInt},
+ {"BytesType", TypeBlob},
+ {"BooleanType", TypeBoolean},
+ {"CounterColumnType", TypeCounter},
+ {"DecimalType", TypeDecimal},
+ {"DoubleType", TypeDouble},
+ {"FloatType", TypeFloat},
+ {"Int32Type", TypeInt},
+ {"DateType", TypeTimestamp},
+ {"TimestampType", TypeTimestamp},
+ {"UUIDType", TypeUUID},
+ {"UTF8Type", TypeVarchar},
+ {"IntegerType", TypeVarint},
+ {"TimeUUIDType", TypeTimeUUID},
+ {"InetAddressType", TypeInet},
+ {"MapType", TypeMap},
+ {"ListType", TypeList},
+ {"SetType", TypeSet},
+ {"unknown", TypeCustom},
+}
+
+func testType(t *testing.T, cassType string, expectedType Type) {
+ if computedType := getApacheCassandraType(apacheCassandraTypePrefix + cassType); computedType != expectedType {
+ t.Errorf("Cassandra custom type lookup for %s failed. Expected %s, got %s.", cassType, expectedType.String(), computedType.String())
+ }
+}
+
+func TestLookupCassType(t *testing.T) {
+ for _, lookupTest := range typeLookupTest {
+ testType(t, lookupTest.TypeName, lookupTest.ExpectedType)
+ }
+}
+
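+// MyPointerMarshaler implements MarshalCQL on a pointer receiver, returning a
+// fixed byte, to exercise marshaling through pointer types.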
+type MyPointerMarshaler struct{}
+
+func (m *MyPointerMarshaler) MarshalCQL(_ TypeInfo) ([]byte, error) {
+ return []byte{42}, nil
+}
+
+func TestMarshalPointer(t *testing.T) {
+ m := &MyPointerMarshaler{}
+ typ := NativeType{proto: 2, typ: TypeInt}
+
+ data, err := Marshal(typ, m)
+
+ if err != nil {
+ t.Errorf("Pointer marshaling failed. Error: %s", err)
+ }
+ if len(data) != 1 || data[0] != 42 {
+ t.Errorf("Pointer marshaling failed. Expected %+v, got %+v", []byte{42}, data)
+ }
+}
+
+func TestMarshalTimestamp(t *testing.T) {
+ var marshalTimestampTests = []struct {
+ Info TypeInfo
+ Data []byte
+ Value interface{}
+ }{
+ {
+ NativeType{proto: 2, typ: TypeTimestamp},
+ []byte("\x00\x00\x01\x40\x77\x16\xe1\xb8"),
+ time.Date(2013, time.August, 13, 9, 52, 3, 0, time.UTC),
+ },
+ {
+ NativeType{proto: 2, typ: TypeTimestamp},
+ []byte("\x00\x00\x01\x40\x77\x16\xe1\xb8"),
+ int64(1376387523000),
+ },
+ {
+ // 9223372036854 is the maximum time representable in ms since the epoch
+ // with int64 if using UnixNano to convert
+ NativeType{proto: 2, typ: TypeTimestamp},
+ []byte("\x00\x00\x08\x63\x7b\xd0\x5a\xf6"),
+ time.Date(2262, time.April, 11, 23, 47, 16, 854775807, time.UTC),
+ },
+ {
+ // One nanosecond after causes overflow when using UnixNano
+ // Instead it should resolve to the same time in ms
+ NativeType{proto: 2, typ: TypeTimestamp},
+ []byte("\x00\x00\x08\x63\x7b\xd0\x5a\xf6"),
+ time.Date(2262, time.April, 11, 23, 47, 16, 854775808, time.UTC),
+ },
+ {
+ // -9223372036855 is the minimum time representable in ms since the epoch
+ // with int64 if using UnixNano to convert
+ NativeType{proto: 2, typ: TypeTimestamp},
+ []byte("\xff\xff\xf7\x9c\x84\x2f\xa5\x09"),
+ time.Date(1677, time.September, 21, 00, 12, 43, 145224192, time.UTC),
+ },
+ {
+ // One nanosecond earlier causes overflow when using UnixNano
+			// Instead it should resolve to the same time in ms
+ NativeType{proto: 2, typ: TypeTimestamp},
+ []byte("\xff\xff\xf7\x9c\x84\x2f\xa5\x09"),
+ time.Date(1677, time.September, 21, 00, 12, 43, 145224191, time.UTC),
+ },
+ {
+ // Store the zero time as a blank slice
+ NativeType{proto: 2, typ: TypeTimestamp},
+ []byte{},
+ time.Time{},
+ },
+ }
+
+ for i, test := range marshalTimestampTests {
+ t.Log(i, test)
+ data, err := Marshal(test.Info, test.Value)
+ if err != nil {
+ t.Errorf("marshalTest[%d]: %v", i, err)
+ continue
+ }
+ if !bytes.Equal(data, test.Data) {
+ t.Errorf("marshalTest[%d]: expected %x (%v), got %x (%v) for time %s", i,
+ test.Data, decBigInt(test.Data), data, decBigInt(data), test.Value)
+ }
+ }
+}
+
+func TestMarshalTuple(t *testing.T) {
+ info := TupleTypeInfo{
+ NativeType: NativeType{proto: 3, typ: TypeTuple},
+ Elems: []TypeInfo{
+ NativeType{proto: 3, typ: TypeVarchar},
+ NativeType{proto: 3, typ: TypeVarchar},
+ },
+ }
+
+ expectedData := []byte("\x00\x00\x00\x03foo\x00\x00\x00\x03bar")
+ value := []interface{}{"foo", "bar"}
+
+ data, err := Marshal(info, value)
+ if err != nil {
+ t.Errorf("marshalTest: %v", err)
+ return
+ }
+
+ if !bytes.Equal(data, expectedData) {
+ t.Errorf("marshalTest: expected %x (%v), got %x (%v)",
+ expectedData, decBigInt(expectedData), data, decBigInt(data))
+ return
+ }
+
+ var s1, s2 string
+ val := []interface{}{&s1, &s2}
+ err = Unmarshal(info, expectedData, val)
+ if err != nil {
+ t.Errorf("unmarshalTest: %v", err)
+ return
+ }
+
+ if s1 != "foo" || s2 != "bar" {
+ t.Errorf("unmarshalTest: expected [foo, bar], got [%s, %s]", s1, s2)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/metadata_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/metadata_test.go
new file mode 100644
index 000000000..7ce9c7ce6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/metadata_test.go
@@ -0,0 +1,815 @@
+// Copyright (c) 2015 The gocql Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gocql
+
+import (
+ "strconv"
+ "testing"
+)
+
+// Tests V1 and V2 metadata "compilation" from example data which might be returned
+// from metadata schema queries (see getKeyspaceMetadata, getTableMetadata, and getColumnMetadata)
+func TestCompileMetadata(t *testing.T) {
+ // V1 tests - these are all based on real examples from the integration test ccm cluster
+ keyspace := &KeyspaceMetadata{
+ Name: "V1Keyspace",
+ }
+ tables := []TableMetadata{
+ TableMetadata{
+ // This table, found in the system keyspace, has no key aliases or column aliases
+ Keyspace: "V1Keyspace",
+ Name: "Schema",
+ KeyValidator: "org.apache.cassandra.db.marshal.BytesType",
+ Comparator: "org.apache.cassandra.db.marshal.UTF8Type",
+ DefaultValidator: "org.apache.cassandra.db.marshal.BytesType",
+ KeyAliases: []string{},
+ ColumnAliases: []string{},
+ ValueAlias: "",
+ },
+ TableMetadata{
+ // This table, found in the system keyspace, has key aliases, column aliases, and a value alias.
+ Keyspace: "V1Keyspace",
+ Name: "hints",
+ KeyValidator: "org.apache.cassandra.db.marshal.UUIDType",
+ Comparator: "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.TimeUUIDType,org.apache.cassandra.db.marshal.Int32Type)",
+ DefaultValidator: "org.apache.cassandra.db.marshal.BytesType",
+ KeyAliases: []string{"target_id"},
+ ColumnAliases: []string{"hint_id", "message_version"},
+ ValueAlias: "mutation",
+ },
+ TableMetadata{
+ // This table, found in the system keyspace, has a comparator with collections, but no column aliases
+ Keyspace: "V1Keyspace",
+ Name: "peers",
+ KeyValidator: "org.apache.cassandra.db.marshal.InetAddressType",
+ Comparator: "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.ColumnToCollectionType(746f6b656e73:org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.UTF8Type)))",
+ DefaultValidator: "org.apache.cassandra.db.marshal.BytesType",
+ KeyAliases: []string{"peer"},
+ ColumnAliases: []string{},
+ ValueAlias: "",
+ },
+ TableMetadata{
+ // This table, found in the system keyspace, has a column alias, but not a composite comparator
+ Keyspace: "V1Keyspace",
+ Name: "IndexInfo",
+ KeyValidator: "org.apache.cassandra.db.marshal.UTF8Type",
+ Comparator: "org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.UTF8Type)",
+ DefaultValidator: "org.apache.cassandra.db.marshal.BytesType",
+ KeyAliases: []string{"table_name"},
+ ColumnAliases: []string{"index_name"},
+ ValueAlias: "",
+ },
+ TableMetadata{
+ // This table, found in the gocql_test keyspace following an integration test run, has a composite comparator with collections as well as a column alias
+ Keyspace: "V1Keyspace",
+ Name: "wiki_page",
+ KeyValidator: "org.apache.cassandra.db.marshal.UTF8Type",
+ Comparator: "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.TimeUUIDType,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.ColumnToCollectionType(74616773:org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.UTF8Type),6174746163686d656e7473:org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.BytesType)))",
+ DefaultValidator: "org.apache.cassandra.db.marshal.BytesType",
+ KeyAliases: []string{"title"},
+ ColumnAliases: []string{"revid"},
+ ValueAlias: "",
+ },
+ TableMetadata{
+ // This is a made up example with multiple unnamed aliases
+ Keyspace: "V1Keyspace",
+ Name: "no_names",
+ KeyValidator: "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.UUIDType)",
+ Comparator: "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.Int32Type)",
+ DefaultValidator: "org.apache.cassandra.db.marshal.BytesType",
+ KeyAliases: []string{},
+ ColumnAliases: []string{},
+ ValueAlias: "",
+ },
+ }
+ columns := []ColumnMetadata{
+ // Here are the regular columns from the peers table for testing regular columns
+ ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "data_center", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UTF8Type"},
+ ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "host_id", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UUIDType"},
+ ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "rack", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UTF8Type"},
+ ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "release_version", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UTF8Type"},
+ ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "rpc_address", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.InetAddressType"},
+ ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "schema_version", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UUIDType"},
+ ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "tokens", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.UTF8Type)"},
+ }
+ compileMetadata(1, keyspace, tables, columns)
+ assertKeyspaceMetadata(
+ t,
+ keyspace,
+ &KeyspaceMetadata{
+ Name: "V1Keyspace",
+ Tables: map[string]*TableMetadata{
+ "Schema": &TableMetadata{
+ PartitionKey: []*ColumnMetadata{
+ &ColumnMetadata{
+ Name: "key",
+ Type: NativeType{typ: TypeBlob},
+ },
+ },
+ ClusteringColumns: []*ColumnMetadata{},
+ Columns: map[string]*ColumnMetadata{
+ "key": &ColumnMetadata{
+ Name: "key",
+ Type: NativeType{typ: TypeBlob},
+ Kind: PARTITION_KEY,
+ },
+ },
+ },
+ "hints": &TableMetadata{
+ PartitionKey: []*ColumnMetadata{
+ &ColumnMetadata{
+ Name: "target_id",
+ Type: NativeType{typ: TypeUUID},
+ },
+ },
+ ClusteringColumns: []*ColumnMetadata{
+ &ColumnMetadata{
+ Name: "hint_id",
+ Type: NativeType{typ: TypeTimeUUID},
+ Order: ASC,
+ },
+ &ColumnMetadata{
+ Name: "message_version",
+ Type: NativeType{typ: TypeInt},
+ Order: ASC,
+ },
+ },
+ Columns: map[string]*ColumnMetadata{
+ "target_id": &ColumnMetadata{
+ Name: "target_id",
+ Type: NativeType{typ: TypeUUID},
+ Kind: PARTITION_KEY,
+ },
+ "hint_id": &ColumnMetadata{
+ Name: "hint_id",
+ Type: NativeType{typ: TypeTimeUUID},
+ Order: ASC,
+ Kind: CLUSTERING_KEY,
+ },
+ "message_version": &ColumnMetadata{
+ Name: "message_version",
+ Type: NativeType{typ: TypeInt},
+ Order: ASC,
+ Kind: CLUSTERING_KEY,
+ },
+ "mutation": &ColumnMetadata{
+ Name: "mutation",
+ Type: NativeType{typ: TypeBlob},
+ Kind: REGULAR,
+ },
+ },
+ },
+ "peers": &TableMetadata{
+ PartitionKey: []*ColumnMetadata{
+ &ColumnMetadata{
+ Name: "peer",
+ Type: NativeType{typ: TypeInet},
+ },
+ },
+ ClusteringColumns: []*ColumnMetadata{},
+ Columns: map[string]*ColumnMetadata{
+ "peer": &ColumnMetadata{
+ Name: "peer",
+ Type: NativeType{typ: TypeInet},
+ Kind: PARTITION_KEY,
+ },
+ "data_center": &ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "data_center", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UTF8Type", Type: NativeType{typ: TypeVarchar}},
+ "host_id": &ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "host_id", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UUIDType", Type: NativeType{typ: TypeUUID}},
+ "rack": &ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "rack", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UTF8Type", Type: NativeType{typ: TypeVarchar}},
+ "release_version": &ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "release_version", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UTF8Type", Type: NativeType{typ: TypeVarchar}},
+ "rpc_address": &ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "rpc_address", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.InetAddressType", Type: NativeType{typ: TypeInet}},
+ "schema_version": &ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "schema_version", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UUIDType", Type: NativeType{typ: TypeUUID}},
+ "tokens": &ColumnMetadata{Keyspace: "V1Keyspace", Table: "peers", Kind: REGULAR, Name: "tokens", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.UTF8Type)", Type: CollectionType{NativeType: NativeType{typ: TypeSet}}},
+ },
+ },
+ "IndexInfo": &TableMetadata{
+ PartitionKey: []*ColumnMetadata{
+ &ColumnMetadata{
+ Name: "table_name",
+ Type: NativeType{typ: TypeVarchar},
+ },
+ },
+ ClusteringColumns: []*ColumnMetadata{
+ &ColumnMetadata{
+ Name: "index_name",
+ Type: NativeType{typ: TypeVarchar},
+ Order: DESC,
+ },
+ },
+ Columns: map[string]*ColumnMetadata{
+ "table_name": &ColumnMetadata{
+ Name: "table_name",
+ Type: NativeType{typ: TypeVarchar},
+ Kind: PARTITION_KEY,
+ },
+ "index_name": &ColumnMetadata{
+ Name: "index_name",
+ Type: NativeType{typ: TypeVarchar},
+ Order: DESC,
+ Kind: CLUSTERING_KEY,
+ },
+ "value": &ColumnMetadata{
+ Name: "value",
+ Type: NativeType{typ: TypeBlob},
+ Kind: REGULAR,
+ },
+ },
+ },
+ "wiki_page": &TableMetadata{
+ PartitionKey: []*ColumnMetadata{
+ &ColumnMetadata{
+ Name: "title",
+ Type: NativeType{typ: TypeVarchar},
+ },
+ },
+ ClusteringColumns: []*ColumnMetadata{
+ &ColumnMetadata{
+ Name: "revid",
+ Type: NativeType{typ: TypeTimeUUID},
+ Order: ASC,
+ },
+ },
+ Columns: map[string]*ColumnMetadata{
+ "title": &ColumnMetadata{
+ Name: "title",
+ Type: NativeType{typ: TypeVarchar},
+ Kind: PARTITION_KEY,
+ },
+ "revid": &ColumnMetadata{
+ Name: "revid",
+ Type: NativeType{typ: TypeTimeUUID},
+ Kind: CLUSTERING_KEY,
+ },
+ },
+ },
+ "no_names": &TableMetadata{
+ PartitionKey: []*ColumnMetadata{
+ &ColumnMetadata{
+ Name: "key",
+ Type: NativeType{typ: TypeUUID},
+ },
+ &ColumnMetadata{
+ Name: "key2",
+ Type: NativeType{typ: TypeUUID},
+ },
+ },
+ ClusteringColumns: []*ColumnMetadata{
+ &ColumnMetadata{
+ Name: "column",
+ Type: NativeType{typ: TypeInt},
+ Order: ASC,
+ },
+ &ColumnMetadata{
+ Name: "column2",
+ Type: NativeType{typ: TypeInt},
+ Order: ASC,
+ },
+ &ColumnMetadata{
+ Name: "column3",
+ Type: NativeType{typ: TypeInt},
+ Order: ASC,
+ },
+ },
+ Columns: map[string]*ColumnMetadata{
+ "key": &ColumnMetadata{
+ Name: "key",
+ Type: NativeType{typ: TypeUUID},
+ Kind: PARTITION_KEY,
+ },
+ "key2": &ColumnMetadata{
+ Name: "key2",
+ Type: NativeType{typ: TypeUUID},
+ Kind: PARTITION_KEY,
+ },
+ "column": &ColumnMetadata{
+ Name: "column",
+ Type: NativeType{typ: TypeInt},
+ Order: ASC,
+ Kind: CLUSTERING_KEY,
+ },
+ "column2": &ColumnMetadata{
+ Name: "column2",
+ Type: NativeType{typ: TypeInt},
+ Order: ASC,
+ Kind: CLUSTERING_KEY,
+ },
+ "column3": &ColumnMetadata{
+ Name: "column3",
+ Type: NativeType{typ: TypeInt},
+ Order: ASC,
+ Kind: CLUSTERING_KEY,
+ },
+ "value": &ColumnMetadata{
+ Name: "value",
+ Type: NativeType{typ: TypeBlob},
+ Kind: REGULAR,
+ },
+ },
+ },
+ },
+ },
+ )
+
+ // V2 test - V2+ protocol is simpler so here are some toy examples to verify that the mapping works
+ keyspace = &KeyspaceMetadata{
+ Name: "V2Keyspace",
+ }
+ tables = []TableMetadata{
+ TableMetadata{
+ Keyspace: "V2Keyspace",
+ Name: "Table1",
+ },
+ TableMetadata{
+ Keyspace: "V2Keyspace",
+ Name: "Table2",
+ },
+ }
+ columns = []ColumnMetadata{
+ ColumnMetadata{
+ Keyspace: "V2Keyspace",
+ Table: "Table1",
+ Name: "KEY1",
+ Kind: PARTITION_KEY,
+ ComponentIndex: 0,
+ Validator: "org.apache.cassandra.db.marshal.UTF8Type",
+ },
+ ColumnMetadata{
+ Keyspace: "V2Keyspace",
+ Table: "Table1",
+ Name: "Key1",
+ Kind: PARTITION_KEY,
+ ComponentIndex: 0,
+ Validator: "org.apache.cassandra.db.marshal.UTF8Type",
+ },
+ ColumnMetadata{
+ Keyspace: "V2Keyspace",
+ Table: "Table2",
+ Name: "Column1",
+ Kind: PARTITION_KEY,
+ ComponentIndex: 0,
+ Validator: "org.apache.cassandra.db.marshal.UTF8Type",
+ },
+ ColumnMetadata{
+ Keyspace: "V2Keyspace",
+ Table: "Table2",
+ Name: "Column2",
+ Kind: CLUSTERING_KEY,
+ ComponentIndex: 0,
+ Validator: "org.apache.cassandra.db.marshal.UTF8Type",
+ },
+ ColumnMetadata{
+ Keyspace: "V2Keyspace",
+ Table: "Table2",
+ Name: "Column3",
+ Kind: CLUSTERING_KEY,
+ ComponentIndex: 1,
+ Validator: "org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.UTF8Type)",
+ },
+ ColumnMetadata{
+ Keyspace: "V2Keyspace",
+ Table: "Table2",
+ Name: "Column4",
+ Kind: REGULAR,
+ Validator: "org.apache.cassandra.db.marshal.UTF8Type",
+ },
+ }
+ compileMetadata(2, keyspace, tables, columns)
+ assertKeyspaceMetadata(
+ t,
+ keyspace,
+ &KeyspaceMetadata{
+ Name: "V2Keyspace",
+ Tables: map[string]*TableMetadata{
+ "Table1": &TableMetadata{
+ PartitionKey: []*ColumnMetadata{
+ &ColumnMetadata{
+ Name: "Key1",
+ Type: NativeType{typ: TypeVarchar},
+ },
+ },
+ ClusteringColumns: []*ColumnMetadata{},
+ Columns: map[string]*ColumnMetadata{
+ "KEY1": &ColumnMetadata{
+ Name: "KEY1",
+ Type: NativeType{typ: TypeVarchar},
+ Kind: PARTITION_KEY,
+ },
+ "Key1": &ColumnMetadata{
+ Name: "Key1",
+ Type: NativeType{typ: TypeVarchar},
+ Kind: PARTITION_KEY,
+ },
+ },
+ },
+ "Table2": &TableMetadata{
+ PartitionKey: []*ColumnMetadata{
+ &ColumnMetadata{
+ Name: "Column1",
+ Type: NativeType{typ: TypeVarchar},
+ },
+ },
+ ClusteringColumns: []*ColumnMetadata{
+ &ColumnMetadata{
+ Name: "Column2",
+ Type: NativeType{typ: TypeVarchar},
+ Order: ASC,
+ },
+ &ColumnMetadata{
+ Name: "Column3",
+ Type: NativeType{typ: TypeVarchar},
+ Order: DESC,
+ },
+ },
+ Columns: map[string]*ColumnMetadata{
+ "Column1": &ColumnMetadata{
+ Name: "Column1",
+ Type: NativeType{typ: TypeVarchar},
+ Kind: PARTITION_KEY,
+ },
+ "Column2": &ColumnMetadata{
+ Name: "Column2",
+ Type: NativeType{typ: TypeVarchar},
+ Order: ASC,
+ Kind: CLUSTERING_KEY,
+ },
+ "Column3": &ColumnMetadata{
+ Name: "Column3",
+ Type: NativeType{typ: TypeVarchar},
+ Order: DESC,
+ Kind: CLUSTERING_KEY,
+ },
+ "Column4": &ColumnMetadata{
+ Name: "Column4",
+ Type: NativeType{typ: TypeVarchar},
+ Kind: REGULAR,
+ },
+ },
+ },
+ },
+ },
+ )
+}
+
+// Helper function for asserting that the actual metadata returned matches the expected metadata
+func assertKeyspaceMetadata(t *testing.T, actual, expected *KeyspaceMetadata) {
+ if len(expected.Tables) != len(actual.Tables) {
+ t.Errorf("Expected len(%s.Tables) to be %v but was %v", expected.Name, len(expected.Tables), len(actual.Tables))
+ }
+ for keyT := range expected.Tables {
+ et := expected.Tables[keyT]
+ at, found := actual.Tables[keyT]
+
+ if !found {
+ t.Errorf("Expected %s.Tables[%s] but was not found", expected.Name, keyT)
+ } else {
+ if keyT != at.Name {
+ t.Errorf("Expected %s.Tables[%s].Name to be %v but was %v", expected.Name, keyT, keyT, at.Name)
+ }
+ if len(et.PartitionKey) != len(at.PartitionKey) {
+ t.Errorf("Expected len(%s.Tables[%s].PartitionKey) to be %v but was %v", expected.Name, keyT, len(et.PartitionKey), len(at.PartitionKey))
+ } else {
+ for i := range et.PartitionKey {
+ if et.PartitionKey[i].Name != at.PartitionKey[i].Name {
+ t.Errorf("Expected %s.Tables[%s].PartitionKey[%d].Name to be '%v' but was '%v'", expected.Name, keyT, i, et.PartitionKey[i].Name, at.PartitionKey[i].Name)
+ }
+ if expected.Name != at.PartitionKey[i].Keyspace {
+ t.Errorf("Expected %s.Tables[%s].PartitionKey[%d].Keyspace to be '%v' but was '%v'", expected.Name, keyT, i, expected.Name, at.PartitionKey[i].Keyspace)
+ }
+ if keyT != at.PartitionKey[i].Table {
+ t.Errorf("Expected %s.Tables[%s].PartitionKey[%d].Table to be '%v' but was '%v'", expected.Name, keyT, i, keyT, at.PartitionKey[i].Table)
+ }
+ if et.PartitionKey[i].Type.Type() != at.PartitionKey[i].Type.Type() {
+ t.Errorf("Expected %s.Tables[%s].PartitionKey[%d].Type.Type to be %v but was %v", expected.Name, keyT, i, et.PartitionKey[i].Type.Type(), at.PartitionKey[i].Type.Type())
+ }
+ if i != at.PartitionKey[i].ComponentIndex {
+ t.Errorf("Expected %s.Tables[%s].PartitionKey[%d].ComponentIndex to be %v but was %v", expected.Name, keyT, i, i, at.PartitionKey[i].ComponentIndex)
+ }
+ if PARTITION_KEY != at.PartitionKey[i].Kind {
+ t.Errorf("Expected %s.Tables[%s].PartitionKey[%d].Kind to be '%v' but was '%v'", expected.Name, keyT, i, PARTITION_KEY, at.PartitionKey[i].Kind)
+ }
+ }
+ }
+ if len(et.ClusteringColumns) != len(at.ClusteringColumns) {
+ t.Errorf("Expected len(%s.Tables[%s].ClusteringColumns) to be %v but was %v", expected.Name, keyT, len(et.ClusteringColumns), len(at.ClusteringColumns))
+ } else {
+ for i := range et.ClusteringColumns {
+ if at.ClusteringColumns[i] == nil {
+ t.Fatalf("Unexpected nil value: %s.Tables[%s].ClusteringColumns[%d]", expected.Name, keyT, i)
+ }
+ if et.ClusteringColumns[i].Name != at.ClusteringColumns[i].Name {
+ t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].Name to be '%v' but was '%v'", expected.Name, keyT, i, et.ClusteringColumns[i].Name, at.ClusteringColumns[i].Name)
+ }
+ if expected.Name != at.ClusteringColumns[i].Keyspace {
+ t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].Keyspace to be '%v' but was '%v'", expected.Name, keyT, i, expected.Name, at.ClusteringColumns[i].Keyspace)
+ }
+ if keyT != at.ClusteringColumns[i].Table {
+ t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].Table to be '%v' but was '%v'", expected.Name, keyT, i, keyT, at.ClusteringColumns[i].Table)
+ }
+ if et.ClusteringColumns[i].Type.Type() != at.ClusteringColumns[i].Type.Type() {
+ t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].Type.Type to be %v but was %v", expected.Name, keyT, i, et.ClusteringColumns[i].Type.Type(), at.ClusteringColumns[i].Type.Type())
+ }
+ if i != at.ClusteringColumns[i].ComponentIndex {
+ t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].ComponentIndex to be %v but was %v", expected.Name, keyT, i, i, at.ClusteringColumns[i].ComponentIndex)
+ }
+ if et.ClusteringColumns[i].Order != at.ClusteringColumns[i].Order {
+ t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].Order to be %v but was %v", expected.Name, keyT, i, et.ClusteringColumns[i].Order, at.ClusteringColumns[i].Order)
+ }
+ if CLUSTERING_KEY != at.ClusteringColumns[i].Kind {
+ t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].Kind to be '%v' but was '%v'", expected.Name, keyT, i, CLUSTERING_KEY, at.ClusteringColumns[i].Kind)
+ }
+ }
+ }
+ if len(et.Columns) != len(at.Columns) {
+ eKeys := make([]string, 0, len(et.Columns))
+ for key := range et.Columns {
+ eKeys = append(eKeys, key)
+ }
+ aKeys := make([]string, 0, len(at.Columns))
+ for key := range at.Columns {
+ aKeys = append(aKeys, key)
+ }
+ t.Errorf("Expected len(%s.Tables[%s].Columns) to be %v (keys:%v) but was %v (keys:%v)", expected.Name, keyT, len(et.Columns), eKeys, len(at.Columns), aKeys)
+ } else {
+ for keyC := range et.Columns {
+ ec := et.Columns[keyC]
+ ac, found := at.Columns[keyC]
+
+ if !found {
+ t.Errorf("Expected %s.Tables[%s].Columns[%s] but was not found", expected.Name, keyT, keyC)
+ } else {
+ if keyC != ac.Name {
+ t.Errorf("Expected %s.Tables[%s].Columns[%s].Name to be '%v' but was '%v'", expected.Name, keyT, keyC, keyC, at.Name)
+ }
+ if expected.Name != ac.Keyspace {
+ t.Errorf("Expected %s.Tables[%s].Columns[%s].Keyspace to be '%v' but was '%v'", expected.Name, keyT, keyC, expected.Name, ac.Keyspace)
+ }
+ if keyT != ac.Table {
+ t.Errorf("Expected %s.Tables[%s].Columns[%s].Table to be '%v' but was '%v'", expected.Name, keyT, keyC, keyT, ac.Table)
+ }
+ if ec.Type.Type() != ac.Type.Type() {
+ t.Errorf("Expected %s.Tables[%s].Columns[%s].Type.Type to be %v but was %v", expected.Name, keyT, keyC, ec.Type.Type(), ac.Type.Type())
+ }
+ if ec.Order != ac.Order {
+ t.Errorf("Expected %s.Tables[%s].Columns[%s].Order to be %v but was %v", expected.Name, keyT, keyC, ec.Order, ac.Order)
+ }
+ if ec.Kind != ac.Kind {
+ t.Errorf("Expected %s.Tables[%s].Columns[%s].Kind to be '%v' but was '%v'", expected.Name, keyT, keyC, ec.Kind, ac.Kind)
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// Tests the cassandra type definition parser
+func TestTypeParser(t *testing.T) {
+ // native type
+ assertParseNonCompositeType(
+ t,
+ "org.apache.cassandra.db.marshal.UTF8Type",
+ assertTypeInfo{Type: TypeVarchar},
+ )
+
+ // reversed
+ assertParseNonCompositeType(
+ t,
+ "org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.UUIDType)",
+ assertTypeInfo{Type: TypeUUID, Reversed: true},
+ )
+
+ // set
+ assertParseNonCompositeType(
+ t,
+ "org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.Int32Type)",
+ assertTypeInfo{
+ Type: TypeSet,
+ Elem: &assertTypeInfo{Type: TypeInt},
+ },
+ )
+
+ // list
+ assertParseNonCompositeType(
+ t,
+ "org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.TimeUUIDType)",
+ assertTypeInfo{
+ Type: TypeList,
+ Elem: &assertTypeInfo{Type: TypeTimeUUID},
+ },
+ )
+
+ // map
+ assertParseNonCompositeType(
+ t,
+ " org.apache.cassandra.db.marshal.MapType( org.apache.cassandra.db.marshal.UUIDType , org.apache.cassandra.db.marshal.BytesType ) ",
+ assertTypeInfo{
+ Type: TypeMap,
+ Key: &assertTypeInfo{Type: TypeUUID},
+ Elem: &assertTypeInfo{Type: TypeBlob},
+ },
+ )
+
+ // custom
+ assertParseNonCompositeType(
+ t,
+ "org.apache.cassandra.db.marshal.UserType(sandbox,61646472657373,737472656574:org.apache.cassandra.db.marshal.UTF8Type,63697479:org.apache.cassandra.db.marshal.UTF8Type,7a6970:org.apache.cassandra.db.marshal.Int32Type)",
+ assertTypeInfo{Type: TypeCustom, Custom: "org.apache.cassandra.db.marshal.UserType(sandbox,61646472657373,737472656574:org.apache.cassandra.db.marshal.UTF8Type,63697479:org.apache.cassandra.db.marshal.UTF8Type,7a6970:org.apache.cassandra.db.marshal.Int32Type)"},
+ )
+ assertParseNonCompositeType(
+ t,
+ "org.apache.cassandra.db.marshal.DynamicCompositeType(u=>org.apache.cassandra.db.marshal.UUIDType,d=>org.apache.cassandra.db.marshal.DateType,t=>org.apache.cassandra.db.marshal.TimeUUIDType,b=>org.apache.cassandra.db.marshal.BytesType,s=>org.apache.cassandra.db.marshal.UTF8Type,B=>org.apache.cassandra.db.marshal.BooleanType,a=>org.apache.cassandra.db.marshal.AsciiType,l=>org.apache.cassandra.db.marshal.LongType,i=>org.apache.cassandra.db.marshal.IntegerType,x=>org.apache.cassandra.db.marshal.LexicalUUIDType)",
+ assertTypeInfo{Type: TypeCustom, Custom: "org.apache.cassandra.db.marshal.DynamicCompositeType(u=>org.apache.cassandra.db.marshal.UUIDType,d=>org.apache.cassandra.db.marshal.DateType,t=>org.apache.cassandra.db.marshal.TimeUUIDType,b=>org.apache.cassandra.db.marshal.BytesType,s=>org.apache.cassandra.db.marshal.UTF8Type,B=>org.apache.cassandra.db.marshal.BooleanType,a=>org.apache.cassandra.db.marshal.AsciiType,l=>org.apache.cassandra.db.marshal.LongType,i=>org.apache.cassandra.db.marshal.IntegerType,x=>org.apache.cassandra.db.marshal.LexicalUUIDType)"},
+ )
+
+ // composite defs
+ assertParseCompositeType(
+ t,
+ "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type)",
+ []assertTypeInfo{
+ assertTypeInfo{Type: TypeVarchar},
+ },
+ nil,
+ )
+ assertParseCompositeType(
+ t,
+ "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.DateType),org.apache.cassandra.db.marshal.UTF8Type)",
+ []assertTypeInfo{
+ assertTypeInfo{Type: TypeTimestamp, Reversed: true},
+ assertTypeInfo{Type: TypeVarchar},
+ },
+ nil,
+ )
+ assertParseCompositeType(
+ t,
+ "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.ColumnToCollectionType(726f77735f6d6572676564:org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.LongType)))",
+ []assertTypeInfo{
+ assertTypeInfo{Type: TypeVarchar},
+ },
+ map[string]assertTypeInfo{
+ "rows_merged": assertTypeInfo{
+ Type: TypeMap,
+ Key: &assertTypeInfo{Type: TypeInt},
+ Elem: &assertTypeInfo{Type: TypeBigInt},
+ },
+ },
+ )
+}
+
+// assertTypeInfo holds the expected results for a parsed type definition
+type assertTypeInfo struct {
+ Type Type
+ Reversed bool
+ Elem *assertTypeInfo
+ Key *assertTypeInfo
+ Custom string
+}
+
+// Helper function for asserting that the type parser returns the expected
+// results for the given definition
+func assertParseNonCompositeType(
+ t *testing.T,
+ def string,
+ typeExpected assertTypeInfo,
+) {
+
+ result := parseType(def)
+ if len(result.reversed) != 1 {
+ t.Errorf("%s expected %d reversed values but there were %d", def, 1, len(result.reversed))
+ }
+
+ assertParseNonCompositeTypes(
+ t,
+ def,
+ []assertTypeInfo{typeExpected},
+ result.types,
+ )
+
+ // expect no composite part of the result
+ if result.isComposite {
+ t.Errorf("%s: Expected not composite", def)
+ }
+ if result.collections != nil {
+ t.Errorf("%s: Expected nil collections: %v", def, result.collections)
+ }
+}
+
+// Helper function for asserting that the type parser returns the expected
+// results for the given definition
+func assertParseCompositeType(
+ t *testing.T,
+ def string,
+ typesExpected []assertTypeInfo,
+ collectionsExpected map[string]assertTypeInfo,
+) {
+
+ result := parseType(def)
+ if len(result.reversed) != len(typesExpected) {
+ t.Errorf("%s expected %d reversed values but there were %d", def, len(typesExpected), len(result.reversed))
+ }
+
+ assertParseNonCompositeTypes(
+ t,
+ def,
+ typesExpected,
+ result.types,
+ )
+
+ // expect composite part of the result
+ if !result.isComposite {
+ t.Errorf("%s: Expected composite", def)
+ }
+ if result.collections == nil {
+ t.Errorf("%s: Expected non-nil collections: %v", def, result.collections)
+ }
+
+ for name, typeExpected := range collectionsExpected {
+ // check for an actual type for this name
+ typeActual, found := result.collections[name]
+ if !found {
+ t.Errorf("%s.tcollections: Expected param named %s but there wasn't", def, name)
+ } else {
+ // remove the actual from the collection so we can detect extras
+ delete(result.collections, name)
+
+ // check the type
+ assertParseNonCompositeTypes(
+ t,
+ def+"collections["+name+"]",
+ []assertTypeInfo{typeExpected},
+ []TypeInfo{typeActual},
+ )
+ }
+ }
+
+ if len(result.collections) != 0 {
+ t.Errorf("%s.collections: Expected no more types in collections, but there was %v", def, result.collections)
+ }
+}
+
+// Helper function for asserting that the type parser returns the expected
+// results for the given definition
+func assertParseNonCompositeTypes(
+ t *testing.T,
+ context string,
+ typesExpected []assertTypeInfo,
+ typesActual []TypeInfo,
+) {
+ if len(typesActual) != len(typesExpected) {
+ t.Errorf("%s: Expected %d types, but there were %d", context, len(typesExpected), len(typesActual))
+ }
+
+ for i := range typesExpected {
+ typeExpected := typesExpected[i]
+ typeActual := typesActual[i]
+
+		// shadow the context variable for local modification
+ context := context
+ if len(typesExpected) > 1 {
+ context = context + "[" + strconv.Itoa(i) + "]"
+ }
+
+ // check the type
+ if typeActual.Type() != typeExpected.Type {
+ t.Errorf("%s: Expected to parse Type to %s but was %s", context, typeExpected.Type, typeActual.Type())
+ }
+ // check the custom
+ if typeActual.Custom() != typeExpected.Custom {
+ t.Errorf("%s: Expected to parse Custom %s but was %s", context, typeExpected.Custom, typeActual.Custom())
+ }
+
+ collection, _ := typeActual.(CollectionType)
+ // check the elem
+ if typeExpected.Elem != nil {
+ if collection.Elem == nil {
+ t.Errorf("%s: Expected to parse Elem, but was nil ", context)
+ } else {
+ assertParseNonCompositeTypes(
+ t,
+ context+".Elem",
+ []assertTypeInfo{*typeExpected.Elem},
+ []TypeInfo{collection.Elem},
+ )
+ }
+ } else if collection.Elem != nil {
+ t.Errorf("%s: Expected to not parse Elem, but was %+v", context, collection.Elem)
+ }
+
+ // check the key
+ if typeExpected.Key != nil {
+ if collection.Key == nil {
+ t.Errorf("%s: Expected to parse Key, but was nil ", context)
+ } else {
+ assertParseNonCompositeTypes(
+ t,
+ context+".Key",
+ []assertTypeInfo{*typeExpected.Key},
+ []TypeInfo{collection.Key},
+ )
+ }
+ } else if collection.Key != nil {
+ t.Errorf("%s: Expected to not parse Key, but was %+v", context, collection.Key)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/murmur_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/murmur_test.go
new file mode 100644
index 000000000..35297df76
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/murmur_test.go
@@ -0,0 +1,70 @@
+package gocql
+
+import (
+ "strconv"
+ "testing"
+)
+
+// Test the implementation of murmur3
+func TestMurmur3H1(t *testing.T) {
+// These examples are based on appending an index number to a sample string in
+// a loop. The expected values were generated by the Java DataStax murmur3
+// implementation. The examples of increasing lengths ensure test coverage of
+// all tail-length branches in the murmur3 algorithm.
+ seriesExpected := [...]uint64{
+ 0x0000000000000000, // ""
+ 0x2ac9debed546a380, // "0"
+ 0x649e4eaa7fc1708e, // "01"
+ 0xce68f60d7c353bdb, // "012"
+ 0x0f95757ce7f38254, // "0123"
+ 0x0f04e459497f3fc1, // "01234"
+ 0x88c0a92586be0a27, // "012345"
+ 0x13eb9fb82606f7a6, // "0123456"
+ 0x8236039b7387354d, // "01234567"
+ 0x4c1e87519fe738ba, // "012345678"
+ 0x3f9652ac3effeb24, // "0123456789"
+ 0x3f33760ded9006c6, // "01234567890"
+ 0xaed70a6631854cb1, // "012345678901"
+ 0x8a299a8f8e0e2da7, // "0123456789012"
+ 0x624b675c779249a6, // "01234567890123"
+ 0xa4b203bb1d90b9a3, // "012345678901234"
+ 0xa3293ad698ecb99a, // "0123456789012345"
+ 0xbc740023dbd50048, // "01234567890123456"
+ 0x3fe5ab9837d25cdd, // "012345678901234567"
+ 0x2d0338c1ca87d132, // "0123456789012345678"
+ }
+ sample := ""
+ for i, expected := range seriesExpected {
+ assertMurmur3H1(t, []byte(sample), expected)
+
+ sample = sample + strconv.Itoa(i%10)
+ }
+
+ // Here are some test examples from other driver implementations
+ assertMurmur3H1(t, []byte("hello"), 0xcbd8a7b341bd9b02)
+ assertMurmur3H1(t, []byte("hello, world"), 0x342fac623a5ebc8e)
+ assertMurmur3H1(t, []byte("19 Jan 2038 at 3:14:07 AM"), 0xb89e5988b737affc)
+ assertMurmur3H1(t, []byte("The quick brown fox jumps over the lazy dog."), 0xcd99481f9ee902c9)
+}
+
+// helper function for testing the murmur3 implementation
+func assertMurmur3H1(t *testing.T, data []byte, expected uint64) {
+ actual := murmur3H1(data)
+ if actual != expected {
+ t.Errorf("Expected h1 = %x for data = %x, but was %x", expected, data, actual)
+ }
+}
+
+// BenchmarkMurmur3H1 measures the performance of the murmur3 implementation
+func BenchmarkMurmur3H1(b *testing.B) {
+ var h1 uint64
+ var data [1024]byte
+ for i := 0; i < 1024; i++ {
+ data[i] = byte(i)
+ }
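+	// reset the timer so the setup above is excluded from the measurement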
+ for i := 0; i < b.N; i++ {
+ b.ResetTimer()
+ h1 = murmur3H1(data[:])
+ _ = murmur3Token(int64(h1))
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/policies.go b/Godeps/_workspace/src/github.com/gocql/gocql/policies.go
index ed3e606cb..73982368c 100644
--- a/Godeps/_workspace/src/github.com/gocql/gocql/policies.go
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/policies.go
@@ -8,6 +8,8 @@ import (
"log"
"sync"
"sync/atomic"
+
+ "github.com/hailocab/go-hostpool"
)
// RetryableQuery is an interface that represents a query or batch statement that
@@ -57,8 +59,15 @@ type HostSelectionPolicy interface {
Pick(*Query) NextHost
}
+// SelectedHost is an interface returned when picking a host from a host
+// selection policy.
+type SelectedHost interface {
+ Info() *HostInfo
+ Mark(error)
+}
+
// NextHost is an iteration function over picked hosts
-type NextHost func() *HostInfo
+type NextHost func() SelectedHost
// RoundRobinHostPolicy is a round-robin load balancing policy, where each host
// is tried sequentially for each query.
@@ -86,7 +95,7 @@ func (r *roundRobinHostPolicy) Pick(qry *Query) NextHost {
// i is used to limit the number of attempts to find a host
// to the number of hosts known to this policy
var i uint32 = 0
- return func() *HostInfo {
+ return func() SelectedHost {
r.mu.RLock()
if len(r.hosts) == 0 {
r.mu.RUnlock()
@@ -102,10 +111,24 @@ func (r *roundRobinHostPolicy) Pick(qry *Query) NextHost {
i++
}
r.mu.RUnlock()
- return host
+ return selectedRoundRobinHost{host}
}
}
+// selectedRoundRobinHost is a host returned by the roundRobinHostPolicy and
+// implements the SelectedHost interface
+type selectedRoundRobinHost struct {
+ info *HostInfo
+}
+
+func (host selectedRoundRobinHost) Info() *HostInfo {
+ return host.info
+}
+
+func (host selectedRoundRobinHost) Mark(err error) {
+ // noop
+}
+
// TokenAwareHostPolicy is a token aware host selection policy, where hosts are
// selected based on the partition key, so queries are sent to the host which
// owns the partition. Fallback is used when routing information is not available.
@@ -195,10 +218,10 @@ func (t *tokenAwareHostPolicy) Pick(qry *Query) NextHost {
hostReturned bool
fallbackIter NextHost
)
- return func() *HostInfo {
+ return func() SelectedHost {
if !hostReturned {
hostReturned = true
- return host
+ return selectedTokenAwareHost{host}
}
// fallback
@@ -209,7 +232,7 @@ func (t *tokenAwareHostPolicy) Pick(qry *Query) NextHost {
fallbackHost := fallbackIter()
// filter the token aware selected hosts from the fallback hosts
- if fallbackHost == host {
+ if fallbackHost.Info() == host {
fallbackHost = fallbackIter()
}
@@ -217,6 +240,97 @@ func (t *tokenAwareHostPolicy) Pick(qry *Query) NextHost {
}
}
+// selectedTokenAwareHost is a host returned by the tokenAwareHostPolicy and
+// implements the SelectedHost interface
+type selectedTokenAwareHost struct {
+ info *HostInfo
+}
+
+func (host selectedTokenAwareHost) Info() *HostInfo {
+ return host.info
+}
+
+func (host selectedTokenAwareHost) Mark(err error) {
+ // noop
+}
+
+// HostPoolHostPolicy is a host policy which uses the bitly/go-hostpool library
+// to distribute queries between hosts and prevent sending queries to
+// unresponsive hosts. When creating the host pool that is passed to the policy
+// use an empty slice of hosts as the hostpool will be populated later by gocql.
+// See below for examples of usage:
+//
+// // Create host selection policy using a simple host pool
+// cluster.PoolConfig.HostSelectionPolicy = HostPoolHostPolicy(hostpool.New(nil))
+//
+// // Create host selection policy using an epsilon greedy pool
+// cluster.PoolConfig.HostSelectionPolicy = HostPoolHostPolicy(
+// hostpool.NewEpsilonGreedy(nil, 0, &hostpool.LinearEpsilonValueCalculator{}),
+// )
+//
+func HostPoolHostPolicy(hp hostpool.HostPool) HostSelectionPolicy {
+ return &hostPoolHostPolicy{hostMap: map[string]HostInfo{}, hp: hp}
+}
+
+type hostPoolHostPolicy struct {
+ hp hostpool.HostPool
+ hostMap map[string]HostInfo
+ mu sync.RWMutex
+}
+
+func (r *hostPoolHostPolicy) SetHosts(hosts []HostInfo) {
+ peers := make([]string, len(hosts))
+ hostMap := make(map[string]HostInfo, len(hosts))
+
+ for i, host := range hosts {
+ peers[i] = host.Peer
+ hostMap[host.Peer] = host
+ }
+
+ r.mu.Lock()
+ r.hp.SetHosts(peers)
+ r.hostMap = hostMap
+ r.mu.Unlock()
+}
+
+func (r *hostPoolHostPolicy) SetPartitioner(partitioner string) {
+ // noop
+}
+
+func (r *hostPoolHostPolicy) Pick(qry *Query) NextHost {
+ return func() SelectedHost {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if len(r.hostMap) == 0 {
+ return nil
+ }
+
+ hostR := r.hp.Get()
+ host, ok := r.hostMap[hostR.Host()]
+ if !ok {
+ return nil
+ }
+
+ return selectedHostPoolHost{&host, hostR}
+ }
+}
+
+// selectedHostPoolHost is a host returned by the hostPoolHostPolicy and
+// implements the SelectedHost interface
+type selectedHostPoolHost struct {
+ info *HostInfo
+ hostR hostpool.HostPoolResponse
+}
+
+func (host selectedHostPoolHost) Info() *HostInfo {
+ return host.info
+}
+
+func (host selectedHostPoolHost) Mark(err error) {
+ host.hostR.Mark(err)
+}
+
//ConnSelectionPolicy is an interface for selecting an
//appropriate connection for executing a query
type ConnSelectionPolicy interface {
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/policies_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/policies_test.go
new file mode 100644
index 000000000..82cc2c7da
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/policies_test.go
@@ -0,0 +1,169 @@
+// Copyright (c) 2015 The gocql Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gocql
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/hailocab/go-hostpool"
+)
+
+// Tests of the round-robin host selection policy implementation
+func TestRoundRobinHostPolicy(t *testing.T) {
+ policy := RoundRobinHostPolicy()
+
+ hosts := []HostInfo{
+ HostInfo{HostId: "0"},
+ HostInfo{HostId: "1"},
+ }
+
+ policy.SetHosts(hosts)
+
+	// the first host selected is actually hosts[1], but this is fine for
+	// round-robin; interleaved iteration should always advance to the next host
+ iterA := policy.Pick(nil)
+ if actual := iterA(); actual.Info() != &hosts[1] {
+ t.Errorf("Expected hosts[1] but was hosts[%s]", actual.Info().HostId)
+ }
+ iterB := policy.Pick(nil)
+ if actual := iterB(); actual.Info() != &hosts[0] {
+ t.Errorf("Expected hosts[0] but was hosts[%s]", actual.Info().HostId)
+ }
+ if actual := iterB(); actual.Info() != &hosts[1] {
+ t.Errorf("Expected hosts[1] but was hosts[%s]", actual.Info().HostId)
+ }
+ if actual := iterA(); actual.Info() != &hosts[0] {
+ t.Errorf("Expected hosts[0] but was hosts[%s]", actual.Info().HostId)
+ }
+
+ iterC := policy.Pick(nil)
+ if actual := iterC(); actual.Info() != &hosts[1] {
+ t.Errorf("Expected hosts[1] but was hosts[%s]", actual.Info().HostId)
+ }
+ if actual := iterC(); actual.Info() != &hosts[0] {
+ t.Errorf("Expected hosts[0] but was hosts[%s]", actual.Info().HostId)
+ }
+}
+
+// Tests of the token-aware host selection policy implementation with a
+// round-robin host selection policy fallback.
+func TestTokenAwareHostPolicy(t *testing.T) {
+ policy := TokenAwareHostPolicy(RoundRobinHostPolicy())
+
+ query := &Query{}
+
+ iter := policy.Pick(nil)
+ if iter == nil {
+ t.Fatal("host iterator was nil")
+ }
+ actual := iter()
+ if actual != nil {
+ t.Fatalf("expected nil from iterator, but was %v", actual)
+ }
+
+ // set the hosts
+ hosts := []HostInfo{
+ HostInfo{Peer: "0", Tokens: []string{"00"}},
+ HostInfo{Peer: "1", Tokens: []string{"25"}},
+ HostInfo{Peer: "2", Tokens: []string{"50"}},
+ HostInfo{Peer: "3", Tokens: []string{"75"}},
+ }
+ policy.SetHosts(hosts)
+
+	// the token ring is not set up without the partitioner, but the fallback
+	// should work
+ if actual := policy.Pick(nil)(); actual.Info().Peer != "1" {
+ t.Errorf("Expected peer 1 but was %s", actual.Info().Peer)
+ }
+
+ query.RoutingKey([]byte("30"))
+ if actual := policy.Pick(query)(); actual.Info().Peer != "2" {
+ t.Errorf("Expected peer 2 but was %s", actual.Info().Peer)
+ }
+
+ policy.SetPartitioner("OrderedPartitioner")
+
+ // now the token ring is configured
+ query.RoutingKey([]byte("20"))
+ iter = policy.Pick(query)
+ if actual := iter(); actual.Info().Peer != "1" {
+ t.Errorf("Expected peer 1 but was %s", actual.Info().Peer)
+ }
+ // rest are round robin
+ if actual := iter(); actual.Info().Peer != "3" {
+ t.Errorf("Expected peer 3 but was %s", actual.Info().Peer)
+ }
+ if actual := iter(); actual.Info().Peer != "0" {
+ t.Errorf("Expected peer 0 but was %s", actual.Info().Peer)
+ }
+ if actual := iter(); actual.Info().Peer != "2" {
+ t.Errorf("Expected peer 2 but was %s", actual.Info().Peer)
+ }
+}
+
+// Tests of the host pool host selection policy implementation
+func TestHostPoolHostPolicy(t *testing.T) {
+ policy := HostPoolHostPolicy(hostpool.New(nil))
+
+ hosts := []HostInfo{
+ HostInfo{HostId: "0", Peer: "0"},
+ HostInfo{HostId: "1", Peer: "1"},
+ }
+
+ policy.SetHosts(hosts)
+
+	// the simple host pool cycles through hosts in order, so the first host
+	// selected is hosts[0]; after hosts[1] is marked with an error the pool
+	// keeps returning the healthy hosts[0]
+ iter := policy.Pick(nil)
+ actualA := iter()
+ if actualA.Info().HostId != "0" {
+ t.Errorf("Expected hosts[0] but was hosts[%s]", actualA.Info().HostId)
+ }
+ actualA.Mark(nil)
+
+ actualB := iter()
+ if actualB.Info().HostId != "1" {
+ t.Errorf("Expected hosts[1] but was hosts[%s]", actualB.Info().HostId)
+ }
+ actualB.Mark(fmt.Errorf("error"))
+
+ actualC := iter()
+ if actualC.Info().HostId != "0" {
+ t.Errorf("Expected hosts[0] but was hosts[%s]", actualC.Info().HostId)
+ }
+ actualC.Mark(nil)
+
+ actualD := iter()
+ if actualD.Info().HostId != "0" {
+ t.Errorf("Expected hosts[0] but was hosts[%s]", actualD.Info().HostId)
+ }
+ actualD.Mark(nil)
+}
+
+// Tests of the round-robin connection selection policy implementation
+func TestRoundRobinConnPolicy(t *testing.T) {
+ policy := RoundRobinConnPolicy()()
+
+ conn0 := &Conn{}
+ conn1 := &Conn{}
+ conn := []*Conn{
+ conn0,
+ conn1,
+ }
+
+ policy.SetConns(conn)
+
+	// the first conn selected is actually conn[1], but this is fine for round-robin
+ if actual := policy.Pick(nil); actual != conn1 {
+ t.Error("Expected conn1")
+ }
+ if actual := policy.Pick(nil); actual != conn0 {
+ t.Error("Expected conn0")
+ }
+ if actual := policy.Pick(nil); actual != conn1 {
+ t.Error("Expected conn1")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/session.go b/Godeps/_workspace/src/github.com/gocql/gocql/session.go
index 799f099ce..36ff5251a 100644
--- a/Godeps/_workspace/src/github.com/gocql/gocql/session.go
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/session.go
@@ -10,6 +10,7 @@ import (
"errors"
"fmt"
"io"
+ "log"
"strings"
"sync"
"time"
@@ -80,7 +81,7 @@ func NewSession(cfg ClusterConfig) (*Session, error) {
}
s.pool = pool
- //See if there are any connections in the pool
+ // See if there are any connections in the pool
if pool.Size() == 0 {
s.Close()
return nil, ErrNoConnectionsStarted
@@ -88,10 +89,8 @@ func NewSession(cfg ClusterConfig) (*Session, error) {
s.routingKeyInfoCache.lru = lru.New(cfg.MaxRoutingKeyInfo)
- if !cfg.disableControlConn {
- s.control = createControlConn(s)
- }
-
+ // I think it might be a good idea to simplify this and make it always discover
+ // hosts, maybe with more filters.
if cfg.DiscoverHosts {
s.hostSource = &ringDescriber{
session: s,
@@ -99,7 +98,25 @@ func NewSession(cfg ClusterConfig) (*Session, error) {
rackFilter: cfg.Discovery.RackFilter,
closeChan: make(chan bool),
}
+ }
+ if !cfg.disableControlConn {
+ s.control = createControlConn(s)
+ s.control.reconnect(false)
+
+ // need to setup host source to check for rpc_address in system.local
+ localHasRPCAddr, err := checkSystemLocal(s.control)
+ if err != nil {
+ log.Printf("gocql: unable to verify if system.local table contains rpc_address, falling back to connection address: %v", err)
+ }
+
+ if cfg.DiscoverHosts {
+ s.hostSource.localHasRpcAddr = localHasRPCAddr
+ }
+ }
+
+ if cfg.DiscoverHosts {
+ s.hostSource.refreshRing()
go s.hostSource.run(cfg.Discovery.Sleep)
}
@@ -218,7 +235,7 @@ func (s *Session) executeQuery(qry *Query) *Iter {
qry.attempts = 0
qry.totalLatency = 0
for {
- conn := s.pool.Pick(qry)
+ host, conn := s.pool.Pick(qry)
//Assign the error unavailable to the iterator
if conn == nil {
@@ -237,9 +254,13 @@ func (s *Session) executeQuery(qry *Query) *Iter {
//Exit for loop if the query was successful
if iter.err == nil {
+ host.Mark(iter.err)
break
}
+ // a failed query does not necessarily mean the host is down, so mark it as available
+ host.Mark(nil)
+
if qry.rt == nil || !qry.rt.Attempt(qry) {
break
}
@@ -306,7 +327,7 @@ func (s *Session) routingKeyInfo(stmt string) (*routingKeyInfo, error) {
)
// get the query info for the statement
- conn := s.pool.Pick(nil)
+ host, conn := s.pool.Pick(nil)
if conn == nil {
// no connections
inflight.err = ErrNoConnections
@@ -319,9 +340,13 @@ func (s *Session) routingKeyInfo(stmt string) (*routingKeyInfo, error) {
if inflight.err != nil {
// don't cache this error
s.routingKeyInfoCache.Remove(stmt)
+ host.Mark(inflight.err)
return nil, inflight.err
}
+ // Mark host as OK
+ host.Mark(nil)
+
if len(info.Args) == 0 {
// no arguments, no routing key, and no error
return nil, nil
@@ -401,7 +426,7 @@ func (s *Session) executeBatch(batch *Batch) (*Iter, error) {
batch.attempts = 0
batch.totalLatency = 0
for {
- conn := s.pool.Pick(nil)
+ host, conn := s.pool.Pick(nil)
//Assign the error unavailable and break loop
if conn == nil {
@@ -414,9 +439,13 @@ func (s *Session) executeBatch(batch *Batch) (*Iter, error) {
batch.attempts++
//Exit loop if operation executed correctly
if err == nil {
+ host.Mark(err)
return iter, err
}
+ // a failed batch does not necessarily mean the host is down, so mark it as available
+ host.Mark(nil)
+
if batch.rt == nil || !batch.rt.Attempt(batch) {
break
}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/session_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/session_test.go
new file mode 100644
index 000000000..965b7b5ea
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/session_test.go
@@ -0,0 +1,255 @@
+// +build all integration
+
+package gocql
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestSessionAPI(t *testing.T) {
+
+ cfg := &ClusterConfig{}
+ pool, err := cfg.PoolConfig.buildPool(cfg)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ s := &Session{
+ pool: pool,
+ cfg: *cfg,
+ cons: Quorum,
+ }
+
+ defer s.Close()
+
+ s.SetConsistency(All)
+ if s.cons != All {
+ t.Fatalf("expected consistency 'All', got '%v'", s.cons)
+ }
+
+ s.SetPageSize(100)
+ if s.pageSize != 100 {
+ t.Fatalf("expected pageSize 100, got %v", s.pageSize)
+ }
+
+ s.SetPrefetch(0.75)
+ if s.prefetch != 0.75 {
+ t.Fatalf("expceted prefetch 0.75, got %v", s.prefetch)
+ }
+
+ trace := &traceWriter{}
+
+ s.SetTrace(trace)
+ if s.trace != trace {
+ t.Fatalf("expected traceWriter '%v',got '%v'", trace, s.trace)
+ }
+
+ qry := s.Query("test", 1)
+ if v, ok := qry.values[0].(int); !ok {
+ t.Fatalf("expected qry.values[0] to be an int, got %v", qry.values[0])
+ } else if v != 1 {
+ t.Fatalf("expceted qry.values[0] to be 1, got %v", v)
+ } else if qry.stmt != "test" {
+ t.Fatalf("expected qry.stmt to be 'test', got '%v'", qry.stmt)
+ }
+
+ boundQry := s.Bind("test", func(q *QueryInfo) ([]interface{}, error) {
+ return nil, nil
+ })
+ if boundQry.binding == nil {
+ t.Fatal("expected qry.binding to be defined, got nil")
+ } else if boundQry.stmt != "test" {
+ t.Fatalf("expected qry.stmt to be 'test', got '%v'", boundQry.stmt)
+ }
+
+ itr := s.executeQuery(qry)
+ if itr.err != ErrNoConnections {
+ t.Fatalf("expected itr.err to be '%v', got '%v'", ErrNoConnections, itr.err)
+ }
+
+ testBatch := s.NewBatch(LoggedBatch)
+ testBatch.Query("test")
+ err = s.ExecuteBatch(testBatch)
+
+ if err != ErrNoConnections {
+ t.Fatalf("expected session.ExecuteBatch to return '%v', got '%v'", ErrNoConnections, err)
+ }
+
+ s.Close()
+ if !s.Closed() {
+ t.Fatal("expected s.Closed() to be true, got false")
+ }
+ //Should just return cleanly
+ s.Close()
+
+ err = s.ExecuteBatch(testBatch)
+ if err != ErrSessionClosed {
+ t.Fatalf("expected session.ExecuteBatch to return '%v', got '%v'", ErrSessionClosed, err)
+ }
+}
+
+func TestQueryBasicAPI(t *testing.T) {
+ qry := &Query{}
+
+ if qry.Latency() != 0 {
+ t.Fatalf("expected Query.Latency() to return 0, got %v", qry.Latency())
+ }
+
+ qry.attempts = 2
+ qry.totalLatency = 4
+ if qry.Attempts() != 2 {
+ t.Fatalf("expected Query.Attempts() to return 2, got %v", qry.Attempts())
+ }
+ if qry.Latency() != 2 {
+ t.Fatalf("expected Query.Latency() to return 2, got %v", qry.Latency())
+ }
+
+ qry.Consistency(All)
+ if qry.GetConsistency() != All {
+ t.Fatalf("expected Query.GetConsistency to return 'All', got '%s'", qry.GetConsistency())
+ }
+
+ trace := &traceWriter{}
+ qry.Trace(trace)
+ if qry.trace != trace {
+ t.Fatalf("expected Query.Trace to be '%v', got '%v'", trace, qry.trace)
+ }
+
+ qry.PageSize(10)
+ if qry.pageSize != 10 {
+ t.Fatalf("expected Query.PageSize to be 10, got %v", qry.pageSize)
+ }
+
+ qry.Prefetch(0.75)
+ if qry.prefetch != 0.75 {
+ t.Fatalf("expected Query.Prefetch to be 0.75, got %v", qry.prefetch)
+ }
+
+ rt := &SimpleRetryPolicy{NumRetries: 3}
+ if qry.RetryPolicy(rt); qry.rt != rt {
+ t.Fatalf("expected Query.RetryPolicy to be '%v', got '%v'", rt, qry.rt)
+ }
+
+ qry.Bind(qry)
+ if qry.values[0] != qry {
+ t.Fatalf("expected Query.Values[0] to be '%v', got '%v'", qry, qry.values[0])
+ }
+}
+
+func TestQueryShouldPrepare(t *testing.T) {
+ toPrepare := []string{"select * ", "INSERT INTO", "update table", "delete from", "begin batch"}
+ cantPrepare := []string{"create table", "USE table", "LIST keyspaces", "alter table", "drop table", "grant user", "revoke user"}
+ q := &Query{}
+
+ for i := 0; i < len(toPrepare); i++ {
+ q.stmt = toPrepare[i]
+ if !q.shouldPrepare() {
+ t.Fatalf("expected Query.shouldPrepare to return true, got false for statement '%v'", toPrepare[i])
+ }
+ }
+
+ for i := 0; i < len(cantPrepare); i++ {
+ q.stmt = cantPrepare[i]
+ if q.shouldPrepare() {
+ t.Fatalf("expected Query.shouldPrepare to return false, got true for statement '%v'", cantPrepare[i])
+ }
+ }
+}
+
+func TestBatchBasicAPI(t *testing.T) {
+
+ cfg := &ClusterConfig{RetryPolicy: &SimpleRetryPolicy{NumRetries: 2}}
+ pool, err := cfg.PoolConfig.buildPool(cfg)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ s := &Session{
+ pool: pool,
+ cfg: *cfg,
+ cons: Quorum,
+ }
+ defer s.Close()
+
+ b := s.NewBatch(UnloggedBatch)
+ if b.Type != UnloggedBatch {
+ t.Fatalf("expceted batch.Type to be '%v', got '%v'", UnloggedBatch, b.Type)
+ } else if b.rt != cfg.RetryPolicy {
+ t.Fatalf("expceted batch.RetryPolicy to be '%v', got '%v'", cfg.RetryPolicy, b.rt)
+ }
+
+ b = NewBatch(LoggedBatch)
+ if b.Type != LoggedBatch {
+ t.Fatalf("expected batch.Type to be '%v', got '%v'", LoggedBatch, b.Type)
+ }
+
+ b.attempts = 1
+ if b.Attempts() != 1 {
+ t.Fatalf("expceted batch.Attempts() to return %v, got %v", 1, b.Attempts())
+ }
+
+ if b.Latency() != 0 {
+ t.Fatalf("expected batch.Latency() to be 0, got %v", b.Latency())
+ }
+
+ b.totalLatency = 4
+ if b.Latency() != 4 {
+ t.Fatalf("expected batch.Latency() to return %v, got %v", 4, b.Latency())
+ }
+
+ b.Cons = One
+ if b.GetConsistency() != One {
+ t.Fatalf("expected batch.GetConsistency() to return 'One', got '%s'", b.GetConsistency())
+ }
+
+ b.Query("test", 1)
+ if b.Entries[0].Stmt != "test" {
+ t.Fatalf("expected batch.Entries[0].Stmt to be 'test', got '%v'", b.Entries[0].Stmt)
+ } else if b.Entries[0].Args[0].(int) != 1 {
+ t.Fatalf("expected batch.Entries[0].Args[0] to be 1, got %v", b.Entries[0].Args[0])
+ }
+
+ b.Bind("test2", func(q *QueryInfo) ([]interface{}, error) {
+ return nil, nil
+ })
+
+ if b.Entries[1].Stmt != "test2" {
+ t.Fatalf("expected batch.Entries[1].Stmt to be 'test2', got '%v'", b.Entries[1].Stmt)
+ } else if b.Entries[1].binding == nil {
+ t.Fatal("expected batch.Entries[1].binding to be defined, got nil")
+ }
+ r := &SimpleRetryPolicy{NumRetries: 4}
+
+ b.RetryPolicy(r)
+ if b.rt != r {
+ t.Fatalf("expected batch.RetryPolicy to be '%v', got '%v'", r, b.rt)
+ }
+
+ if b.Size() != 2 {
+ t.Fatalf("expected batch.Size() to return 2, got %v", b.Size())
+ }
+
+}
+
+func TestConsistencyNames(t *testing.T) {
+ names := map[fmt.Stringer]string{
+ Any: "ANY",
+ One: "ONE",
+ Two: "TWO",
+ Three: "THREE",
+ Quorum: "QUORUM",
+ All: "ALL",
+ LocalQuorum: "LOCAL_QUORUM",
+ EachQuorum: "EACH_QUORUM",
+ Serial: "SERIAL",
+ LocalSerial: "LOCAL_SERIAL",
+ LocalOne: "LOCAL_ONE",
+ }
+
+ for k, v := range names {
+ if k.String() != v {
+ t.Fatalf("expected '%v', got '%v'", v, k.String())
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/stress_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/stress_test.go
new file mode 100644
index 000000000..74f125821
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/stress_test.go
@@ -0,0 +1,72 @@
+// +build all integration
+
+package gocql
+
+import (
+ "sync/atomic"
+
+ "testing"
+)
+
+func BenchmarkConnStress(b *testing.B) {
+ const workers = 16
+
+ cluster := createCluster()
+ cluster.NumConns = 1
+ cluster.NumStreams = workers
+ session := createSessionFromCluster(cluster, b)
+ defer session.Close()
+
+ if err := createTable(session, "CREATE TABLE IF NOT EXISTS conn_stress (id int primary key)"); err != nil {
+ b.Fatal(err)
+ }
+
+ var seed uint64
+ writer := func(pb *testing.PB) {
+ seed := atomic.AddUint64(&seed, 1)
+ var i uint64 = 0
+ for pb.Next() {
+ if err := session.Query("insert into conn_stress (id) values (?)", i*seed).Exec(); err != nil {
+ b.Error(err)
+ return
+ }
+ i++
+ }
+ }
+
+ b.SetParallelism(workers)
+ b.RunParallel(writer)
+}
+
+func BenchmarkConnRoutingKey(b *testing.B) {
+ const workers = 16
+
+ cluster := createCluster()
+ cluster.NumConns = 1
+ cluster.NumStreams = workers
+ cluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy())
+ session := createSessionFromCluster(cluster, b)
+ defer session.Close()
+
+ if err := createTable(session, "CREATE TABLE IF NOT EXISTS routing_key_stress (id int primary key)"); err != nil {
+ b.Fatal(err)
+ }
+
+ var seed uint64
+ writer := func(pb *testing.PB) {
+ seed := atomic.AddUint64(&seed, 1)
+ var i uint64 = 0
+ query := session.Query("insert into routing_key_stress (id) values (?)")
+
+ for pb.Next() {
+ if _, err := query.Bind(i * seed).GetRoutingKey(); err != nil {
+ b.Error(err)
+ return
+ }
+ i++
+ }
+ }
+
+ b.SetParallelism(workers)
+ b.RunParallel(writer)
+}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/.keystore b/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/.keystore
new file mode 100644
index 000000000..0bbf1929f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/.keystore
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9d7522bb1c517241b555d66e3ba3e98922309c6378e625e991ee69372c97b7e
+size 2178
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/.truststore b/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/.truststore
new file mode 100644
index 000000000..e7f20ba11
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/.truststore
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:02b50c1f6bb4f5a6fb013c2c9ebe47ecd08db683dca48045f08e9e9ca98d8963
+size 882
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/ca.crt b/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/ca.crt
new file mode 100644
index 000000000..fc3ba63f2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/ca.crt
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDLzCCAhegAwIBAgIJAIKbAXgemwsjMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV
+BAMTCWNhc3NhbmRyYTAeFw0xNDA5MTkyMTE4MTNaFw0yNDA5MTYyMTE4MTNaMBQx
+EjAQBgNVBAMTCWNhc3NhbmRyYTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAL5fX0l1WDNa+mO1krxw7k8lfUQn+Ec4L3Mqv6IstGoNdCPq4YRA+SXRD5YC
+k/UXrFBWh9Hbs849GiuTYMPdj9HDLYz40RaQjM9GbieS23iy3UStQ0tKhxaaG6FN
+6XBypXFKCTsanu0TkEoDGhAkSzAMcCAC3gkFBzMrZ5qt4HEzjY9rasZ2gthN+xop
+nq3t4dDkE8HGaiFJcFvqTor7xmrnAaPjrPzUpvOF/ObIC09omwg/KXdPRx4DKPon
+gCMKEE3ckebKnJvbsRX3WO8H5nTHBYZ6v1JxLZz5pqmV+P0NGxldCARM0gCQUBz5
+wjMJkD/3e1ETC+q6uwfnAG0hlD8CAwEAAaOBgzCBgDAdBgNVHQ4EFgQUjHzn0nYF
+iXEaI1vUWbRR4lwKXOgwRAYDVR0jBD0wO4AUjHzn0nYFiXEaI1vUWbRR4lwKXOih
+GKQWMBQxEjAQBgNVBAMTCWNhc3NhbmRyYYIJAIKbAXgemwsjMAwGA1UdEwQFMAMB
+Af8wCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4IBAQBCYDdIhtf/Y12Et947
+am1B8TzSX+/iQ1V1J3JtvgD5F4fvNjfArat/I3D277WREUTAc76o16BCp2OBGqzO
+zf9MvZPkjkAUoyU0TtPUEHyqxq4gZxbWKugIZGYkmQ1hCvSIgA5UnjRL3dylMmZb
+Y33JJA2QY63FZwnhmWsM8FYZwh+8MzVCQx3mgXC/k/jS6OuYyIT/KjxQHHjyr5ZS
+zAAQln1IcZycLfh1w5MtCFahCIethFcVDnWUWYPcPGDGgMJW7WBpNZdHbLxYY8cI
+eCc3Hcrbdc/CG5CaLJeqUidBayjnlUIO/NNgglkJ1KhQzkM6bd+37e0AX1hLIqx7
+gIZR
+-----END CERTIFICATE-----
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/ca.key b/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/ca.key
new file mode 100644
index 000000000..4360c17a2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/ca.key
@@ -0,0 +1,30 @@
+-----BEGIN RSA PRIVATE KEY-----
+Proc-Type: 4,ENCRYPTED
+DEK-Info: DES-EDE3-CBC,54C8072C0FF3B3A3
+
+27eijmHdgB+s3beNPmU0+iz+muxMD0BVvWkDzyec/uawMv/Cn4c3mYXOcsFxS3BL
++qLT9MEttOmjqhHSaVrDYOPKoJIMpn+bVeKiR08V89icO36shEPy1feGqanagKtw
+ecgzFDBTA8ZbqjAhftXlhTwxADebvNms/2aDh5Aw04vIcbo8nQ/8z1Wz8O7Firsn
+kaseSTMTC6lxc+pa2V1X6mN0/2UpDi55bZbx1Z/mQ3+1CsdHOx0p7m/KY2m3ysov
+XluaC0sqmzHkcwNgDhUs3Jh+apE33vXzLGU+W4BDOwrYJiL6KpspZW/mJj3OEx8B
+8xdAZU3a/ei8NUA/lDStGmcYX+dOysExwJ6GMrCBm9iufZiefDQCQ8yRqWnr6Zop
+lsFd+CqHNWYxfWDI1pSUBw3bsgIjevI0f0B7PxkFEF0DmIhCgB324/uqToRzGsOF
+4MSVg6cSK7Sjo/u3r8r75A3aUAcY8NbR3peiZfAPMsTiUcfp4DoU+MJTqkX5PyQq
+FNxHOJoARZqjjQ2IhZiUQWfIINHvZ8F9G2K7VaES8A0EATyUghqaRyeLbyI3IYdW
+pGZBzrpGtdFlk9AVetHDDlY+gQiurtYhxOsxvlxJJuTj8FV+A5NWSElfPele0OiR
+iprE3xkFSk3whHu5L1vnzamvdSlnBWOAE7pQD7kQA6NmcEw/tqnXK0dVdAw8RIFh
+4BKgv0sNrXzBgnzE8+bKLUf1a2Byc/YKuBrI7EpSZ9/VHYvOcgmOxNxMmRS6NYd1
+Ly+agQn0AyvsDmSlBZBp8GCzVp6JYBMDKSXyPVN8+wjK9OQM0PZdEdXouMwPCOVN
+oNSjhmMtfjOsnG2SZ9tRas3p0qFdfh/N/E6Q7QHG3WD3cUIEweFV9ji1FTSRUrIa
+shuKug8MUfNjvDJNMsdGyf6Hi/7Iik++42Rq3ZdTy0ZVkj5snv5yBN77pr2M/J4b
+M+dsXjyXPO4SDW3kP/e3RnLRlWmUv1PNdOmNDdjBBUTKgVZ3ur+4HmSY1iDvhlUF
+/hz2tz3/XUKQwYuv3KJVlBhLrniXeES36GK+JQadIszrjwb5N4q4p6xrIdIR7XgR
+TJCSL1NGPLeQyjK6byWLNPRcCGrvnxWs0k0ev6trMRJL1EjsIFDCJam9szhcXkZP
+iYl1d7ZMKPS3cAqCjdaFRSe65cZ+qI/cqxiv122orq/jkDY7ZSA9rWywY4YnYQ7A
+BqvcPzC/6K0bteXqmMQkIy/84aSnEts6ecb/4s5e5xXLhHe0dchG0HkasC/Gb+v/
+m9NOqACTerWvSD+Ecv9OvnBjP+GTlA1g7xTiRANLXsTJuiJomtxewXcV6kGZEMmZ
+QWerGtPJGGUx36WRWrMiPeBfWZoIbjYGPmOO5mYNXMTjABGGWcFnKAqWUKsFihi9
+pC0OpZ7A0dtc9uSm0ZmsHUc3XENMHTeeEN+qgWxVKcMzRKEcnapu/0OcHrOUHDZf
+qPoG4EkNnG9kPMq3HzvFPx3qbQ017yl87vAkWy/Edo+ojfHoNghRBVGCw1zt/BMN
+eJbFFHop+rQ87omz8WIL4K+zVf91rJ0REVAJssQVDo16O5wrMo+f+c8v2GANQks5
+-----END RSA PRIVATE KEY-----
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/cassandra.crt b/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/cassandra.crt
new file mode 100644
index 000000000..9d2facf9d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/cassandra.crt
@@ -0,0 +1,83 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 2 (0x2)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: CN=cassandra
+ Validity
+ Not Before: Sep 19 21:18:48 2014 GMT
+ Not After : Sep 16 21:18:48 2024 GMT
+ Subject: CN=cassandra
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ RSA Public Key: (2048 bit)
+ Modulus (2048 bit):
+ 00:e5:9c:20:9e:de:98:73:44:41:0d:37:4c:62:c3:
+ 9f:87:5f:9b:4f:aa:cf:f6:90:6e:a5:e0:89:88:7a:
+ 00:c6:bb:d7:80:87:69:2e:fa:f0:35:59:80:6e:82:
+ 25:c8:b3:6c:f6:a4:97:97:93:93:ea:f0:70:70:a4:
+ e1:b7:aa:da:c1:99:66:9b:93:04:3a:ce:0b:83:07:
+ 06:22:3d:a6:db:7f:68:0f:49:80:bd:86:a8:bb:54:
+ 6d:38:5f:0f:b0:fa:1b:97:24:ae:cc:9d:37:98:7e:
+ 76:cc:e3:1b:45:1b:21:25:17:02:c0:1a:c5:fb:76:
+ c3:8b:93:d7:c5:85:14:0a:5c:a4:12:e7:18:69:98:
+ f5:76:cd:78:cd:99:5a:29:65:f1:68:20:97:d3:be:
+ 09:b3:68:1b:f2:a3:a2:9a:73:58:53:7e:ed:86:32:
+ a3:5a:d5:46:03:f9:b3:b4:ec:63:71:ba:bb:fb:6f:
+ f9:82:63:e4:55:47:7a:7a:e4:7b:17:6b:d7:e6:cf:
+ 3b:c9:ab:0c:30:15:c9:ed:c7:d6:fc:b6:72:b2:14:
+ 7d:c7:f3:7f:8a:f4:63:70:64:8e:0f:db:e8:3a:45:
+ 47:cd:b9:7b:ae:c8:31:c1:52:d1:3e:34:12:b7:73:
+ e7:ba:89:86:9a:36:ed:a0:5a:69:d0:d4:e3:b6:16:
+ 85:af
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ X509v3 Subject Key Identifier:
+ 4A:D3:EC:63:07:E0:8F:1A:4E:F5:09:43:90:9F:7A:C5:31:D1:8F:D8
+ X509v3 Authority Key Identifier:
+ keyid:8C:7C:E7:D2:76:05:89:71:1A:23:5B:D4:59:B4:51:E2:5C:0A:5C:E8
+ DirName:/CN=cassandra
+ serial:82:9B:01:78:1E:9B:0B:23
+
+ X509v3 Extended Key Usage:
+ TLS Web Server Authentication
+ X509v3 Key Usage:
+ Digital Signature, Key Encipherment
+ Signature Algorithm: sha256WithRSAEncryption
+ ac:bc:80:82:2d:6d:f1:a0:46:eb:00:05:d2:25:9a:83:66:57:
+ 40:51:6e:ff:db:e3:28:04:7b:16:63:74:ec:55:a0:c0:5b:47:
+ 13:e1:5a:a5:6d:22:d0:e5:fe:c1:51:e8:f6:c6:9c:f9:be:b7:
+ be:82:14:e4:a0:b2:0b:9f:ee:68:bc:ac:17:0d:13:50:c6:9e:
+ 52:91:8c:a0:98:db:4e:2d:f6:3d:6e:85:0a:bb:b9:dd:01:bf:
+ ad:52:dd:6e:e4:41:01:a5:93:58:dd:3f:cf:bf:15:e6:25:aa:
+ a0:4f:98:0d:75:8a:3f:5b:ba:67:37:f6:b1:0b:3f:21:34:97:
+ 50:9a:85:97:2b:b6:05:41:9a:f3:cf:c4:92:23:06:ab:3e:87:
+ 98:30:eb:cb:d3:83:ab:04:7d:5c:b9:f0:12:d1:43:b3:c5:7d:
+ 33:9a:2e:2b:80:3a:66:be:f1:8c:08:37:7a:93:9c:9b:60:60:
+ 53:71:16:70:86:df:ca:5f:a9:0b:e2:8b:3d:af:02:62:3b:61:
+ 30:da:53:89:e3:d8:0b:88:04:9a:93:6a:f6:28:f8:dd:0d:8f:
+ 0c:82:5b:c0:e5:f8:0d:ad:06:76:a7:3b:4b:ae:54:37:25:15:
+ f5:0c:67:0f:77:c5:c4:97:68:09:c3:02:a7:a0:46:10:1c:d1:
+ 95:3a:4c:94
+-----BEGIN CERTIFICATE-----
+MIIDOTCCAiGgAwIBAgIBAjANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQDEwljYXNz
+YW5kcmEwHhcNMTQwOTE5MjExODQ4WhcNMjQwOTE2MjExODQ4WjAUMRIwEAYDVQQD
+EwljYXNzYW5kcmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDlnCCe
+3phzREENN0xiw5+HX5tPqs/2kG6l4ImIegDGu9eAh2ku+vA1WYBugiXIs2z2pJeX
+k5Pq8HBwpOG3qtrBmWabkwQ6zguDBwYiPabbf2gPSYC9hqi7VG04Xw+w+huXJK7M
+nTeYfnbM4xtFGyElFwLAGsX7dsOLk9fFhRQKXKQS5xhpmPV2zXjNmVopZfFoIJfT
+vgmzaBvyo6Kac1hTfu2GMqNa1UYD+bO07GNxurv7b/mCY+RVR3p65HsXa9fmzzvJ
+qwwwFcntx9b8tnKyFH3H83+K9GNwZI4P2+g6RUfNuXuuyDHBUtE+NBK3c+e6iYaa
+Nu2gWmnQ1OO2FoWvAgMBAAGjgZUwgZIwCQYDVR0TBAIwADAdBgNVHQ4EFgQUStPs
+YwfgjxpO9QlDkJ96xTHRj9gwRAYDVR0jBD0wO4AUjHzn0nYFiXEaI1vUWbRR4lwK
+XOihGKQWMBQxEjAQBgNVBAMTCWNhc3NhbmRyYYIJAIKbAXgemwsjMBMGA1UdJQQM
+MAoGCCsGAQUFBwMBMAsGA1UdDwQEAwIFoDANBgkqhkiG9w0BAQsFAAOCAQEArLyA
+gi1t8aBG6wAF0iWag2ZXQFFu/9vjKAR7FmN07FWgwFtHE+FapW0i0OX+wVHo9sac
++b63voIU5KCyC5/uaLysFw0TUMaeUpGMoJjbTi32PW6FCru53QG/rVLdbuRBAaWT
+WN0/z78V5iWqoE+YDXWKP1u6Zzf2sQs/ITSXUJqFlyu2BUGa88/EkiMGqz6HmDDr
+y9ODqwR9XLnwEtFDs8V9M5ouK4A6Zr7xjAg3epOcm2BgU3EWcIbfyl+pC+KLPa8C
+YjthMNpTiePYC4gEmpNq9ij43Q2PDIJbwOX4Da0Gdqc7S65UNyUV9QxnD3fFxJdo
+CcMCp6BGEBzRlTpMlA==
+-----END CERTIFICATE-----
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/cassandra.key b/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/cassandra.key
new file mode 100644
index 000000000..6878e8209
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/cassandra.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA5Zwgnt6Yc0RBDTdMYsOfh1+bT6rP9pBupeCJiHoAxrvXgIdp
+LvrwNVmAboIlyLNs9qSXl5OT6vBwcKTht6rawZlmm5MEOs4LgwcGIj2m239oD0mA
+vYaou1RtOF8PsPoblySuzJ03mH52zOMbRRshJRcCwBrF+3bDi5PXxYUUClykEucY
+aZj1ds14zZlaKWXxaCCX074Js2gb8qOimnNYU37thjKjWtVGA/mztOxjcbq7+2/5
+gmPkVUd6euR7F2vX5s87yasMMBXJ7cfW/LZyshR9x/N/ivRjcGSOD9voOkVHzbl7
+rsgxwVLRPjQSt3PnuomGmjbtoFpp0NTjthaFrwIDAQABAoIBAQChjdjl73kUoVGk
+GuSEGWCFv59nzqfEtJsl23bpr+4b5s8agCxiAe5Bm1fiaXBsZtKkN+rxm8TX6ZUz
+rM+ki3KgBW9Mx4SSW6d96dNHBFoC1wJAv1b2A2l1ZVHz9+7ydwgysHzNO1GC2nh8
+cM8fMJeBoU8uG6hx5n5wFvYa5CfVoUQh8+Oq0b+mVxEFKHmRPnWp9/jPzL5eBIdr
+ulbDt9S3dKJtouHgHBUNdkq/7Ex3QeHrUOahX6Y4eX1rzLnfLYY+0J4EA2PCKvgQ
+bfKCxVnnzL6ywviH8eS3ql6OvTfnbK9kCRw7WxX9CC50qKj3EmwC/51MPhWohWlq
+jw3qf38BAoGBAPPNyb3vUiyUqoErZxxIPFc2ob3vCjj06cvi7uKpOgrkdgC3iBhz
+aCFQ28r7LrxLAHaKvNvwp71Lc7WYo8WWkLI1DVn0dx+GiQYW3DbNcwZOS40ZQz5L
+zsjEcG4+cnZmuqGZBMNvQ+xUjkuucxvxPWKpEKM18GfDjgEkKbmDr+uNAoGBAPEY
+kVSfSZGtP0MoXIfRkrxBlhvCj9m+p60P37pyHrJBrlrwvxB7x3Oz8S70D6kV8s2g
+vVHgOS3VPj17VaQG8a3jBLKjzp5JLe34G8D1Ny8GqDc2wzOBtZySpJbifXuSUSPk
+cqF7yiu1cD/wRPlwyWxBX9ZbaxvxnIUwLLd3ygkrAoGBAKQaw42uVkCdvPr/DQOT
+d9I4erxO9zGJYQmU8bjtsZz9VJR89QWIQPIT7C3/zuB9F42zKxZcMXwQGo2EddAc
+3b6mSRtgmwJEW10W7BmTRrZa4y3RcFqxSjoHR6pdLEyYL01woy0taqnb7H/yp5aK
+VghfxkwllXEyxxXrko5FnpdNAoGBANeJLBunz2BxrnW+doJhZDnytFya4nk6TbKU
+12FaNoEL4PCh+12kGtogSwS74eg6m/citT2mI9gKpHrYcOaT4qmeo4uEj+nH6Eyv
+Gzi0wCHFZMr/pSC92/teyc+uKZo4Y1ugFq6w+Tt8GB7BERiisR+bji8XSTkRFemn
++MIIUFFDAoGAM8Va2Q5aTUkfg2mYlNLqT2tUAXVEhbmzjPA6laSo25PQEYWmX7vj
+hiU0DPCDJQ/PlPI23xYtDDLNk83Zbx+Oj29GO5pawJY9NvFI8n60EFXfLbP1nEdG
+j077QZNZOKfcgJirWi3+RrHSAK4tFftCe7rkV8ZmlMRBY3SDxzKOGcc=
+-----END RSA PRIVATE KEY-----
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/gocql.crt b/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/gocql.crt
new file mode 100644
index 000000000..22bf19f56
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/gocql.crt
@@ -0,0 +1,83 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 1 (0x1)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: CN=cassandra
+ Validity
+ Not Before: Sep 19 21:18:33 2014 GMT
+ Not After : Sep 16 21:18:33 2024 GMT
+ Subject: CN=gocql
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ RSA Public Key: (2048 bit)
+ Modulus (2048 bit):
+ 00:ae:e9:fa:9e:fd:e2:69:85:1d:08:0f:35:68:bc:
+ 63:7b:92:50:7f:73:50:fc:42:43:35:06:b3:5c:9e:
+ 27:1e:16:05:69:ec:88:d5:9c:4f:ef:e8:13:69:7a:
+ b5:b3:7f:66:6d:14:00:2e:d6:af:5b:ff:2c:90:91:
+ a6:11:07:72:5e:b0:37:c0:6d:ff:7b:76:2b:fe:de:
+ 4c:d2:8d:ce:43:3b:1a:c4:1d:de:b6:d8:26:08:25:
+ 89:59:a1:4b:94:a3:57:9e:19:46:28:6e:97:11:7c:
+ e6:b7:41:96:8f:42:dd:66:da:86:d2:53:dd:d8:f5:
+ 20:cd:24:8b:0f:ab:df:c4:10:b2:64:20:1d:e0:0f:
+ f4:2d:f6:ca:94:be:83:ac:3e:a8:4a:77:b6:08:97:
+ 3a:7e:7b:e0:3e:ab:68:cf:ee:f6:a1:8e:bf:ec:be:
+ 06:d1:ad:6c:ed:4f:35:d1:04:97:08:33:b1:65:5b:
+ 61:32:8d:4b:f0:30:35:4b:8b:6b:06:f2:1a:72:8c:
+ 69:bd:f3:b2:c4:a4:a4:70:45:e3:67:a2:7a:9f:2e:
+ cb:28:2d:9f:68:03:f1:c7:d9:4f:83:c9:3d:8c:34:
+ 04:0a:3b:13:87:92:e1:f7:e3:79:7e:ab:c0:25:b1:
+ e5:38:09:44:3e:31:df:12:d4:dc:7b:0e:35:bf:ee:
+ 25:5f
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ X509v3 Subject Key Identifier:
+ 9F:F1:B2:C4:82:34:D0:2F:FF:E9:7F:19:F1:3B:51:57:BF:E8:95:BB
+ X509v3 Authority Key Identifier:
+ keyid:8C:7C:E7:D2:76:05:89:71:1A:23:5B:D4:59:B4:51:E2:5C:0A:5C:E8
+ DirName:/CN=cassandra
+ serial:82:9B:01:78:1E:9B:0B:23
+
+ X509v3 Extended Key Usage:
+ TLS Web Client Authentication
+ X509v3 Key Usage:
+ Digital Signature
+ Signature Algorithm: sha256WithRSAEncryption
+ 12:aa:1b:a6:58:27:52:32:c9:46:19:32:d3:69:ae:95:ad:23:
+ 55:ad:12:65:da:2c:4c:72:f3:29:bd:2b:5a:97:3b:b7:68:8b:
+ 68:80:77:55:e6:32:81:f1:f5:20:54:ba:0e:2b:86:90:d8:44:
+ cf:f2:9f:ec:4d:39:67:4e:36:6c:9b:49:4a:80:e6:c1:ed:a4:
+ 41:39:19:16:d2:88:df:17:0c:46:5a:b9:88:53:f5:67:19:f0:
+ 1f:9a:51:40:1b:40:12:bc:57:db:de:dd:d3:f5:a8:93:68:30:
+ ac:ba:4e:ee:6b:af:f8:13:3d:11:1a:fa:90:93:d0:68:ce:77:
+ 5f:85:8b:a4:95:2a:4c:25:7b:53:9c:44:43:b1:d9:fe:0c:83:
+ b8:19:2a:88:cc:d8:d1:d9:b3:04:eb:45:9b:30:5e:cb:61:e0:
+ e1:88:23:9c:b0:34:79:62:82:0d:f8:10:ed:96:bb:a0:fd:0d:
+ 02:cb:c5:d3:47:1f:35:a7:e3:39:31:56:d5:b3:eb:2f:93:8f:
+ 18:b4:b7:3c:00:03:a7:b4:1c:17:72:91:7e:b6:f6:36:17:3d:
+ f6:54:3b:87:84:d1:9b:43:d1:88:42:64:20:7a:e3:cc:f7:05:
+ 98:0e:1c:51:da:20:b7:9b:49:88:e8:c6:e1:de:0d:f5:56:4f:
+ 79:41:d0:7f
+-----BEGIN CERTIFICATE-----
+MIIDNTCCAh2gAwIBAgIBATANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQDEwljYXNz
+YW5kcmEwHhcNMTQwOTE5MjExODMzWhcNMjQwOTE2MjExODMzWjAQMQ4wDAYDVQQD
+EwVnb2NxbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK7p+p794mmF
+HQgPNWi8Y3uSUH9zUPxCQzUGs1yeJx4WBWnsiNWcT+/oE2l6tbN/Zm0UAC7Wr1v/
+LJCRphEHcl6wN8Bt/3t2K/7eTNKNzkM7GsQd3rbYJggliVmhS5SjV54ZRihulxF8
+5rdBlo9C3WbahtJT3dj1IM0kiw+r38QQsmQgHeAP9C32ypS+g6w+qEp3tgiXOn57
+4D6raM/u9qGOv+y+BtGtbO1PNdEElwgzsWVbYTKNS/AwNUuLawbyGnKMab3zssSk
+pHBF42eiep8uyygtn2gD8cfZT4PJPYw0BAo7E4eS4ffjeX6rwCWx5TgJRD4x3xLU
+3HsONb/uJV8CAwEAAaOBlTCBkjAJBgNVHRMEAjAAMB0GA1UdDgQWBBSf8bLEgjTQ
+L//pfxnxO1FXv+iVuzBEBgNVHSMEPTA7gBSMfOfSdgWJcRojW9RZtFHiXApc6KEY
+pBYwFDESMBAGA1UEAxMJY2Fzc2FuZHJhggkAgpsBeB6bCyMwEwYDVR0lBAwwCgYI
+KwYBBQUHAwIwCwYDVR0PBAQDAgeAMA0GCSqGSIb3DQEBCwUAA4IBAQASqhumWCdS
+MslGGTLTaa6VrSNVrRJl2ixMcvMpvStalzu3aItogHdV5jKB8fUgVLoOK4aQ2ETP
+8p/sTTlnTjZsm0lKgObB7aRBORkW0ojfFwxGWrmIU/VnGfAfmlFAG0ASvFfb3t3T
+9aiTaDCsuk7ua6/4Ez0RGvqQk9BozndfhYuklSpMJXtTnERDsdn+DIO4GSqIzNjR
+2bME60WbMF7LYeDhiCOcsDR5YoIN+BDtlrug/Q0Cy8XTRx81p+M5MVbVs+svk48Y
+tLc8AAOntBwXcpF+tvY2Fz32VDuHhNGbQ9GIQmQgeuPM9wWYDhxR2iC3m0mI6Mbh
+3g31Vk95QdB/
+-----END CERTIFICATE-----
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/gocql.key b/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/gocql.key
new file mode 100644
index 000000000..0d701f43e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/gocql.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEArun6nv3iaYUdCA81aLxje5JQf3NQ/EJDNQazXJ4nHhYFaeyI
+1ZxP7+gTaXq1s39mbRQALtavW/8skJGmEQdyXrA3wG3/e3Yr/t5M0o3OQzsaxB3e
+ttgmCCWJWaFLlKNXnhlGKG6XEXzmt0GWj0LdZtqG0lPd2PUgzSSLD6vfxBCyZCAd
+4A/0LfbKlL6DrD6oSne2CJc6fnvgPqtoz+72oY6/7L4G0a1s7U810QSXCDOxZVth
+Mo1L8DA1S4trBvIacoxpvfOyxKSkcEXjZ6J6ny7LKC2faAPxx9lPg8k9jDQECjsT
+h5Lh9+N5fqvAJbHlOAlEPjHfEtTcew41v+4lXwIDAQABAoIBAQCCP9XSwzfwX6Fo
+uPqKjY5/HEs5PQPXdPha6ixyEYsLilZptCuI9adI/MZHy4q2qW36V+Ry/IcEuJXU
+6cCB+cue2xYJA2A17Z+BYMRQHiy0P7UEyUFpYrefZWRMDCIeAyxhnGxz+zYfXaTo
+Xbzh3WbFCoFO6gjPYGoWmNm8x74PXyunNaMa/gWFECX5MMBXoOk5xSFGbHzI2Cds
+iT7sdCQJVbBs7yidYwNqPWQuOwrskFinPIFSc7bZ0Sx9wO3XTIrQFCE94v/AN6yR
+9Q37ida54g5tgtoeg/5EGsUM++i4wqJVoT3tWUHv1jBozO4Lm65uWR/1HcrusVnr
+x0TM9SaBAoGBAOMeaZdUrCJXnIiSoqCGDvZmylTAeOo6n2RAiviOYxVB4GP/SSjh
+8VeddFhYT1GCmZ+YjIXnRWK+dSqVukzCuf5xW5mWY7PDNGZe2P6O78lXnY4cb8Nc
+Uo9/S2aPnNmNHL2TYVBYUiZj+t2azIQEFvRth4Vu/AHRUG41/USxpwm/AoGBAMUo
+GX0xgSFAVpHnTLdzWrHNRrzHgYN8ywPKFgNOASvdgW0BFoqXEvVGc1Ak6uW82m1/
+L9ChOzWjCY7CoT+LPmdUVyGT9/UAPtWeLfo8Owl4tG91jQjePmJFvLoXErryCFRt
+SOOvCsTTTq2gN3PREHxY3dj2kJqaCBLCEzx3cYxhAoGBAIUxdrc6/t/9BV3KsPj2
+5Zt3WL0vSzoCOyut9lIiHtV+lrvOIPeK2eCKBIsy7wFcV/+SlQaKRNTN4SSiPml5
+4V3o2NFPsxTfK8HFafiPluw7J7kJ0Dl/0SM6gduZ6WBkMzCyV+WohjTheWOwvrPF
+OjkKaunD1qKyQDsCCo/Yp589AoGAdKgnfNZf68bf8nEECcBtt6sY4fbCgYTDszhO
+EiKDuurT/CWaquJ9SzgmXxOZEdrO+9838aCVIkWYECrFso23nPhgnfOp0gQVKdzw
+o5Ij9JTBXvoVO1wVWZyd8RZZ9Nflad9IM8CNBK1rbnzQkuzvbkQ+8HPkWDYv9Ll1
+HGAohcECgYBQeirIumumj1B17WD/KmNe0U0qCHHp+oSW4W2r7pjlEVZzeQmggX4O
+anbEngyQaZKeUiUOj9snBDmzLv7S+j5p7Us4d1fbp70sCKuK6tcAnROU8gK8IGiI
+I01ypD8Z1Mb556qek56eRWlr71sy6wI1lbQa856cUBvePajUOKsKsw==
+-----END RSA PRIVATE KEY-----
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/token_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/token_test.go
new file mode 100644
index 000000000..8f8cdada5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/token_test.go
@@ -0,0 +1,410 @@
+// Copyright (c) 2015 The gocql Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gocql
+
+import (
+ "bytes"
+ "math/big"
+ "sort"
+ "strconv"
+ "testing"
+)
+
+// Tests of the murmur3Partitioner
+func TestMurmur3Partitioner(t *testing.T) {
+ token := murmur3Partitioner{}.ParseString("-1053604476080545076")
+
+ if "-1053604476080545076" != token.String() {
+ t.Errorf("Expected '-1053604476080545076' but was '%s'", token)
+ }
+
+ // at least verify that the partitioner
+ // doesn't return nil
+ pk, _ := marshalInt(nil, 1)
+ token = murmur3Partitioner{}.Hash(pk)
+ if token == nil {
+ t.Fatal("token was nil")
+ }
+}
+
+// Tests of the murmur3Token
+func TestMurmur3Token(t *testing.T) {
+ if murmur3Token(42).Less(murmur3Token(42)) {
+ t.Errorf("Expected Less to return false, but was true")
+ }
+ if !murmur3Token(-42).Less(murmur3Token(42)) {
+ t.Errorf("Expected Less to return true, but was false")
+ }
+ if murmur3Token(42).Less(murmur3Token(-42)) {
+ t.Errorf("Expected Less to return false, but was true")
+ }
+}
+
+// Tests of the orderedPartitioner
+func TestOrderedPartitioner(t *testing.T) {
+ // at least verify that the partitioner
+ // doesn't return nil
+ p := orderedPartitioner{}
+ pk, _ := marshalInt(nil, 1)
+ token := p.Hash(pk)
+ if token == nil {
+ t.Fatal("token was nil")
+ }
+
+ str := token.String()
+ parsedToken := p.ParseString(str)
+
+ if !bytes.Equal([]byte(token.(orderedToken)), []byte(parsedToken.(orderedToken))) {
+ t.Errorf("Failed to convert to and from a string %s expected %x but was %x",
+ str,
+ []byte(token.(orderedToken)),
+ []byte(parsedToken.(orderedToken)),
+ )
+ }
+}
+
+// Tests of the orderedToken
+func TestOrderedToken(t *testing.T) {
+ if orderedToken([]byte{0, 0, 4, 2}).Less(orderedToken([]byte{0, 0, 4, 2})) {
+ t.Errorf("Expected Less to return false, but was true")
+ }
+ if !orderedToken([]byte{0, 0, 3}).Less(orderedToken([]byte{0, 0, 4, 2})) {
+ t.Errorf("Expected Less to return true, but was false")
+ }
+ if orderedToken([]byte{0, 0, 4, 2}).Less(orderedToken([]byte{0, 0, 3})) {
+ t.Errorf("Expected Less to return false, but was true")
+ }
+}
+
+// Tests of the randomPartitioner
+func TestRandomPartitioner(t *testing.T) {
+ // at least verify that the partitioner
+ // doesn't return nil
+ p := randomPartitioner{}
+ pk, _ := marshalInt(nil, 1)
+ token := p.Hash(pk)
+ if token == nil {
+ t.Fatal("token was nil")
+ }
+
+ str := token.String()
+ parsedToken := p.ParseString(str)
+
+ if (*big.Int)(token.(*randomToken)).Cmp((*big.Int)(parsedToken.(*randomToken))) != 0 {
+ t.Errorf("Failed to convert to and from a string %s expected %v but was %v",
+ str,
+ token,
+ parsedToken,
+ )
+ }
+}
+
+// Tests of the randomToken
+func TestRandomToken(t *testing.T) {
+ if ((*randomToken)(big.NewInt(42))).Less((*randomToken)(big.NewInt(42))) {
+ t.Errorf("Expected Less to return false, but was true")
+ }
+ if !((*randomToken)(big.NewInt(41))).Less((*randomToken)(big.NewInt(42))) {
+ t.Errorf("Expected Less to return true, but was false")
+ }
+ if ((*randomToken)(big.NewInt(42))).Less((*randomToken)(big.NewInt(41))) {
+ t.Errorf("Expected Less to return false, but was true")
+ }
+}
+
+type intToken int
+
+func (i intToken) String() string {
+ return strconv.Itoa(int(i))
+}
+
+func (i intToken) Less(token token) bool {
+ return i < token.(intToken)
+}
+
+// Test of the token ring implementation based on example at the start of this
+// page of documentation:
+// http://www.datastax.com/docs/0.8/cluster_architecture/partitioning
+func TestIntTokenRing(t *testing.T) {
+ host0 := &HostInfo{}
+ host25 := &HostInfo{}
+ host50 := &HostInfo{}
+ host75 := &HostInfo{}
+ ring := &tokenRing{
+ partitioner: nil,
+ // these tokens and hosts are out of order to test sorting
+ tokens: []token{
+ intToken(0),
+ intToken(50),
+ intToken(75),
+ intToken(25),
+ },
+ hosts: []*HostInfo{
+ host0,
+ host50,
+ host75,
+ host25,
+ },
+ }
+
+ sort.Sort(ring)
+
+ if ring.GetHostForToken(intToken(0)) != host0 {
+ t.Error("Expected host 0 for token 0")
+ }
+ if ring.GetHostForToken(intToken(1)) != host25 {
+ t.Error("Expected host 25 for token 1")
+ }
+ if ring.GetHostForToken(intToken(24)) != host25 {
+ t.Error("Expected host 25 for token 24")
+ }
+ if ring.GetHostForToken(intToken(25)) != host25 {
+ t.Error("Expected host 25 for token 25")
+ }
+ if ring.GetHostForToken(intToken(26)) != host50 {
+ t.Error("Expected host 50 for token 26")
+ }
+ if ring.GetHostForToken(intToken(49)) != host50 {
+ t.Error("Expected host 50 for token 49")
+ }
+ if ring.GetHostForToken(intToken(50)) != host50 {
+ t.Error("Expected host 50 for token 50")
+ }
+ if ring.GetHostForToken(intToken(51)) != host75 {
+ t.Error("Expected host 75 for token 51")
+ }
+ if ring.GetHostForToken(intToken(74)) != host75 {
+ t.Error("Expected host 75 for token 74")
+ }
+ if ring.GetHostForToken(intToken(75)) != host75 {
+ t.Error("Expected host 75 for token 75")
+ }
+ if ring.GetHostForToken(intToken(76)) != host0 {
+ t.Error("Expected host 0 for token 76")
+ }
+ if ring.GetHostForToken(intToken(99)) != host0 {
+ t.Error("Expected host 0 for token 99")
+ }
+ if ring.GetHostForToken(intToken(100)) != host0 {
+ t.Error("Expected host 0 for token 100")
+ }
+}
+
+// Test for the behavior of a nil pointer to tokenRing
+func TestNilTokenRing(t *testing.T) {
+ var ring *tokenRing = nil
+
+ if ring.GetHostForToken(nil) != nil {
+ t.Error("Expected nil for nil token ring")
+ }
+ if ring.GetHostForPartitionKey(nil) != nil {
+ t.Error("Expected nil for nil token ring")
+ }
+}
+
+// Test of the recognition of the partitioner class
+func TestUnknownTokenRing(t *testing.T) {
+ _, err := newTokenRing("UnknownPartitioner", nil)
+ if err == nil {
+ t.Error("Expected error for unknown partitioner value, but was nil")
+ }
+}
+
+// Test of the tokenRing with the Murmur3Partitioner
+func TestMurmur3TokenRing(t *testing.T) {
+ // Note, strings are parsed directly to int64, they are not murmur3 hashed
+ var hosts []HostInfo = []HostInfo{
+ HostInfo{
+ Peer: "0",
+ Tokens: []string{"0"},
+ },
+ HostInfo{
+ Peer: "1",
+ Tokens: []string{"25"},
+ },
+ HostInfo{
+ Peer: "2",
+ Tokens: []string{"50"},
+ },
+ HostInfo{
+ Peer: "3",
+ Tokens: []string{"75"},
+ },
+ }
+ ring, err := newTokenRing("Murmur3Partitioner", hosts)
+ if err != nil {
+ t.Fatalf("Failed to create token ring due to error: %v", err)
+ }
+
+ p := murmur3Partitioner{}
+
+ var actual *HostInfo
+ actual = ring.GetHostForToken(p.ParseString("0"))
+ if actual.Peer != "0" {
+ t.Errorf("Expected peer 0 for token \"0\", but was %s", actual.Peer)
+ }
+
+ actual = ring.GetHostForToken(p.ParseString("25"))
+ if actual.Peer != "1" {
+ t.Errorf("Expected peer 1 for token \"25\", but was %s", actual.Peer)
+ }
+
+ actual = ring.GetHostForToken(p.ParseString("50"))
+ if actual.Peer != "2" {
+ t.Errorf("Expected peer 2 for token \"50\", but was %s", actual.Peer)
+ }
+
+ actual = ring.GetHostForToken(p.ParseString("75"))
+ if actual.Peer != "3" {
+ t.Errorf("Expected peer 3 for token \"01\", but was %s", actual.Peer)
+ }
+
+ actual = ring.GetHostForToken(p.ParseString("12"))
+ if actual.Peer != "1" {
+ t.Errorf("Expected peer 1 for token \"12\", but was %s", actual.Peer)
+ }
+
+ actual = ring.GetHostForToken(p.ParseString("24324545443332"))
+ if actual.Peer != "0" {
+ t.Errorf("Expected peer 0 for token \"24324545443332\", but was %s", actual.Peer)
+ }
+}
+
+// Test of the tokenRing with the OrderedPartitioner
+func TestOrderedTokenRing(t *testing.T) {
+ // Tokens here more or less are similar layout to the int tokens above due
+ // to each numeric character translating to a consistently offset byte.
+ var hosts []HostInfo = []HostInfo{
+ HostInfo{
+ Peer: "0",
+ Tokens: []string{
+ "00",
+ },
+ },
+ HostInfo{
+ Peer: "1",
+ Tokens: []string{
+ "25",
+ },
+ },
+ HostInfo{
+ Peer: "2",
+ Tokens: []string{
+ "50",
+ },
+ },
+ HostInfo{
+ Peer: "3",
+ Tokens: []string{
+ "75",
+ },
+ },
+ }
+ ring, err := newTokenRing("OrderedPartitioner", hosts)
+ if err != nil {
+ t.Fatalf("Failed to create token ring due to error: %v", err)
+ }
+
+ p := orderedPartitioner{}
+
+ var actual *HostInfo
+ actual = ring.GetHostForToken(p.ParseString("0"))
+ if actual.Peer != "0" {
+ t.Errorf("Expected peer 0 for token \"0\", but was %s", actual.Peer)
+ }
+
+ actual = ring.GetHostForToken(p.ParseString("25"))
+ if actual.Peer != "1" {
+ t.Errorf("Expected peer 1 for token \"25\", but was %s", actual.Peer)
+ }
+
+ actual = ring.GetHostForToken(p.ParseString("50"))
+ if actual.Peer != "2" {
+ t.Errorf("Expected peer 2 for token \"50\", but was %s", actual.Peer)
+ }
+
+ actual = ring.GetHostForToken(p.ParseString("75"))
+ if actual.Peer != "3" {
+ t.Errorf("Expected peer 3 for token \"01\", but was %s", actual.Peer)
+ }
+
+ actual = ring.GetHostForToken(p.ParseString("12"))
+ if actual.Peer != "1" {
+ t.Errorf("Expected peer 1 for token \"12\", but was %s", actual.Peer)
+ }
+
+ actual = ring.GetHostForToken(p.ParseString("24324545443332"))
+ if actual.Peer != "1" {
+ t.Errorf("Expected peer 1 for token \"24324545443332\", but was %s", actual.Peer)
+ }
+}
+
+// Test of the tokenRing with the RandomPartitioner
+func TestRandomTokenRing(t *testing.T) {
+ // String tokens are parsed into big.Int in base 10
+ var hosts []HostInfo = []HostInfo{
+ HostInfo{
+ Peer: "0",
+ Tokens: []string{
+ "00",
+ },
+ },
+ HostInfo{
+ Peer: "1",
+ Tokens: []string{
+ "25",
+ },
+ },
+ HostInfo{
+ Peer: "2",
+ Tokens: []string{
+ "50",
+ },
+ },
+ HostInfo{
+ Peer: "3",
+ Tokens: []string{
+ "75",
+ },
+ },
+ }
+ ring, err := newTokenRing("RandomPartitioner", hosts)
+ if err != nil {
+ t.Fatalf("Failed to create token ring due to error: %v", err)
+ }
+
+ p := randomPartitioner{}
+
+ var actual *HostInfo
+ actual = ring.GetHostForToken(p.ParseString("0"))
+ if actual.Peer != "0" {
+ t.Errorf("Expected peer 0 for token \"0\", but was %s", actual.Peer)
+ }
+
+ actual = ring.GetHostForToken(p.ParseString("25"))
+ if actual.Peer != "1" {
+ t.Errorf("Expected peer 1 for token \"25\", but was %s", actual.Peer)
+ }
+
+ actual = ring.GetHostForToken(p.ParseString("50"))
+ if actual.Peer != "2" {
+ t.Errorf("Expected peer 2 for token \"50\", but was %s", actual.Peer)
+ }
+
+ actual = ring.GetHostForToken(p.ParseString("75"))
+ if actual.Peer != "3" {
+ t.Errorf("Expected peer 3 for token \"01\", but was %s", actual.Peer)
+ }
+
+ actual = ring.GetHostForToken(p.ParseString("12"))
+ if actual.Peer != "1" {
+ t.Errorf("Expected peer 1 for token \"12\", but was %s", actual.Peer)
+ }
+
+ actual = ring.GetHostForToken(p.ParseString("24324545443332"))
+ if actual.Peer != "0" {
+ t.Errorf("Expected peer 0 for token \"24324545443332\", but was %s", actual.Peer)
+ }
+}
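Taken together, the ring tests above cover the key-to-host lookup path: a partition key is hashed by the ring's partitioner and the resulting token is matched against the sorted token ring, wrapping around past the highest token. A small sketch inside package gocql, reusing only helpers that appear above (peers and tokens are illustrative):

	// ownerOfKey resolves which host owns the int partition key 1 on a two-host ring.
	func ownerOfKey() *HostInfo {
		hosts := []HostInfo{
			{Peer: "10.0.0.1", Tokens: []string{"0"}},
			{Peer: "10.0.0.2", Tokens: []string{"25"}},
		}
		ring, err := newTokenRing("Murmur3Partitioner", hosts)
		if err != nil {
			return nil // unknown partitioner names fail, as TestUnknownTokenRing shows
		}
		pk, _ := marshalInt(nil, 1) // marshal the partition key value 1
		// Equivalent to ring.GetHostForToken(ring.partitioner.Hash(pk)).
		return ring.GetHostForPartitionKey(pk)
	}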
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/topology_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/topology_test.go
new file mode 100644
index 000000000..8384824ab
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/topology_test.go
@@ -0,0 +1,51 @@
+// +build all unit
+
+package gocql
+
+import (
+ "testing"
+)
+
+// fakeNode is used as a simple structure to test the RoundRobin API
+type fakeNode struct {
+ conn *Conn
+ closed bool
+}
+
+// Pick is needed to satisfy the Node interface
+func (n *fakeNode) Pick(qry *Query) *Conn {
+ if n.conn == nil {
+ n.conn = &Conn{}
+ }
+ return n.conn
+}
+
+//Close is needed to satisfy the Node interface
+func (n *fakeNode) Close() {
+ n.closed = true
+}
+
+//TestRoundRobinAPI tests the exported methods of the RoundRobin struct
+//to make sure the API behaves accordingly.
+func TestRoundRobinAPI(t *testing.T) {
+ node := &fakeNode{}
+ rr := NewRoundRobin()
+ rr.AddNode(node)
+
+ if rr.Size() != 1 {
+ t.Fatalf("expected size to be 1, got %v", rr.Size())
+ }
+
+ if c := rr.Pick(nil); c != node.conn {
+ t.Fatalf("expected conn %v, got %v", node.conn, c)
+ }
+
+ rr.Close()
+ if rr.pool != nil {
+ t.Fatalf("expected rr.pool to be nil, got %v", rr.pool)
+ }
+
+ if !node.closed {
+ t.Fatal("expected node.closed to be true, got false")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/tuple_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/tuple_test.go
new file mode 100644
index 000000000..e9381f8b6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/tuple_test.go
@@ -0,0 +1,51 @@
+// +build all integration
+
+package gocql
+
+import "testing"
+
+func TestTupleSimple(t *testing.T) {
+ if *flagProto < protoVersion3 {
+ t.Skip("tuple types are only available of proto>=3")
+ }
+
+ session := createSession(t)
+ defer session.Close()
+
+ err := createTable(session, `CREATE TABLE gocql_test.tuple_test(
+ id int,
+ coord frozen>,
+
+ primary key(id))`)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = session.Query("INSERT INTO tuple_test(id, coord) VALUES(?, (?, ?))", 1, 100, -100).Exec()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var (
+ id int
+ coord struct {
+ x int
+ y int
+ }
+ )
+
+ iter := session.Query("SELECT id, coord FROM tuple_test WHERE id=?", 1)
+ if err := iter.Scan(&id, &coord.x, &coord.y); err != nil {
+ t.Fatal(err)
+ }
+
+ if id != 1 {
+ t.Errorf("expected to get id=1 got: %v", id)
+ }
+ if coord.x != 100 {
+ t.Errorf("expected to get coord.x=100 got: %v", coord.x)
+ }
+ if coord.y != -100 {
+ t.Errorf("expected to get coord.y=-100 got: %v", coord.y)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/udt_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/udt_test.go
new file mode 100644
index 000000000..dc96947bb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/udt_test.go
@@ -0,0 +1,343 @@
+// +build all integration
+
+package gocql
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+)
+
+type position struct {
+ Lat int `cql:"lat"`
+ Lon int `cql:"lon"`
+ Padding string `json:"padding"`
+}
+
+// NOTE: due to current implementation details it is not currently possible to use
+// a pointer receiver type for the UDTMarshaler interface to handle UDT's
+func (p position) MarshalUDT(name string, info TypeInfo) ([]byte, error) {
+ switch name {
+ case "lat":
+ return Marshal(info, p.Lat)
+ case "lon":
+ return Marshal(info, p.Lon)
+ case "padding":
+ return Marshal(info, p.Padding)
+ default:
+ return nil, fmt.Errorf("unknown column for position: %q", name)
+ }
+}
+
+func (p *position) UnmarshalUDT(name string, info TypeInfo, data []byte) error {
+ switch name {
+ case "lat":
+ return Unmarshal(info, data, &p.Lat)
+ case "lon":
+ return Unmarshal(info, data, &p.Lon)
+ case "padding":
+ return Unmarshal(info, data, &p.Padding)
+ default:
+ return fmt.Errorf("unknown column for position: %q", name)
+ }
+}
+
+func TestUDT_Marshaler(t *testing.T) {
+ if *flagProto < protoVersion3 {
+ t.Skip("UDT are only available on protocol >= 3")
+ }
+
+ session := createSession(t)
+ defer session.Close()
+
+ err := createTable(session, `CREATE TYPE gocql_test.position(
+ lat int,
+ lon int,
+ padding text);`)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = createTable(session, `CREATE TABLE gocql_test.houses(
+ id int,
+ name text,
+ loc frozen<position>,
+
+ primary key(id)
+ );`)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ const (
+ expLat = -1
+ expLon = 2
+ )
+ pad := strings.Repeat("X", 1000)
+
+ err = session.Query("INSERT INTO houses(id, name, loc) VALUES(?, ?, ?)", 1, "test", &position{expLat, expLon, pad}).Exec()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pos := &position{}
+
+ err = session.Query("SELECT loc FROM houses WHERE id = ?", 1).Scan(pos)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if pos.Lat != expLat {
+ t.Errorf("expeceted lat to be be %d got %d", expLat, pos.Lat)
+ }
+ if pos.Lon != expLon {
+ t.Errorf("expeceted lon to be be %d got %d", expLon, pos.Lon)
+ }
+ if pos.Padding != pad {
+ t.Errorf("expected to get padding %q got %q\n", pad, pos.Padding)
+ }
+}
+
+func TestUDT_Reflect(t *testing.T) {
+ if *flagProto < protoVersion3 {
+ t.Skip("UDT are only available on protocol >= 3")
+ }
+
+ // Uses reflection instead of implementing the marshaling type
+ session := createSession(t)
+ defer session.Close()
+
+ err := createTable(session, `CREATE TYPE gocql_test.horse(
+ name text,
+ owner text);`)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = createTable(session, `CREATE TABLE gocql_test.horse_race(
+ position int,
+ horse frozen<horse>,
+
+ primary key(position)
+ );`)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ type horse struct {
+ Name string `cql:"name"`
+ Owner string `cql:"owner"`
+ }
+
+ insertedHorse := &horse{
+ Name: "pony",
+ Owner: "jim",
+ }
+
+ err = session.Query("INSERT INTO horse_race(position, horse) VALUES(?, ?)", 1, insertedHorse).Exec()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ retrievedHorse := &horse{}
+ err = session.Query("SELECT horse FROM horse_race WHERE position = ?", 1).Scan(retrievedHorse)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if *retrievedHorse != *insertedHorse {
+ t.Fatal("exepcted to get %+v got %+v", insertedHorse, retrievedHorse)
+ }
+}
+
+func TestUDT_Proto2error(t *testing.T) {
+ if *flagProto < protoVersion3 {
+ t.Skip("UDT are only available on protocol >= 3")
+ }
+
+ cluster := createCluster()
+ cluster.ProtoVersion = 2
+ cluster.Keyspace = "gocql_test"
+
+ // Uses reflection instead of implementing the marshaling type
+ session, err := cluster.CreateSession()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer session.Close()
+
+ err = createTable(session, `CREATE TYPE gocql_test.fish(
+ name text,
+ owner text);`)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = createTable(session, `CREATE TABLE gocql_test.fish_race(
+ position int,
+ fish frozen<fish>,
+
+ primary key(position)
+ );`)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ type fish struct {
+ Name string `cql:"name"`
+ Owner string `cql:"owner"`
+ }
+
+ insertedFish := &fish{
+ Name: "pony",
+ Owner: "jim",
+ }
+
+ err = session.Query("INSERT INTO fish_race(position, fish) VALUES(?, ?)", 1, insertedFish).Exec()
+ if err != ErrorUDTUnavailable {
+ t.Fatalf("expected to get %v got %v", ErrorUDTUnavailable, err)
+ }
+}
+
+func TestUDT_NullObject(t *testing.T) {
+ if *flagProto < protoVersion3 {
+ t.Skip("UDT are only available on protocol >= 3")
+ }
+
+ session := createSession(t)
+ defer session.Close()
+
+ err := createTable(session, `CREATE TYPE gocql_test.udt_null_type(
+ name text,
+ owner text);`)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = createTable(session, `CREATE TABLE gocql_test.udt_null_table(
+ id uuid,
+ udt_col frozen<udt_null_type>,
+
+ primary key(id)
+ );`)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ type col struct {
+ Name string `cql:"name"`
+ Owner string `cql:"owner"`
+ }
+
+ id := TimeUUID()
+ err = session.Query("INSERT INTO udt_null_table(id) VALUES(?)", id).Exec()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ readCol := &col{
+ Name: "temp",
+ Owner: "temp",
+ }
+
+ err = session.Query("SELECT udt_col FROM udt_null_table WHERE id = ?", id).Scan(readCol)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if readCol.Name != "" {
+ t.Errorf("expected empty string to be returned for null udt: got %q", readCol.Name)
+ }
+ if readCol.Owner != "" {
+ t.Errorf("expected empty string to be returned for null udt: got %q", readCol.Owner)
+ }
+}
+
+func TestMapScanUDT(t *testing.T) {
+ if *flagProto < protoVersion3 {
+ t.Skip("UDT are only available on protocol >= 3")
+ }
+
+ session := createSession(t)
+ defer session.Close()
+
+ err := createTable(session, `CREATE TYPE gocql_test.log_entry (
+ created_timestamp timestamp,
+ message text
+ );`)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = createTable(session, `CREATE TABLE gocql_test.requests_by_id (
+ id uuid PRIMARY KEY,
+ type int,
+ log_entries list<frozen<log_entry>>
+ );`)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ entry := []struct {
+ CreatedTimestamp time.Time `cql:"created_timestamp"`
+ Message string `cql:"message"`
+ }{
+ {
+ CreatedTimestamp: time.Now().Truncate(time.Millisecond),
+ Message: "test time now",
+ },
+ }
+
+ id, _ := RandomUUID()
+ const typ = 1
+
+ err = session.Query("INSERT INTO requests_by_id(id, type, log_entries) VALUES (?, ?, ?)", id, typ, entry).Exec()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ rawResult := map[string]interface{}{}
+ err = session.Query(`SELECT * FROM requests_by_id WHERE id = ?`, id).MapScan(rawResult)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ logEntries, ok := rawResult["log_entries"].([]map[string]interface{})
+ if !ok {
+ t.Fatal("log_entries not in scanned map")
+ }
+
+ if len(logEntries) != 1 {
+ t.Fatalf("expected to get 1 log_entry got %d", len(logEntries))
+ }
+
+ logEntry := logEntries[0]
+
+ timestamp, ok := logEntry["created_timestamp"]
+ if !ok {
+ t.Error("created_timestamp not unmarshalled into map")
+ } else {
+ if ts, ok := timestamp.(time.Time); ok {
+ if !ts.In(time.UTC).Equal(entry[0].CreatedTimestamp.In(time.UTC)) {
+ t.Errorf("created_timestamp not equal to stored: got %v expected %v", ts.In(time.UTC), entry[0].CreatedTimestamp.In(time.UTC))
+ }
+ } else {
+ t.Errorf("created_timestamp was not time.Time got: %T", timestamp)
+ }
+ }
+
+ message, ok := logEntry["message"]
+ if !ok {
+ t.Error("message not unmarshalled into map")
+ } else {
+ if ts, ok := message.(string); ok {
+ if ts != entry[0].Message {
+ t.Errorf("message not equal to stored: got %v expected %v", ts, entry[0].Message)
+ }
+ } else {
+ t.Errorf("message was not string got: %T", message)
+ }
+ }
+
+}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/uuid_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/uuid_test.go
new file mode 100644
index 000000000..ec6a69ff9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/uuid_test.go
@@ -0,0 +1,197 @@
+// +build all unit
+
+package gocql
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestUUIDNil(t *testing.T) {
+ var uuid UUID
+ want, got := "00000000-0000-0000-0000-000000000000", uuid.String()
+ if want != got {
+ t.Fatalf("TestNil: expected %q got %q", want, got)
+ }
+}
+
+var testsUUID = []struct {
+ input string
+ variant int
+ version int
+}{
+ {"b4f00409-cef8-4822-802c-deb20704c365", VariantIETF, 4},
+ {"B4F00409-CEF8-4822-802C-DEB20704C365", VariantIETF, 4}, //Use capital letters
+ {"f81d4fae-7dec-11d0-a765-00a0c91e6bf6", VariantIETF, 1},
+ {"00000000-7dec-11d0-a765-00a0c91e6bf6", VariantIETF, 1},
+ {"3051a8d7-aea7-1801-e0bf-bc539dd60cf3", VariantFuture, 1},
+ {"3051a8d7-aea7-2801-e0bf-bc539dd60cf3", VariantFuture, 2},
+ {"3051a8d7-aea7-3801-e0bf-bc539dd60cf3", VariantFuture, 3},
+ {"3051a8d7-aea7-4801-e0bf-bc539dd60cf3", VariantFuture, 4},
+ {"3051a8d7-aea7-3801-e0bf-bc539dd60cf3", VariantFuture, 5},
+ {"d0e817e1-e4b1-1801-3fe6-b4b60ccecf9d", VariantNCSCompat, 0},
+ {"d0e817e1-e4b1-1801-bfe6-b4b60ccecf9d", VariantIETF, 1},
+ {"d0e817e1-e4b1-1801-dfe6-b4b60ccecf9d", VariantMicrosoft, 0},
+ {"d0e817e1-e4b1-1801-ffe6-b4b60ccecf9d", VariantFuture, 0},
+}
+
+func TestPredefinedUUID(t *testing.T) {
+ for i := range testsUUID {
+ uuid, err := ParseUUID(testsUUID[i].input)
+ if err != nil {
+ t.Errorf("ParseUUID #%d: %v", i, err)
+ continue
+ }
+
+ if str := uuid.String(); str != strings.ToLower(testsUUID[i].input) {
+ t.Errorf("String #%d: expected %q got %q", i, testsUUID[i].input, str)
+ continue
+ }
+
+ if variant := uuid.Variant(); variant != testsUUID[i].variant {
+ t.Errorf("Variant #%d: expected %d got %d", i, testsUUID[i].variant, variant)
+ }
+
+ if testsUUID[i].variant == VariantIETF {
+ if version := uuid.Version(); version != testsUUID[i].version {
+ t.Errorf("Version #%d: expected %d got %d", i, testsUUID[i].version, version)
+ }
+ }
+
+ json, err := uuid.MarshalJSON()
+ if err != nil {
+ t.Errorf("MarshalJSON #%d: %v", i, err)
+ }
+ expectedJson := `"` + strings.ToLower(testsUUID[i].input) + `"`
+ if string(json) != expectedJson {
+ t.Errorf("MarshalJSON #%d: expected %v got %v", i, expectedJson, string(json))
+ }
+
+ var unmarshaled UUID
+ err = unmarshaled.UnmarshalJSON(json)
+ if err != nil {
+ t.Errorf("UnmarshalJSON #%d: %v", i, err)
+ }
+ if unmarshaled != uuid {
+ t.Errorf("UnmarshalJSON #%d: expected %v got %v", i, uuid, unmarshaled)
+ }
+ }
+}
+
+func TestInvalidUUIDCharacter(t *testing.T) {
+ _, err := ParseUUID("z4f00409-cef8-4822-802c-deb20704c365")
+ if err == nil || !strings.Contains(err.Error(), "invalid UUID") {
+ t.Fatalf("expected invalid UUID error, got '%v' ", err)
+ }
+}
+
+func TestInvalidUUIDLength(t *testing.T) {
+ _, err := ParseUUID("4f00")
+ if err == nil || !strings.Contains(err.Error(), "invalid UUID") {
+ t.Fatalf("expected invalid UUID error, got '%v' ", err)
+ }
+
+ _, err = UUIDFromBytes(TimeUUID().Bytes()[:15])
+ if err == nil || err.Error() != "UUIDs must be exactly 16 bytes long" {
+ t.Fatalf("expected error '%v', got '%v'", "UUIDs must be exactly 16 bytes long", err)
+ }
+}
+
+func TestRandomUUID(t *testing.T) {
+ for i := 0; i < 20; i++ {
+ uuid, err := RandomUUID()
+ if err != nil {
+ t.Errorf("RandomUUID: %v", err)
+ }
+ if variant := uuid.Variant(); variant != VariantIETF {
+ t.Errorf("wrong variant. expected %d got %d", VariantIETF, variant)
+ }
+ if version := uuid.Version(); version != 4 {
+ t.Errorf("wrong version. expected %d got %d", 4, version)
+ }
+ }
+}
+
+func TestRandomUUIDInvalidAPICalls(t *testing.T) {
+ uuid, err := RandomUUID()
+ if err != nil {
+ t.Fatalf("unexpected error %v", err)
+ }
+
+ if node := uuid.Node(); node != nil {
+ t.Fatalf("expected nil, got %v", node)
+ }
+
+ if stamp := uuid.Timestamp(); stamp != 0 {
+ t.Fatalf("expceted 0, got %v", stamp)
+ }
+ zeroT := time.Time{}
+ if to := uuid.Time(); to != zeroT {
+ t.Fatalf("expected %v, got %v", zeroT, to)
+ }
+}
+
+func TestUUIDFromTime(t *testing.T) {
+ date := time.Date(1982, 5, 5, 12, 34, 56, 400, time.UTC)
+ uuid := UUIDFromTime(date)
+
+ if uuid.Time() != date {
+ t.Errorf("embedded time incorrect. Expected %v got %v", date, uuid.Time())
+ }
+}
+
+func TestParseUUID(t *testing.T) {
+ uuid, _ := ParseUUID("486f3a88-775b-11e3-ae07-d231feb1dc81")
+ if uuid.Time() != time.Date(2014, 1, 7, 5, 19, 29, 222516000, time.UTC) {
+ t.Errorf("Expected date of 1/7/2014 at 5:19:29.222516, got %v", uuid.Time())
+ }
+}
+
+func TestTimeUUID(t *testing.T) {
+ var node []byte
+ timestamp := int64(0)
+ for i := 0; i < 20; i++ {
+ uuid := TimeUUID()
+
+ if variant := uuid.Variant(); variant != VariantIETF {
+ t.Errorf("wrong variant. expected %d got %d", VariantIETF, variant)
+ }
+ if version := uuid.Version(); version != 1 {
+ t.Errorf("wrong version. expected %d got %d", 1, version)
+ }
+
+ if n := uuid.Node(); !bytes.Equal(n, node) && i > 0 {
+ t.Errorf("wrong node. expected %x, got %x", node, n)
+ } else if i == 0 {
+ node = n
+ }
+
+ ts := uuid.Timestamp()
+ if ts < timestamp {
+ t.Errorf("timestamps must grow")
+ }
+ timestamp = ts
+ }
+}
+
+func TestUnmarshalJSON(t *testing.T) {
+ var withHyphens, withoutHyphens, tooLong UUID
+
+ withHyphens.UnmarshalJSON([]byte(`"486f3a88-775b-11e3-ae07-d231feb1dc81"`))
+ if withHyphens.Time().Truncate(time.Second) != time.Date(2014, 1, 7, 5, 19, 29, 0, time.UTC) {
+ t.Errorf("Expected date of 1/7/2014 at 5:19:29, got %v", withHyphens.Time())
+ }
+
+ withoutHyphens.UnmarshalJSON([]byte(`"486f3a88775b11e3ae07d231feb1dc81"`))
+ if withoutHyphens.Time().Truncate(time.Second) != time.Date(2014, 1, 7, 5, 19, 29, 0, time.UTC) {
+ t.Errorf("Expected date of 1/7/2014 at 5:19:29, got %v", withoutHypens.Time())
+ }
+
+ err := tooLong.UnmarshalJSON([]byte(`"486f3a88-775b-11e3-ae07-d231feb1dc81486f3a88"`))
+ if err == nil {
+ t.Errorf("no error for invalid JSON UUID")
+ }
+
+}
diff --git a/Godeps/_workspace/src/github.com/gocql/gocql/wiki_test.go b/Godeps/_workspace/src/github.com/gocql/gocql/wiki_test.go
new file mode 100644
index 000000000..a762a9aff
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gocql/gocql/wiki_test.go
@@ -0,0 +1,279 @@
+// +build all integration
+
+package gocql
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+ "testing"
+ "time"
+
+ "gopkg.in/inf.v0"
+)
+
+type WikiPage struct {
+ Title string
+ RevId UUID
+ Body string
+ Views int64
+ Protected bool
+ Modified time.Time
+ Rating *inf.Dec
+ Tags []string
+ Attachments map[string]WikiAttachment
+}
+
+type WikiAttachment []byte
+
+var wikiTestData = []*WikiPage{
+ &WikiPage{
+ Title: "Frontpage",
+ RevId: TimeUUID(),
+ Body: "Welcome to this wiki page!",
+ Rating: inf.NewDec(131, 3),
+ Modified: time.Date(2013, time.August, 13, 9, 52, 3, 0, time.UTC),
+ Tags: []string{"start", "important", "test"},
+ Attachments: map[string]WikiAttachment{
+ "logo": WikiAttachment("\x00company logo\x00"),
+ "favicon": WikiAttachment("favicon.ico"),
+ },
+ },
+ &WikiPage{
+ Title: "Foobar",
+ RevId: TimeUUID(),
+ Body: "foo::Foo f = new foo::Foo(foo::Foo::INIT);",
+ Modified: time.Date(2013, time.August, 13, 9, 52, 3, 0, time.UTC),
+ },
+}
+
+type WikiTest struct {
+ session *Session
+ tb testing.TB
+
+ table string
+}
+
+func CreateSchema(session *Session, tb testing.TB, table string) *WikiTest {
+ table = "wiki_" + table
+ if err := createTable(session, fmt.Sprintf("DROP TABLE IF EXISTS gocql_test.%s", table)); err != nil {
+ tb.Fatal("CreateSchema:", err)
+ }
+
+ err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (
+ title varchar,
+ revid timeuuid,
+ body varchar,
+ views bigint,
+ protected boolean,
+ modified timestamp,
+ rating decimal,
+ tags set<varchar>,
+ attachments map<varchar, blob>,
+ PRIMARY KEY (title, revid)
+ )`, table))
+
+ if err != nil {
+ tb.Fatal("CreateSchema:", err)
+ }
+
+ return &WikiTest{
+ session: session,
+ tb: tb,
+ table: table,
+ }
+}
+
+func (w *WikiTest) CreatePages(n int) {
+ var page WikiPage
+ t0 := time.Now()
+ for i := 0; i < n; i++ {
+ page.Title = fmt.Sprintf("generated_%d", (i&16)+1)
+ page.Modified = t0.Add(time.Duration(i-n) * time.Minute)
+ page.RevId = UUIDFromTime(page.Modified)
+ page.Body = fmt.Sprintf("text %d", i)
+ if err := w.InsertPage(&page); err != nil {
+ w.tb.Error("CreatePages:", err)
+ }
+ }
+}
+
+func (w *WikiTest) InsertPage(page *WikiPage) error {
+ return w.session.Query(fmt.Sprintf(`INSERT INTO %s
+ (title, revid, body, views, protected, modified, rating, tags, attachments)
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`, w.table),
+ page.Title, page.RevId, page.Body, page.Views, page.Protected,
+ page.Modified, page.Rating, page.Tags, page.Attachments).Exec()
+}
+
+func (w *WikiTest) SelectPage(page *WikiPage, title string, revid UUID) error {
+ return w.session.Query(fmt.Sprintf(`SELECT title, revid, body, views, protected,
+		modified, tags, attachments, rating
+ FROM %s WHERE title = ? AND revid = ? LIMIT 1`, w.table),
+ title, revid).Scan(&page.Title, &page.RevId,
+ &page.Body, &page.Views, &page.Protected, &page.Modified, &page.Tags,
+ &page.Attachments, &page.Rating)
+}
+
+func (w *WikiTest) GetPageCount() int {
+ var count int
+ if err := w.session.Query(fmt.Sprintf(`SELECT COUNT(*) FROM %s`, w.table)).Scan(&count); err != nil {
+ w.tb.Error("GetPageCount", err)
+ }
+ return count
+}
+
+func TestWikiCreateSchema(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ CreateSchema(session, t, "create")
+}
+
+func BenchmarkWikiCreateSchema(b *testing.B) {
+ b.StopTimer()
+ session := createSession(b)
+ defer func() {
+ b.StopTimer()
+ session.Close()
+ }()
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ CreateSchema(session, b, "bench_create")
+ }
+}
+
+func TestWikiCreatePages(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ w := CreateSchema(session, t, "create_pages")
+
+ numPages := 5
+ w.CreatePages(numPages)
+ if count := w.GetPageCount(); count != numPages {
+ t.Errorf("expected %d pages, got %d pages.", numPages, count)
+ }
+}
+
+func BenchmarkWikiCreatePages(b *testing.B) {
+ b.StopTimer()
+ session := createSession(b)
+ defer func() {
+ b.StopTimer()
+ session.Close()
+ }()
+
+ w := CreateSchema(session, b, "bench_create_pages")
+
+ b.StartTimer()
+
+ w.CreatePages(b.N)
+}
+
+func BenchmarkWikiSelectAllPages(b *testing.B) {
+ b.StopTimer()
+ session := createSession(b)
+ defer func() {
+ b.StopTimer()
+ session.Close()
+ }()
+ w := CreateSchema(session, b, "bench_select_all")
+
+ w.CreatePages(100)
+ b.StartTimer()
+
+ var page WikiPage
+ for i := 0; i < b.N; i++ {
+ iter := session.Query(fmt.Sprintf(`SELECT title, revid, body, views, protected,
+ modified, tags, attachments, rating
+ FROM %s`, w.table)).Iter()
+ for iter.Scan(&page.Title, &page.RevId, &page.Body, &page.Views,
+ &page.Protected, &page.Modified, &page.Tags, &page.Attachments,
+ &page.Rating) {
+ // pass
+ }
+ if err := iter.Close(); err != nil {
+ b.Error(err)
+ }
+ }
+}
+
+func BenchmarkWikiSelectSinglePage(b *testing.B) {
+ b.StopTimer()
+ session := createSession(b)
+ defer func() {
+ b.StopTimer()
+ session.Close()
+ }()
+ w := CreateSchema(session, b, "bench_select_single")
+ pages := make([]WikiPage, 100)
+ w.CreatePages(len(pages))
+ iter := session.Query(fmt.Sprintf(`SELECT title, revid FROM %s`, w.table)).Iter()
+ for i := 0; i < len(pages); i++ {
+ if !iter.Scan(&pages[i].Title, &pages[i].RevId) {
+ pages = pages[:i]
+ break
+ }
+ }
+ if err := iter.Close(); err != nil {
+ b.Error(err)
+ }
+ b.StartTimer()
+
+ var page WikiPage
+ for i := 0; i < b.N; i++ {
+ p := &pages[i%len(pages)]
+ if err := w.SelectPage(&page, p.Title, p.RevId); err != nil {
+ b.Error(err)
+ }
+ }
+}
+
+func BenchmarkWikiSelectPageCount(b *testing.B) {
+ b.StopTimer()
+ session := createSession(b)
+ defer func() {
+ b.StopTimer()
+ session.Close()
+ }()
+
+ w := CreateSchema(session, b, "bench_page_count")
+ const numPages = 10
+ w.CreatePages(numPages)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ if count := w.GetPageCount(); count != numPages {
+ b.Errorf("expected %d pages, got %d pages.", numPages, count)
+ }
+ }
+}
+
+func TestWikiTypicalCRUD(t *testing.T) {
+ session := createSession(t)
+ defer session.Close()
+
+ w := CreateSchema(session, t, "crud")
+
+ for _, page := range wikiTestData {
+ if err := w.InsertPage(page); err != nil {
+ t.Error("InsertPage:", err)
+ }
+ }
+ if count := w.GetPageCount(); count != len(wikiTestData) {
+ t.Errorf("count: expected %d, got %d\n", len(wikiTestData), count)
+ }
+ for _, original := range wikiTestData {
+ page := new(WikiPage)
+ if err := w.SelectPage(page, original.Title, original.RevId); err != nil {
+ t.Error("SelectPage:", err)
+ continue
+ }
+ sort.Sort(sort.StringSlice(page.Tags))
+ sort.Sort(sort.StringSlice(original.Tags))
+ if !reflect.DeepEqual(page, original) {
+ t.Errorf("page: expected %#v, got %#v\n", original, page)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go b/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go
new file mode 100644
index 000000000..f8188f11e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go
@@ -0,0 +1,377 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+var (
+ download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
+ testdata = flag.String("testdata", "testdata", "Directory containing the test data")
+)
+
+func roundtrip(b, ebuf, dbuf []byte) error {
+ d, err := Decode(dbuf, Encode(ebuf, b))
+ if err != nil {
+ return fmt.Errorf("decoding error: %v", err)
+ }
+ if !bytes.Equal(b, d) {
+ return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d)
+ }
+ return nil
+}
+
+func TestEmpty(t *testing.T) {
+ if err := roundtrip(nil, nil, nil); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestSmallCopy(t *testing.T) {
+ for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
+ for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
+ for i := 0; i < 32; i++ {
+ s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb"
+ if err := roundtrip([]byte(s), ebuf, dbuf); err != nil {
+ t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err)
+ }
+ }
+ }
+ }
+}
+
+func TestSmallRand(t *testing.T) {
+ rng := rand.New(rand.NewSource(27354294))
+ for n := 1; n < 20000; n += 23 {
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = uint8(rng.Uint32())
+ }
+ if err := roundtrip(b, nil, nil); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestSmallRegular(t *testing.T) {
+ for n := 1; n < 20000; n += 23 {
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = uint8(i%10 + 'a')
+ }
+ if err := roundtrip(b, nil, nil); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestInvalidVarint(t *testing.T) {
+ data := []byte("\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00")
+ if _, err := DecodedLen(data); err != ErrCorrupt {
+ t.Errorf("DecodedLen: got %v, want ErrCorrupt", err)
+ }
+ if _, err := Decode(nil, data); err != ErrCorrupt {
+ t.Errorf("Decode: got %v, want ErrCorrupt", err)
+ }
+
+ // The encoded varint overflows 32 bits
+ data = []byte("\xff\xff\xff\xff\xff\x00")
+
+ if _, err := DecodedLen(data); err != ErrCorrupt {
+ t.Errorf("DecodedLen: got %v, want ErrCorrupt", err)
+ }
+ if _, err := Decode(nil, data); err != ErrCorrupt {
+ t.Errorf("Decode: got %v, want ErrCorrupt", err)
+ }
+}
+
+func cmp(a, b []byte) error {
+ if len(a) != len(b) {
+ return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
+ }
+ for i := range a {
+ if a[i] != b[i] {
+ return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i])
+ }
+ }
+ return nil
+}
+
+func TestFramingFormat(t *testing.T) {
+	// src consists of alternating 1e5-sized sequences of random
+ // (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
+ // because it is larger than maxUncompressedChunkLen (64k).
+ src := make([]byte, 1e6)
+ rng := rand.New(rand.NewSource(1))
+ for i := 0; i < 10; i++ {
+ if i%2 == 0 {
+ for j := 0; j < 1e5; j++ {
+ src[1e5*i+j] = uint8(rng.Intn(256))
+ }
+ } else {
+ for j := 0; j < 1e5; j++ {
+ src[1e5*i+j] = uint8(i)
+ }
+ }
+ }
+
+ buf := new(bytes.Buffer)
+ if _, err := NewWriter(buf).Write(src); err != nil {
+ t.Fatalf("Write: encoding: %v", err)
+ }
+ dst, err := ioutil.ReadAll(NewReader(buf))
+ if err != nil {
+ t.Fatalf("ReadAll: decoding: %v", err)
+ }
+ if err := cmp(dst, src); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestReaderReset(t *testing.T) {
+ gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000)
+ buf := new(bytes.Buffer)
+ if _, err := NewWriter(buf).Write(gold); err != nil {
+ t.Fatalf("Write: %v", err)
+ }
+ encoded, invalid, partial := buf.String(), "invalid", "partial"
+ r := NewReader(nil)
+ for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} {
+ if s == partial {
+ r.Reset(strings.NewReader(encoded))
+ if _, err := r.Read(make([]byte, 101)); err != nil {
+ t.Errorf("#%d: %v", i, err)
+ continue
+ }
+ continue
+ }
+ r.Reset(strings.NewReader(s))
+ got, err := ioutil.ReadAll(r)
+ switch s {
+ case encoded:
+ if err != nil {
+ t.Errorf("#%d: %v", i, err)
+ continue
+ }
+ if err := cmp(got, gold); err != nil {
+ t.Errorf("#%d: %v", i, err)
+ continue
+ }
+ case invalid:
+ if err == nil {
+ t.Errorf("#%d: got nil error, want non-nil", i)
+ continue
+ }
+ }
+ }
+}
+
+func TestWriterReset(t *testing.T) {
+ gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000)
+ var gots, wants [][]byte
+ const n = 20
+ w, failed := NewWriter(nil), false
+ for i := 0; i <= n; i++ {
+ buf := new(bytes.Buffer)
+ w.Reset(buf)
+ want := gold[:len(gold)*i/n]
+ if _, err := w.Write(want); err != nil {
+ t.Errorf("#%d: Write: %v", i, err)
+ failed = true
+ continue
+ }
+ got, err := ioutil.ReadAll(NewReader(buf))
+ if err != nil {
+ t.Errorf("#%d: ReadAll: %v", i, err)
+ failed = true
+ continue
+ }
+ gots = append(gots, got)
+ wants = append(wants, want)
+ }
+ if failed {
+ return
+ }
+ for i := range gots {
+ if err := cmp(gots[i], wants[i]); err != nil {
+ t.Errorf("#%d: %v", i, err)
+ }
+ }
+}
+
+func benchDecode(b *testing.B, src []byte) {
+ encoded := Encode(nil, src)
+ // Bandwidth is in amount of uncompressed data.
+ b.SetBytes(int64(len(src)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ Decode(src, encoded)
+ }
+}
+
+func benchEncode(b *testing.B, src []byte) {
+ // Bandwidth is in amount of uncompressed data.
+ b.SetBytes(int64(len(src)))
+ dst := make([]byte, MaxEncodedLen(len(src)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ Encode(dst, src)
+ }
+}
+
+func readFile(b testing.TB, filename string) []byte {
+ src, err := ioutil.ReadFile(filename)
+ if err != nil {
+ b.Skipf("skipping benchmark: %v", err)
+ }
+ if len(src) == 0 {
+ b.Fatalf("%s has zero length", filename)
+ }
+ return src
+}
+
+// expand returns a slice of length n containing repeated copies of src.
+func expand(src []byte, n int) []byte {
+ dst := make([]byte, n)
+ for x := dst; len(x) > 0; {
+ i := copy(x, src)
+ x = x[i:]
+ }
+ return dst
+}
+
+func benchWords(b *testing.B, n int, decode bool) {
+ // Note: the file is OS-language dependent so the resulting values are not
+ // directly comparable for non-US-English OS installations.
+ data := expand(readFile(b, "/usr/share/dict/words"), n)
+ if decode {
+ benchDecode(b, data)
+ } else {
+ benchEncode(b, data)
+ }
+}
+
+func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) }
+func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) }
+func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) }
+func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) }
+func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) }
+func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) }
+func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
+func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
+
+// testFiles' values are copied directly from
+// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
+// The label field is unused in snappy-go.
+var testFiles = []struct {
+ label string
+ filename string
+}{
+ {"html", "html"},
+ {"urls", "urls.10K"},
+ {"jpg", "fireworks.jpeg"},
+ {"jpg_200", "fireworks.jpeg"},
+ {"pdf", "paper-100k.pdf"},
+ {"html4", "html_x_4"},
+ {"txt1", "alice29.txt"},
+ {"txt2", "asyoulik.txt"},
+ {"txt3", "lcet10.txt"},
+ {"txt4", "plrabn12.txt"},
+ {"pb", "geo.protodata"},
+ {"gaviota", "kppkn.gtb"},
+}
+
+// The test data files are present at this canonical URL.
+const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"
+
+func downloadTestdata(b *testing.B, basename string) (errRet error) {
+ filename := filepath.Join(*testdata, basename)
+ if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
+ return nil
+ }
+
+ if !*download {
+ b.Skipf("test data not found; skipping benchmark without the -download flag")
+ }
+ // Download the official snappy C++ implementation reference test data
+ // files for benchmarking.
+ if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) {
+ return fmt.Errorf("failed to create testdata: %s", err)
+ }
+
+ f, err := os.Create(filename)
+ if err != nil {
+ return fmt.Errorf("failed to create %s: %s", filename, err)
+ }
+ defer f.Close()
+ defer func() {
+ if errRet != nil {
+ os.Remove(filename)
+ }
+ }()
+ url := baseURL + basename
+ resp, err := http.Get(url)
+ if err != nil {
+ return fmt.Errorf("failed to download %s: %s", url, err)
+ }
+ defer resp.Body.Close()
+ if s := resp.StatusCode; s != http.StatusOK {
+ return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
+ }
+ _, err = io.Copy(f, resp.Body)
+ if err != nil {
+ return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
+ }
+ return nil
+}
+
+func benchFile(b *testing.B, n int, decode bool) {
+ if err := downloadTestdata(b, testFiles[n].filename); err != nil {
+ b.Fatalf("failed to download testdata: %s", err)
+ }
+ data := readFile(b, filepath.Join(*testdata, testFiles[n].filename))
+ if decode {
+ benchDecode(b, data)
+ } else {
+ benchEncode(b, data)
+ }
+}
+
+// Naming convention is kept similar to what snappy's C++ implementation uses.
+func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) }
+func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) }
+func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) }
+func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) }
+func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) }
+func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) }
+func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) }
+func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) }
+func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) }
+func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) }
+func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
+func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
+func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) }
+func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) }
+func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) }
+func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) }
+func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) }
+func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) }
+func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) }
+func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) }
+func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) }
+func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) }
+func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
+func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/activity_events_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/activity_events_test.go
new file mode 100644
index 000000000..8d1d1ff32
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/activity_events_test.go
@@ -0,0 +1,305 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestActivityService_ListEvents(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/events", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "page": "2",
+ })
+ fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ events, _, err := client.Activity.ListEvents(opt)
+ if err != nil {
+ t.Errorf("Activities.ListEvents returned error: %v", err)
+ }
+
+ want := []Event{{ID: String("1")}, {ID: String("2")}}
+ if !reflect.DeepEqual(events, want) {
+ t.Errorf("Activities.ListEvents returned %+v, want %+v", events, want)
+ }
+}
+
+func TestActivityService_ListRepositoryEvents(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/events", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "page": "2",
+ })
+ fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ events, _, err := client.Activity.ListRepositoryEvents("o", "r", opt)
+ if err != nil {
+ t.Errorf("Activities.ListRepositoryEvents returned error: %v", err)
+ }
+
+ want := []Event{{ID: String("1")}, {ID: String("2")}}
+ if !reflect.DeepEqual(events, want) {
+ t.Errorf("Activities.ListRepositoryEvents returned %+v, want %+v", events, want)
+ }
+}
+
+func TestActivityService_ListRepositoryEvents_invalidOwner(t *testing.T) {
+ _, _, err := client.Activity.ListRepositoryEvents("%", "%", nil)
+ testURLParseError(t, err)
+}
+
+func TestActivityService_ListIssueEventsForRepository(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/issues/events", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "page": "2",
+ })
+ fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ events, _, err := client.Activity.ListIssueEventsForRepository("o", "r", opt)
+ if err != nil {
+ t.Errorf("Activities.ListIssueEventsForRepository returned error: %v", err)
+ }
+
+ want := []Event{{ID: String("1")}, {ID: String("2")}}
+ if !reflect.DeepEqual(events, want) {
+ t.Errorf("Activities.ListIssueEventsForRepository returned %+v, want %+v", events, want)
+ }
+}
+
+func TestActivityService_ListIssueEventsForRepository_invalidOwner(t *testing.T) {
+ _, _, err := client.Activity.ListIssueEventsForRepository("%", "%", nil)
+ testURLParseError(t, err)
+}
+
+func TestActivityService_ListEventsForRepoNetwork(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/networks/o/r/events", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "page": "2",
+ })
+ fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ events, _, err := client.Activity.ListEventsForRepoNetwork("o", "r", opt)
+ if err != nil {
+ t.Errorf("Activities.ListEventsForRepoNetwork returned error: %v", err)
+ }
+
+ want := []Event{{ID: String("1")}, {ID: String("2")}}
+ if !reflect.DeepEqual(events, want) {
+ t.Errorf("Activities.ListEventsForRepoNetwork returned %+v, want %+v", events, want)
+ }
+}
+
+func TestActivityService_ListEventsForRepoNetwork_invalidOwner(t *testing.T) {
+ _, _, err := client.Activity.ListEventsForRepoNetwork("%", "%", nil)
+ testURLParseError(t, err)
+}
+
+func TestActivityService_ListEventsForOrganization(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/events", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "page": "2",
+ })
+ fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ events, _, err := client.Activity.ListEventsForOrganization("o", opt)
+ if err != nil {
+ t.Errorf("Activities.ListEventsForOrganization returned error: %v", err)
+ }
+
+ want := []Event{{ID: String("1")}, {ID: String("2")}}
+ if !reflect.DeepEqual(events, want) {
+ t.Errorf("Activities.ListEventsForOrganization returned %+v, want %+v", events, want)
+ }
+}
+
+func TestActivityService_ListEventsForOrganization_invalidOrg(t *testing.T) {
+ _, _, err := client.Activity.ListEventsForOrganization("%", nil)
+ testURLParseError(t, err)
+}
+
+func TestActivityService_ListEventsPerformedByUser_all(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/events", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "page": "2",
+ })
+ fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ events, _, err := client.Activity.ListEventsPerformedByUser("u", false, opt)
+ if err != nil {
+ t.Errorf("Events.ListPerformedByUser returned error: %v", err)
+ }
+
+ want := []Event{{ID: String("1")}, {ID: String("2")}}
+ if !reflect.DeepEqual(events, want) {
+ t.Errorf("Events.ListPerformedByUser returned %+v, want %+v", events, want)
+ }
+}
+
+func TestActivityService_ListEventsPerformedByUser_publicOnly(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/events/public", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
+ })
+
+ events, _, err := client.Activity.ListEventsPerformedByUser("u", true, nil)
+ if err != nil {
+ t.Errorf("Events.ListPerformedByUser returned error: %v", err)
+ }
+
+ want := []Event{{ID: String("1")}, {ID: String("2")}}
+ if !reflect.DeepEqual(events, want) {
+ t.Errorf("Events.ListPerformedByUser returned %+v, want %+v", events, want)
+ }
+}
+
+func TestActivityService_ListEventsPerformedByUser_invalidUser(t *testing.T) {
+ _, _, err := client.Activity.ListEventsPerformedByUser("%", false, nil)
+ testURLParseError(t, err)
+}
+
+func TestActivityService_ListEventsReceivedByUser_all(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/received_events", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "page": "2",
+ })
+ fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ events, _, err := client.Activity.ListEventsReceivedByUser("u", false, opt)
+ if err != nil {
+ t.Errorf("Events.ListReceivedByUser returned error: %v", err)
+ }
+
+ want := []Event{{ID: String("1")}, {ID: String("2")}}
+ if !reflect.DeepEqual(events, want) {
+		t.Errorf("Events.ListReceivedByUser returned %+v, want %+v", events, want)
+ }
+}
+
+func TestActivityService_ListEventsReceivedByUser_publicOnly(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/received_events/public", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
+ })
+
+ events, _, err := client.Activity.ListEventsReceivedByUser("u", true, nil)
+ if err != nil {
+ t.Errorf("Events.ListReceivedByUser returned error: %v", err)
+ }
+
+ want := []Event{{ID: String("1")}, {ID: String("2")}}
+ if !reflect.DeepEqual(events, want) {
+ t.Errorf("Events.ListReceivedByUser returned %+v, want %+v", events, want)
+ }
+}
+
+func TestActivityService_ListEventsReceivedByUser_invalidUser(t *testing.T) {
+ _, _, err := client.Activity.ListEventsReceivedByUser("%", false, nil)
+ testURLParseError(t, err)
+}
+
+func TestActivityService_ListUserEventsForOrganization(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/events/orgs/o", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "page": "2",
+ })
+ fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ events, _, err := client.Activity.ListUserEventsForOrganization("o", "u", opt)
+ if err != nil {
+ t.Errorf("Activities.ListUserEventsForOrganization returned error: %v", err)
+ }
+
+ want := []Event{{ID: String("1")}, {ID: String("2")}}
+ if !reflect.DeepEqual(events, want) {
+ t.Errorf("Activities.ListUserEventsForOrganization returned %+v, want %+v", events, want)
+ }
+}
+
+func TestActivity_EventPayload_typed(t *testing.T) {
+ raw := []byte(`{"type": "PushEvent","payload":{"push_id": 1}}`)
+ var event *Event
+ if err := json.Unmarshal(raw, &event); err != nil {
+ t.Fatalf("Unmarshal Event returned error: %v", err)
+ }
+
+ want := &PushEvent{PushID: Int(1)}
+ if !reflect.DeepEqual(event.Payload(), want) {
+ t.Errorf("Event Payload returned %+v, want %+v", event.Payload(), want)
+ }
+}
+
+// TestActivity_EventPayload_untyped checks that unrecognized events are parsed to an
+// interface{} value (instead of being discarded or throwing an error), for
+// forward compatibility with new event types.
+func TestActivity_EventPayload_untyped(t *testing.T) {
+ raw := []byte(`{"type": "UnrecognizedEvent","payload":{"field": "val"}}`)
+ var event *Event
+ if err := json.Unmarshal(raw, &event); err != nil {
+ t.Fatalf("Unmarshal Event returned error: %v", err)
+ }
+
+ want := map[string]interface{}{"field": "val"}
+ if !reflect.DeepEqual(event.Payload(), want) {
+ t.Errorf("Event Payload returned %+v, want %+v", event.Payload(), want)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/activity_notifications_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/activity_notifications_test.go
new file mode 100644
index 000000000..f72ee413c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/activity_notifications_test.go
@@ -0,0 +1,205 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestActivityService_ListNotification(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/notifications", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "all": "true",
+ "participating": "true",
+ "since": "2006-01-02T15:04:05Z",
+ "before": "2007-03-04T15:04:05Z",
+ })
+
+ fmt.Fprint(w, `[{"id":"1", "subject":{"title":"t"}}]`)
+ })
+
+ opt := &NotificationListOptions{
+ All: true,
+ Participating: true,
+ Since: time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC),
+ Before: time.Date(2007, 03, 04, 15, 04, 05, 0, time.UTC),
+ }
+ notifications, _, err := client.Activity.ListNotifications(opt)
+ if err != nil {
+ t.Errorf("Activity.ListNotifications returned error: %v", err)
+ }
+
+ want := []Notification{{ID: String("1"), Subject: &NotificationSubject{Title: String("t")}}}
+ if !reflect.DeepEqual(notifications, want) {
+ t.Errorf("Activity.ListNotifications returned %+v, want %+v", notifications, want)
+ }
+}
+
+func TestActivityService_ListRepositoryNotification(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/notifications", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{"id":"1"}]`)
+ })
+
+ notifications, _, err := client.Activity.ListRepositoryNotifications("o", "r", nil)
+ if err != nil {
+ t.Errorf("Activity.ListRepositoryNotifications returned error: %v", err)
+ }
+
+ want := []Notification{{ID: String("1")}}
+ if !reflect.DeepEqual(notifications, want) {
+ t.Errorf("Activity.ListRepositoryNotifications returned %+v, want %+v", notifications, want)
+ }
+}
+
+func TestActivityService_MarkNotificationsRead(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/notifications", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "PUT")
+ testFormValues(t, r, values{
+ "last_read_at": "2006-01-02T15:04:05Z",
+ })
+
+ w.WriteHeader(http.StatusResetContent)
+ })
+
+ _, err := client.Activity.MarkNotificationsRead(time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC))
+ if err != nil {
+ t.Errorf("Activity.MarkNotificationsRead returned error: %v", err)
+ }
+}
+
+func TestActivityService_MarkRepositoryNotificationsRead(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/notifications", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "PUT")
+ testFormValues(t, r, values{
+ "last_read_at": "2006-01-02T15:04:05Z",
+ })
+
+ w.WriteHeader(http.StatusResetContent)
+ })
+
+ _, err := client.Activity.MarkRepositoryNotificationsRead("o", "r", time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC))
+ if err != nil {
+ t.Errorf("Activity.MarkRepositoryNotificationsRead returned error: %v", err)
+ }
+}
+
+func TestActivityService_GetThread(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/notifications/threads/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id":"1"}`)
+ })
+
+ notification, _, err := client.Activity.GetThread("1")
+ if err != nil {
+ t.Errorf("Activity.GetThread returned error: %v", err)
+ }
+
+ want := &Notification{ID: String("1")}
+ if !reflect.DeepEqual(notification, want) {
+ t.Errorf("Activity.GetThread returned %+v, want %+v", notification, want)
+ }
+}
+
+func TestActivityService_MarkThreadRead(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/notifications/threads/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "PATCH")
+ w.WriteHeader(http.StatusResetContent)
+ })
+
+ _, err := client.Activity.MarkThreadRead("1")
+ if err != nil {
+ t.Errorf("Activity.MarkThreadRead returned error: %v", err)
+ }
+}
+
+func TestActivityService_GetThreadSubscription(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/notifications/threads/1/subscription", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"subscribed":true}`)
+ })
+
+ sub, _, err := client.Activity.GetThreadSubscription("1")
+ if err != nil {
+ t.Errorf("Activity.GetThreadSubscription returned error: %v", err)
+ }
+
+ want := &Subscription{Subscribed: Bool(true)}
+ if !reflect.DeepEqual(sub, want) {
+ t.Errorf("Activity.GetThreadSubscription returned %+v, want %+v", sub, want)
+ }
+}
+
+func TestActivityService_SetThreadSubscription(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Subscription{Subscribed: Bool(true)}
+
+ mux.HandleFunc("/notifications/threads/1/subscription", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Subscription)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PUT")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"ignored":true}`)
+ })
+
+ sub, _, err := client.Activity.SetThreadSubscription("1", input)
+ if err != nil {
+ t.Errorf("Activity.SetThreadSubscription returned error: %v", err)
+ }
+
+ want := &Subscription{Ignored: Bool(true)}
+ if !reflect.DeepEqual(sub, want) {
+ t.Errorf("Activity.SetThreadSubscription returned %+v, want %+v", sub, want)
+ }
+}
+
+func TestActivityService_DeleteThreadSubscription(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/notifications/threads/1/subscription", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ _, err := client.Activity.DeleteThreadSubscription("1")
+ if err != nil {
+ t.Errorf("Activity.DeleteThreadSubscription returned error: %v", err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/activity_star_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/activity_star_test.go
new file mode 100644
index 000000000..eb2c4055e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/activity_star_test.go
@@ -0,0 +1,170 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestActivityService_ListStargazers(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/stargazers", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "page": "2",
+ })
+
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ stargazers, _, err := client.Activity.ListStargazers("o", "r", &ListOptions{Page: 2})
+ if err != nil {
+ t.Errorf("Activity.ListStargazers returned error: %v", err)
+ }
+
+ want := []User{{ID: Int(1)}}
+ if !reflect.DeepEqual(stargazers, want) {
+ t.Errorf("Activity.ListStargazers returned %+v, want %+v", stargazers, want)
+ }
+}
+
+func TestActivityService_ListStarred_authenticatedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/starred", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testHeader(t, r, "Accept", mediaTypeStarringPreview)
+ fmt.Fprint(w, `[{"starred_at":"2002-02-10T15:30:00Z","repo":{"id":1}}]`)
+ })
+
+ repos, _, err := client.Activity.ListStarred("", nil)
+ if err != nil {
+ t.Errorf("Activity.ListStarred returned error: %v", err)
+ }
+
+ want := []StarredRepository{{StarredAt: &Timestamp{time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC)}, Repository: &Repository{ID: Int(1)}}}
+ if !reflect.DeepEqual(repos, want) {
+ t.Errorf("Activity.ListStarred returned %+v, want %+v", repos, want)
+ }
+}
+
+func TestActivityService_ListStarred_specifiedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/starred", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testHeader(t, r, "Accept", mediaTypeStarringPreview)
+ testFormValues(t, r, values{
+ "sort": "created",
+ "direction": "asc",
+ "page": "2",
+ })
+ fmt.Fprint(w, `[{"starred_at":"2002-02-10T15:30:00Z","repo":{"id":2}}]`)
+ })
+
+ opt := &ActivityListStarredOptions{"created", "asc", ListOptions{Page: 2}}
+ repos, _, err := client.Activity.ListStarred("u", opt)
+ if err != nil {
+ t.Errorf("Activity.ListStarred returned error: %v", err)
+ }
+
+ want := []StarredRepository{{StarredAt: &Timestamp{time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC)}, Repository: &Repository{ID: Int(2)}}}
+ if !reflect.DeepEqual(repos, want) {
+ t.Errorf("Activity.ListStarred returned %+v, want %+v", repos, want)
+ }
+}
+
+func TestActivityService_ListStarred_invalidUser(t *testing.T) {
+ _, _, err := client.Activity.ListStarred("%", nil)
+ testURLParseError(t, err)
+}
+
+func TestActivityService_IsStarred_hasStar(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/starred/o/r", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ star, _, err := client.Activity.IsStarred("o", "r")
+ if err != nil {
+ t.Errorf("Activity.IsStarred returned error: %v", err)
+ }
+ if want := true; star != want {
+ t.Errorf("Activity.IsStarred returned %+v, want %+v", star, want)
+ }
+}
+
+func TestActivityService_IsStarred_noStar(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/starred/o/r", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusNotFound)
+ })
+
+ star, _, err := client.Activity.IsStarred("o", "r")
+ if err != nil {
+ t.Errorf("Activity.IsStarred returned error: %v", err)
+ }
+ if want := false; star != want {
+ t.Errorf("Activity.IsStarred returned %+v, want %+v", star, want)
+ }
+}
+
+func TestActivityService_IsStarred_invalidID(t *testing.T) {
+ _, _, err := client.Activity.IsStarred("%", "%")
+ testURLParseError(t, err)
+}
+
+func TestActivityService_Star(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/starred/o/r", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "PUT")
+ })
+
+ _, err := client.Activity.Star("o", "r")
+ if err != nil {
+ t.Errorf("Activity.Star returned error: %v", err)
+ }
+}
+
+func TestActivityService_Star_invalidID(t *testing.T) {
+ _, err := client.Activity.Star("%", "%")
+ testURLParseError(t, err)
+}
+
+func TestActivityService_Unstar(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/starred/o/r", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Activity.Unstar("o", "r")
+ if err != nil {
+ t.Errorf("Activity.Unstar returned error: %v", err)
+ }
+}
+
+func TestActivityService_Unstar_invalidID(t *testing.T) {
+ _, err := client.Activity.Unstar("%", "%")
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/activity_watching_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/activity_watching_test.go
new file mode 100644
index 000000000..8046ee217
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/activity_watching_test.go
@@ -0,0 +1,177 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestActivityService_ListWatchers(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/subscribers", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "page": "2",
+ })
+
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ watchers, _, err := client.Activity.ListWatchers("o", "r", &ListOptions{Page: 2})
+ if err != nil {
+ t.Errorf("Activity.ListWatchers returned error: %v", err)
+ }
+
+ want := []User{{ID: Int(1)}}
+ if !reflect.DeepEqual(watchers, want) {
+ t.Errorf("Activity.ListWatchers returned %+v, want %+v", watchers, want)
+ }
+}
+
+func TestActivityService_ListWatched_authenticatedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/subscriptions", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ watched, _, err := client.Activity.ListWatched("")
+ if err != nil {
+ t.Errorf("Activity.ListWatched returned error: %v", err)
+ }
+
+ want := []Repository{{ID: Int(1)}}
+ if !reflect.DeepEqual(watched, want) {
+ t.Errorf("Activity.ListWatched returned %+v, want %+v", watched, want)
+ }
+}
+
+func TestActivityService_ListWatched_specifiedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/subscriptions", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ watched, _, err := client.Activity.ListWatched("u")
+ if err != nil {
+ t.Errorf("Activity.ListWatched returned error: %v", err)
+ }
+
+ want := []Repository{{ID: Int(1)}}
+ if !reflect.DeepEqual(watched, want) {
+ t.Errorf("Activity.ListWatched returned %+v, want %+v", watched, want)
+ }
+}
+
+func TestActivityService_GetRepositorySubscription_true(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/subscription", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"subscribed":true}`)
+ })
+
+ sub, _, err := client.Activity.GetRepositorySubscription("o", "r")
+ if err != nil {
+ t.Errorf("Activity.GetRepositorySubscription returned error: %v", err)
+ }
+
+ want := &Subscription{Subscribed: Bool(true)}
+ if !reflect.DeepEqual(sub, want) {
+ t.Errorf("Activity.GetRepositorySubscription returned %+v, want %+v", sub, want)
+ }
+}
+
+func TestActivityService_GetRepositorySubscription_false(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/subscription", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusNotFound)
+ })
+
+ sub, _, err := client.Activity.GetRepositorySubscription("o", "r")
+ if err != nil {
+ t.Errorf("Activity.GetRepositorySubscription returned error: %v", err)
+ }
+
+ var want *Subscription
+ if !reflect.DeepEqual(sub, want) {
+ t.Errorf("Activity.GetRepositorySubscription returned %+v, want %+v", sub, want)
+ }
+}
+
+func TestActivityService_GetRepositorySubscription_error(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/subscription", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusBadRequest)
+ })
+
+ _, _, err := client.Activity.GetRepositorySubscription("o", "r")
+ if err == nil {
+ t.Errorf("Expected HTTP 400 response")
+ }
+}
+
+func TestActivityService_SetRepositorySubscription(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Subscription{Subscribed: Bool(true)}
+
+ mux.HandleFunc("/repos/o/r/subscription", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Subscription)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PUT")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"ignored":true}`)
+ })
+
+ sub, _, err := client.Activity.SetRepositorySubscription("o", "r", input)
+ if err != nil {
+ t.Errorf("Activity.SetRepositorySubscription returned error: %v", err)
+ }
+
+ want := &Subscription{Ignored: Bool(true)}
+ if !reflect.DeepEqual(sub, want) {
+ t.Errorf("Activity.SetRepositorySubscription returned %+v, want %+v", sub, want)
+ }
+}
+
+func TestActivityService_DeleteRepositorySubscription(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/subscription", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ _, err := client.Activity.DeleteRepositorySubscription("o", "r")
+ if err != nil {
+ t.Errorf("Activity.DeleteRepositorySubscription returned error: %v", err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/gists_comments_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/gists_comments_test.go
new file mode 100644
index 000000000..b2bbf23f7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/gists_comments_test.go
@@ -0,0 +1,155 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestGistsService_ListComments(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/gists/1/comments", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"id": 1}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ comments, _, err := client.Gists.ListComments("1", opt)
+
+ if err != nil {
+		t.Errorf("Gists.ListComments returned error: %v", err)
+ }
+
+ want := []GistComment{{ID: Int(1)}}
+ if !reflect.DeepEqual(comments, want) {
+ t.Errorf("Gists.ListComments returned %+v, want %+v", comments, want)
+ }
+}
+
+func TestGistsService_ListComments_invalidID(t *testing.T) {
+ _, _, err := client.Gists.ListComments("%", nil)
+ testURLParseError(t, err)
+}
+
+func TestGistsService_GetComment(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/gists/1/comments/2", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id": 1}`)
+ })
+
+ comment, _, err := client.Gists.GetComment("1", 2)
+
+ if err != nil {
+ t.Errorf("Gists.GetComment returned error: %v", err)
+ }
+
+ want := &GistComment{ID: Int(1)}
+ if !reflect.DeepEqual(comment, want) {
+ t.Errorf("Gists.GetComment returned %+v, want %+v", comment, want)
+ }
+}
+
+func TestGistsService_GetComment_invalidID(t *testing.T) {
+ _, _, err := client.Gists.GetComment("%", 1)
+ testURLParseError(t, err)
+}
+
+func TestGistsService_CreateComment(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &GistComment{ID: Int(1), Body: String("b")}
+
+ mux.HandleFunc("/gists/1/comments", func(w http.ResponseWriter, r *http.Request) {
+ v := new(GistComment)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ comment, _, err := client.Gists.CreateComment("1", input)
+ if err != nil {
+ t.Errorf("Gists.CreateComment returned error: %v", err)
+ }
+
+ want := &GistComment{ID: Int(1)}
+ if !reflect.DeepEqual(comment, want) {
+ t.Errorf("Gists.CreateComment returned %+v, want %+v", comment, want)
+ }
+}
+
+func TestGistsService_CreateComment_invalidID(t *testing.T) {
+ _, _, err := client.Gists.CreateComment("%", nil)
+ testURLParseError(t, err)
+}
+
+func TestGistsService_EditComment(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &GistComment{ID: Int(1), Body: String("b")}
+
+ mux.HandleFunc("/gists/1/comments/2", func(w http.ResponseWriter, r *http.Request) {
+ v := new(GistComment)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ comment, _, err := client.Gists.EditComment("1", 2, input)
+ if err != nil {
+ t.Errorf("Gists.EditComment returned error: %v", err)
+ }
+
+ want := &GistComment{ID: Int(1)}
+ if !reflect.DeepEqual(comment, want) {
+ t.Errorf("Gists.EditComment returned %+v, want %+v", comment, want)
+ }
+}
+
+func TestGistsService_EditComment_invalidID(t *testing.T) {
+ _, _, err := client.Gists.EditComment("%", 1, nil)
+ testURLParseError(t, err)
+}
+
+func TestGistsService_DeleteComment(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/gists/1/comments/2", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Gists.DeleteComment("1", 2)
+ if err != nil {
+		t.Errorf("Gists.DeleteComment returned error: %v", err)
+ }
+}
+
+func TestGistsService_DeleteComment_invalidID(t *testing.T) {
+ _, err := client.Gists.DeleteComment("%", 1)
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/gists_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/gists_test.go
new file mode 100644
index 000000000..573120129
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/gists_test.go
@@ -0,0 +1,411 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestGistsService_List_specifiedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ since := "2013-01-01T00:00:00Z"
+
+ mux.HandleFunc("/users/u/gists", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "since": since,
+ })
+ fmt.Fprint(w, `[{"id": "1"}]`)
+ })
+
+ opt := &GistListOptions{Since: time.Date(2013, time.January, 1, 0, 0, 0, 0, time.UTC)}
+ gists, _, err := client.Gists.List("u", opt)
+
+ if err != nil {
+ t.Errorf("Gists.List returned error: %v", err)
+ }
+
+ want := []Gist{{ID: String("1")}}
+ if !reflect.DeepEqual(gists, want) {
+ t.Errorf("Gists.List returned %+v, want %+v", gists, want)
+ }
+}
+
+func TestGistsService_List_authenticatedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/gists", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{"id": "1"}]`)
+ })
+
+ gists, _, err := client.Gists.List("", nil)
+ if err != nil {
+ t.Errorf("Gists.List returned error: %v", err)
+ }
+
+ want := []Gist{{ID: String("1")}}
+ if !reflect.DeepEqual(gists, want) {
+ t.Errorf("Gists.List returned %+v, want %+v", gists, want)
+ }
+}
+
+func TestGistsService_List_invalidUser(t *testing.T) {
+ _, _, err := client.Gists.List("%", nil)
+ testURLParseError(t, err)
+}
+
+func TestGistsService_ListAll(t *testing.T) {
+ setup()
+ defer teardown()
+
+ since := "2013-01-01T00:00:00Z"
+
+ mux.HandleFunc("/gists/public", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "since": since,
+ })
+ fmt.Fprint(w, `[{"id": "1"}]`)
+ })
+
+ opt := &GistListOptions{Since: time.Date(2013, time.January, 1, 0, 0, 0, 0, time.UTC)}
+ gists, _, err := client.Gists.ListAll(opt)
+
+ if err != nil {
+ t.Errorf("Gists.ListAll returned error: %v", err)
+ }
+
+ want := []Gist{{ID: String("1")}}
+ if !reflect.DeepEqual(gists, want) {
+ t.Errorf("Gists.ListAll returned %+v, want %+v", gists, want)
+ }
+}
+
+func TestGistsService_ListStarred(t *testing.T) {
+ setup()
+ defer teardown()
+
+ since := "2013-01-01T00:00:00Z"
+
+ mux.HandleFunc("/gists/starred", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "since": since,
+ })
+ fmt.Fprint(w, `[{"id": "1"}]`)
+ })
+
+ opt := &GistListOptions{Since: time.Date(2013, time.January, 1, 0, 0, 0, 0, time.UTC)}
+ gists, _, err := client.Gists.ListStarred(opt)
+
+ if err != nil {
+ t.Errorf("Gists.ListStarred returned error: %v", err)
+ }
+
+ want := []Gist{{ID: String("1")}}
+ if !reflect.DeepEqual(gists, want) {
+ t.Errorf("Gists.ListStarred returned %+v, want %+v", gists, want)
+ }
+}
+
+func TestGistsService_Get(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/gists/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id": "1"}`)
+ })
+
+ gist, _, err := client.Gists.Get("1")
+
+ if err != nil {
+ t.Errorf("Gists.Get returned error: %v", err)
+ }
+
+ want := &Gist{ID: String("1")}
+ if !reflect.DeepEqual(gist, want) {
+ t.Errorf("Gists.Get returned %+v, want %+v", gist, want)
+ }
+}
+
+func TestGistsService_Get_invalidID(t *testing.T) {
+ _, _, err := client.Gists.Get("%")
+ testURLParseError(t, err)
+}
+
+func TestGistsService_GetRevision(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/gists/1/s", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id": "1"}`)
+ })
+
+ gist, _, err := client.Gists.GetRevision("1", "s")
+
+ if err != nil {
+		t.Errorf("Gists.GetRevision returned error: %v", err)
+ }
+
+ want := &Gist{ID: String("1")}
+ if !reflect.DeepEqual(gist, want) {
+		t.Errorf("Gists.GetRevision returned %+v, want %+v", gist, want)
+ }
+}
+
+func TestGistsService_GetRevision_invalidID(t *testing.T) {
+ _, _, err := client.Gists.GetRevision("%", "%")
+ testURLParseError(t, err)
+}
+
+func TestGistsService_Create(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Gist{
+ Description: String("Gist description"),
+ Public: Bool(false),
+ Files: map[GistFilename]GistFile{
+ "test.txt": {Content: String("Gist file content")},
+ },
+ }
+
+ mux.HandleFunc("/gists", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Gist)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w,
+ `
+ {
+ "id": "1",
+ "description": "Gist description",
+ "public": false,
+ "files": {
+ "test.txt": {
+ "filename": "test.txt"
+ }
+ }
+ }`)
+ })
+
+ gist, _, err := client.Gists.Create(input)
+ if err != nil {
+ t.Errorf("Gists.Create returned error: %v", err)
+ }
+
+ want := &Gist{
+ ID: String("1"),
+ Description: String("Gist description"),
+ Public: Bool(false),
+ Files: map[GistFilename]GistFile{
+ "test.txt": {Filename: String("test.txt")},
+ },
+ }
+ if !reflect.DeepEqual(gist, want) {
+ t.Errorf("Gists.Create returned %+v, want %+v", gist, want)
+ }
+}
+
+func TestGistsService_Edit(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Gist{
+ Description: String("New description"),
+ Files: map[GistFilename]GistFile{
+ "new.txt": {Content: String("new file content")},
+ },
+ }
+
+ mux.HandleFunc("/gists/1", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Gist)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w,
+ `
+ {
+ "id": "1",
+ "description": "new description",
+ "public": false,
+ "files": {
+ "test.txt": {
+ "filename": "test.txt"
+ },
+ "new.txt": {
+ "filename": "new.txt"
+ }
+ }
+ }`)
+ })
+
+ gist, _, err := client.Gists.Edit("1", input)
+ if err != nil {
+ t.Errorf("Gists.Edit returned error: %v", err)
+ }
+
+ want := &Gist{
+ ID: String("1"),
+ Description: String("new description"),
+ Public: Bool(false),
+ Files: map[GistFilename]GistFile{
+ "test.txt": {Filename: String("test.txt")},
+ "new.txt": {Filename: String("new.txt")},
+ },
+ }
+ if !reflect.DeepEqual(gist, want) {
+ t.Errorf("Gists.Edit returned %+v, want %+v", gist, want)
+ }
+}
+
+func TestGistsService_Edit_invalidID(t *testing.T) {
+ _, _, err := client.Gists.Edit("%", nil)
+ testURLParseError(t, err)
+}
+
+func TestGistsService_Delete(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/gists/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Gists.Delete("1")
+ if err != nil {
+ t.Errorf("Gists.Delete returned error: %v", err)
+ }
+}
+
+func TestGistsService_Delete_invalidID(t *testing.T) {
+ _, err := client.Gists.Delete("%")
+ testURLParseError(t, err)
+}
+
+func TestGistsService_Star(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/gists/1/star", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "PUT")
+ })
+
+ _, err := client.Gists.Star("1")
+ if err != nil {
+ t.Errorf("Gists.Star returned error: %v", err)
+ }
+}
+
+func TestGistsService_Star_invalidID(t *testing.T) {
+ _, err := client.Gists.Star("%")
+ testURLParseError(t, err)
+}
+
+func TestGistsService_Unstar(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/gists/1/star", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Gists.Unstar("1")
+ if err != nil {
+ t.Errorf("Gists.Unstar returned error: %v", err)
+ }
+}
+
+func TestGistsService_Unstar_invalidID(t *testing.T) {
+ _, err := client.Gists.Unstar("%")
+ testURLParseError(t, err)
+}
+
+func TestGistsService_IsStarred_hasStar(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/gists/1/star", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ star, _, err := client.Gists.IsStarred("1")
+ if err != nil {
+		t.Errorf("Gists.IsStarred returned error: %v", err)
+	}
+	if want := true; star != want {
+		t.Errorf("Gists.IsStarred returned %+v, want %+v", star, want)
+ }
+}
+
+func TestGistsService_IsStarred_noStar(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/gists/1/star", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusNotFound)
+ })
+
+ star, _, err := client.Gists.IsStarred("1")
+ if err != nil {
+		t.Errorf("Gists.IsStarred returned error: %v", err)
+	}
+	if want := false; star != want {
+		t.Errorf("Gists.IsStarred returned %+v, want %+v", star, want)
+ }
+}
+
+func TestGistsService_IsStarred_invalidID(t *testing.T) {
+ _, _, err := client.Gists.IsStarred("%")
+ testURLParseError(t, err)
+}
+
+func TestGistsService_Fork(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/gists/1/forks", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "POST")
+ fmt.Fprint(w, `{"id": "2"}`)
+ })
+
+ gist, _, err := client.Gists.Fork("1")
+
+ if err != nil {
+ t.Errorf("Gists.Fork returned error: %v", err)
+ }
+
+ want := &Gist{ID: String("2")}
+ if !reflect.DeepEqual(gist, want) {
+ t.Errorf("Gists.Fork returned %+v, want %+v", gist, want)
+ }
+}
+
+func TestGistsService_Fork_invalidID(t *testing.T) {
+ _, _, err := client.Gists.Fork("%")
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/git_blobs_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/git_blobs_test.go
new file mode 100644
index 000000000..994549f2c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/git_blobs_test.go
@@ -0,0 +1,92 @@
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestGitService_GetBlob(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/git/blobs/s", func(w http.ResponseWriter, r *http.Request) {
+ if m := "GET"; m != r.Method {
+ t.Errorf("Request method = %v, want %v", r.Method, m)
+ }
+ fmt.Fprint(w, `{
+ "sha": "s",
+ "content": "blob content"
+ }`)
+ })
+
+ blob, _, err := client.Git.GetBlob("o", "r", "s")
+ if err != nil {
+ t.Errorf("Git.GetBlob returned error: %v", err)
+ }
+
+ want := Blob{
+ SHA: String("s"),
+ Content: String("blob content"),
+ }
+
+ if !reflect.DeepEqual(*blob, want) {
+		t.Errorf("Git.GetBlob returned %+v, want %+v", *blob, want)
+ }
+}
+
+func TestGitService_GetBlob_invalidOwner(t *testing.T) {
+ _, _, err := client.Git.GetBlob("%", "%", "%")
+ testURLParseError(t, err)
+}
+
+func TestGitService_CreateBlob(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Blob{
+ SHA: String("s"),
+ Content: String("blob content"),
+ Encoding: String("utf-8"),
+ Size: Int(12),
+ }
+
+ mux.HandleFunc("/repos/o/r/git/blobs", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Blob)
+ json.NewDecoder(r.Body).Decode(v)
+
+ if m := "POST"; m != r.Method {
+ t.Errorf("Request method = %v, want %v", r.Method, m)
+ }
+
+ want := input
+ if !reflect.DeepEqual(v, want) {
+ t.Errorf("Git.CreateBlob request body: %+v, want %+v", v, want)
+ }
+
+ fmt.Fprint(w, `{
+ "sha": "s",
+ "content": "blob content",
+ "encoding": "utf-8",
+ "size": 12
+ }`)
+ })
+
+ blob, _, err := client.Git.CreateBlob("o", "r", input)
+ if err != nil {
+ t.Errorf("Git.CreateBlob returned error: %v", err)
+ }
+
+ want := input
+
+ if !reflect.DeepEqual(*blob, *want) {
+ t.Errorf("Git.CreateBlob returned %+v, want %+v", *blob, *want)
+ }
+}
+
+func TestGitService_CreateBlob_invalidOwner(t *testing.T) {
+ _, _, err := client.Git.CreateBlob("%", "%", &Blob{})
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/git_commits_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/git_commits_test.go
new file mode 100644
index 000000000..538f52360
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/git_commits_test.go
@@ -0,0 +1,82 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestGitService_GetCommit(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/git/commits/s", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"sha":"s","message":"m","author":{"name":"n"}}`)
+ })
+
+ commit, _, err := client.Git.GetCommit("o", "r", "s")
+ if err != nil {
+ t.Errorf("Git.GetCommit returned error: %v", err)
+ }
+
+ want := &Commit{SHA: String("s"), Message: String("m"), Author: &CommitAuthor{Name: String("n")}}
+ if !reflect.DeepEqual(commit, want) {
+ t.Errorf("Git.GetCommit returned %+v, want %+v", commit, want)
+ }
+}
+
+func TestGitService_GetCommit_invalidOwner(t *testing.T) {
+ _, _, err := client.Git.GetCommit("%", "%", "%")
+ testURLParseError(t, err)
+}
+
+func TestGitService_CreateCommit(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Commit{
+ Message: String("m"),
+ Tree: &Tree{SHA: String("t")},
+ Parents: []Commit{{SHA: String("p")}},
+ }
+
+ mux.HandleFunc("/repos/o/r/git/commits", func(w http.ResponseWriter, r *http.Request) {
+ v := new(createCommit)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+
+ want := &createCommit{
+ Message: input.Message,
+ Tree: String("t"),
+ Parents: []string{"p"},
+ }
+ if !reflect.DeepEqual(v, want) {
+ t.Errorf("Request body = %+v, want %+v", v, want)
+ }
+ fmt.Fprint(w, `{"sha":"s"}`)
+ })
+
+ commit, _, err := client.Git.CreateCommit("o", "r", input)
+ if err != nil {
+ t.Errorf("Git.CreateCommit returned error: %v", err)
+ }
+
+ want := &Commit{SHA: String("s")}
+ if !reflect.DeepEqual(commit, want) {
+ t.Errorf("Git.CreateCommit returned %+v, want %+v", commit, want)
+ }
+}
+
+func TestGitService_CreateCommit_invalidOwner(t *testing.T) {
+ _, _, err := client.Git.CreateCommit("%", "%", nil)
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/git_refs_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/git_refs_test.go
new file mode 100644
index 000000000..e66bf54af
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/git_refs_test.go
@@ -0,0 +1,280 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestGitService_GetRef(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/git/refs/heads/b", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `
+ {
+ "ref": "refs/heads/b",
+ "url": "https://api.github.com/repos/o/r/git/refs/heads/b",
+ "object": {
+ "type": "commit",
+ "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd",
+ "url": "https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"
+ }
+ }`)
+ })
+
+ ref, _, err := client.Git.GetRef("o", "r", "refs/heads/b")
+ if err != nil {
+ t.Errorf("Git.GetRef returned error: %v", err)
+ }
+
+ want := &Reference{
+ Ref: String("refs/heads/b"),
+ URL: String("https://api.github.com/repos/o/r/git/refs/heads/b"),
+ Object: &GitObject{
+ Type: String("commit"),
+ SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"),
+ URL: String("https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"),
+ },
+ }
+ if !reflect.DeepEqual(ref, want) {
+ t.Errorf("Git.GetRef returned %+v, want %+v", ref, want)
+ }
+
+ // without 'refs/' prefix
+ if _, _, err := client.Git.GetRef("o", "r", "heads/b"); err != nil {
+ t.Errorf("Git.GetRef returned error: %v", err)
+ }
+}
+
+func TestGitService_ListRefs(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/git/refs", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `
+ [
+ {
+ "ref": "refs/heads/branchA",
+ "url": "https://api.github.com/repos/o/r/git/refs/heads/branchA",
+ "object": {
+ "type": "commit",
+ "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd",
+ "url": "https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"
+ }
+ },
+ {
+ "ref": "refs/heads/branchB",
+ "url": "https://api.github.com/repos/o/r/git/refs/heads/branchB",
+ "object": {
+ "type": "commit",
+ "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd",
+ "url": "https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"
+ }
+ }
+ ]`)
+ })
+
+ refs, _, err := client.Git.ListRefs("o", "r", nil)
+ if err != nil {
+ t.Errorf("Git.ListRefs returned error: %v", err)
+ }
+
+ want := []Reference{
+ {
+ Ref: String("refs/heads/branchA"),
+ URL: String("https://api.github.com/repos/o/r/git/refs/heads/branchA"),
+ Object: &GitObject{
+ Type: String("commit"),
+ SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"),
+ URL: String("https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"),
+ },
+ },
+ {
+ Ref: String("refs/heads/branchB"),
+ URL: String("https://api.github.com/repos/o/r/git/refs/heads/branchB"),
+ Object: &GitObject{
+ Type: String("commit"),
+ SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"),
+ URL: String("https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"),
+ },
+ },
+ }
+ if !reflect.DeepEqual(refs, want) {
+ t.Errorf("Git.ListRefs returned %+v, want %+v", refs, want)
+ }
+}
+
+func TestGitService_ListRefs_options(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/git/refs/t", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"ref": "r"}]`)
+ })
+
+ opt := &ReferenceListOptions{Type: "t", ListOptions: ListOptions{Page: 2}}
+ refs, _, err := client.Git.ListRefs("o", "r", opt)
+ if err != nil {
+ t.Errorf("Git.ListRefs returned error: %v", err)
+ }
+
+ want := []Reference{{Ref: String("r")}}
+ if !reflect.DeepEqual(refs, want) {
+ t.Errorf("Git.ListRefs returned %+v, want %+v", refs, want)
+ }
+}
+
+func TestGitService_CreateRef(t *testing.T) {
+ setup()
+ defer teardown()
+
+ args := &createRefRequest{
+ Ref: String("refs/heads/b"),
+ SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"),
+ }
+
+ mux.HandleFunc("/repos/o/r/git/refs", func(w http.ResponseWriter, r *http.Request) {
+ v := new(createRefRequest)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, args) {
+ t.Errorf("Request body = %+v, want %+v", v, args)
+ }
+ fmt.Fprint(w, `
+ {
+ "ref": "refs/heads/b",
+ "url": "https://api.github.com/repos/o/r/git/refs/heads/b",
+ "object": {
+ "type": "commit",
+ "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd",
+ "url": "https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"
+ }
+ }`)
+ })
+
+ ref, _, err := client.Git.CreateRef("o", "r", &Reference{
+ Ref: String("refs/heads/b"),
+ Object: &GitObject{
+ SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"),
+ },
+ })
+ if err != nil {
+ t.Errorf("Git.CreateRef returned error: %v", err)
+ }
+
+ want := &Reference{
+ Ref: String("refs/heads/b"),
+ URL: String("https://api.github.com/repos/o/r/git/refs/heads/b"),
+ Object: &GitObject{
+ Type: String("commit"),
+ SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"),
+ URL: String("https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"),
+ },
+ }
+ if !reflect.DeepEqual(ref, want) {
+ t.Errorf("Git.CreateRef returned %+v, want %+v", ref, want)
+ }
+
+ // without 'refs/' prefix
+ _, _, err = client.Git.CreateRef("o", "r", &Reference{
+ Ref: String("heads/b"),
+ Object: &GitObject{
+ SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"),
+ },
+ })
+ if err != nil {
+ t.Errorf("Git.CreateRef returned error: %v", err)
+ }
+}
+
+func TestGitService_UpdateRef(t *testing.T) {
+ setup()
+ defer teardown()
+
+ args := &updateRefRequest{
+ SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"),
+ Force: Bool(true),
+ }
+
+ mux.HandleFunc("/repos/o/r/git/refs/heads/b", func(w http.ResponseWriter, r *http.Request) {
+ v := new(updateRefRequest)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, args) {
+ t.Errorf("Request body = %+v, want %+v", v, args)
+ }
+ fmt.Fprint(w, `
+ {
+ "ref": "refs/heads/b",
+ "url": "https://api.github.com/repos/o/r/git/refs/heads/b",
+ "object": {
+ "type": "commit",
+ "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd",
+ "url": "https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"
+ }
+ }`)
+ })
+
+ ref, _, err := client.Git.UpdateRef("o", "r", &Reference{
+ Ref: String("refs/heads/b"),
+ Object: &GitObject{SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd")},
+ }, true)
+ if err != nil {
+ t.Errorf("Git.UpdateRef returned error: %v", err)
+ }
+
+ want := &Reference{
+ Ref: String("refs/heads/b"),
+ URL: String("https://api.github.com/repos/o/r/git/refs/heads/b"),
+ Object: &GitObject{
+ Type: String("commit"),
+ SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"),
+ URL: String("https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"),
+ },
+ }
+ if !reflect.DeepEqual(ref, want) {
+ t.Errorf("Git.UpdateRef returned %+v, want %+v", ref, want)
+ }
+
+ // without 'refs/' prefix
+ _, _, err = client.Git.UpdateRef("o", "r", &Reference{
+ Ref: String("heads/b"),
+ Object: &GitObject{SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd")},
+ }, true)
+ if err != nil {
+ t.Errorf("Git.UpdateRef returned error: %v", err)
+ }
+}
+
+func TestGitService_DeleteRef(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/git/refs/heads/b", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Git.DeleteRef("o", "r", "refs/heads/b")
+ if err != nil {
+ t.Errorf("Git.DeleteRef returned error: %v", err)
+ }
+
+ // without 'refs/' prefix
+ if _, err := client.Git.DeleteRef("o", "r", "heads/b"); err != nil {
+ t.Errorf("Git.DeleteRef returned error: %v", err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/git_tags_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/git_tags_test.go
new file mode 100644
index 000000000..fb41bf38e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/git_tags_test.go
@@ -0,0 +1,68 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestGitService_GetTag(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/git/tags/s", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+
+ fmt.Fprint(w, `{"tag": "t"}`)
+ })
+
+ tag, _, err := client.Git.GetTag("o", "r", "s")
+
+ if err != nil {
+ t.Errorf("Git.GetTag returned error: %v", err)
+ }
+
+ want := &Tag{Tag: String("t")}
+ if !reflect.DeepEqual(tag, want) {
+ t.Errorf("Git.GetTag returned %+v, want %+v", tag, want)
+ }
+}
+
+func TestGitService_CreateTag(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &createTagRequest{Tag: String("t"), Object: String("s")}
+
+ mux.HandleFunc("/repos/o/r/git/tags", func(w http.ResponseWriter, r *http.Request) {
+ v := new(createTagRequest)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"tag": "t"}`)
+ })
+
+ tag, _, err := client.Git.CreateTag("o", "r", &Tag{
+ Tag: input.Tag,
+ Object: &GitObject{SHA: input.Object},
+ })
+ if err != nil {
+ t.Errorf("Git.CreateTag returned error: %v", err)
+ }
+
+ want := &Tag{Tag: String("t")}
+ if !reflect.DeepEqual(tag, want) {
+ t.Errorf("Git.GetTag returned %+v, want %+v", tag, want)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/git_trees_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/git_trees_test.go
new file mode 100644
index 000000000..99ec4f34c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/git_trees_test.go
@@ -0,0 +1,189 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestGitService_GetTree(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/git/trees/s", func(w http.ResponseWriter, r *http.Request) {
+ if m := "GET"; m != r.Method {
+ t.Errorf("Request method = %v, want %v", r.Method, m)
+ }
+ fmt.Fprint(w, `{
+ "sha": "s",
+ "tree": [ { "type": "blob" } ]
+ }`)
+ })
+
+ tree, _, err := client.Git.GetTree("o", "r", "s", true)
+ if err != nil {
+ t.Errorf("Git.GetTree returned error: %v", err)
+ }
+
+ want := Tree{
+ SHA: String("s"),
+ Entries: []TreeEntry{
+ {
+ Type: String("blob"),
+ },
+ },
+ }
+ if !reflect.DeepEqual(*tree, want) {
+ t.Errorf("Tree.Get returned %+v, want %+v", *tree, want)
+ }
+}
+
+func TestGitService_GetTree_invalidOwner(t *testing.T) {
+ _, _, err := client.Git.GetTree("%", "%", "%", false)
+ testURLParseError(t, err)
+}
+
+func TestGitService_CreateTree(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := []TreeEntry{
+ {
+ Path: String("file.rb"),
+ Mode: String("100644"),
+ Type: String("blob"),
+ SHA: String("7c258a9869f33c1e1e1f74fbb32f07c86cb5a75b"),
+ },
+ }
+
+ mux.HandleFunc("/repos/o/r/git/trees", func(w http.ResponseWriter, r *http.Request) {
+ v := new(createTree)
+ json.NewDecoder(r.Body).Decode(v)
+
+ if m := "POST"; m != r.Method {
+ t.Errorf("Request method = %v, want %v", r.Method, m)
+ }
+
+ want := &createTree{
+ BaseTree: "b",
+ Entries: input,
+ }
+ if !reflect.DeepEqual(v, want) {
+ t.Errorf("Git.CreateTree request body: %+v, want %+v", v, want)
+ }
+
+ fmt.Fprint(w, `{
+ "sha": "cd8274d15fa3ae2ab983129fb037999f264ba9a7",
+ "tree": [
+ {
+ "path": "file.rb",
+ "mode": "100644",
+ "type": "blob",
+ "size": 132,
+ "sha": "7c258a9869f33c1e1e1f74fbb32f07c86cb5a75b"
+ }
+ ]
+ }`)
+ })
+
+ tree, _, err := client.Git.CreateTree("o", "r", "b", input)
+ if err != nil {
+ t.Errorf("Git.CreateTree returned error: %v", err)
+ }
+
+ want := Tree{
+ String("cd8274d15fa3ae2ab983129fb037999f264ba9a7"),
+ []TreeEntry{
+ {
+ Path: String("file.rb"),
+ Mode: String("100644"),
+ Type: String("blob"),
+ Size: Int(132),
+ SHA: String("7c258a9869f33c1e1e1f74fbb32f07c86cb5a75b"),
+ },
+ },
+ }
+
+ if !reflect.DeepEqual(*tree, want) {
+ t.Errorf("Git.CreateTree returned %+v, want %+v", *tree, want)
+ }
+}
+
+func TestGitService_CreateTree_Content(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := []TreeEntry{
+ {
+ Path: String("content.md"),
+ Mode: String("100644"),
+ Content: String("file content"),
+ },
+ }
+
+ mux.HandleFunc("/repos/o/r/git/trees", func(w http.ResponseWriter, r *http.Request) {
+ v := new(createTree)
+ json.NewDecoder(r.Body).Decode(v)
+
+ if m := "POST"; m != r.Method {
+ t.Errorf("Request method = %v, want %v", r.Method, m)
+ }
+
+ want := &createTree{
+ BaseTree: "b",
+ Entries: input,
+ }
+ if !reflect.DeepEqual(v, want) {
+ t.Errorf("Git.CreateTree request body: %+v, want %+v", v, want)
+ }
+
+ fmt.Fprint(w, `{
+ "sha": "5c6780ad2c68743383b740fd1dab6f6a33202b11",
+ "url": "https://api.github.com/repos/o/r/git/trees/5c6780ad2c68743383b740fd1dab6f6a33202b11",
+ "tree": [
+ {
+ "mode": "100644",
+ "type": "blob",
+ "sha": "aad8feacf6f8063150476a7b2bd9770f2794c08b",
+ "path": "content.md",
+ "size": 12,
+ "url": "https://api.github.com/repos/o/r/git/blobs/aad8feacf6f8063150476a7b2bd9770f2794c08b"
+ }
+ ]
+ }`)
+ })
+
+ tree, _, err := client.Git.CreateTree("o", "r", "b", input)
+ if err != nil {
+ t.Errorf("Git.CreateTree returned error: %v", err)
+ }
+
+ want := Tree{
+ String("5c6780ad2c68743383b740fd1dab6f6a33202b11"),
+ []TreeEntry{
+ {
+ Path: String("content.md"),
+ Mode: String("100644"),
+ Type: String("blob"),
+ Size: Int(12),
+ SHA: String("aad8feacf6f8063150476a7b2bd9770f2794c08b"),
+ },
+ },
+ }
+
+ if !reflect.DeepEqual(*tree, want) {
+ t.Errorf("Git.CreateTree returned %+v, want %+v", *tree, want)
+ }
+}
+
+func TestGitService_CreateTree_invalidOwner(t *testing.T) {
+ _, _, err := client.Git.CreateTree("%", "%", "", nil)
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/github_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/github_test.go
new file mode 100644
index 000000000..f4626f470
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/github_test.go
@@ -0,0 +1,679 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "path"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+var (
+ // mux is the HTTP request multiplexer used with the test server.
+ mux *http.ServeMux
+
+ // client is the GitHub client being tested.
+ client *Client
+
+ // server is a test HTTP server used to provide mock API responses.
+ server *httptest.Server
+)
+
+// setup sets up a test HTTP server along with a github.Client that is
+// configured to talk to that test server. Tests should register handlers on
+// mux which provide mock responses for the API method being tested.
+func setup() {
+ // test server
+ mux = http.NewServeMux()
+ server = httptest.NewServer(mux)
+
+ // github client configured to use test server
+ client = NewClient(nil)
+ url, _ := url.Parse(server.URL)
+ client.BaseURL = url
+ client.UploadURL = url
+}
+
+// teardown closes the test HTTP server.
+func teardown() {
+ server.Close()
+}
+
+// openTestFile creates a new file with the given name and content for testing.
+// In order to ensure the exact file name, this function will create a new temp
+// directory, and create the file in that directory. It is the caller's
+// responsibility to remove the directory and its contents when no longer needed.
+func openTestFile(name, content string) (file *os.File, dir string, err error) {
+ dir, err = ioutil.TempDir("", "go-github")
+ if err != nil {
+ return nil, dir, err
+ }
+
+ file, err = os.OpenFile(path.Join(dir, name), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+ if err != nil {
+ return nil, dir, err
+ }
+
+ fmt.Fprint(file, content)
+
+ // close and re-open the file to keep file.Stat() happy
+ file.Close()
+ file, err = os.Open(file.Name())
+ if err != nil {
+ return nil, dir, err
+ }
+
+ return file, dir, err
+}
+
+func testMethod(t *testing.T, r *http.Request, want string) {
+ if got := r.Method; got != want {
+ t.Errorf("Request method: %v, want %v", got, want)
+ }
+}
+
+type values map[string]string
+
+func testFormValues(t *testing.T, r *http.Request, values values) {
+ want := url.Values{}
+ for k, v := range values {
+ want.Add(k, v)
+ }
+
+ r.ParseForm()
+ if got := r.Form; !reflect.DeepEqual(got, want) {
+ t.Errorf("Request parameters: %v, want %v", got, want)
+ }
+}
+
+func testHeader(t *testing.T, r *http.Request, header string, want string) {
+ if got := r.Header.Get(header); got != want {
+ t.Errorf("Header.Get(%q) returned %s, want %s", header, got, want)
+ }
+}
+
+func testURLParseError(t *testing.T, err error) {
+ if err == nil {
+ t.Errorf("Expected error to be returned")
+ }
+ if err, ok := err.(*url.Error); !ok || err.Op != "parse" {
+ t.Errorf("Expected URL parse error, got %+v", err)
+ }
+}
+
+func testBody(t *testing.T, r *http.Request, want string) {
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Errorf("Error reading request body: %v", err)
+ }
+ if got := string(b); got != want {
+ t.Errorf("request Body is %s, want %s", got, want)
+ }
+}
+
+// Helper function to test that a value is marshalled to JSON as expected.
+func testJSONMarshal(t *testing.T, v interface{}, want string) {
+ j, err := json.Marshal(v)
+ if err != nil {
+ t.Errorf("Unable to marshal JSON for %v", v)
+ }
+
+ w := new(bytes.Buffer)
+ err = json.Compact(w, []byte(want))
+ if err != nil {
+ t.Errorf("String is not valid json: %s", want)
+ }
+
+ if w.String() != string(j) {
+ t.Errorf("json.Marshal(%q) returned %s, want %s", v, j, w)
+ }
+
+ // now go the other direction and make sure things unmarshal as expected
+ u := reflect.ValueOf(v).Interface()
+ if err := json.Unmarshal([]byte(want), u); err != nil {
+ t.Errorf("Unable to unmarshal JSON for %v", want)
+ }
+
+ if !reflect.DeepEqual(v, u) {
+ t.Errorf("json.Unmarshal(%q) returned %s, want %s", want, u, v)
+ }
+}
+
+func TestNewClient(t *testing.T) {
+ c := NewClient(nil)
+
+ if got, want := c.BaseURL.String(), defaultBaseURL; got != want {
+ t.Errorf("NewClient BaseURL is %v, want %v", got, want)
+ }
+ if got, want := c.UserAgent, userAgent; got != want {
+ t.Errorf("NewClient UserAgent is %v, want %v", got, want)
+ }
+}
+
+func TestNewRequest(t *testing.T) {
+ c := NewClient(nil)
+
+ inURL, outURL := "/foo", defaultBaseURL+"foo"
+ inBody, outBody := &User{Login: String("l")}, `{"login":"l"}`+"\n"
+ req, _ := c.NewRequest("GET", inURL, inBody)
+
+ // test that relative URL was expanded
+ if got, want := req.URL.String(), outURL; got != want {
+ t.Errorf("NewRequest(%q) URL is %v, want %v", inURL, got, want)
+ }
+
+ // test that body was JSON encoded
+ body, _ := ioutil.ReadAll(req.Body)
+ if got, want := string(body), outBody; got != want {
+ t.Errorf("NewRequest(%q) Body is %v, want %v", inBody, got, want)
+ }
+
+ // test that default user-agent is attached to the request
+ if got, want := req.Header.Get("User-Agent"), c.UserAgent; got != want {
+ t.Errorf("NewRequest() User-Agent is %v, want %v", got, want)
+ }
+}
+
+func TestNewRequest_invalidJSON(t *testing.T) {
+ c := NewClient(nil)
+
+ type T struct {
+ A map[int]interface{}
+ }
+ _, err := c.NewRequest("GET", "/", &T{})
+
+ if err == nil {
+ t.Error("Expected error to be returned.")
+ }
+ if err, ok := err.(*json.UnsupportedTypeError); !ok {
+ t.Errorf("Expected a JSON error; got %#v.", err)
+ }
+}
+
+func TestNewRequest_badURL(t *testing.T) {
+ c := NewClient(nil)
+ _, err := c.NewRequest("GET", ":", nil)
+ testURLParseError(t, err)
+}
+
+// ensure that no User-Agent header is set if the client's UserAgent is empty.
+// This caused a problem with Google's internal http client.
+func TestNewRequest_emptyUserAgent(t *testing.T) {
+ c := NewClient(nil)
+ c.UserAgent = ""
+ req, err := c.NewRequest("GET", "/", nil)
+ if err != nil {
+ t.Fatalf("NewRequest returned unexpected error: %v", err)
+ }
+ if _, ok := req.Header["User-Agent"]; ok {
+ t.Fatal("constructed request contains unexpected User-Agent header")
+ }
+}
+
+// If a nil body is passed to github.NewRequest, make sure that nil is also
+// passed to http.NewRequest. In most cases, passing an io.Reader that returns
+// no content is fine, since there is no difference between an HTTP request
+// body that is an empty string versus one that is not set at all. However in
+// certain cases, intermediate systems may treat these differently resulting in
+// subtle errors.
+func TestNewRequest_emptyBody(t *testing.T) {
+ c := NewClient(nil)
+ req, err := c.NewRequest("GET", "/", nil)
+ if err != nil {
+ t.Fatalf("NewRequest returned unexpected error: %v", err)
+ }
+ if req.Body != nil {
+ t.Fatalf("constructed request contains a non-nil Body")
+ }
+}
+
+func TestResponse_populatePageValues(t *testing.T) {
+ r := http.Response{
+ Header: http.Header{
+ "Link": {`; rel="first",` +
+ ` ; rel="prev",` +
+ ` ; rel="next",` +
+ ` ; rel="last"`,
+ },
+ },
+ }
+
+ response := newResponse(&r)
+ if got, want := response.FirstPage, 1; got != want {
+ t.Errorf("response.FirstPage: %v, want %v", got, want)
+ }
+ if got, want := response.PrevPage, 2; want != got {
+ t.Errorf("response.PrevPage: %v, want %v", got, want)
+ }
+ if got, want := response.NextPage, 4; want != got {
+ t.Errorf("response.NextPage: %v, want %v", got, want)
+ }
+ if got, want := response.LastPage, 5; want != got {
+ t.Errorf("response.LastPage: %v, want %v", got, want)
+ }
+}
+
+func TestResponse_populatePageValues_invalid(t *testing.T) {
+ r := http.Response{
+ Header: http.Header{
+ "Link": {`,` +
+ `; rel="first",` +
+ `https://api.github.com/?page=2; rel="prev",` +
+ `; rel="next",` +
+ `; rel="last"`,
+ },
+ },
+ }
+
+ response := newResponse(&r)
+ if got, want := response.FirstPage, 0; got != want {
+ t.Errorf("response.FirstPage: %v, want %v", got, want)
+ }
+ if got, want := response.PrevPage, 0; got != want {
+ t.Errorf("response.PrevPage: %v, want %v", got, want)
+ }
+ if got, want := response.NextPage, 0; got != want {
+ t.Errorf("response.NextPage: %v, want %v", got, want)
+ }
+ if got, want := response.LastPage, 0; got != want {
+ t.Errorf("response.LastPage: %v, want %v", got, want)
+ }
+
+ // more invalid URLs
+ r = http.Response{
+ Header: http.Header{
+ "Link": {`; rel="first"`},
+ },
+ }
+
+ response = newResponse(&r)
+ if got, want := response.FirstPage, 0; got != want {
+ t.Errorf("response.FirstPage: %v, want %v", got, want)
+ }
+}
+
+func TestDo(t *testing.T) {
+ setup()
+ defer teardown()
+
+ type foo struct {
+ A string
+ }
+
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ if m := "GET"; m != r.Method {
+ t.Errorf("Request method = %v, want %v", r.Method, m)
+ }
+ fmt.Fprint(w, `{"A":"a"}`)
+ })
+
+ req, _ := client.NewRequest("GET", "/", nil)
+ body := new(foo)
+ client.Do(req, body)
+
+ want := &foo{"a"}
+ if !reflect.DeepEqual(body, want) {
+ t.Errorf("Response body = %v, want %v", body, want)
+ }
+}
+
+func TestDo_httpError(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, "Bad Request", 400)
+ })
+
+ req, _ := client.NewRequest("GET", "/", nil)
+ _, err := client.Do(req, nil)
+
+ if err == nil {
+ t.Error("Expected HTTP 400 error.")
+ }
+}
+
+// Test handling of an error caused by the internal http client's Do()
+// function. A redirect loop is pretty unlikely to occur within the GitHub
+// API, but does allow us to exercise the right code path.
+func TestDo_redirectLoop(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ http.Redirect(w, r, "/", http.StatusFound)
+ })
+
+ req, _ := client.NewRequest("GET", "/", nil)
+ _, err := client.Do(req, nil)
+
+ if err == nil {
+ t.Error("Expected error to be returned.")
+ }
+ if err, ok := err.(*url.Error); !ok {
+ t.Errorf("Expected a URL error; got %#v.", err)
+ }
+}
+
+func TestDo_rateLimit(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Add(headerRateLimit, "60")
+ w.Header().Add(headerRateRemaining, "59")
+ w.Header().Add(headerRateReset, "1372700873")
+ })
+
+ if got, want := client.Rate.Limit, 0; got != want {
+ t.Errorf("Client rate limit = %v, want %v", got, want)
+ }
+ if got, want := client.Rate.Remaining, 0; got != want {
+ t.Errorf("Client rate remaining = %v, got %v", got, want)
+ }
+ if !client.Rate.Reset.IsZero() {
+ t.Errorf("Client rate reset not initialized to zero value")
+ }
+
+ req, _ := client.NewRequest("GET", "/", nil)
+ client.Do(req, nil)
+
+ if got, want := client.Rate.Limit, 60; got != want {
+ t.Errorf("Client rate limit = %v, want %v", got, want)
+ }
+ if got, want := client.Rate.Remaining, 59; got != want {
+ t.Errorf("Client rate remaining = %v, want %v", got, want)
+ }
+ reset := time.Date(2013, 7, 1, 17, 47, 53, 0, time.UTC)
+ if client.Rate.Reset.UTC() != reset {
+ t.Errorf("Client rate reset = %v, want %v", client.Rate.Reset, reset)
+ }
+}
+
+// ensure rate limit is still parsed, even for error responses
+func TestDo_rateLimit_errorResponse(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Add(headerRateLimit, "60")
+ w.Header().Add(headerRateRemaining, "59")
+ w.Header().Add(headerRateReset, "1372700873")
+ http.Error(w, "Bad Request", 400)
+ })
+
+ req, _ := client.NewRequest("GET", "/", nil)
+ client.Do(req, nil)
+
+ if got, want := client.Rate.Limit, 60; got != want {
+ t.Errorf("Client rate limit = %v, want %v", got, want)
+ }
+ if got, want := client.Rate.Remaining, 59; got != want {
+ t.Errorf("Client rate remaining = %v, want %v", got, want)
+ }
+ reset := time.Date(2013, 7, 1, 17, 47, 53, 0, time.UTC)
+ if client.Rate.Reset.UTC() != reset {
+ t.Errorf("Client rate reset = %v, want %v", client.Rate.Reset, reset)
+ }
+}
+
+func TestSanitizeURL(t *testing.T) {
+ tests := []struct {
+ in, want string
+ }{
+ {"/?a=b", "/?a=b"},
+ {"/?a=b&client_secret=secret", "/?a=b&client_secret=REDACTED"},
+ {"/?a=b&client_id=id&client_secret=secret", "/?a=b&client_id=id&client_secret=REDACTED"},
+ }
+
+ for _, tt := range tests {
+ inURL, _ := url.Parse(tt.in)
+ want, _ := url.Parse(tt.want)
+
+ if got := sanitizeURL(inURL); !reflect.DeepEqual(got, want) {
+ t.Errorf("sanitizeURL(%v) returned %v, want %v", tt.in, got, want)
+ }
+ }
+}
+
+func TestCheckResponse(t *testing.T) {
+ res := &http.Response{
+ Request: &http.Request{},
+ StatusCode: http.StatusBadRequest,
+ Body: ioutil.NopCloser(strings.NewReader(`{"message":"m",
+ "errors": [{"resource": "r", "field": "f", "code": "c"}]}`)),
+ }
+ err := CheckResponse(res).(*ErrorResponse)
+
+ if err == nil {
+ t.Errorf("Expected error response.")
+ }
+
+ want := &ErrorResponse{
+ Response: res,
+ Message: "m",
+ Errors: []Error{{Resource: "r", Field: "f", Code: "c"}},
+ }
+ if !reflect.DeepEqual(err, want) {
+ t.Errorf("Error = %#v, want %#v", err, want)
+ }
+}
+
+// ensure that we properly handle API errors that do not contain a response body
+func TestCheckResponse_noBody(t *testing.T) {
+ res := &http.Response{
+ Request: &http.Request{},
+ StatusCode: http.StatusBadRequest,
+ Body: ioutil.NopCloser(strings.NewReader("")),
+ }
+ err := CheckResponse(res).(*ErrorResponse)
+
+ if err == nil {
+ t.Errorf("Expected error response.")
+ }
+
+ want := &ErrorResponse{
+ Response: res,
+ }
+ if !reflect.DeepEqual(err, want) {
+ t.Errorf("Error = %#v, want %#v", err, want)
+ }
+}
+
+func TestParseBooleanResponse_true(t *testing.T) {
+ result, err := parseBoolResponse(nil)
+
+ if err != nil {
+ t.Errorf("parseBoolResponse returned error: %+v", err)
+ }
+
+ if want := true; result != want {
+ t.Errorf("parseBoolResponse returned %+v, want: %+v", result, want)
+ }
+}
+
+func TestParseBooleanResponse_false(t *testing.T) {
+ v := &ErrorResponse{Response: &http.Response{StatusCode: http.StatusNotFound}}
+ result, err := parseBoolResponse(v)
+
+ if err != nil {
+ t.Errorf("parseBoolResponse returned error: %+v", err)
+ }
+
+ if want := false; result != want {
+ t.Errorf("parseBoolResponse returned %+v, want: %+v", result, want)
+ }
+}
+
+func TestParseBooleanResponse_error(t *testing.T) {
+ v := &ErrorResponse{Response: &http.Response{StatusCode: http.StatusBadRequest}}
+ result, err := parseBoolResponse(v)
+
+ if err == nil {
+ t.Errorf("Expected error to be returned.")
+ }
+
+ if want := false; result != want {
+ t.Errorf("parseBoolResponse returned %+v, want: %+v", result, want)
+ }
+}
+
+func TestErrorResponse_Error(t *testing.T) {
+ res := &http.Response{Request: &http.Request{}}
+ err := ErrorResponse{Message: "m", Response: res}
+ if err.Error() == "" {
+ t.Errorf("Expected non-empty ErrorResponse.Error()")
+ }
+}
+
+func TestError_Error(t *testing.T) {
+ err := Error{}
+ if err.Error() == "" {
+ t.Errorf("Expected non-empty Error.Error()")
+ }
+}
+
+func TestRateLimit(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/rate_limit", func(w http.ResponseWriter, r *http.Request) {
+ if m := "GET"; m != r.Method {
+ t.Errorf("Request method = %v, want %v", r.Method, m)
+ }
+ //fmt.Fprint(w, `{"resources":{"core": {"limit":2,"remaining":1,"reset":1372700873}}}`)
+ fmt.Fprint(w, `{"resources":{
+ "core": {"limit":2,"remaining":1,"reset":1372700873},
+ "search": {"limit":3,"remaining":2,"reset":1372700874}
+ }}`)
+ })
+
+ rate, _, err := client.RateLimit()
+ if err != nil {
+ t.Errorf("Rate limit returned error: %v", err)
+ }
+
+ want := &Rate{
+ Limit: 2,
+ Remaining: 1,
+ Reset: Timestamp{time.Date(2013, 7, 1, 17, 47, 53, 0, time.UTC).Local()},
+ }
+ if !reflect.DeepEqual(rate, want) {
+ t.Errorf("RateLimit returned %+v, want %+v", rate, want)
+ }
+}
+
+func TestRateLimits(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/rate_limit", func(w http.ResponseWriter, r *http.Request) {
+ if m := "GET"; m != r.Method {
+ t.Errorf("Request method = %v, want %v", r.Method, m)
+ }
+ fmt.Fprint(w, `{"resources":{
+ "core": {"limit":2,"remaining":1,"reset":1372700873},
+ "search": {"limit":3,"remaining":2,"reset":1372700874}
+ }}`)
+ })
+
+ rate, _, err := client.RateLimits()
+ if err != nil {
+ t.Errorf("RateLimits returned error: %v", err)
+ }
+
+ want := &RateLimits{
+ Core: &Rate{
+ Limit: 2,
+ Remaining: 1,
+ Reset: Timestamp{time.Date(2013, 7, 1, 17, 47, 53, 0, time.UTC).Local()},
+ },
+ Search: &Rate{
+ Limit: 3,
+ Remaining: 2,
+ Reset: Timestamp{time.Date(2013, 7, 1, 17, 47, 54, 0, time.UTC).Local()},
+ },
+ }
+ if !reflect.DeepEqual(rate, want) {
+ t.Errorf("RateLimits returned %+v, want %+v", rate, want)
+ }
+}
+
+func TestUnauthenticatedRateLimitedTransport(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ var v, want string
+ q := r.URL.Query()
+ if v, want = q.Get("client_id"), "id"; v != want {
+ t.Errorf("OAuth Client ID = %v, want %v", v, want)
+ }
+ if v, want = q.Get("client_secret"), "secret"; v != want {
+ t.Errorf("OAuth Client Secret = %v, want %v", v, want)
+ }
+ })
+
+ tp := &UnauthenticatedRateLimitedTransport{
+ ClientID: "id",
+ ClientSecret: "secret",
+ }
+ unauthedClient := NewClient(tp.Client())
+ unauthedClient.BaseURL = client.BaseURL
+ req, _ := unauthedClient.NewRequest("GET", "/", nil)
+ unauthedClient.Do(req, nil)
+}
+
+func TestUnauthenticatedRateLimitedTransport_missingFields(t *testing.T) {
+ // missing ClientID
+ tp := &UnauthenticatedRateLimitedTransport{
+ ClientSecret: "secret",
+ }
+ _, err := tp.RoundTrip(nil)
+ if err == nil {
+ t.Errorf("Expected error to be returned")
+ }
+
+ // missing ClientSecret
+ tp = &UnauthenticatedRateLimitedTransport{
+ ClientID: "id",
+ }
+ _, err = tp.RoundTrip(nil)
+ if err == nil {
+ t.Errorf("Expected error to be returned")
+ }
+}
+
+func TestUnauthenticatedRateLimitedTransport_transport(t *testing.T) {
+ // default transport
+ tp := &UnauthenticatedRateLimitedTransport{
+ ClientID: "id",
+ ClientSecret: "secret",
+ }
+ if tp.transport() != http.DefaultTransport {
+ t.Errorf("Expected http.DefaultTransport to be used.")
+ }
+
+ // custom transport
+ tp = &UnauthenticatedRateLimitedTransport{
+ ClientID: "id",
+ ClientSecret: "secret",
+ Transport: &http.Transport{},
+ }
+ if tp.transport() == http.DefaultTransport {
+ t.Errorf("Expected custom transport to be used.")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/gitignore_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/gitignore_test.go
new file mode 100644
index 000000000..6d49d00fa
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/gitignore_test.go
@@ -0,0 +1,58 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestGitignoresService_List(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/gitignore/templates", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `["C", "Go"]`)
+ })
+
+ available, _, err := client.Gitignores.List()
+ if err != nil {
+ t.Errorf("Gitignores.List returned error: %v", err)
+ }
+
+ want := []string{"C", "Go"}
+ if !reflect.DeepEqual(available, want) {
+ t.Errorf("Gitignores.List returned %+v, want %+v", available, want)
+ }
+}
+
+func TestGitignoresService_Get(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/gitignore/templates/name", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"name":"Name","source":"template source"}`)
+ })
+
+ gitignore, _, err := client.Gitignores.Get("name")
+ if err != nil {
+ t.Errorf("Gitignores.List returned error: %v", err)
+ }
+
+ want := &Gitignore{Name: String("Name"), Source: String("template source")}
+ if !reflect.DeepEqual(gitignore, want) {
+ t.Errorf("Gitignores.Get returned %+v, want %+v", gitignore, want)
+ }
+}
+
+func TestGitignoresService_Get_invalidTemplate(t *testing.T) {
+ _, _, err := client.Gitignores.Get("%")
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/issues_assignees_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/issues_assignees_test.go
new file mode 100644
index 000000000..63e024d31
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/issues_assignees_test.go
@@ -0,0 +1,98 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestIssuesService_ListAssignees(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/assignees", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ assignees, _, err := client.Issues.ListAssignees("o", "r", opt)
+ if err != nil {
+ t.Errorf("Issues.List returned error: %v", err)
+ }
+
+ want := []User{{ID: Int(1)}}
+ if !reflect.DeepEqual(assignees, want) {
+ t.Errorf("Issues.ListAssignees returned %+v, want %+v", assignees, want)
+ }
+}
+
+func TestIssuesService_ListAssignees_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.ListAssignees("%", "r", nil)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_IsAssignee_true(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/assignees/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ })
+
+ assignee, _, err := client.Issues.IsAssignee("o", "r", "u")
+ if err != nil {
+ t.Errorf("Issues.IsAssignee returned error: %v", err)
+ }
+ if want := true; assignee != want {
+ t.Errorf("Issues.IsAssignee returned %+v, want %+v", assignee, want)
+ }
+}
+
+func TestIssuesService_IsAssignee_false(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/assignees/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusNotFound)
+ })
+
+ assignee, _, err := client.Issues.IsAssignee("o", "r", "u")
+ if err != nil {
+ t.Errorf("Issues.IsAssignee returned error: %v", err)
+ }
+ if want := false; assignee != want {
+ t.Errorf("Issues.IsAssignee returned %+v, want %+v", assignee, want)
+ }
+}
+
+func TestIssuesService_IsAssignee_error(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/assignees/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ http.Error(w, "BadRequest", http.StatusBadRequest)
+ })
+
+ assignee, _, err := client.Issues.IsAssignee("o", "r", "u")
+ if err == nil {
+ t.Errorf("Expected HTTP 400 response")
+ }
+ if want := false; assignee != want {
+ t.Errorf("Issues.IsAssignee returned %+v, want %+v", assignee, want)
+ }
+}
+
+func TestIssuesService_IsAssignee_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.IsAssignee("%", "r", "u")
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/issues_comments_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/issues_comments_test.go
new file mode 100644
index 000000000..697f4380f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/issues_comments_test.go
@@ -0,0 +1,184 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestIssuesService_ListComments_allIssues(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/issues/comments", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "sort": "updated",
+ "direction": "desc",
+ "since": "2002-02-10T15:30:00Z",
+ "page": "2",
+ })
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &IssueListCommentsOptions{
+ Sort: "updated",
+ Direction: "desc",
+ Since: time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC),
+ ListOptions: ListOptions{Page: 2},
+ }
+ comments, _, err := client.Issues.ListComments("o", "r", 0, opt)
+ if err != nil {
+ t.Errorf("Issues.ListComments returned error: %v", err)
+ }
+
+ want := []IssueComment{{ID: Int(1)}}
+ if !reflect.DeepEqual(comments, want) {
+ t.Errorf("Issues.ListComments returned %+v, want %+v", comments, want)
+ }
+}
+
+func TestIssuesService_ListComments_specificIssue(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/issues/1/comments", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ comments, _, err := client.Issues.ListComments("o", "r", 1, nil)
+ if err != nil {
+ t.Errorf("Issues.ListComments returned error: %v", err)
+ }
+
+ want := []IssueComment{{ID: Int(1)}}
+ if !reflect.DeepEqual(comments, want) {
+ t.Errorf("Issues.ListComments returned %+v, want %+v", comments, want)
+ }
+}
+
+func TestIssuesService_ListComments_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.ListComments("%", "r", 1, nil)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_GetComment(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/issues/comments/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ comment, _, err := client.Issues.GetComment("o", "r", 1)
+ if err != nil {
+ t.Errorf("Issues.GetComment returned error: %v", err)
+ }
+
+ want := &IssueComment{ID: Int(1)}
+ if !reflect.DeepEqual(comment, want) {
+ t.Errorf("Issues.GetComment returned %+v, want %+v", comment, want)
+ }
+}
+
+func TestIssuesService_GetComment_invalidOrg(t *testing.T) {
+ _, _, err := client.Issues.GetComment("%", "r", 1)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_CreateComment(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &IssueComment{Body: String("b")}
+
+ mux.HandleFunc("/repos/o/r/issues/1/comments", func(w http.ResponseWriter, r *http.Request) {
+ v := new(IssueComment)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ comment, _, err := client.Issues.CreateComment("o", "r", 1, input)
+ if err != nil {
+ t.Errorf("Issues.CreateComment returned error: %v", err)
+ }
+
+ want := &IssueComment{ID: Int(1)}
+ if !reflect.DeepEqual(comment, want) {
+ t.Errorf("Issues.CreateComment returned %+v, want %+v", comment, want)
+ }
+}
+
+func TestIssuesService_CreateComment_invalidOrg(t *testing.T) {
+ _, _, err := client.Issues.CreateComment("%", "r", 1, nil)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_EditComment(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &IssueComment{Body: String("b")}
+
+ mux.HandleFunc("/repos/o/r/issues/comments/1", func(w http.ResponseWriter, r *http.Request) {
+ v := new(IssueComment)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ comment, _, err := client.Issues.EditComment("o", "r", 1, input)
+ if err != nil {
+ t.Errorf("Issues.EditComment returned error: %v", err)
+ }
+
+ want := &IssueComment{ID: Int(1)}
+ if !reflect.DeepEqual(comment, want) {
+ t.Errorf("Issues.EditComment returned %+v, want %+v", comment, want)
+ }
+}
+
+func TestIssuesService_EditComment_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.EditComment("%", "r", 1, nil)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_DeleteComment(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/issues/comments/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Issues.DeleteComment("o", "r", 1)
+ if err != nil {
+ t.Errorf("Issues.DeleteComments returned error: %v", err)
+ }
+}
+
+func TestIssuesService_DeleteComment_invalidOwner(t *testing.T) {
+ _, err := client.Issues.DeleteComment("%", "r", 1)
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/issues_events_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/issues_events_test.go
new file mode 100644
index 000000000..f90b64a71
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/issues_events_test.go
@@ -0,0 +1,86 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestIssuesService_ListIssueEvents(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/issues/1/events", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "page": "1",
+ "per_page": "2",
+ })
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &ListOptions{Page: 1, PerPage: 2}
+ events, _, err := client.Issues.ListIssueEvents("o", "r", 1, opt)
+
+ if err != nil {
+ t.Errorf("Issues.ListIssueEvents returned error: %v", err)
+ }
+
+ want := []IssueEvent{{ID: Int(1)}}
+ if !reflect.DeepEqual(events, want) {
+ t.Errorf("Issues.ListIssueEvents returned %+v, want %+v", events, want)
+ }
+}
+
+func TestIssuesService_ListRepositoryEvents(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/issues/events", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "page": "1",
+ "per_page": "2",
+ })
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &ListOptions{Page: 1, PerPage: 2}
+ events, _, err := client.Issues.ListRepositoryEvents("o", "r", opt)
+
+ if err != nil {
+ t.Errorf("Issues.ListRepositoryEvents returned error: %v", err)
+ }
+
+ want := []IssueEvent{{ID: Int(1)}}
+ if !reflect.DeepEqual(events, want) {
+ t.Errorf("Issues.ListRepositoryEvents returned %+v, want %+v", events, want)
+ }
+}
+
+func TestIssuesService_GetEvent(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/issues/events/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ event, _, err := client.Issues.GetEvent("o", "r", 1)
+
+ if err != nil {
+ t.Errorf("Issues.GetEvent returned error: %v", err)
+ }
+
+ want := &IssueEvent{ID: Int(1)}
+ if !reflect.DeepEqual(event, want) {
+ t.Errorf("Issues.GetEvent returned %+v, want %+v", event, want)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/issues_labels_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/issues_labels_test.go
new file mode 100644
index 000000000..2243eb0ee
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/issues_labels_test.go
@@ -0,0 +1,313 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestIssuesService_ListLabels(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/labels", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"name": "a"},{"name": "b"}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ labels, _, err := client.Issues.ListLabels("o", "r", opt)
+ if err != nil {
+ t.Errorf("Issues.ListLabels returned error: %v", err)
+ }
+
+ want := []Label{{Name: String("a")}, {Name: String("b")}}
+ if !reflect.DeepEqual(labels, want) {
+ t.Errorf("Issues.ListLabels returned %+v, want %+v", labels, want)
+ }
+}
+
+func TestIssuesService_ListLabels_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.ListLabels("%", "%", nil)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_GetLabel(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/labels/n", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"url":"u", "name": "n", "color": "c"}`)
+ })
+
+ label, _, err := client.Issues.GetLabel("o", "r", "n")
+ if err != nil {
+ t.Errorf("Issues.GetLabel returned error: %v", err)
+ }
+
+ want := &Label{URL: String("u"), Name: String("n"), Color: String("c")}
+ if !reflect.DeepEqual(label, want) {
+ t.Errorf("Issues.GetLabel returned %+v, want %+v", label, want)
+ }
+}
+
+func TestIssuesService_GetLabel_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.GetLabel("%", "%", "%")
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_CreateLabel(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Label{Name: String("n")}
+
+ mux.HandleFunc("/repos/o/r/labels", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Label)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"url":"u"}`)
+ })
+
+ label, _, err := client.Issues.CreateLabel("o", "r", input)
+ if err != nil {
+ t.Errorf("Issues.CreateLabel returned error: %v", err)
+ }
+
+ want := &Label{URL: String("u")}
+ if !reflect.DeepEqual(label, want) {
+ t.Errorf("Issues.CreateLabel returned %+v, want %+v", label, want)
+ }
+}
+
+func TestIssuesService_CreateLabel_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.CreateLabel("%", "%", nil)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_EditLabel(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Label{Name: String("z")}
+
+ mux.HandleFunc("/repos/o/r/labels/n", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Label)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"url":"u"}`)
+ })
+
+ label, _, err := client.Issues.EditLabel("o", "r", "n", input)
+ if err != nil {
+ t.Errorf("Issues.EditLabel returned error: %v", err)
+ }
+
+ want := &Label{URL: String("u")}
+ if !reflect.DeepEqual(label, want) {
+ t.Errorf("Issues.EditLabel returned %+v, want %+v", label, want)
+ }
+}
+
+func TestIssuesService_EditLabel_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.EditLabel("%", "%", "%", nil)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_DeleteLabel(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/labels/n", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Issues.DeleteLabel("o", "r", "n")
+ if err != nil {
+ t.Errorf("Issues.DeleteLabel returned error: %v", err)
+ }
+}
+
+func TestIssuesService_DeleteLabel_invalidOwner(t *testing.T) {
+ _, err := client.Issues.DeleteLabel("%", "%", "%")
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_ListLabelsByIssue(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/issues/1/labels", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"name": "a"},{"name": "b"}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ labels, _, err := client.Issues.ListLabelsByIssue("o", "r", 1, opt)
+ if err != nil {
+ t.Errorf("Issues.ListLabelsByIssue returned error: %v", err)
+ }
+
+ want := []Label{{Name: String("a")}, {Name: String("b")}}
+ if !reflect.DeepEqual(labels, want) {
+ t.Errorf("Issues.ListLabelsByIssue returned %+v, want %+v", labels, want)
+ }
+}
+
+func TestIssuesService_ListLabelsByIssue_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.ListLabelsByIssue("%", "%", 1, nil)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_AddLabelsToIssue(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := []string{"a", "b"}
+
+ mux.HandleFunc("/repos/o/r/issues/1/labels", func(w http.ResponseWriter, r *http.Request) {
+ v := new([]string)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(*v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `[{"url":"u"}]`)
+ })
+
+ labels, _, err := client.Issues.AddLabelsToIssue("o", "r", 1, input)
+ if err != nil {
+ t.Errorf("Issues.AddLabelsToIssue returned error: %v", err)
+ }
+
+ want := []Label{{URL: String("u")}}
+ if !reflect.DeepEqual(labels, want) {
+ t.Errorf("Issues.AddLabelsToIssue returned %+v, want %+v", labels, want)
+ }
+}
+
+func TestIssuesService_AddLabelsToIssue_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.AddLabelsToIssue("%", "%", 1, nil)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_RemoveLabelForIssue(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/issues/1/labels/l", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Issues.RemoveLabelForIssue("o", "r", 1, "l")
+ if err != nil {
+ t.Errorf("Issues.RemoveLabelForIssue returned error: %v", err)
+ }
+}
+
+func TestIssuesService_RemoveLabelForIssue_invalidOwner(t *testing.T) {
+ _, err := client.Issues.RemoveLabelForIssue("%", "%", 1, "%")
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_ReplaceLabelsForIssue(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := []string{"a", "b"}
+
+ mux.HandleFunc("/repos/o/r/issues/1/labels", func(w http.ResponseWriter, r *http.Request) {
+ v := new([]string)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PUT")
+ if !reflect.DeepEqual(*v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `[{"url":"u"}]`)
+ })
+
+ labels, _, err := client.Issues.ReplaceLabelsForIssue("o", "r", 1, input)
+ if err != nil {
+ t.Errorf("Issues.ReplaceLabelsForIssue returned error: %v", err)
+ }
+
+ want := []Label{{URL: String("u")}}
+ if !reflect.DeepEqual(labels, want) {
+ t.Errorf("Issues.ReplaceLabelsForIssue returned %+v, want %+v", labels, want)
+ }
+}
+
+func TestIssuesService_ReplaceLabelsForIssue_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.ReplaceLabelsForIssue("%", "%", 1, nil)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_RemoveLabelsForIssue(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/issues/1/labels", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Issues.RemoveLabelsForIssue("o", "r", 1)
+ if err != nil {
+ t.Errorf("Issues.RemoveLabelsForIssue returned error: %v", err)
+ }
+}
+
+func TestIssuesService_RemoveLabelsForIssue_invalidOwner(t *testing.T) {
+ _, err := client.Issues.RemoveLabelsForIssue("%", "%", 1)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_ListLabelsForMilestone(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/milestones/1/labels", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"name": "a"},{"name": "b"}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ labels, _, err := client.Issues.ListLabelsForMilestone("o", "r", 1, opt)
+ if err != nil {
+ t.Errorf("Issues.ListLabelsForMilestone returned error: %v", err)
+ }
+
+ want := []Label{{Name: String("a")}, {Name: String("b")}}
+ if !reflect.DeepEqual(labels, want) {
+ t.Errorf("Issues.ListLabelsForMilestone returned %+v, want %+v", labels, want)
+ }
+}
+
+func TestIssuesService_ListLabelsForMilestone_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.ListLabelsForMilestone("%", "%", 1, nil)
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/issues_milestones_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/issues_milestones_test.go
new file mode 100644
index 000000000..817fffedd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/issues_milestones_test.go
@@ -0,0 +1,157 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestIssuesService_ListMilestones(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/milestones", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "state": "closed",
+ "sort": "due_date",
+ "direction": "asc",
+ })
+ fmt.Fprint(w, `[{"number":1}]`)
+ })
+
+ opt := &MilestoneListOptions{"closed", "due_date", "asc"}
+ milestones, _, err := client.Issues.ListMilestones("o", "r", opt)
+ if err != nil {
+ t.Errorf("IssuesService.ListMilestones returned error: %v", err)
+ }
+
+ want := []Milestone{{Number: Int(1)}}
+ if !reflect.DeepEqual(milestones, want) {
+ t.Errorf("IssuesService.ListMilestones returned %+v, want %+v", milestones, want)
+ }
+}
+
+func TestIssuesService_ListMilestones_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.ListMilestones("%", "r", nil)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_GetMilestone(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/milestones/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"number":1}`)
+ })
+
+ milestone, _, err := client.Issues.GetMilestone("o", "r", 1)
+ if err != nil {
+ t.Errorf("IssuesService.GetMilestone returned error: %v", err)
+ }
+
+ want := &Milestone{Number: Int(1)}
+ if !reflect.DeepEqual(milestone, want) {
+ t.Errorf("IssuesService.GetMilestone returned %+v, want %+v", milestone, want)
+ }
+}
+
+func TestIssuesService_GetMilestone_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.GetMilestone("%", "r", 1)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_CreateMilestone(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Milestone{Title: String("t")}
+
+ mux.HandleFunc("/repos/o/r/milestones", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Milestone)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"number":1}`)
+ })
+
+ milestone, _, err := client.Issues.CreateMilestone("o", "r", input)
+ if err != nil {
+ t.Errorf("IssuesService.CreateMilestone returned error: %v", err)
+ }
+
+ want := &Milestone{Number: Int(1)}
+ if !reflect.DeepEqual(milestone, want) {
+ t.Errorf("IssuesService.CreateMilestone returned %+v, want %+v", milestone, want)
+ }
+}
+
+func TestIssuesService_CreateMilestone_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.CreateMilestone("%", "r", nil)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_EditMilestone(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Milestone{Title: String("t")}
+
+ mux.HandleFunc("/repos/o/r/milestones/1", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Milestone)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"number":1}`)
+ })
+
+ milestone, _, err := client.Issues.EditMilestone("o", "r", 1, input)
+ if err != nil {
+ t.Errorf("IssuesService.EditMilestone returned error: %v", err)
+ }
+
+ want := &Milestone{Number: Int(1)}
+ if !reflect.DeepEqual(milestone, want) {
+ t.Errorf("IssuesService.EditMilestone returned %+v, want %+v", milestone, want)
+ }
+}
+
+func TestIssuesService_EditMilestone_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.EditMilestone("%", "r", 1, nil)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_DeleteMilestone(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/milestones/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Issues.DeleteMilestone("o", "r", 1)
+ if err != nil {
+ t.Errorf("IssuesService.DeleteMilestone returned error: %v", err)
+ }
+}
+
+func TestIssuesService_DeleteMilestone_invalidOwner(t *testing.T) {
+ _, err := client.Issues.DeleteMilestone("%", "r", 1)
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/issues_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/issues_test.go
new file mode 100644
index 000000000..f69efd399
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/issues_test.go
@@ -0,0 +1,242 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestIssuesService_List_all(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/issues", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "filter": "all",
+ "state": "closed",
+ "labels": "a,b",
+ "sort": "updated",
+ "direction": "asc",
+ "since": "2002-02-10T15:30:00Z",
+ "page": "1",
+ "per_page": "2",
+ })
+ fmt.Fprint(w, `[{"number":1}]`)
+ })
+
+ opt := &IssueListOptions{
+ "all", "closed", []string{"a", "b"}, "updated", "asc",
+ time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC),
+ ListOptions{Page: 1, PerPage: 2},
+ }
+ issues, _, err := client.Issues.List(true, opt)
+
+ if err != nil {
+ t.Errorf("Issues.List returned error: %v", err)
+ }
+
+ want := []Issue{{Number: Int(1)}}
+ if !reflect.DeepEqual(issues, want) {
+ t.Errorf("Issues.List returned %+v, want %+v", issues, want)
+ }
+}
+
+func TestIssuesService_List_owned(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/issues", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{"number":1}]`)
+ })
+
+ issues, _, err := client.Issues.List(false, nil)
+ if err != nil {
+ t.Errorf("Issues.List returned error: %v", err)
+ }
+
+ want := []Issue{{Number: Int(1)}}
+ if !reflect.DeepEqual(issues, want) {
+ t.Errorf("Issues.List returned %+v, want %+v", issues, want)
+ }
+}
+
+func TestIssuesService_ListByOrg(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/issues", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{"number":1}]`)
+ })
+
+ issues, _, err := client.Issues.ListByOrg("o", nil)
+ if err != nil {
+ t.Errorf("Issues.ListByOrg returned error: %v", err)
+ }
+
+ want := []Issue{{Number: Int(1)}}
+ if !reflect.DeepEqual(issues, want) {
+		t.Errorf("Issues.ListByOrg returned %+v, want %+v", issues, want)
+ }
+}
+
+func TestIssuesService_ListByOrg_invalidOrg(t *testing.T) {
+ _, _, err := client.Issues.ListByOrg("%", nil)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_ListByRepo(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/issues", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "milestone": "*",
+ "state": "closed",
+ "assignee": "a",
+ "creator": "c",
+ "mentioned": "m",
+ "labels": "a,b",
+ "sort": "updated",
+ "direction": "asc",
+ "since": "2002-02-10T15:30:00Z",
+ })
+ fmt.Fprint(w, `[{"number":1}]`)
+ })
+
+ opt := &IssueListByRepoOptions{
+ "*", "closed", "a", "c", "m", []string{"a", "b"}, "updated", "asc",
+ time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC),
+ ListOptions{0, 0},
+ }
+ issues, _, err := client.Issues.ListByRepo("o", "r", opt)
+ if err != nil {
+		t.Errorf("Issues.ListByRepo returned error: %v", err)
+ }
+
+ want := []Issue{{Number: Int(1)}}
+ if !reflect.DeepEqual(issues, want) {
+		t.Errorf("Issues.ListByRepo returned %+v, want %+v", issues, want)
+ }
+}
+
+func TestIssuesService_ListByRepo_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.ListByRepo("%", "r", nil)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_Get(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/issues/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"number":1, "labels": [{"url": "u", "name": "n", "color": "c"}]}`)
+ })
+
+ issue, _, err := client.Issues.Get("o", "r", 1)
+ if err != nil {
+ t.Errorf("Issues.Get returned error: %v", err)
+ }
+
+ want := &Issue{
+ Number: Int(1),
+ Labels: []Label{{
+ URL: String("u"),
+ Name: String("n"),
+ Color: String("c"),
+ }},
+ }
+ if !reflect.DeepEqual(issue, want) {
+ t.Errorf("Issues.Get returned %+v, want %+v", issue, want)
+ }
+}
+
+func TestIssuesService_Get_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.Get("%", "r", 1)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_Create(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &IssueRequest{
+ Title: String("t"),
+ Body: String("b"),
+ Assignee: String("a"),
+ Labels: &[]string{"l1", "l2"},
+ }
+
+ mux.HandleFunc("/repos/o/r/issues", func(w http.ResponseWriter, r *http.Request) {
+ v := new(IssueRequest)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"number":1}`)
+ })
+
+ issue, _, err := client.Issues.Create("o", "r", input)
+ if err != nil {
+ t.Errorf("Issues.Create returned error: %v", err)
+ }
+
+ want := &Issue{Number: Int(1)}
+ if !reflect.DeepEqual(issue, want) {
+ t.Errorf("Issues.Create returned %+v, want %+v", issue, want)
+ }
+}
+
+func TestIssuesService_Create_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.Create("%", "r", nil)
+ testURLParseError(t, err)
+}
+
+func TestIssuesService_Edit(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &IssueRequest{Title: String("t")}
+
+ mux.HandleFunc("/repos/o/r/issues/1", func(w http.ResponseWriter, r *http.Request) {
+ v := new(IssueRequest)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"number":1}`)
+ })
+
+ issue, _, err := client.Issues.Edit("o", "r", 1, input)
+ if err != nil {
+ t.Errorf("Issues.Edit returned error: %v", err)
+ }
+
+ want := &Issue{Number: Int(1)}
+ if !reflect.DeepEqual(issue, want) {
+ t.Errorf("Issues.Edit returned %+v, want %+v", issue, want)
+ }
+}
+
+func TestIssuesService_Edit_invalidOwner(t *testing.T) {
+ _, _, err := client.Issues.Edit("%", "r", 1, nil)
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/licenses_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/licenses_test.go
new file mode 100644
index 000000000..dfecfebbb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/licenses_test.go
@@ -0,0 +1,64 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestLicensesService_List(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/licenses", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testHeader(t, r, "Accept", mediaTypeLicensesPreview)
+ fmt.Fprint(w, `[{"key":"mit","name":"MIT","url":"https://api.github.com/licenses/mit"}]`)
+ })
+
+ licenses, _, err := client.Licenses.List()
+ if err != nil {
+ t.Errorf("Licenses.List returned error: %v", err)
+ }
+
+ want := []License{{
+ Key: String("mit"),
+ Name: String("MIT"),
+ URL: String("https://api.github.com/licenses/mit"),
+ }}
+ if !reflect.DeepEqual(licenses, want) {
+ t.Errorf("Licenses.List returned %+v, want %+v", licenses, want)
+ }
+}
+
+func TestLicensesService_Get(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/licenses/mit", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testHeader(t, r, "Accept", mediaTypeLicensesPreview)
+ fmt.Fprint(w, `{"key":"mit","name":"MIT"}`)
+ })
+
+ license, _, err := client.Licenses.Get("mit")
+ if err != nil {
+ t.Errorf("Licenses.Get returned error: %v", err)
+ }
+
+ want := &License{Key: String("mit"), Name: String("MIT")}
+ if !reflect.DeepEqual(license, want) {
+ t.Errorf("Licenses.Get returned %+v, want %+v", license, want)
+ }
+}
+
+func TestLicensesService_Get_invalidTemplate(t *testing.T) {
+ _, _, err := client.Licenses.Get("%")
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/misc_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/misc_test.go
new file mode 100644
index 000000000..8ca58d251
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/misc_test.go
@@ -0,0 +1,170 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestMarkdown(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &markdownRequest{
+ Text: String("# text #"),
+ Mode: String("gfm"),
+ Context: String("google/go-github"),
+ }
+ mux.HandleFunc("/markdown", func(w http.ResponseWriter, r *http.Request) {
+ v := new(markdownRequest)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+		fmt.Fprint(w, `<p>text</p>`)
+ })
+
+ md, _, err := client.Markdown("# text #", &MarkdownOptions{
+ Mode: "gfm",
+ Context: "google/go-github",
+ })
+ if err != nil {
+ t.Errorf("Markdown returned error: %v", err)
+ }
+
+	if want := "<p>text</p>"; want != md {
+ t.Errorf("Markdown returned %+v, want %+v", md, want)
+ }
+}
+
+func TestListEmojis(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/emojis", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"+1": "+1.png"}`)
+ })
+
+ emoji, _, err := client.ListEmojis()
+ if err != nil {
+ t.Errorf("ListEmojis returned error: %v", err)
+ }
+
+ want := map[string]string{"+1": "+1.png"}
+ if !reflect.DeepEqual(want, emoji) {
+ t.Errorf("ListEmojis returned %+v, want %+v", emoji, want)
+ }
+}
+
+func TestAPIMeta(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/meta", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"hooks":["h"], "git":["g"], "pages":["p"], "verifiable_password_authentication": true}`)
+ })
+
+ meta, _, err := client.APIMeta()
+ if err != nil {
+ t.Errorf("APIMeta returned error: %v", err)
+ }
+
+ want := &APIMeta{
+ Hooks: []string{"h"},
+ Git: []string{"g"},
+ Pages: []string{"p"},
+ VerifiablePasswordAuthentication: Bool(true),
+ }
+ if !reflect.DeepEqual(want, meta) {
+ t.Errorf("APIMeta returned %+v, want %+v", meta, want)
+ }
+}
+
+func TestOctocat(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := "input"
+ output := "sample text"
+
+ mux.HandleFunc("/octocat", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"s": input})
+ w.Header().Set("Content-Type", "application/octocat-stream")
+ fmt.Fprint(w, output)
+ })
+
+ got, _, err := client.Octocat(input)
+ if err != nil {
+ t.Errorf("Octocat returned error: %v", err)
+ }
+
+ if want := output; got != want {
+ t.Errorf("Octocat returned %+v, want %+v", got, want)
+ }
+}
+
+func TestZen(t *testing.T) {
+ setup()
+ defer teardown()
+
+ output := "sample text"
+
+ mux.HandleFunc("/zen", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.Header().Set("Content-Type", "text/plain;charset=utf-8")
+ fmt.Fprint(w, output)
+ })
+
+ got, _, err := client.Zen()
+ if err != nil {
+ t.Errorf("Zen returned error: %v", err)
+ }
+
+ if want := output; got != want {
+ t.Errorf("Zen returned %+v, want %+v", got, want)
+ }
+}
+
+func TestRepositoriesService_ListServiceHooks(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/hooks", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{
+ "name":"n",
+ "events":["e"],
+ "supported_events":["s"],
+ "schema":[
+ ["a", "b"]
+ ]
+ }]`)
+ })
+
+ hooks, _, err := client.Repositories.ListServiceHooks()
+ if err != nil {
+		t.Errorf("Repositories.ListServiceHooks returned error: %v", err)
+ }
+
+ want := []ServiceHook{{
+ Name: String("n"),
+ Events: []string{"e"},
+ SupportedEvents: []string{"s"},
+ Schema: [][]string{{"a", "b"}},
+ }}
+ if !reflect.DeepEqual(hooks, want) {
+ t.Errorf("Repositories.ListServiceHooks returned %+v, want %+v", hooks, want)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/orgs_hooks_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/orgs_hooks_test.go
new file mode 100644
index 000000000..1ebc07d5a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/orgs_hooks_test.go
@@ -0,0 +1,134 @@
+// Copyright 2015 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestOrganizationsService_ListHooks(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/hooks", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"id":1}, {"id":2}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+
+ hooks, _, err := client.Organizations.ListHooks("o", opt)
+ if err != nil {
+ t.Errorf("Organizations.ListHooks returned error: %v", err)
+ }
+
+ want := []Hook{{ID: Int(1)}, {ID: Int(2)}}
+ if !reflect.DeepEqual(hooks, want) {
+ t.Errorf("Organizations.ListHooks returned %+v, want %+v", hooks, want)
+ }
+}
+
+func TestOrganizationsService_ListHooks_invalidOrg(t *testing.T) {
+ _, _, err := client.Organizations.ListHooks("%", nil)
+ testURLParseError(t, err)
+}
+
+func TestOrganizationsService_GetHook(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/hooks/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ hook, _, err := client.Organizations.GetHook("o", 1)
+ if err != nil {
+ t.Errorf("Organizations.GetHook returned error: %v", err)
+ }
+
+ want := &Hook{ID: Int(1)}
+ if !reflect.DeepEqual(hook, want) {
+ t.Errorf("Organizations.GetHook returned %+v, want %+v", hook, want)
+ }
+}
+
+func TestOrganizationsService_GetHook_invalidOrg(t *testing.T) {
+ _, _, err := client.Organizations.GetHook("%", 1)
+ testURLParseError(t, err)
+}
+
+func TestOrganizationsService_EditHook(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Hook{Name: String("t")}
+
+ mux.HandleFunc("/orgs/o/hooks/1", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Hook)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ hook, _, err := client.Organizations.EditHook("o", 1, input)
+ if err != nil {
+ t.Errorf("Organizations.EditHook returned error: %v", err)
+ }
+
+ want := &Hook{ID: Int(1)}
+ if !reflect.DeepEqual(hook, want) {
+ t.Errorf("Organizations.EditHook returned %+v, want %+v", hook, want)
+ }
+}
+
+func TestOrganizationsService_EditHook_invalidOrg(t *testing.T) {
+ _, _, err := client.Organizations.EditHook("%", 1, nil)
+ testURLParseError(t, err)
+}
+
+func TestOrganizationsService_PingHook(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/hooks/1/pings", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "POST")
+ })
+
+ _, err := client.Organizations.PingHook("o", 1)
+ if err != nil {
+ t.Errorf("Organizations.PingHook returned error: %v", err)
+ }
+}
+
+func TestOrganizationsService_DeleteHook(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/hooks/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Organizations.DeleteHook("o", 1)
+ if err != nil {
+ t.Errorf("Organizations.DeleteHook returned error: %v", err)
+ }
+}
+
+func TestOrganizationsService_DeleteHook_invalidOrg(t *testing.T) {
+ _, err := client.Organizations.DeleteHook("%", 1)
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/orgs_members.go b/Godeps/_workspace/src/github.com/google/go-github/github/orgs_members.go
index c66b4956f..c326ff8a3 100644
--- a/Godeps/_workspace/src/github.com/google/go-github/github/orgs_members.go
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/orgs_members.go
@@ -238,14 +238,16 @@ func (s *OrganizationsService) GetOrgMembership(user, org string) (*Membership,
// GitHub API docs: https://developer.github.com/v3/orgs/members/#add-or-update-organization-membership
// GitHub API docs: https://developer.github.com/v3/orgs/members/#edit-your-organization-membership
func (s *OrganizationsService) EditOrgMembership(user, org string, membership *Membership) (*Membership, *Response, error) {
- var u string
+ var u, method string
if user != "" {
u = fmt.Sprintf("orgs/%v/memberships/%v", org, user)
+ method = "PUT"
} else {
u = fmt.Sprintf("user/memberships/orgs/%v", org)
+ method = "PATCH"
}
- req, err := s.client.NewRequest("PATCH", u, membership)
+ req, err := s.client.NewRequest(method, u, membership)
if err != nil {
return nil, nil, err
}
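Note on the orgs_members.go hunk above: EditOrgMembership now picks the HTTP method from its arguments, using PUT against orgs/{org}/memberships/{user} when a username is given and PATCH against user/memberships/orgs/{org} for the authenticated user; the new orgs_members_test.go below pins both paths. A minimal usage sketch under assumed conditions (github.NewClient(nil) is illustrative only; real membership edits need an authenticated client):

	package main

	import (
		"log"

		"github.com/google/go-github/github"
	)

	func main() {
		// Illustrative client; constructing an authenticated client is omitted here.
		client := github.NewClient(nil)
		membership := &github.Membership{State: github.String("active")}

		// Named user: the client now sends PUT orgs/o/memberships/u.
		if _, _, err := client.Organizations.EditOrgMembership("u", "o", membership); err != nil {
			log.Println(err)
		}

		// Empty user: the client still sends PATCH user/memberships/orgs/o.
		if _, _, err := client.Organizations.EditOrgMembership("", "o", membership); err != nil {
			log.Println(err)
		}
	}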
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/orgs_members_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/orgs_members_test.go
new file mode 100644
index 000000000..6613efe17
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/orgs_members_test.go
@@ -0,0 +1,356 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestOrganizationsService_ListMembers(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/members", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testHeader(t, r, "Accept", mediaTypeOrgPermissionPreview)
+ testFormValues(t, r, values{
+ "filter": "2fa_disabled",
+ "role": "admin",
+ "page": "2",
+ })
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &ListMembersOptions{
+ PublicOnly: false,
+ Filter: "2fa_disabled",
+ Role: "admin",
+ ListOptions: ListOptions{Page: 2},
+ }
+ members, _, err := client.Organizations.ListMembers("o", opt)
+ if err != nil {
+ t.Errorf("Organizations.ListMembers returned error: %v", err)
+ }
+
+ want := []User{{ID: Int(1)}}
+ if !reflect.DeepEqual(members, want) {
+ t.Errorf("Organizations.ListMembers returned %+v, want %+v", members, want)
+ }
+}
+
+func TestOrganizationsService_ListMembers_invalidOrg(t *testing.T) {
+ _, _, err := client.Organizations.ListMembers("%", nil)
+ testURLParseError(t, err)
+}
+
+func TestOrganizationsService_ListMembers_public(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/public_members", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &ListMembersOptions{PublicOnly: true}
+ members, _, err := client.Organizations.ListMembers("o", opt)
+ if err != nil {
+ t.Errorf("Organizations.ListMembers returned error: %v", err)
+ }
+
+ want := []User{{ID: Int(1)}}
+ if !reflect.DeepEqual(members, want) {
+ t.Errorf("Organizations.ListMembers returned %+v, want %+v", members, want)
+ }
+}
+
+func TestOrganizationsService_IsMember(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/members/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ member, _, err := client.Organizations.IsMember("o", "u")
+ if err != nil {
+ t.Errorf("Organizations.IsMember returned error: %v", err)
+ }
+ if want := true; member != want {
+ t.Errorf("Organizations.IsMember returned %+v, want %+v", member, want)
+ }
+}
+
+// ensure that a 404 response is interpreted as "false" and not an error
+func TestOrganizationsService_IsMember_notMember(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/members/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusNotFound)
+ })
+
+ member, _, err := client.Organizations.IsMember("o", "u")
+ if err != nil {
+ t.Errorf("Organizations.IsMember returned error: %+v", err)
+ }
+ if want := false; member != want {
+ t.Errorf("Organizations.IsMember returned %+v, want %+v", member, want)
+ }
+}
+
+// ensure that a 400 response is interpreted as an actual error, and not simply
+// as "false" like the above case of a 404
+func TestOrganizationsService_IsMember_error(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/members/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ http.Error(w, "BadRequest", http.StatusBadRequest)
+ })
+
+ member, _, err := client.Organizations.IsMember("o", "u")
+ if err == nil {
+ t.Errorf("Expected HTTP 400 response")
+ }
+ if want := false; member != want {
+ t.Errorf("Organizations.IsMember returned %+v, want %+v", member, want)
+ }
+}
+
+func TestOrganizationsService_IsMember_invalidOrg(t *testing.T) {
+ _, _, err := client.Organizations.IsMember("%", "u")
+ testURLParseError(t, err)
+}
+
+func TestOrganizationsService_IsPublicMember(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/public_members/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ member, _, err := client.Organizations.IsPublicMember("o", "u")
+ if err != nil {
+ t.Errorf("Organizations.IsPublicMember returned error: %v", err)
+ }
+ if want := true; member != want {
+ t.Errorf("Organizations.IsPublicMember returned %+v, want %+v", member, want)
+ }
+}
+
+// ensure that a 404 response is interpreted as "false" and not an error
+func TestOrganizationsService_IsPublicMember_notMember(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/public_members/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusNotFound)
+ })
+
+ member, _, err := client.Organizations.IsPublicMember("o", "u")
+ if err != nil {
+ t.Errorf("Organizations.IsPublicMember returned error: %v", err)
+ }
+ if want := false; member != want {
+ t.Errorf("Organizations.IsPublicMember returned %+v, want %+v", member, want)
+ }
+}
+
+// ensure that a 400 response is interpreted as an actual error, and not simply
+// as "false" like the above case of a 404
+func TestOrganizationsService_IsPublicMember_error(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/public_members/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ http.Error(w, "BadRequest", http.StatusBadRequest)
+ })
+
+ member, _, err := client.Organizations.IsPublicMember("o", "u")
+ if err == nil {
+ t.Errorf("Expected HTTP 400 response")
+ }
+ if want := false; member != want {
+ t.Errorf("Organizations.IsPublicMember returned %+v, want %+v", member, want)
+ }
+}
+
+func TestOrganizationsService_IsPublicMember_invalidOrg(t *testing.T) {
+ _, _, err := client.Organizations.IsPublicMember("%", "u")
+ testURLParseError(t, err)
+}
+
+func TestOrganizationsService_RemoveMember(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/members/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Organizations.RemoveMember("o", "u")
+ if err != nil {
+ t.Errorf("Organizations.RemoveMember returned error: %v", err)
+ }
+}
+
+func TestOrganizationsService_RemoveMember_invalidOrg(t *testing.T) {
+ _, err := client.Organizations.RemoveMember("%", "u")
+ testURLParseError(t, err)
+}
+
+func TestOrganizationsService_ListOrgMemberships(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/memberships/orgs", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "state": "active",
+ "page": "2",
+ })
+ fmt.Fprint(w, `[{"url":"u"}]`)
+ })
+
+ opt := &ListOrgMembershipsOptions{
+ State: "active",
+ ListOptions: ListOptions{Page: 2},
+ }
+ memberships, _, err := client.Organizations.ListOrgMemberships(opt)
+ if err != nil {
+ t.Errorf("Organizations.ListOrgMemberships returned error: %v", err)
+ }
+
+ want := []Membership{{URL: String("u")}}
+ if !reflect.DeepEqual(memberships, want) {
+ t.Errorf("Organizations.ListOrgMemberships returned %+v, want %+v", memberships, want)
+ }
+}
+
+func TestOrganizationsService_GetOrgMembership_AuthenticatedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/memberships/orgs/o", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"url":"u"}`)
+ })
+
+ membership, _, err := client.Organizations.GetOrgMembership("", "o")
+ if err != nil {
+ t.Errorf("Organizations.GetOrgMembership returned error: %v", err)
+ }
+
+ want := &Membership{URL: String("u")}
+ if !reflect.DeepEqual(membership, want) {
+ t.Errorf("Organizations.GetOrgMembership returned %+v, want %+v", membership, want)
+ }
+}
+
+func TestOrganizationsService_GetOrgMembership_SpecifiedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/memberships/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"url":"u"}`)
+ })
+
+ membership, _, err := client.Organizations.GetOrgMembership("u", "o")
+ if err != nil {
+ t.Errorf("Organizations.GetOrgMembership returned error: %v", err)
+ }
+
+ want := &Membership{URL: String("u")}
+ if !reflect.DeepEqual(membership, want) {
+ t.Errorf("Organizations.GetOrgMembership returned %+v, want %+v", membership, want)
+ }
+}
+
+func TestOrganizationsService_EditOrgMembership_AuthenticatedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Membership{State: String("active")}
+
+ mux.HandleFunc("/user/memberships/orgs/o", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Membership)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"url":"u"}`)
+ })
+
+ membership, _, err := client.Organizations.EditOrgMembership("", "o", input)
+ if err != nil {
+ t.Errorf("Organizations.EditOrgMembership returned error: %v", err)
+ }
+
+ want := &Membership{URL: String("u")}
+ if !reflect.DeepEqual(membership, want) {
+ t.Errorf("Organizations.EditOrgMembership returned %+v, want %+v", membership, want)
+ }
+}
+
+func TestOrganizationsService_EditOrgMembership_SpecifiedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Membership{State: String("active")}
+
+ mux.HandleFunc("/orgs/o/memberships/u", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Membership)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PUT")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"url":"u"}`)
+ })
+
+ membership, _, err := client.Organizations.EditOrgMembership("u", "o", input)
+ if err != nil {
+ t.Errorf("Organizations.EditOrgMembership returned error: %v", err)
+ }
+
+ want := &Membership{URL: String("u")}
+ if !reflect.DeepEqual(membership, want) {
+ t.Errorf("Organizations.EditOrgMembership returned %+v, want %+v", membership, want)
+ }
+}
+
+func TestOrganizationsService_RemoveOrgMembership(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/memberships/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ _, err := client.Organizations.RemoveOrgMembership("u", "o")
+ if err != nil {
+ t.Errorf("Organizations.RemoveOrgMembership returned error: %v", err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/orgs_teams_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/orgs_teams_test.go
new file mode 100644
index 000000000..a258137ad
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/orgs_teams_test.go
@@ -0,0 +1,506 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestOrganizationsService_ListTeams(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/teams", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ teams, _, err := client.Organizations.ListTeams("o", opt)
+ if err != nil {
+ t.Errorf("Organizations.ListTeams returned error: %v", err)
+ }
+
+ want := []Team{{ID: Int(1)}}
+ if !reflect.DeepEqual(teams, want) {
+ t.Errorf("Organizations.ListTeams returned %+v, want %+v", teams, want)
+ }
+}
+
+func TestOrganizationsService_ListTeams_invalidOrg(t *testing.T) {
+ _, _, err := client.Organizations.ListTeams("%", nil)
+ testURLParseError(t, err)
+}
+
+func TestOrganizationsService_GetTeam(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/teams/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id":1, "name":"n", "url":"u", "slug": "s", "permission":"p"}`)
+ })
+
+ team, _, err := client.Organizations.GetTeam(1)
+ if err != nil {
+ t.Errorf("Organizations.GetTeam returned error: %v", err)
+ }
+
+ want := &Team{ID: Int(1), Name: String("n"), URL: String("u"), Slug: String("s"), Permission: String("p")}
+ if !reflect.DeepEqual(team, want) {
+ t.Errorf("Organizations.GetTeam returned %+v, want %+v", team, want)
+ }
+}
+
+func TestOrganizationsService_CreateTeam(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Team{Name: String("n"), Privacy: String("closed")}
+
+ mux.HandleFunc("/orgs/o/teams", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Team)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ testHeader(t, r, "Accept", mediaTypeOrgPermissionPreview)
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ team, _, err := client.Organizations.CreateTeam("o", input)
+ if err != nil {
+ t.Errorf("Organizations.CreateTeam returned error: %v", err)
+ }
+
+ want := &Team{ID: Int(1)}
+ if !reflect.DeepEqual(team, want) {
+ t.Errorf("Organizations.CreateTeam returned %+v, want %+v", team, want)
+ }
+}
+
+func TestOrganizationsService_CreateTeam_invalidOrg(t *testing.T) {
+ _, _, err := client.Organizations.CreateTeam("%", nil)
+ testURLParseError(t, err)
+}
+
+func TestOrganizationsService_EditTeam(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Team{Name: String("n"), Privacy: String("closed")}
+
+ mux.HandleFunc("/teams/1", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Team)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ testHeader(t, r, "Accept", mediaTypeOrgPermissionPreview)
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ team, _, err := client.Organizations.EditTeam(1, input)
+ if err != nil {
+ t.Errorf("Organizations.EditTeam returned error: %v", err)
+ }
+
+ want := &Team{ID: Int(1)}
+ if !reflect.DeepEqual(team, want) {
+ t.Errorf("Organizations.EditTeam returned %+v, want %+v", team, want)
+ }
+}
+
+func TestOrganizationsService_DeleteTeam(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/teams/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Organizations.DeleteTeam(1)
+ if err != nil {
+ t.Errorf("Organizations.DeleteTeam returned error: %v", err)
+ }
+}
+
+func TestOrganizationsService_ListTeamMembers(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/teams/1/members", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testHeader(t, r, "Accept", mediaTypeOrgPermissionPreview)
+ testFormValues(t, r, values{"role": "member", "page": "2"})
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &OrganizationListTeamMembersOptions{Role: "member", ListOptions: ListOptions{Page: 2}}
+ members, _, err := client.Organizations.ListTeamMembers(1, opt)
+ if err != nil {
+ t.Errorf("Organizations.ListTeamMembers returned error: %v", err)
+ }
+
+ want := []User{{ID: Int(1)}}
+ if !reflect.DeepEqual(members, want) {
+ t.Errorf("Organizations.ListTeamMembers returned %+v, want %+v", members, want)
+ }
+}
+
+func TestOrganizationsService_IsTeamMember_true(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/teams/1/members/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ })
+
+ member, _, err := client.Organizations.IsTeamMember(1, "u")
+ if err != nil {
+ t.Errorf("Organizations.IsTeamMember returned error: %v", err)
+ }
+ if want := true; member != want {
+ t.Errorf("Organizations.IsTeamMember returned %+v, want %+v", member, want)
+ }
+}
+
+// ensure that a 404 response is interpreted as "false" and not an error
+func TestOrganizationsService_IsTeamMember_false(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/teams/1/members/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusNotFound)
+ })
+
+ member, _, err := client.Organizations.IsTeamMember(1, "u")
+ if err != nil {
+ t.Errorf("Organizations.IsTeamMember returned error: %+v", err)
+ }
+ if want := false; member != want {
+ t.Errorf("Organizations.IsTeamMember returned %+v, want %+v", member, want)
+ }
+}
+
+// ensure that a 400 response is interpreted as an actual error, and not simply
+// as "false" like the above case of a 404
+func TestOrganizationsService_IsTeamMember_error(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/teams/1/members/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ http.Error(w, "BadRequest", http.StatusBadRequest)
+ })
+
+ member, _, err := client.Organizations.IsTeamMember(1, "u")
+ if err == nil {
+ t.Errorf("Expected HTTP 400 response")
+ }
+ if want := false; member != want {
+ t.Errorf("Organizations.IsTeamMember returned %+v, want %+v", member, want)
+ }
+}
+
+func TestOrganizationsService_IsTeamMember_invalidUser(t *testing.T) {
+ _, _, err := client.Organizations.IsTeamMember(1, "%")
+ testURLParseError(t, err)
+}
+
+func TestOrganizationsService_PublicizeMembership(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/public_members/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "PUT")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ _, err := client.Organizations.PublicizeMembership("o", "u")
+ if err != nil {
+ t.Errorf("Organizations.PublicizeMembership returned error: %v", err)
+ }
+}
+
+func TestOrganizationsService_PublicizeMembership_invalidOrg(t *testing.T) {
+ _, err := client.Organizations.PublicizeMembership("%", "u")
+ testURLParseError(t, err)
+}
+
+func TestOrganizationsService_ConcealMembership(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/public_members/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ _, err := client.Organizations.ConcealMembership("o", "u")
+ if err != nil {
+ t.Errorf("Organizations.ConcealMembership returned error: %v", err)
+ }
+}
+
+func TestOrganizationsService_ConcealMembership_invalidOrg(t *testing.T) {
+ _, err := client.Organizations.ConcealMembership("%", "u")
+ testURLParseError(t, err)
+}
+
+func TestOrganizationsService_ListTeamRepos(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/teams/1/repos", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ members, _, err := client.Organizations.ListTeamRepos(1, opt)
+ if err != nil {
+ t.Errorf("Organizations.ListTeamRepos returned error: %v", err)
+ }
+
+ want := []Repository{{ID: Int(1)}}
+ if !reflect.DeepEqual(members, want) {
+ t.Errorf("Organizations.ListTeamRepos returned %+v, want %+v", members, want)
+ }
+}
+
+func TestOrganizationsService_IsTeamRepo_true(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/teams/1/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testHeader(t, r, "Accept", mediaTypeOrgPermissionRepoPreview)
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ repo, _, err := client.Organizations.IsTeamRepo(1, "o", "r")
+ if err != nil {
+ t.Errorf("Organizations.IsTeamRepo returned error: %v", err)
+ }
+
+ want := &Repository{ID: Int(1)}
+ if !reflect.DeepEqual(repo, want) {
+ t.Errorf("Organizations.IsTeamRepo returned %+v, want %+v", repo, want)
+ }
+}
+
+func TestOrganizationsService_IsTeamRepo_false(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/teams/1/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusNotFound)
+ })
+
+ repo, resp, err := client.Organizations.IsTeamRepo(1, "o", "r")
+ if err == nil {
+ t.Errorf("Expected HTTP 404 response")
+ }
+ if got, want := resp.Response.StatusCode, http.StatusNotFound; got != want {
+ t.Errorf("Organizations.IsTeamRepo returned status %d, want %d", got, want)
+ }
+ if repo != nil {
+ t.Errorf("Organizations.IsTeamRepo returned %+v, want nil", repo)
+ }
+}
+
+func TestOrganizationsService_IsTeamRepo_error(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/teams/1/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ http.Error(w, "BadRequest", http.StatusBadRequest)
+ })
+
+ repo, resp, err := client.Organizations.IsTeamRepo(1, "o", "r")
+ if err == nil {
+ t.Errorf("Expected HTTP 400 response")
+ }
+ if got, want := resp.Response.StatusCode, http.StatusBadRequest; got != want {
+ t.Errorf("Organizations.IsTeamRepo returned status %d, want %d", got, want)
+ }
+ if repo != nil {
+ t.Errorf("Organizations.IsTeamRepo returned %+v, want nil", repo)
+ }
+}
+
+func TestOrganizationsService_IsTeamRepo_invalidOwner(t *testing.T) {
+ _, _, err := client.Organizations.IsTeamRepo(1, "%", "r")
+ testURLParseError(t, err)
+}
+
+func TestOrganizationsService_AddTeamRepo(t *testing.T) {
+ setup()
+ defer teardown()
+
+ opt := &OrganizationAddTeamRepoOptions{Permission: "admin"}
+
+ mux.HandleFunc("/teams/1/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
+ v := new(OrganizationAddTeamRepoOptions)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PUT")
+ testHeader(t, r, "Accept", mediaTypeOrgPermissionPreview)
+ if !reflect.DeepEqual(v, opt) {
+ t.Errorf("Request body = %+v, want %+v", v, opt)
+ }
+
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ _, err := client.Organizations.AddTeamRepo(1, "o", "r", opt)
+ if err != nil {
+ t.Errorf("Organizations.AddTeamRepo returned error: %v", err)
+ }
+}
+
+func TestOrganizationsService_AddTeamRepo_noAccess(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/teams/1/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "PUT")
+ w.WriteHeader(422)
+ })
+
+ _, err := client.Organizations.AddTeamRepo(1, "o", "r", nil)
+ if err == nil {
+		t.Errorf("Expected error to be returned")
+ }
+}
+
+func TestOrganizationsService_AddTeamRepo_invalidOwner(t *testing.T) {
+ _, err := client.Organizations.AddTeamRepo(1, "%", "r", nil)
+ testURLParseError(t, err)
+}
+
+func TestOrganizationsService_RemoveTeamRepo(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/teams/1/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ _, err := client.Organizations.RemoveTeamRepo(1, "o", "r")
+ if err != nil {
+ t.Errorf("Organizations.RemoveTeamRepo returned error: %v", err)
+ }
+}
+
+func TestOrganizationsService_RemoveTeamRepo_invalidOwner(t *testing.T) {
+ _, err := client.Organizations.RemoveTeamRepo(1, "%", "r")
+ testURLParseError(t, err)
+}
+
+func TestOrganizationsService_GetTeamMembership(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/teams/1/memberships/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"url":"u", "state":"active"}`)
+ })
+
+ membership, _, err := client.Organizations.GetTeamMembership(1, "u")
+ if err != nil {
+ t.Errorf("Organizations.GetTeamMembership returned error: %v", err)
+ }
+
+ want := &Membership{URL: String("u"), State: String("active")}
+ if !reflect.DeepEqual(membership, want) {
+ t.Errorf("Organizations.GetTeamMembership returned %+v, want %+v", membership, want)
+ }
+}
+
+func TestOrganizationsService_AddTeamMembership(t *testing.T) {
+ setup()
+ defer teardown()
+
+ opt := &OrganizationAddTeamMembershipOptions{Role: "maintainer"}
+
+ mux.HandleFunc("/teams/1/memberships/u", func(w http.ResponseWriter, r *http.Request) {
+ v := new(OrganizationAddTeamMembershipOptions)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PUT")
+ testHeader(t, r, "Accept", mediaTypeOrgPermissionPreview)
+ if !reflect.DeepEqual(v, opt) {
+ t.Errorf("Request body = %+v, want %+v", v, opt)
+ }
+
+ fmt.Fprint(w, `{"url":"u", "state":"pending"}`)
+ })
+
+ membership, _, err := client.Organizations.AddTeamMembership(1, "u", opt)
+ if err != nil {
+ t.Errorf("Organizations.AddTeamMembership returned error: %v", err)
+ }
+
+ want := &Membership{URL: String("u"), State: String("pending")}
+ if !reflect.DeepEqual(membership, want) {
+ t.Errorf("Organizations.AddTeamMembership returned %+v, want %+v", membership, want)
+ }
+}
+
+func TestOrganizationsService_RemoveTeamMembership(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/teams/1/memberships/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ _, err := client.Organizations.RemoveTeamMembership(1, "u")
+ if err != nil {
+ t.Errorf("Organizations.RemoveTeamMembership returned error: %v", err)
+ }
+}
+
+func TestOrganizationsService_ListUserTeams(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/teams", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "1"})
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &ListOptions{Page: 1}
+ teams, _, err := client.Organizations.ListUserTeams(opt)
+ if err != nil {
+ t.Errorf("Organizations.ListUserTeams returned error: %v", err)
+ }
+
+ want := []Team{{ID: Int(1)}}
+ if !reflect.DeepEqual(teams, want) {
+ t.Errorf("Organizations.ListUserTeams returned %+v, want %+v", teams, want)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/orgs_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/orgs_test.go
new file mode 100644
index 000000000..84ebc5468
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/orgs_test.go
@@ -0,0 +1,120 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestOrganizationsService_List_authenticatedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/orgs", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{"id":1},{"id":2}]`)
+ })
+
+ orgs, _, err := client.Organizations.List("", nil)
+ if err != nil {
+ t.Errorf("Organizations.List returned error: %v", err)
+ }
+
+ want := []Organization{{ID: Int(1)}, {ID: Int(2)}}
+ if !reflect.DeepEqual(orgs, want) {
+ t.Errorf("Organizations.List returned %+v, want %+v", orgs, want)
+ }
+}
+
+func TestOrganizationsService_List_specifiedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/orgs", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"id":1},{"id":2}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ orgs, _, err := client.Organizations.List("u", opt)
+ if err != nil {
+ t.Errorf("Organizations.List returned error: %v", err)
+ }
+
+ want := []Organization{{ID: Int(1)}, {ID: Int(2)}}
+ if !reflect.DeepEqual(orgs, want) {
+ t.Errorf("Organizations.List returned %+v, want %+v", orgs, want)
+ }
+}
+
+func TestOrganizationsService_List_invalidUser(t *testing.T) {
+ _, _, err := client.Organizations.List("%", nil)
+ testURLParseError(t, err)
+}
+
+func TestOrganizationsService_Get(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id":1, "login":"l", "url":"u", "avatar_url": "a", "location":"l"}`)
+ })
+
+ org, _, err := client.Organizations.Get("o")
+ if err != nil {
+ t.Errorf("Organizations.Get returned error: %v", err)
+ }
+
+ want := &Organization{ID: Int(1), Login: String("l"), URL: String("u"), AvatarURL: String("a"), Location: String("l")}
+ if !reflect.DeepEqual(org, want) {
+ t.Errorf("Organizations.Get returned %+v, want %+v", org, want)
+ }
+}
+
+func TestOrganizationsService_Get_invalidOrg(t *testing.T) {
+ _, _, err := client.Organizations.Get("%")
+ testURLParseError(t, err)
+}
+
+func TestOrganizationsService_Edit(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Organization{Login: String("l")}
+
+ mux.HandleFunc("/orgs/o", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Organization)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ org, _, err := client.Organizations.Edit("o", input)
+ if err != nil {
+ t.Errorf("Organizations.Edit returned error: %v", err)
+ }
+
+ want := &Organization{ID: Int(1)}
+ if !reflect.DeepEqual(org, want) {
+ t.Errorf("Organizations.Edit returned %+v, want %+v", org, want)
+ }
+}
+
+func TestOrganizationsService_Edit_invalidOrg(t *testing.T) {
+ _, _, err := client.Organizations.Edit("%", nil)
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/pulls_comments_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/pulls_comments_test.go
new file mode 100644
index 000000000..7885ab158
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/pulls_comments_test.go
@@ -0,0 +1,189 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestPullRequestsService_ListComments_allPulls(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/pulls/comments", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "sort": "updated",
+ "direction": "desc",
+ "since": "2002-02-10T15:30:00Z",
+ "page": "2",
+ })
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &PullRequestListCommentsOptions{
+ Sort: "updated",
+ Direction: "desc",
+ Since: time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC),
+ ListOptions: ListOptions{Page: 2},
+ }
+ pulls, _, err := client.PullRequests.ListComments("o", "r", 0, opt)
+
+ if err != nil {
+ t.Errorf("PullRequests.ListComments returned error: %v", err)
+ }
+
+ want := []PullRequestComment{{ID: Int(1)}}
+ if !reflect.DeepEqual(pulls, want) {
+ t.Errorf("PullRequests.ListComments returned %+v, want %+v", pulls, want)
+ }
+}
+
+func TestPullRequestsService_ListComments_specificPull(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/pulls/1/comments", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ pulls, _, err := client.PullRequests.ListComments("o", "r", 1, nil)
+
+ if err != nil {
+ t.Errorf("PullRequests.ListComments returned error: %v", err)
+ }
+
+ want := []PullRequestComment{{ID: Int(1)}}
+ if !reflect.DeepEqual(pulls, want) {
+ t.Errorf("PullRequests.ListComments returned %+v, want %+v", pulls, want)
+ }
+}
+
+func TestPullRequestsService_ListComments_invalidOwner(t *testing.T) {
+ _, _, err := client.PullRequests.ListComments("%", "r", 1, nil)
+ testURLParseError(t, err)
+}
+
+func TestPullRequestsService_GetComment(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/pulls/comments/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ comment, _, err := client.PullRequests.GetComment("o", "r", 1)
+
+ if err != nil {
+ t.Errorf("PullRequests.GetComment returned error: %v", err)
+ }
+
+ want := &PullRequestComment{ID: Int(1)}
+ if !reflect.DeepEqual(comment, want) {
+ t.Errorf("PullRequests.GetComment returned %+v, want %+v", comment, want)
+ }
+}
+
+func TestPullRequestsService_GetComment_invalidOwner(t *testing.T) {
+ _, _, err := client.PullRequests.GetComment("%", "r", 1)
+ testURLParseError(t, err)
+}
+
+func TestPullRequestsService_CreateComment(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &PullRequestComment{Body: String("b")}
+
+ mux.HandleFunc("/repos/o/r/pulls/1/comments", func(w http.ResponseWriter, r *http.Request) {
+ v := new(PullRequestComment)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ comment, _, err := client.PullRequests.CreateComment("o", "r", 1, input)
+
+ if err != nil {
+ t.Errorf("PullRequests.CreateComment returned error: %v", err)
+ }
+
+ want := &PullRequestComment{ID: Int(1)}
+ if !reflect.DeepEqual(comment, want) {
+ t.Errorf("PullRequests.CreateComment returned %+v, want %+v", comment, want)
+ }
+}
+
+func TestPullRequestsService_CreateComment_invalidOwner(t *testing.T) {
+ _, _, err := client.PullRequests.CreateComment("%", "r", 1, nil)
+ testURLParseError(t, err)
+}
+
+func TestPullRequestsService_EditComment(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &PullRequestComment{Body: String("b")}
+
+ mux.HandleFunc("/repos/o/r/pulls/comments/1", func(w http.ResponseWriter, r *http.Request) {
+ v := new(PullRequestComment)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ comment, _, err := client.PullRequests.EditComment("o", "r", 1, input)
+
+ if err != nil {
+ t.Errorf("PullRequests.EditComment returned error: %v", err)
+ }
+
+ want := &PullRequestComment{ID: Int(1)}
+ if !reflect.DeepEqual(comment, want) {
+ t.Errorf("PullRequests.EditComment returned %+v, want %+v", comment, want)
+ }
+}
+
+func TestPullRequestsService_EditComment_invalidOwner(t *testing.T) {
+ _, _, err := client.PullRequests.EditComment("%", "r", 1, nil)
+ testURLParseError(t, err)
+}
+
+func TestPullRequestsService_DeleteComment(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/pulls/comments/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.PullRequests.DeleteComment("o", "r", 1)
+ if err != nil {
+ t.Errorf("PullRequests.DeleteComment returned error: %v", err)
+ }
+}
+
+func TestPullRequestsService_DeleteComment_invalidOwner(t *testing.T) {
+ _, err := client.PullRequests.DeleteComment("%", "r", 1)
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/pulls_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/pulls_test.go
new file mode 100644
index 000000000..6ac0ddb17
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/pulls_test.go
@@ -0,0 +1,365 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestPullRequestsService_List(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/pulls", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "state": "closed",
+ "head": "h",
+ "base": "b",
+ "sort": "created",
+ "direction": "desc",
+ "page": "2",
+ })
+ fmt.Fprint(w, `[{"number":1}]`)
+ })
+
+ opt := &PullRequestListOptions{State: "closed", Head: "h", Base: "b", Sort: "created", Direction: "desc", ListOptions: ListOptions{Page: 2}}
+ pulls, _, err := client.PullRequests.List("o", "r", opt)
+
+ if err != nil {
+ t.Errorf("PullRequests.List returned error: %v", err)
+ }
+
+ want := []PullRequest{{Number: Int(1)}}
+ if !reflect.DeepEqual(pulls, want) {
+ t.Errorf("PullRequests.List returned %+v, want %+v", pulls, want)
+ }
+}
+
+func TestPullRequestsService_List_invalidOwner(t *testing.T) {
+ _, _, err := client.PullRequests.List("%", "r", nil)
+ testURLParseError(t, err)
+}
+
+func TestPullRequestsService_Get(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/pulls/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"number":1}`)
+ })
+
+ pull, _, err := client.PullRequests.Get("o", "r", 1)
+
+ if err != nil {
+ t.Errorf("PullRequests.Get returned error: %v", err)
+ }
+
+ want := &PullRequest{Number: Int(1)}
+ if !reflect.DeepEqual(pull, want) {
+ t.Errorf("PullRequests.Get returned %+v, want %+v", pull, want)
+ }
+}
+
+func TestPullRequestsService_Get_headAndBase(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/pulls/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"number":1,"head":{"ref":"r2","repo":{"id":2}},"base":{"ref":"r1","repo":{"id":1}}}`)
+ })
+
+ pull, _, err := client.PullRequests.Get("o", "r", 1)
+
+ if err != nil {
+ t.Errorf("PullRequests.Get returned error: %v", err)
+ }
+
+ want := &PullRequest{
+ Number: Int(1),
+ Head: &PullRequestBranch{
+ Ref: String("r2"),
+ Repo: &Repository{ID: Int(2)},
+ },
+ Base: &PullRequestBranch{
+ Ref: String("r1"),
+ Repo: &Repository{ID: Int(1)},
+ },
+ }
+ if !reflect.DeepEqual(pull, want) {
+ t.Errorf("PullRequests.Get returned %+v, want %+v", pull, want)
+ }
+}
+
+func TestPullRequestsService_Get_DiffURLAndPatchURL(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/pulls/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"number":1,
+ "diff_url": "https://github.com/octocat/Hello-World/pull/1347.diff",
+ "patch_url": "https://github.com/octocat/Hello-World/pull/1347.patch"}`)
+ })
+
+ pull, _, err := client.PullRequests.Get("o", "r", 1)
+
+ if err != nil {
+ t.Errorf("PullRequests.Get returned error: %v", err)
+ }
+
+ want := &PullRequest{Number: Int(1), DiffURL: String("https://github.com/octocat/Hello-World/pull/1347.diff"), PatchURL: String("https://github.com/octocat/Hello-World/pull/1347.patch")}
+ if !reflect.DeepEqual(pull, want) {
+ t.Errorf("PullRequests.Get returned %+v, want %+v", pull, want)
+ }
+}
+
+func TestPullRequestsService_Get_invalidOwner(t *testing.T) {
+ _, _, err := client.PullRequests.Get("%", "r", 1)
+ testURLParseError(t, err)
+}
+
+func TestPullRequestsService_Create(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &NewPullRequest{Title: String("t")}
+
+ mux.HandleFunc("/repos/o/r/pulls", func(w http.ResponseWriter, r *http.Request) {
+ v := new(NewPullRequest)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"number":1}`)
+ })
+
+ pull, _, err := client.PullRequests.Create("o", "r", input)
+ if err != nil {
+ t.Errorf("PullRequests.Create returned error: %v", err)
+ }
+
+ want := &PullRequest{Number: Int(1)}
+ if !reflect.DeepEqual(pull, want) {
+ t.Errorf("PullRequests.Create returned %+v, want %+v", pull, want)
+ }
+}
+
+func TestPullRequestsService_Create_invalidOwner(t *testing.T) {
+ _, _, err := client.PullRequests.Create("%", "r", nil)
+ testURLParseError(t, err)
+}
+
+func TestPullRequestsService_Edit(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &PullRequest{Title: String("t")}
+
+ mux.HandleFunc("/repos/o/r/pulls/1", func(w http.ResponseWriter, r *http.Request) {
+ v := new(PullRequest)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"number":1}`)
+ })
+
+ pull, _, err := client.PullRequests.Edit("o", "r", 1, input)
+ if err != nil {
+ t.Errorf("PullRequests.Edit returned error: %v", err)
+ }
+
+ want := &PullRequest{Number: Int(1)}
+ if !reflect.DeepEqual(pull, want) {
+ t.Errorf("PullRequests.Edit returned %+v, want %+v", pull, want)
+ }
+}
+
+func TestPullRequestsService_Edit_invalidOwner(t *testing.T) {
+ _, _, err := client.PullRequests.Edit("%", "r", 1, nil)
+ testURLParseError(t, err)
+}
+
+func TestPullRequestsService_ListCommits(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/pulls/1/commits", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `
+ [
+ {
+ "sha": "3",
+ "parents": [
+ {
+ "sha": "2"
+ }
+ ]
+ },
+ {
+ "sha": "2",
+ "parents": [
+ {
+ "sha": "1"
+ }
+ ]
+ }
+ ]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ commits, _, err := client.PullRequests.ListCommits("o", "r", 1, opt)
+ if err != nil {
+ t.Errorf("PullRequests.ListCommits returned error: %v", err)
+ }
+
+ want := []RepositoryCommit{
+ {
+ SHA: String("3"),
+ Parents: []Commit{
+ {
+ SHA: String("2"),
+ },
+ },
+ },
+ {
+ SHA: String("2"),
+ Parents: []Commit{
+ {
+ SHA: String("1"),
+ },
+ },
+ },
+ }
+ if !reflect.DeepEqual(commits, want) {
+ t.Errorf("PullRequests.ListCommits returned %+v, want %+v", commits, want)
+ }
+}
+
+func TestPullRequestsService_ListFiles(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/pulls/1/files", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `
+ [
+ {
+ "sha": "6dcb09b5b57875f334f61aebed695e2e4193db5e",
+ "filename": "file1.txt",
+ "status": "added",
+ "additions": 103,
+ "deletions": 21,
+ "changes": 124,
+ "patch": "@@ -132,7 +132,7 @@ module Test @@ -1000,7 +1000,7 @@ module Test"
+ },
+ {
+ "sha": "f61aebed695e2e4193db5e6dcb09b5b57875f334",
+ "filename": "file2.txt",
+ "status": "modified",
+ "additions": 5,
+ "deletions": 3,
+ "changes": 103,
+ "patch": "@@ -132,7 +132,7 @@ module Test @@ -1000,7 +1000,7 @@ module Test"
+ }
+ ]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ commitFiles, _, err := client.PullRequests.ListFiles("o", "r", 1, opt)
+ if err != nil {
+ t.Errorf("PullRequests.ListFiles returned error: %v", err)
+ }
+
+ want := []CommitFile{
+ {
+ SHA: String("6dcb09b5b57875f334f61aebed695e2e4193db5e"),
+ Filename: String("file1.txt"),
+ Additions: Int(103),
+ Deletions: Int(21),
+ Changes: Int(124),
+ Status: String("added"),
+ Patch: String("@@ -132,7 +132,7 @@ module Test @@ -1000,7 +1000,7 @@ module Test"),
+ },
+ {
+ SHA: String("f61aebed695e2e4193db5e6dcb09b5b57875f334"),
+ Filename: String("file2.txt"),
+ Additions: Int(5),
+ Deletions: Int(3),
+ Changes: Int(103),
+ Status: String("modified"),
+ Patch: String("@@ -132,7 +132,7 @@ module Test @@ -1000,7 +1000,7 @@ module Test"),
+ },
+ }
+
+ if !reflect.DeepEqual(commitFiles, want) {
+ t.Errorf("PullRequests.ListFiles returned %+v, want %+v", commitFiles, want)
+ }
+}
+
+func TestPullRequestsService_IsMerged(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/pulls/1/merge", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ isMerged, _, err := client.PullRequests.IsMerged("o", "r", 1)
+ if err != nil {
+ t.Errorf("PullRequests.IsMerged returned error: %v", err)
+ }
+
+ want := true
+ if !reflect.DeepEqual(isMerged, want) {
+ t.Errorf("PullRequests.IsMerged returned %+v, want %+v", isMerged, want)
+ }
+}
+
+func TestPullRequestsService_Merge(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/pulls/1/merge", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "PUT")
+ fmt.Fprint(w, `
+ {
+ "sha": "6dcb09b5b57875f334f61aebed695e2e4193db5e",
+ "merged": true,
+ "message": "Pull Request successfully merged"
+ }`)
+ })
+
+ merge, _, err := client.PullRequests.Merge("o", "r", 1, "merging pull request")
+ if err != nil {
+ t.Errorf("PullRequests.Merge returned error: %v", err)
+ }
+
+ want := &PullRequestMergeResult{
+ SHA: String("6dcb09b5b57875f334f61aebed695e2e4193db5e"),
+ Merged: Bool(true),
+ Message: String("Pull Request successfully merged"),
+ }
+ if !reflect.DeepEqual(merge, want) {
+ t.Errorf("PullRequests.Merge returned %+v, want %+v", merge, want)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/repos_collaborators_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/repos_collaborators_test.go
new file mode 100644
index 000000000..ee6c4989e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/repos_collaborators_test.go
@@ -0,0 +1,135 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestRepositoriesService_ListCollaborators(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/collaborators", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testHeader(t, r, "Accept", mediaTypeOrgPermissionPreview)
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprintf(w, `[{"id":1}, {"id":2}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ users, _, err := client.Repositories.ListCollaborators("o", "r", opt)
+ if err != nil {
+ t.Errorf("Repositories.ListCollaborators returned error: %v", err)
+ }
+
+ want := []User{{ID: Int(1)}, {ID: Int(2)}}
+ if !reflect.DeepEqual(users, want) {
+ t.Errorf("Repositories.ListCollaborators returned %+v, want %+v", users, want)
+ }
+}
+
+func TestRepositoriesService_ListCollaborators_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.ListCollaborators("%", "%", nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_IsCollaborator_True(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/collaborators/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ isCollab, _, err := client.Repositories.IsCollaborator("o", "r", "u")
+ if err != nil {
+ t.Errorf("Repositories.IsCollaborator returned error: %v", err)
+ }
+
+ if !isCollab {
+ t.Errorf("Repositories.IsCollaborator returned false, want true")
+ }
+}
+
+func TestRepositoriesService_IsCollaborator_False(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/collaborators/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusNotFound)
+ })
+
+ isCollab, _, err := client.Repositories.IsCollaborator("o", "r", "u")
+ if err != nil {
+ t.Errorf("Repositories.IsCollaborator returned error: %v", err)
+ }
+
+ if isCollab {
+ t.Errorf("Repositories.IsCollaborator returned true, want false")
+ }
+}
+
+func TestRepositoriesService_IsCollaborator_invalidUser(t *testing.T) {
+ _, _, err := client.Repositories.IsCollaborator("%", "%", "%")
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_AddCollaborator(t *testing.T) {
+ setup()
+ defer teardown()
+
+ opt := &RepositoryAddCollaboratorOptions{Permission: "admin"}
+
+ mux.HandleFunc("/repos/o/r/collaborators/u", func(w http.ResponseWriter, r *http.Request) {
+ v := new(RepositoryAddCollaboratorOptions)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PUT")
+ testHeader(t, r, "Accept", mediaTypeOrgPermissionPreview)
+ if !reflect.DeepEqual(v, opt) {
+ t.Errorf("Request body = %+v, want %+v", v, opt)
+ }
+
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ _, err := client.Repositories.AddCollaborator("o", "r", "u", opt)
+ if err != nil {
+ t.Errorf("Repositories.AddCollaborator returned error: %v", err)
+ }
+}
+
+func TestRepositoriesService_AddCollaborator_invalidUser(t *testing.T) {
+ _, err := client.Repositories.AddCollaborator("%", "%", "%", nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_RemoveCollaborator(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/collaborators/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ _, err := client.Repositories.RemoveCollaborator("o", "r", "u")
+ if err != nil {
+ t.Errorf("Repositories.RemoveCollaborator returned error: %v", err)
+ }
+}
+
+func TestRepositoriesService_RemoveCollaborator_invalidUser(t *testing.T) {
+ _, err := client.Repositories.RemoveCollaborator("%", "%", "%")
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/repos_comments_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/repos_comments_test.go
new file mode 100644
index 000000000..b5a8786a9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/repos_comments_test.go
@@ -0,0 +1,180 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestRepositoriesService_ListComments(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/comments", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"id":1}, {"id":2}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ comments, _, err := client.Repositories.ListComments("o", "r", opt)
+ if err != nil {
+ t.Errorf("Repositories.ListComments returned error: %v", err)
+ }
+
+ want := []RepositoryComment{{ID: Int(1)}, {ID: Int(2)}}
+ if !reflect.DeepEqual(comments, want) {
+ t.Errorf("Repositories.ListComments returned %+v, want %+v", comments, want)
+ }
+}
+
+func TestRepositoriesService_ListComments_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.ListComments("%", "%", nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_ListCommitComments(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/commits/s/comments", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"id":1}, {"id":2}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ comments, _, err := client.Repositories.ListCommitComments("o", "r", "s", opt)
+ if err != nil {
+ t.Errorf("Repositories.ListCommitComments returned error: %v", err)
+ }
+
+ want := []RepositoryComment{{ID: Int(1)}, {ID: Int(2)}}
+ if !reflect.DeepEqual(comments, want) {
+ t.Errorf("Repositories.ListCommitComments returned %+v, want %+v", comments, want)
+ }
+}
+
+func TestRepositoriesService_ListCommitComments_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.ListCommitComments("%", "%", "%", nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_CreateComment(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &RepositoryComment{Body: String("b")}
+
+ mux.HandleFunc("/repos/o/r/commits/s/comments", func(w http.ResponseWriter, r *http.Request) {
+ v := new(RepositoryComment)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ comment, _, err := client.Repositories.CreateComment("o", "r", "s", input)
+ if err != nil {
+ t.Errorf("Repositories.CreateComment returned error: %v", err)
+ }
+
+ want := &RepositoryComment{ID: Int(1)}
+ if !reflect.DeepEqual(comment, want) {
+ t.Errorf("Repositories.CreateComment returned %+v, want %+v", comment, want)
+ }
+}
+
+func TestRepositoriesService_CreateComment_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.CreateComment("%", "%", "%", nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_GetComment(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/comments/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ comment, _, err := client.Repositories.GetComment("o", "r", 1)
+ if err != nil {
+ t.Errorf("Repositories.GetComment returned error: %v", err)
+ }
+
+ want := &RepositoryComment{ID: Int(1)}
+ if !reflect.DeepEqual(comment, want) {
+ t.Errorf("Repositories.GetComment returned %+v, want %+v", comment, want)
+ }
+}
+
+func TestRepositoriesService_GetComment_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.GetComment("%", "%", 1)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_UpdateComment(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &RepositoryComment{Body: String("b")}
+
+ mux.HandleFunc("/repos/o/r/comments/1", func(w http.ResponseWriter, r *http.Request) {
+ v := new(RepositoryComment)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ comment, _, err := client.Repositories.UpdateComment("o", "r", 1, input)
+ if err != nil {
+ t.Errorf("Repositories.UpdateComment returned error: %v", err)
+ }
+
+ want := &RepositoryComment{ID: Int(1)}
+ if !reflect.DeepEqual(comment, want) {
+ t.Errorf("Repositories.UpdateComment returned %+v, want %+v", comment, want)
+ }
+}
+
+func TestRepositoriesService_UpdateComment_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.UpdateComment("%", "%", 1, nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_DeleteComment(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/comments/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Repositories.DeleteComment("o", "r", 1)
+ if err != nil {
+ t.Errorf("Repositories.DeleteComment returned error: %v", err)
+ }
+}
+
+func TestRepositoriesService_DeleteComment_invalidOwner(t *testing.T) {
+ _, err := client.Repositories.DeleteComment("%", "%", 1)
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/repos_commits_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/repos_commits_test.go
new file mode 100644
index 000000000..56ba8a5e0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/repos_commits_test.go
@@ -0,0 +1,191 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestRepositoriesService_ListCommits(t *testing.T) {
+ setup()
+ defer teardown()
+
+ // given
+ mux.HandleFunc("/repos/o/r/commits", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r,
+ values{
+ "sha": "s",
+ "path": "p",
+ "author": "a",
+ "since": "2013-08-01T00:00:00Z",
+ "until": "2013-09-03T00:00:00Z",
+ })
+ fmt.Fprintf(w, `[{"sha": "s"}]`)
+ })
+
+ opt := &CommitsListOptions{
+ SHA: "s",
+ Path: "p",
+ Author: "a",
+ Since: time.Date(2013, time.August, 1, 0, 0, 0, 0, time.UTC),
+ Until: time.Date(2013, time.September, 3, 0, 0, 0, 0, time.UTC),
+ }
+ commits, _, err := client.Repositories.ListCommits("o", "r", opt)
+ if err != nil {
+ t.Errorf("Repositories.ListCommits returned error: %v", err)
+ }
+
+ want := []RepositoryCommit{{SHA: String("s")}}
+ if !reflect.DeepEqual(commits, want) {
+ t.Errorf("Repositories.ListCommits returned %+v, want %+v", commits, want)
+ }
+}
+
+func TestRepositoriesService_GetCommit(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/commits/s", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprintf(w, `{
+ "sha": "s",
+ "commit": { "message": "m" },
+ "author": { "login": "l" },
+ "committer": { "login": "l" },
+ "parents": [ { "sha": "s" } ],
+ "stats": { "additions": 104, "deletions": 4, "total": 108 },
+ "files": [
+ {
+ "filename": "f",
+ "additions": 10,
+ "deletions": 2,
+ "changes": 12,
+ "status": "s",
+ "raw_url": "r",
+ "blob_url": "b",
+ "patch": "p"
+ }
+ ]
+ }`)
+ })
+
+ commit, _, err := client.Repositories.GetCommit("o", "r", "s")
+ if err != nil {
+ t.Errorf("Repositories.GetCommit returned error: %v", err)
+ }
+
+ want := &RepositoryCommit{
+ SHA: String("s"),
+ Commit: &Commit{
+ Message: String("m"),
+ },
+ Author: &User{
+ Login: String("l"),
+ },
+ Committer: &User{
+ Login: String("l"),
+ },
+ Parents: []Commit{
+ {
+ SHA: String("s"),
+ },
+ },
+ Stats: &CommitStats{
+ Additions: Int(104),
+ Deletions: Int(4),
+ Total: Int(108),
+ },
+ Files: []CommitFile{
+ {
+ Filename: String("f"),
+ Additions: Int(10),
+ Deletions: Int(2),
+ Changes: Int(12),
+ Status: String("s"),
+ Patch: String("p"),
+ },
+ },
+ }
+ if !reflect.DeepEqual(commit, want) {
+ t.Errorf("Repositories.GetCommit returned \n%+v, want \n%+v", commit, want)
+ }
+}
+
+func TestRepositoriesService_CompareCommits(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/compare/b...h", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprintf(w, `{
+ "base_commit": {
+ "sha": "s",
+ "commit": {
+ "author": { "name": "n" },
+ "committer": { "name": "n" },
+ "message": "m",
+ "tree": { "sha": "t" }
+ },
+ "author": { "login": "n" },
+ "committer": { "login": "l" },
+ "parents": [ { "sha": "s" } ]
+ },
+ "status": "s",
+ "ahead_by": 1,
+ "behind_by": 2,
+ "total_commits": 1,
+ "commits": [
+ {
+ "sha": "s",
+ "commit": { "author": { "name": "n" } },
+ "author": { "login": "l" },
+ "committer": { "login": "l" },
+ "parents": [ { "sha": "s" } ]
+ }
+ ],
+ "files": [ { "filename": "f" } ]
+ }`)
+ })
+
+ got, _, err := client.Repositories.CompareCommits("o", "r", "b", "h")
+ if err != nil {
+ t.Errorf("Repositories.CompareCommits returned error: %v", err)
+ }
+
+ want := &CommitsComparison{
+ Status: String("s"),
+ AheadBy: Int(1),
+ BehindBy: Int(2),
+ TotalCommits: Int(1),
+ BaseCommit: &RepositoryCommit{
+ SHA: String("s"),
+ Commit: &Commit{
+ Author: &CommitAuthor{Name: String("n")},
+ Committer: &CommitAuthor{Name: String("n")},
+ Message: String("m"),
+ Tree: &Tree{SHA: String("t")},
+ },
+ Author: &User{Login: String("n")},
+ Committer: &User{Login: String("l")},
+ Parents: []Commit{{SHA: String("s")}},
+ },
+ Commits: []RepositoryCommit{
+ {
+ SHA: String("s"),
+ Commit: &Commit{Author: &CommitAuthor{Name: String("n")}},
+ Author: &User{Login: String("l")},
+ Committer: &User{Login: String("l")},
+ Parents: []Commit{{SHA: String("s")}},
+ },
+ },
+ Files: []CommitFile{
+ {
+ Filename: String("f"),
+ },
+ },
+ }
+
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("Repositories.CompareCommits returned \n%+v, want \n%+v", got, want)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/repos_contents_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/repos_contents_test.go
new file mode 100644
index 000000000..8ab3ecdae
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/repos_contents_test.go
@@ -0,0 +1,304 @@
+package github
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestDecode(t *testing.T) {
+ setup()
+ defer teardown()
+ r := RepositoryContent{Encoding: String("base64"), Content: String("aGVsbG8=")}
+ o, err := r.Decode()
+ if err != nil {
+ t.Errorf("Failed to decode content.")
+ }
+ want := "hello"
+ if string(o) != want {
+ t.Errorf("RepositoryContent.Decode returned %+v, want %+v", string(o), want)
+ }
+}
+
+func TestDecodeBadEncoding(t *testing.T) {
+ setup()
+ defer teardown()
+ r := RepositoryContent{Encoding: String("bad")}
+ _, err := r.Decode()
+ if err == nil {
+ t.Errorf("Should fail to decode non-base64")
+ }
+}
+
+func TestRepositoriesService_GetReadme(t *testing.T) {
+ setup()
+ defer teardown()
+ mux.HandleFunc("/repos/o/r/readme", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{
+ "type": "file",
+ "encoding": "base64",
+ "size": 5362,
+ "name": "README.md",
+ "path": "README.md"
+ }`)
+ })
+ readme, _, err := client.Repositories.GetReadme("o", "r", &RepositoryContentGetOptions{})
+ if err != nil {
+ t.Errorf("Repositories.GetReadme returned error: %v", err)
+ }
+ want := &RepositoryContent{Type: String("file"), Name: String("README.md"), Size: Int(5362), Encoding: String("base64"), Path: String("README.md")}
+ if !reflect.DeepEqual(readme, want) {
+ t.Errorf("Repositories.GetReadme returned %+v, want %+v", readme, want)
+ }
+}
+
+func TestRepositoriesService_DownloadContents_Success(t *testing.T) {
+ setup()
+ defer teardown()
+ mux.HandleFunc("/repos/o/r/contents/d", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{
+ "type": "file",
+ "name": "f",
+ "download_url": "`+server.URL+`/download/f"
+ }]`)
+ })
+ mux.HandleFunc("/download/f", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, "foo")
+ })
+
+ r, err := client.Repositories.DownloadContents("o", "r", "d/f", nil)
+ if err != nil {
+ t.Errorf("Repositories.DownloadContents returned error: %v", err)
+ }
+
+ bytes, err := ioutil.ReadAll(r)
+ if err != nil {
+ t.Errorf("Error reading response body: %v", err)
+ }
+ r.Close()
+
+ if got, want := string(bytes), "foo"; got != want {
+ t.Errorf("Repositories.DownloadContents returned %v, want %v", got, want)
+ }
+}
+
+func TestRepositoriesService_DownloadContents_NoDownloadURL(t *testing.T) {
+ setup()
+ defer teardown()
+ mux.HandleFunc("/repos/o/r/contents/d", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{
+ "type": "file",
+ "name": "f"
+ }]`)
+ })
+
+ _, err := client.Repositories.DownloadContents("o", "r", "d/f", nil)
+ if err == nil {
+ t.Errorf("Repositories.DownloadContents did not return expected error")
+ }
+}
+
+func TestRepositoriesService_DownloadContents_NoFile(t *testing.T) {
+ setup()
+ defer teardown()
+ mux.HandleFunc("/repos/o/r/contents/d", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[]`)
+ })
+
+ _, err := client.Repositories.DownloadContents("o", "r", "d/f", nil)
+ if err == nil {
+ t.Errorf("Repositories.DownloadContents did not return expected error")
+ }
+}
+
+func TestRepositoriesService_GetContents_File(t *testing.T) {
+ setup()
+ defer teardown()
+ mux.HandleFunc("/repos/o/r/contents/p", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{
+ "type": "file",
+ "encoding": "base64",
+ "size": 20678,
+ "name": "LICENSE",
+ "path": "LICENSE"
+ }`)
+ })
+ fileContents, _, _, err := client.Repositories.GetContents("o", "r", "p", &RepositoryContentGetOptions{})
+ if err != nil {
+ t.Errorf("Repositories.GetContents returned error: %v", err)
+ }
+ want := &RepositoryContent{Type: String("file"), Name: String("LICENSE"), Size: Int(20678), Encoding: String("base64"), Path: String("LICENSE")}
+ if !reflect.DeepEqual(fileContents, want) {
+ t.Errorf("Repositories.GetContents returned %+v, want %+v", fileContents, want)
+ }
+}
+
+func TestRepositoriesService_GetContents_Directory(t *testing.T) {
+ setup()
+ defer teardown()
+ mux.HandleFunc("/repos/o/r/contents/p", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{
+ "type": "dir",
+ "name": "lib",
+ "path": "lib"
+ },
+ {
+ "type": "file",
+ "size": 20678,
+ "name": "LICENSE",
+ "path": "LICENSE"
+ }]`)
+ })
+ _, directoryContents, _, err := client.Repositories.GetContents("o", "r", "p", &RepositoryContentGetOptions{})
+ if err != nil {
+ t.Errorf("Repositories.GetContents returned error: %v", err)
+ }
+ want := []*RepositoryContent{{Type: String("dir"), Name: String("lib"), Path: String("lib")},
+ {Type: String("file"), Name: String("LICENSE"), Size: Int(20678), Path: String("LICENSE")}}
+ if !reflect.DeepEqual(directoryContents, want) {
+ t.Errorf("Repositories.GetContents_Directory returned %+v, want %+v", directoryContents, want)
+ }
+}
+
+func TestRepositoriesService_CreateFile(t *testing.T) {
+ setup()
+ defer teardown()
+ mux.HandleFunc("/repos/o/r/contents/p", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "PUT")
+ fmt.Fprint(w, `{
+ "content":{
+ "name":"p"
+ },
+ "commit":{
+ "message":"m",
+ "sha":"f5f369044773ff9c6383c087466d12adb6fa0828"
+ }
+ }`)
+ })
+ message := "m"
+ content := []byte("c")
+ repositoryContentsOptions := &RepositoryContentFileOptions{
+ Message: &message,
+ Content: content,
+ Committer: &CommitAuthor{Name: String("n"), Email: String("e")},
+ }
+ createResponse, _, err := client.Repositories.CreateFile("o", "r", "p", repositoryContentsOptions)
+ if err != nil {
+ t.Errorf("Repositories.CreateFile returned error: %v", err)
+ }
+ want := &RepositoryContentResponse{
+ Content: &RepositoryContent{Name: String("p")},
+ Commit: Commit{
+ Message: String("m"),
+ SHA: String("f5f369044773ff9c6383c087466d12adb6fa0828"),
+ },
+ }
+ if !reflect.DeepEqual(createResponse, want) {
+ t.Errorf("Repositories.CreateFile returned %+v, want %+v", createResponse, want)
+ }
+}
+
+func TestRepositoriesService_UpdateFile(t *testing.T) {
+ setup()
+ defer teardown()
+ mux.HandleFunc("/repos/o/r/contents/p", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "PUT")
+ fmt.Fprint(w, `{
+ "content":{
+ "name":"p"
+ },
+ "commit":{
+ "message":"m",
+ "sha":"f5f369044773ff9c6383c087466d12adb6fa0828"
+ }
+ }`)
+ })
+ message := "m"
+ content := []byte("c")
+ sha := "f5f369044773ff9c6383c087466d12adb6fa0828"
+ repositoryContentsOptions := &RepositoryContentFileOptions{
+ Message: &message,
+ Content: content,
+ SHA: &sha,
+ Committer: &CommitAuthor{Name: String("n"), Email: String("e")},
+ }
+ updateResponse, _, err := client.Repositories.UpdateFile("o", "r", "p", repositoryContentsOptions)
+ if err != nil {
+ t.Errorf("Repositories.UpdateFile returned error: %v", err)
+ }
+ want := &RepositoryContentResponse{
+ Content: &RepositoryContent{Name: String("p")},
+ Commit: Commit{
+ Message: String("m"),
+ SHA: String("f5f369044773ff9c6383c087466d12adb6fa0828"),
+ },
+ }
+ if !reflect.DeepEqual(updateResponse, want) {
+ t.Errorf("Repositories.UpdateFile returned %+v, want %+v", updateResponse, want)
+ }
+}
+
+func TestRepositoriesService_DeleteFile(t *testing.T) {
+ setup()
+ defer teardown()
+ mux.HandleFunc("/repos/o/r/contents/p", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ fmt.Fprint(w, `{
+ "content": null,
+ "commit":{
+ "message":"m",
+ "sha":"f5f369044773ff9c6383c087466d12adb6fa0828"
+ }
+ }`)
+ })
+ message := "m"
+ sha := "f5f369044773ff9c6383c087466d12adb6fa0828"
+ repositoryContentsOptions := &RepositoryContentFileOptions{
+ Message: &message,
+ SHA: &sha,
+ Committer: &CommitAuthor{Name: String("n"), Email: String("e")},
+ }
+ deleteResponse, _, err := client.Repositories.DeleteFile("o", "r", "p", repositoryContentsOptions)
+ if err != nil {
+ t.Errorf("Repositories.DeleteFile returned error: %v", err)
+ }
+ want := &RepositoryContentResponse{
+ Content: nil,
+ Commit: Commit{
+ Message: String("m"),
+ SHA: String("f5f369044773ff9c6383c087466d12adb6fa0828"),
+ },
+ }
+ if !reflect.DeepEqual(deleteResponse, want) {
+ t.Errorf("Repositories.DeleteFile returned %+v, want %+v", deleteResponse, want)
+ }
+}
+
+func TestRepositoriesService_GetArchiveLink(t *testing.T) {
+ setup()
+ defer teardown()
+ mux.HandleFunc("/repos/o/r/tarball", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ http.Redirect(w, r, "http://github.com/a", http.StatusFound)
+ })
+ url, resp, err := client.Repositories.GetArchiveLink("o", "r", Tarball, &RepositoryContentGetOptions{})
+ if err != nil {
+ t.Errorf("Repositories.GetArchiveLink returned error: %v", err)
+ }
+ if resp.StatusCode != http.StatusFound {
+ t.Errorf("Repositories.GetArchiveLink returned status: %d, want %d", resp.StatusCode, http.StatusFound)
+ }
+ want := "http://github.com/a"
+ if url.String() != want {
+ t.Errorf("Repositories.GetArchiveLink returned %+v, want %+v", url.String(), want)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/repos_deployments_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/repos_deployments_test.go
new file mode 100644
index 000000000..161a07ccd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/repos_deployments_test.go
@@ -0,0 +1,87 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestRepositoriesService_ListDeployments(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/deployments", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"environment": "test"})
+ fmt.Fprint(w, `[{"id":1}, {"id":2}]`)
+ })
+
+ opt := &DeploymentsListOptions{Environment: "test"}
+ deployments, _, err := client.Repositories.ListDeployments("o", "r", opt)
+ if err != nil {
+ t.Errorf("Repositories.ListDeployments returned error: %v", err)
+ }
+
+ want := []Deployment{{ID: Int(1)}, {ID: Int(2)}}
+ if !reflect.DeepEqual(deployments, want) {
+ t.Errorf("Repositories.ListDeployments returned %+v, want %+v", deployments, want)
+ }
+}
+
+func TestRepositoriesService_CreateDeployment(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &DeploymentRequest{Ref: String("1111"), Task: String("deploy")}
+
+ mux.HandleFunc("/repos/o/r/deployments", func(w http.ResponseWriter, r *http.Request) {
+ v := new(DeploymentRequest)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"ref": "1111", "task": "deploy"}`)
+ })
+
+ deployment, _, err := client.Repositories.CreateDeployment("o", "r", input)
+ if err != nil {
+ t.Errorf("Repositories.CreateDeployment returned error: %v", err)
+ }
+
+ want := &Deployment{Ref: String("1111"), Task: String("deploy")}
+ if !reflect.DeepEqual(deployment, want) {
+ t.Errorf("Repositories.CreateDeployment returned %+v, want %+v", deployment, want)
+ }
+}
+
+func TestRepositoriesService_ListDeploymentStatuses(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/deployments/1/statuses", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"id":1}, {"id":2}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ statuses, _, err := client.Repositories.ListDeploymentStatuses("o", "r", 1, opt)
+ if err != nil {
+ t.Errorf("Repositories.ListDeploymentStatuses returned error: %v", err)
+ }
+
+ want := []DeploymentStatus{{ID: Int(1)}, {ID: Int(2)}}
+ if !reflect.DeepEqual(statuses, want) {
+ t.Errorf("Repositories.ListDeploymentStatuses returned %+v, want %+v", statuses, want)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/repos_forks_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/repos_forks_test.go
new file mode 100644
index 000000000..965a06639
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/repos_forks_test.go
@@ -0,0 +1,73 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestRepositoriesService_ListForks(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/forks", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "sort": "newest",
+ "page": "3",
+ })
+ fmt.Fprint(w, `[{"id":1},{"id":2}]`)
+ })
+
+ opt := &RepositoryListForksOptions{
+ Sort: "newest",
+ ListOptions: ListOptions{Page: 3},
+ }
+ repos, _, err := client.Repositories.ListForks("o", "r", opt)
+ if err != nil {
+ t.Errorf("Repositories.ListForks returned error: %v", err)
+ }
+
+ want := []Repository{{ID: Int(1)}, {ID: Int(2)}}
+ if !reflect.DeepEqual(repos, want) {
+ t.Errorf("Repositories.ListForks returned %+v, want %+v", repos, want)
+ }
+}
+
+func TestRepositoriesService_ListForks_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.ListForks("%", "r", nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_CreateFork(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/forks", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "POST")
+ testFormValues(t, r, values{"organization": "o"})
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ opt := &RepositoryCreateForkOptions{Organization: "o"}
+ repo, _, err := client.Repositories.CreateFork("o", "r", opt)
+ if err != nil {
+ t.Errorf("Repositories.CreateFork returned error: %v", err)
+ }
+
+ want := &Repository{ID: Int(1)}
+ if !reflect.DeepEqual(repo, want) {
+ t.Errorf("Repositories.CreateFork returned %+v, want %+v", repo, want)
+ }
+}
+
+func TestRepositoriesService_CreateFork_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.CreateFork("%", "r", nil)
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/repos_hooks_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/repos_hooks_test.go
new file mode 100644
index 000000000..c163a26b2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/repos_hooks_test.go
@@ -0,0 +1,187 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestRepositoriesService_CreateHook(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Hook{Name: String("t")}
+
+ mux.HandleFunc("/repos/o/r/hooks", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Hook)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ hook, _, err := client.Repositories.CreateHook("o", "r", input)
+ if err != nil {
+ t.Errorf("Repositories.CreateHook returned error: %v", err)
+ }
+
+ want := &Hook{ID: Int(1)}
+ if !reflect.DeepEqual(hook, want) {
+ t.Errorf("Repositories.CreateHook returned %+v, want %+v", hook, want)
+ }
+}
+
+func TestRepositoriesService_CreateHook_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.CreateHook("%", "%", nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_ListHooks(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/hooks", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"id":1}, {"id":2}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+
+ hooks, _, err := client.Repositories.ListHooks("o", "r", opt)
+ if err != nil {
+ t.Errorf("Repositories.ListHooks returned error: %v", err)
+ }
+
+ want := []Hook{{ID: Int(1)}, {ID: Int(2)}}
+ if !reflect.DeepEqual(hooks, want) {
+ t.Errorf("Repositories.ListHooks returned %+v, want %+v", hooks, want)
+ }
+}
+
+func TestRepositoriesService_ListHooks_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.ListHooks("%", "%", nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_GetHook(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/hooks/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ hook, _, err := client.Repositories.GetHook("o", "r", 1)
+ if err != nil {
+ t.Errorf("Repositories.GetHook returned error: %v", err)
+ }
+
+ want := &Hook{ID: Int(1)}
+ if !reflect.DeepEqual(hook, want) {
+ t.Errorf("Repositories.GetHook returned %+v, want %+v", hook, want)
+ }
+}
+
+func TestRepositoriesService_GetHook_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.GetHook("%", "%", 1)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_EditHook(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Hook{Name: String("t")}
+
+ mux.HandleFunc("/repos/o/r/hooks/1", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Hook)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ hook, _, err := client.Repositories.EditHook("o", "r", 1, input)
+ if err != nil {
+ t.Errorf("Repositories.EditHook returned error: %v", err)
+ }
+
+ want := &Hook{ID: Int(1)}
+ if !reflect.DeepEqual(hook, want) {
+ t.Errorf("Repositories.EditHook returned %+v, want %+v", hook, want)
+ }
+}
+
+func TestRepositoriesService_EditHook_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.EditHook("%", "%", 1, nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_DeleteHook(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/hooks/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Repositories.DeleteHook("o", "r", 1)
+ if err != nil {
+ t.Errorf("Repositories.DeleteHook returned error: %v", err)
+ }
+}
+
+func TestRepositoriesService_DeleteHook_invalidOwner(t *testing.T) {
+ _, err := client.Repositories.DeleteHook("%", "%", 1)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_PingHook(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/hooks/1/pings", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "POST")
+ })
+
+ _, err := client.Repositories.PingHook("o", "r", 1)
+ if err != nil {
+ t.Errorf("Repositories.PingHook returned error: %v", err)
+ }
+}
+
+func TestRepositoriesService_TestHook(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/hooks/1/tests", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "POST")
+ })
+
+ _, err := client.Repositories.TestHook("o", "r", 1)
+ if err != nil {
+ t.Errorf("Repositories.TestHook returned error: %v", err)
+ }
+}
+
+func TestRepositoriesService_TestHook_invalidOwner(t *testing.T) {
+ _, err := client.Repositories.TestHook("%", "%", 1)
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/repos_keys_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/repos_keys_test.go
new file mode 100644
index 000000000..dcf6c55e4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/repos_keys_test.go
@@ -0,0 +1,153 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestRepositoriesService_ListKeys(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/keys", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ keys, _, err := client.Repositories.ListKeys("o", "r", opt)
+ if err != nil {
+ t.Errorf("Repositories.ListKeys returned error: %v", err)
+ }
+
+ want := []Key{{ID: Int(1)}}
+ if !reflect.DeepEqual(keys, want) {
+ t.Errorf("Repositories.ListKeys returned %+v, want %+v", keys, want)
+ }
+}
+
+func TestRepositoriesService_ListKeys_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.ListKeys("%", "%", nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_GetKey(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/keys/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ key, _, err := client.Repositories.GetKey("o", "r", 1)
+ if err != nil {
+ t.Errorf("Repositories.GetKey returned error: %v", err)
+ }
+
+ want := &Key{ID: Int(1)}
+ if !reflect.DeepEqual(key, want) {
+ t.Errorf("Repositories.GetKey returned %+v, want %+v", key, want)
+ }
+}
+
+func TestRepositoriesService_GetKey_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.GetKey("%", "%", 1)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_CreateKey(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Key{Key: String("k"), Title: String("t")}
+
+ mux.HandleFunc("/repos/o/r/keys", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Key)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ key, _, err := client.Repositories.CreateKey("o", "r", input)
+ if err != nil {
+ t.Errorf("Repositories.CreateKey returned error: %v", err)
+ }
+
+ want := &Key{ID: Int(1)}
+ if !reflect.DeepEqual(key, want) {
+ t.Errorf("Repositories.CreateKey returned %+v, want %+v", key, want)
+ }
+}
+
+func TestRepositoriesService_CreateKey_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.CreateKey("%", "%", nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_EditKey(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Key{Key: String("k"), Title: String("t")}
+
+ mux.HandleFunc("/repos/o/r/keys/1", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Key)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ key, _, err := client.Repositories.EditKey("o", "r", 1, input)
+ if err != nil {
+ t.Errorf("Repositories.EditKey returned error: %v", err)
+ }
+
+ want := &Key{ID: Int(1)}
+ if !reflect.DeepEqual(key, want) {
+ t.Errorf("Repositories.EditKey returned %+v, want %+v", key, want)
+ }
+}
+
+func TestRepositoriesService_EditKey_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.EditKey("%", "%", 1, nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_DeleteKey(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/keys/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Repositories.DeleteKey("o", "r", 1)
+ if err != nil {
+ t.Errorf("Repositories.DeleteKey returned error: %v", err)
+ }
+}
+
+func TestRepositoriesService_DeleteKey_invalidOwner(t *testing.T) {
+ _, err := client.Repositories.DeleteKey("%", "%", 1)
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/repos_merging_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/repos_merging_test.go
new file mode 100644
index 000000000..166c5e520
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/repos_merging_test.go
@@ -0,0 +1,47 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestRepositoriesService_Merge(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &RepositoryMergeRequest{
+ Base: String("b"),
+ Head: String("h"),
+ CommitMessage: String("c"),
+ }
+
+ mux.HandleFunc("/repos/o/r/merges", func(w http.ResponseWriter, r *http.Request) {
+ v := new(RepositoryMergeRequest)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"sha":"s"}`)
+ })
+
+ commit, _, err := client.Repositories.Merge("o", "r", input)
+ if err != nil {
+ t.Errorf("Repositories.Merge returned error: %v", err)
+ }
+
+ want := &RepositoryCommit{SHA: String("s")}
+ if !reflect.DeepEqual(commit, want) {
+ t.Errorf("Repositories.Merge returned %+v, want %+v", commit, want)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/repos_pages_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/repos_pages_test.go
new file mode 100644
index 000000000..4cbc43a17
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/repos_pages_test.go
@@ -0,0 +1,73 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestRepositoriesService_GetPagesInfo(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/pages", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"url":"u","status":"s","cname":"c","custom_404":false}`)
+ })
+
+ page, _, err := client.Repositories.GetPagesInfo("o", "r")
+ if err != nil {
+ t.Errorf("Repositories.GetPagesInfo returned error: %v", err)
+ }
+
+ want := &Pages{URL: String("u"), Status: String("s"), CNAME: String("c"), Custom404: Bool(false)}
+ if !reflect.DeepEqual(page, want) {
+ t.Errorf("Repositories.GetPagesInfo returned %+v, want %+v", page, want)
+ }
+}
+
+func TestRepositoriesService_ListPagesBuilds(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/pages/builds", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{"url":"u","status":"s","commit":"c"}]`)
+ })
+
+ pages, _, err := client.Repositories.ListPagesBuilds("o", "r")
+ if err != nil {
+ t.Errorf("Repositories.ListPagesBuilds returned error: %v", err)
+ }
+
+ want := []PagesBuild{{URL: String("u"), Status: String("s"), Commit: String("c")}}
+ if !reflect.DeepEqual(pages, want) {
+ t.Errorf("Repositories.ListPagesBuilds returned %+v, want %+v", pages, want)
+ }
+}
+
+func TestRepositoriesService_GetLatestPagesBuild(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/pages/builds/latest", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"url":"u","status":"s","commit":"c"}`)
+ })
+
+ build, _, err := client.Repositories.GetLatestPagesBuild("o", "r")
+ if err != nil {
+ t.Errorf("Repositories.GetLatestPagesBuild returned error: %v", err)
+ }
+
+ want := &PagesBuild{URL: String("u"), Status: String("s"), Commit: String("c")}
+ if !reflect.DeepEqual(build, want) {
+ t.Errorf("Repositories.GetLatestPagesBuild returned %+v, want %+v", build, want)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/repos_releases_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/repos_releases_test.go
new file mode 100644
index 000000000..5b0f094bc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/repos_releases_test.go
@@ -0,0 +1,337 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "reflect"
+ "testing"
+)
+
+func TestRepositoriesService_ListReleases(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/releases", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ releases, _, err := client.Repositories.ListReleases("o", "r", opt)
+ if err != nil {
+ t.Errorf("Repositories.ListReleases returned error: %v", err)
+ }
+ want := []RepositoryRelease{{ID: Int(1)}}
+ if !reflect.DeepEqual(releases, want) {
+ t.Errorf("Repositories.ListReleases returned %+v, want %+v", releases, want)
+ }
+}
+
+func TestRepositoriesService_GetRelease(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/releases/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ release, resp, err := client.Repositories.GetRelease("o", "r", 1)
+ if err != nil {
+ t.Errorf("Repositories.GetRelease returned error: %v\n%v", err, resp.Body)
+ }
+
+ want := &RepositoryRelease{ID: Int(1)}
+ if !reflect.DeepEqual(release, want) {
+ t.Errorf("Repositories.GetRelease returned %+v, want %+v", release, want)
+ }
+}
+
+func TestRepositoriesService_GetLatestRelease(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/releases/latest", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id":3}`)
+ })
+
+ release, resp, err := client.Repositories.GetLatestRelease("o", "r")
+ if err != nil {
+ t.Errorf("Repositories.GetLatestRelease returned error: %v\n%v", err, resp.Body)
+ }
+
+ want := &RepositoryRelease{ID: Int(3)}
+ if !reflect.DeepEqual(release, want) {
+ t.Errorf("Repositories.GetLatestRelease returned %+v, want %+v", release, want)
+ }
+}
+
+func TestRepositoriesService_GetReleaseByTag(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/releases/tags/foo", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id":13}`)
+ })
+
+ release, resp, err := client.Repositories.GetReleaseByTag("o", "r", "foo")
+ if err != nil {
+ t.Errorf("Repositories.GetReleaseByTag returned error: %v\n%v", err, resp.Body)
+ }
+
+ want := &RepositoryRelease{ID: Int(13)}
+ if !reflect.DeepEqual(release, want) {
+ t.Errorf("Repositories.GetReleaseByTag returned %+v, want %+v", release, want)
+ }
+}
+
+func TestRepositoriesService_CreateRelease(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &RepositoryRelease{Name: String("v1.0")}
+
+ mux.HandleFunc("/repos/o/r/releases", func(w http.ResponseWriter, r *http.Request) {
+ v := new(RepositoryRelease)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ release, _, err := client.Repositories.CreateRelease("o", "r", input)
+ if err != nil {
+ t.Errorf("Repositories.CreateRelease returned error: %v", err)
+ }
+
+ want := &RepositoryRelease{ID: Int(1)}
+ if !reflect.DeepEqual(release, want) {
+ t.Errorf("Repositories.CreateRelease returned %+v, want %+v", release, want)
+ }
+}
+
+func TestRepositoriesService_EditRelease(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &RepositoryRelease{Name: String("n")}
+
+ mux.HandleFunc("/repos/o/r/releases/1", func(w http.ResponseWriter, r *http.Request) {
+ v := new(RepositoryRelease)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ release, _, err := client.Repositories.EditRelease("o", "r", 1, input)
+ if err != nil {
+ t.Errorf("Repositories.EditRelease returned error: %v", err)
+ }
+ want := &RepositoryRelease{ID: Int(1)}
+ if !reflect.DeepEqual(release, want) {
+		t.Errorf("Repositories.EditRelease returned %+v, want %+v", release, want)
+ }
+}
+
+func TestRepositoriesService_DeleteRelease(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/releases/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Repositories.DeleteRelease("o", "r", 1)
+ if err != nil {
+ t.Errorf("Repositories.DeleteRelease returned error: %v", err)
+ }
+}
+
+func TestRepositoriesService_ListReleaseAssets(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/releases/1/assets", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ assets, _, err := client.Repositories.ListReleaseAssets("o", "r", 1, opt)
+ if err != nil {
+ t.Errorf("Repositories.ListReleaseAssets returned error: %v", err)
+ }
+ want := []ReleaseAsset{{ID: Int(1)}}
+ if !reflect.DeepEqual(assets, want) {
+ t.Errorf("Repositories.ListReleaseAssets returned %+v, want %+v", assets, want)
+ }
+}
+
+func TestRepositoriesService_GetReleaseAsset(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ asset, _, err := client.Repositories.GetReleaseAsset("o", "r", 1)
+ if err != nil {
+ t.Errorf("Repositories.GetReleaseAsset returned error: %v", err)
+ }
+ want := &ReleaseAsset{ID: Int(1)}
+ if !reflect.DeepEqual(asset, want) {
+ t.Errorf("Repositories.GetReleaseAsset returned %+v, want %+v", asset, want)
+ }
+}
+
+func TestRepositoriesService_DownloadReleaseAsset_Stream(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testHeader(t, r, "Accept", defaultMediaType)
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Disposition", "attachment; filename=hello-world.txt")
+ fmt.Fprint(w, "Hello World")
+ })
+
+ reader, err := client.Repositories.DownloadReleaseAsset("o", "r", 1)
+ if err != nil {
+ t.Errorf("Repositories.DownloadReleaseAsset returned error: %v", err)
+ }
+ want := []byte("Hello World")
+ content, err := ioutil.ReadAll(reader)
+ if err != nil {
+ t.Errorf("Repositories.DownloadReleaseAsset returned bad reader: %v", err)
+ }
+ if !bytes.Equal(want, content) {
+ t.Errorf("Repositories.DownloadReleaseAsset returned %+v, want %+v", content, want)
+ }
+}
+
+func TestRepositoriesService_DownloadReleaseAsset_Redirect(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testHeader(t, r, "Accept", defaultMediaType)
+ w.Header().Set("Location", server.URL+"/github-cloud/releases/1/hello-world.txt")
+ w.WriteHeader(http.StatusFound)
+ })
+
+ mux.HandleFunc("/github-cloud/releases/1/hello-world.txt", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Disposition", "attachment; filename=hello-world.txt")
+ fmt.Fprint(w, "Hello World")
+ })
+
+ reader, err := client.Repositories.DownloadReleaseAsset("o", "r", 1)
+ if err != nil {
+ t.Errorf("Repositories.DownloadReleaseAsset returned error: %v", err)
+ }
+ want := []byte("Hello World")
+ content, err := ioutil.ReadAll(reader)
+ if err != nil {
+ t.Errorf("Repositories.DownloadReleaseAsset returned bad reader: %v", err)
+ }
+ if !bytes.Equal(want, content) {
+ t.Errorf("Repositories.DownloadReleaseAsset returned %+v, want %+v", content, want)
+ }
+}
+
+func TestRepositoriesService_EditReleaseAsset(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &ReleaseAsset{Name: String("n")}
+
+ mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) {
+ v := new(ReleaseAsset)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ asset, _, err := client.Repositories.EditReleaseAsset("o", "r", 1, input)
+ if err != nil {
+ t.Errorf("Repositories.EditReleaseAsset returned error: %v", err)
+ }
+ want := &ReleaseAsset{ID: Int(1)}
+ if !reflect.DeepEqual(asset, want) {
+ t.Errorf("Repositories.EditReleaseAsset returned = %+v, want %+v", asset, want)
+ }
+}
+
+func TestRepositoriesService_DeleteReleaseAsset(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Repositories.DeleteReleaseAsset("o", "r", 1)
+ if err != nil {
+ t.Errorf("Repositories.DeleteReleaseAsset returned error: %v", err)
+ }
+}
+
+func TestRepositoriesService_UploadReleaseAsset(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/releases/1/assets", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "POST")
+ testHeader(t, r, "Content-Type", "text/plain; charset=utf-8")
+ testHeader(t, r, "Content-Length", "12")
+ testFormValues(t, r, values{"name": "n"})
+ testBody(t, r, "Upload me !\n")
+
+ fmt.Fprintf(w, `{"id":1}`)
+ })
+
+ file, dir, err := openTestFile("upload.txt", "Upload me !\n")
+ if err != nil {
+ t.Fatalf("Unable to create temp file: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ opt := &UploadOptions{Name: "n"}
+ asset, _, err := client.Repositories.UploadReleaseAsset("o", "r", 1, opt, file)
+ if err != nil {
+		t.Errorf("Repositories.UploadReleaseAsset returned error: %v", err)
+ }
+ want := &ReleaseAsset{ID: Int(1)}
+ if !reflect.DeepEqual(asset, want) {
+		t.Errorf("Repositories.UploadReleaseAsset returned %+v, want %+v", asset, want)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/repos_stats_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/repos_stats_test.go
new file mode 100644
index 000000000..3f9fab5ca
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/repos_stats_test.go
@@ -0,0 +1,210 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestRepositoriesService_ListContributorsStats(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/stats/contributors", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+
+ fmt.Fprint(w, `
+[
+ {
+ "author": {
+ "id": 1
+ },
+ "total": 135,
+ "weeks": [
+ {
+ "w": 1367712000,
+ "a": 6898,
+ "d": 77,
+ "c": 10
+ }
+ ]
+ }
+]
+`)
+ })
+
+ stats, _, err := client.Repositories.ListContributorsStats("o", "r")
+ if err != nil {
+ t.Errorf("RepositoriesService.ListContributorsStats returned error: %v", err)
+ }
+
+ want := []ContributorStats{
+ {
+ Author: &Contributor{
+ ID: Int(1),
+ },
+ Total: Int(135),
+ Weeks: []WeeklyStats{
+ {
+ Week: &Timestamp{time.Date(2013, 05, 05, 00, 00, 00, 0, time.UTC).Local()},
+ Additions: Int(6898),
+ Deletions: Int(77),
+ Commits: Int(10),
+ },
+ },
+ },
+ }
+
+ if !reflect.DeepEqual(stats, want) {
+ t.Errorf("RepositoriesService.ListContributorsStats returned %+v, want %+v", stats, want)
+ }
+}
+
+func TestRepositoriesService_ListCommitActivity(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/stats/commit_activity", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+
+ fmt.Fprint(w, `
+[
+ {
+ "days": [0, 3, 26, 20, 39, 1, 0],
+ "total": 89,
+ "week": 1336280400
+ }
+]
+`)
+ })
+
+ activity, _, err := client.Repositories.ListCommitActivity("o", "r")
+ if err != nil {
+ t.Errorf("RepositoriesService.ListCommitActivity returned error: %v", err)
+ }
+
+ want := []WeeklyCommitActivity{
+ {
+ Days: []int{0, 3, 26, 20, 39, 1, 0},
+ Total: Int(89),
+ Week: &Timestamp{time.Date(2012, 05, 06, 05, 00, 00, 0, time.UTC).Local()},
+ },
+ }
+
+ if !reflect.DeepEqual(activity, want) {
+ t.Errorf("RepositoriesService.ListCommitActivity returned %+v, want %+v", activity, want)
+ }
+}
+
+func TestRepositoriesService_ListCodeFrequency(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/stats/code_frequency", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+
+ fmt.Fprint(w, `[[1302998400, 1124, -435]]`)
+ })
+
+ code, _, err := client.Repositories.ListCodeFrequency("o", "r")
+ if err != nil {
+ t.Errorf("RepositoriesService.ListCodeFrequency returned error: %v", err)
+ }
+
+ want := []WeeklyStats{{
+ Week: &Timestamp{time.Date(2011, 04, 17, 00, 00, 00, 0, time.UTC).Local()},
+ Additions: Int(1124),
+ Deletions: Int(-435),
+ }}
+
+ if !reflect.DeepEqual(code, want) {
+ t.Errorf("RepositoriesService.ListCodeFrequency returned %+v, want %+v", code, want)
+ }
+}
+
+func TestRepositoriesService_Participation(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/stats/participation", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+
+ fmt.Fprint(w, `
+{
+ "all": [
+ 11,21,15,2,8,1,8,23,17,21,11,10,33,
+ 91,38,34,22,23,32,3,43,87,71,18,13,5,
+ 13,16,66,27,12,45,110,117,13,8,18,9,19,
+ 26,39,12,20,31,46,91,45,10,24,9,29,7
+ ],
+ "owner": [
+ 3,2,3,0,2,0,5,14,7,9,1,5,0,
+ 48,19,2,0,1,10,2,23,40,35,8,8,2,
+ 10,6,30,0,2,9,53,104,3,3,10,4,7,
+ 11,21,4,4,22,26,63,11,2,14,1,10,3
+ ]
+}
+`)
+ })
+
+ participation, _, err := client.Repositories.ListParticipation("o", "r")
+ if err != nil {
+ t.Errorf("RepositoriesService.ListParticipation returned error: %v", err)
+ }
+
+ want := &RepositoryParticipation{
+ All: []int{
+ 11, 21, 15, 2, 8, 1, 8, 23, 17, 21, 11, 10, 33,
+ 91, 38, 34, 22, 23, 32, 3, 43, 87, 71, 18, 13, 5,
+ 13, 16, 66, 27, 12, 45, 110, 117, 13, 8, 18, 9, 19,
+ 26, 39, 12, 20, 31, 46, 91, 45, 10, 24, 9, 29, 7,
+ },
+ Owner: []int{
+ 3, 2, 3, 0, 2, 0, 5, 14, 7, 9, 1, 5, 0,
+ 48, 19, 2, 0, 1, 10, 2, 23, 40, 35, 8, 8, 2,
+ 10, 6, 30, 0, 2, 9, 53, 104, 3, 3, 10, 4, 7,
+ 11, 21, 4, 4, 22, 26, 63, 11, 2, 14, 1, 10, 3,
+ },
+ }
+
+ if !reflect.DeepEqual(participation, want) {
+ t.Errorf("RepositoriesService.ListParticipation returned %+v, want %+v", participation, want)
+ }
+}
+
+func TestRepositoriesService_ListPunchCard(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/stats/punch_card", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+
+ fmt.Fprint(w, `[
+ [0, 0, 5],
+ [0, 1, 43],
+ [0, 2, 21]
+ ]`)
+ })
+
+ card, _, err := client.Repositories.ListPunchCard("o", "r")
+ if err != nil {
+ t.Errorf("RepositoriesService.ListPunchCard returned error: %v", err)
+ }
+
+ want := []PunchCard{
+ {Day: Int(0), Hour: Int(0), Commits: Int(5)},
+ {Day: Int(0), Hour: Int(1), Commits: Int(43)},
+ {Day: Int(0), Hour: Int(2), Commits: Int(21)},
+ }
+
+ if !reflect.DeepEqual(card, want) {
+ t.Errorf("RepositoriesService.ListPunchCard returned %+v, want %+v", card, want)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/repos_statuses_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/repos_statuses_test.go
new file mode 100644
index 000000000..8b230528c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/repos_statuses_test.go
@@ -0,0 +1,96 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestRepositoriesService_ListStatuses(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/commits/r/statuses", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ statuses, _, err := client.Repositories.ListStatuses("o", "r", "r", opt)
+ if err != nil {
+ t.Errorf("Repositories.ListStatuses returned error: %v", err)
+ }
+
+ want := []RepoStatus{{ID: Int(1)}}
+ if !reflect.DeepEqual(statuses, want) {
+ t.Errorf("Repositories.ListStatuses returned %+v, want %+v", statuses, want)
+ }
+}
+
+func TestRepositoriesService_ListStatuses_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.ListStatuses("%", "r", "r", nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_CreateStatus(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &RepoStatus{State: String("s"), TargetURL: String("t"), Description: String("d")}
+
+ mux.HandleFunc("/repos/o/r/statuses/r", func(w http.ResponseWriter, r *http.Request) {
+ v := new(RepoStatus)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ status, _, err := client.Repositories.CreateStatus("o", "r", "r", input)
+ if err != nil {
+ t.Errorf("Repositories.CreateStatus returned error: %v", err)
+ }
+
+ want := &RepoStatus{ID: Int(1)}
+ if !reflect.DeepEqual(status, want) {
+ t.Errorf("Repositories.CreateStatus returned %+v, want %+v", status, want)
+ }
+}
+
+func TestRepositoriesService_CreateStatus_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.CreateStatus("%", "r", "r", nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_GetCombinedStatus(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/commits/r/status", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `{"state":"success", "statuses":[{"id":1}]}`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ status, _, err := client.Repositories.GetCombinedStatus("o", "r", "r", opt)
+ if err != nil {
+ t.Errorf("Repositories.GetCombinedStatus returned error: %v", err)
+ }
+
+ want := &CombinedStatus{State: String("success"), Statuses: []RepoStatus{{ID: Int(1)}}}
+ if !reflect.DeepEqual(status, want) {
+ t.Errorf("Repositories.GetCombinedStatus returned %+v, want %+v", status, want)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/repos_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/repos_test.go
new file mode 100644
index 000000000..6e98e2845
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/repos_test.go
@@ -0,0 +1,409 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestRepositoriesService_List_authenticatedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/repos", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testHeader(t, r, "Accept", mediaTypeLicensesPreview)
+ fmt.Fprint(w, `[{"id":1},{"id":2}]`)
+ })
+
+ repos, _, err := client.Repositories.List("", nil)
+ if err != nil {
+ t.Errorf("Repositories.List returned error: %v", err)
+ }
+
+ want := []Repository{{ID: Int(1)}, {ID: Int(2)}}
+ if !reflect.DeepEqual(repos, want) {
+ t.Errorf("Repositories.List returned %+v, want %+v", repos, want)
+ }
+}
+
+func TestRepositoriesService_List_specifiedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/repos", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testHeader(t, r, "Accept", mediaTypeLicensesPreview)
+ testFormValues(t, r, values{
+ "type": "owner",
+ "sort": "created",
+ "direction": "asc",
+ "page": "2",
+ })
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &RepositoryListOptions{"owner", "created", "asc", ListOptions{Page: 2}}
+ repos, _, err := client.Repositories.List("u", opt)
+ if err != nil {
+ t.Errorf("Repositories.List returned error: %v", err)
+ }
+
+ want := []Repository{{ID: Int(1)}}
+ if !reflect.DeepEqual(repos, want) {
+ t.Errorf("Repositories.List returned %+v, want %+v", repos, want)
+ }
+}
+
+func TestRepositoriesService_List_invalidUser(t *testing.T) {
+ _, _, err := client.Repositories.List("%", nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_ListByOrg(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/orgs/o/repos", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testHeader(t, r, "Accept", mediaTypeLicensesPreview)
+ testFormValues(t, r, values{
+ "type": "forks",
+ "page": "2",
+ })
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &RepositoryListByOrgOptions{"forks", ListOptions{Page: 2}}
+ repos, _, err := client.Repositories.ListByOrg("o", opt)
+ if err != nil {
+ t.Errorf("Repositories.ListByOrg returned error: %v", err)
+ }
+
+ want := []Repository{{ID: Int(1)}}
+ if !reflect.DeepEqual(repos, want) {
+ t.Errorf("Repositories.ListByOrg returned %+v, want %+v", repos, want)
+ }
+}
+
+func TestRepositoriesService_ListByOrg_invalidOrg(t *testing.T) {
+ _, _, err := client.Repositories.ListByOrg("%", nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_ListAll(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repositories", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "since": "1",
+ "page": "2",
+ "per_page": "3",
+ })
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &RepositoryListAllOptions{1, ListOptions{2, 3}}
+ repos, _, err := client.Repositories.ListAll(opt)
+ if err != nil {
+ t.Errorf("Repositories.ListAll returned error: %v", err)
+ }
+
+ want := []Repository{{ID: Int(1)}}
+ if !reflect.DeepEqual(repos, want) {
+ t.Errorf("Repositories.ListAll returned %+v, want %+v", repos, want)
+ }
+}
+
+func TestRepositoriesService_Create_user(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Repository{Name: String("n")}
+
+ mux.HandleFunc("/user/repos", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Repository)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ repo, _, err := client.Repositories.Create("", input)
+ if err != nil {
+ t.Errorf("Repositories.Create returned error: %v", err)
+ }
+
+ want := &Repository{ID: Int(1)}
+ if !reflect.DeepEqual(repo, want) {
+ t.Errorf("Repositories.Create returned %+v, want %+v", repo, want)
+ }
+}
+
+func TestRepositoriesService_Create_org(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Repository{Name: String("n")}
+
+ mux.HandleFunc("/orgs/o/repos", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Repository)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ repo, _, err := client.Repositories.Create("o", input)
+ if err != nil {
+ t.Errorf("Repositories.Create returned error: %v", err)
+ }
+
+ want := &Repository{ID: Int(1)}
+ if !reflect.DeepEqual(repo, want) {
+ t.Errorf("Repositories.Create returned %+v, want %+v", repo, want)
+ }
+}
+
+func TestRepositoriesService_Create_invalidOrg(t *testing.T) {
+ _, _, err := client.Repositories.Create("%", nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_Get(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testHeader(t, r, "Accept", mediaTypeLicensesPreview)
+ fmt.Fprint(w, `{"id":1,"name":"n","description":"d","owner":{"login":"l"},"license":{"key":"mit"}}`)
+ })
+
+ repo, _, err := client.Repositories.Get("o", "r")
+ if err != nil {
+ t.Errorf("Repositories.Get returned error: %v", err)
+ }
+
+ want := &Repository{ID: Int(1), Name: String("n"), Description: String("d"), Owner: &User{Login: String("l")}, License: &License{Key: String("mit")}}
+ if !reflect.DeepEqual(repo, want) {
+ t.Errorf("Repositories.Get returned %+v, want %+v", repo, want)
+ }
+}
+
+func TestRepositoriesService_Edit(t *testing.T) {
+ setup()
+ defer teardown()
+
+ i := true
+ input := &Repository{HasIssues: &i}
+
+ mux.HandleFunc("/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Repository)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ repo, _, err := client.Repositories.Edit("o", "r", input)
+ if err != nil {
+ t.Errorf("Repositories.Edit returned error: %v", err)
+ }
+
+ want := &Repository{ID: Int(1)}
+ if !reflect.DeepEqual(repo, want) {
+ t.Errorf("Repositories.Edit returned %+v, want %+v", repo, want)
+ }
+}
+
+func TestRepositoriesService_Delete(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Repositories.Delete("o", "r")
+ if err != nil {
+ t.Errorf("Repositories.Delete returned error: %v", err)
+ }
+}
+
+func TestRepositoriesService_Get_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.Get("%", "r")
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_Edit_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.Edit("%", "r", nil)
+ testURLParseError(t, err)
+}
+
+func TestRepositoriesService_ListContributors(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/contributors", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "anon": "true",
+ "page": "2",
+ })
+ fmt.Fprint(w, `[{"contributions":42}]`)
+ })
+
+ opts := &ListContributorsOptions{Anon: "true", ListOptions: ListOptions{Page: 2}}
+ contributors, _, err := client.Repositories.ListContributors("o", "r", opts)
+
+ if err != nil {
+ t.Errorf("Repositories.ListContributors returned error: %v", err)
+ }
+
+ want := []Contributor{{Contributions: Int(42)}}
+ if !reflect.DeepEqual(contributors, want) {
+ t.Errorf("Repositories.ListContributors returned %+v, want %+v", contributors, want)
+ }
+}
+
+func TestRepositoriesService_ListLanguages(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/languages", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"go":1}`)
+ })
+
+ languages, _, err := client.Repositories.ListLanguages("o", "r")
+ if err != nil {
+ t.Errorf("Repositories.ListLanguages returned error: %v", err)
+ }
+
+ want := map[string]int{"go": 1}
+ if !reflect.DeepEqual(languages, want) {
+ t.Errorf("Repositories.ListLanguages returned %+v, want %+v", languages, want)
+ }
+}
+
+func TestRepositoriesService_ListTeams(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/teams", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ teams, _, err := client.Repositories.ListTeams("o", "r", opt)
+ if err != nil {
+ t.Errorf("Repositories.ListTeams returned error: %v", err)
+ }
+
+ want := []Team{{ID: Int(1)}}
+ if !reflect.DeepEqual(teams, want) {
+ t.Errorf("Repositories.ListTeams returned %+v, want %+v", teams, want)
+ }
+}
+
+func TestRepositoriesService_ListTags(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/tags", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"name":"n", "commit" : {"sha" : "s", "url" : "u"}, "zipball_url": "z", "tarball_url": "t"}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ tags, _, err := client.Repositories.ListTags("o", "r", opt)
+ if err != nil {
+ t.Errorf("Repositories.ListTags returned error: %v", err)
+ }
+
+ want := []RepositoryTag{
+ {
+ Name: String("n"),
+ Commit: &Commit{
+ SHA: String("s"),
+ URL: String("u"),
+ },
+ ZipballURL: String("z"),
+ TarballURL: String("t"),
+ },
+ }
+ if !reflect.DeepEqual(tags, want) {
+ t.Errorf("Repositories.ListTags returned %+v, want %+v", tags, want)
+ }
+}
+
+func TestRepositoriesService_ListBranches(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/branches", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"name":"master", "commit" : {"sha" : "a57781", "url" : "https://api.github.com/repos/o/r/commits/a57781"}}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ branches, _, err := client.Repositories.ListBranches("o", "r", opt)
+ if err != nil {
+ t.Errorf("Repositories.ListBranches returned error: %v", err)
+ }
+
+ want := []Branch{{Name: String("master"), Commit: &Commit{SHA: String("a57781"), URL: String("https://api.github.com/repos/o/r/commits/a57781")}}}
+ if !reflect.DeepEqual(branches, want) {
+ t.Errorf("Repositories.ListBranches returned %+v, want %+v", branches, want)
+ }
+}
+
+func TestRepositoriesService_GetBranch(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/repos/o/r/branches/b", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"name":"n", "commit":{"sha":"s"}}`)
+ })
+
+ branch, _, err := client.Repositories.GetBranch("o", "r", "b")
+ if err != nil {
+ t.Errorf("Repositories.GetBranch returned error: %v", err)
+ }
+
+ want := &Branch{Name: String("n"), Commit: &Commit{SHA: String("s")}}
+ if !reflect.DeepEqual(branch, want) {
+ t.Errorf("Repositories.GetBranch returned %+v, want %+v", branch, want)
+ }
+}
+
+func TestRepositoriesService_ListLanguages_invalidOwner(t *testing.T) {
+ _, _, err := client.Repositories.ListLanguages("%", "%")
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/search_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/search_test.go
new file mode 100644
index 000000000..3cfd16243
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/search_test.go
@@ -0,0 +1,196 @@
+package github
+
+import (
+ "fmt"
+ "net/http"
+ "reflect"
+
+ "testing"
+)
+
+func TestSearchService_Repositories(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/search/repositories", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "q": "blah",
+ "sort": "forks",
+ "order": "desc",
+ "page": "2",
+ "per_page": "2",
+ })
+
+ fmt.Fprint(w, `{"total_count": 4, "items": [{"id":1},{"id":2}]}`)
+ })
+
+ opts := &SearchOptions{Sort: "forks", Order: "desc", ListOptions: ListOptions{Page: 2, PerPage: 2}}
+ result, _, err := client.Search.Repositories("blah", opts)
+ if err != nil {
+ t.Errorf("Search.Repositories returned error: %v", err)
+ }
+
+ want := &RepositoriesSearchResult{
+ Total: Int(4),
+ Repositories: []Repository{{ID: Int(1)}, {ID: Int(2)}},
+ }
+ if !reflect.DeepEqual(result, want) {
+ t.Errorf("Search.Repositories returned %+v, want %+v", result, want)
+ }
+}
+
+func TestSearchService_Issues(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/search/issues", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "q": "blah",
+ "sort": "forks",
+ "order": "desc",
+ "page": "2",
+ "per_page": "2",
+ })
+
+ fmt.Fprint(w, `{"total_count": 4, "items": [{"number":1},{"number":2}]}`)
+ })
+
+ opts := &SearchOptions{Sort: "forks", Order: "desc", ListOptions: ListOptions{Page: 2, PerPage: 2}}
+ result, _, err := client.Search.Issues("blah", opts)
+ if err != nil {
+ t.Errorf("Search.Issues returned error: %v", err)
+ }
+
+ want := &IssuesSearchResult{
+ Total: Int(4),
+ Issues: []Issue{{Number: Int(1)}, {Number: Int(2)}},
+ }
+ if !reflect.DeepEqual(result, want) {
+ t.Errorf("Search.Issues returned %+v, want %+v", result, want)
+ }
+}
+
+func TestSearchService_Users(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/search/users", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "q": "blah",
+ "sort": "forks",
+ "order": "desc",
+ "page": "2",
+ "per_page": "2",
+ })
+
+ fmt.Fprint(w, `{"total_count": 4, "items": [{"id":1},{"id":2}]}`)
+ })
+
+ opts := &SearchOptions{Sort: "forks", Order: "desc", ListOptions: ListOptions{Page: 2, PerPage: 2}}
+ result, _, err := client.Search.Users("blah", opts)
+ if err != nil {
+		t.Errorf("Search.Users returned error: %v", err)
+ }
+
+ want := &UsersSearchResult{
+ Total: Int(4),
+ Users: []User{{ID: Int(1)}, {ID: Int(2)}},
+ }
+ if !reflect.DeepEqual(result, want) {
+ t.Errorf("Search.Users returned %+v, want %+v", result, want)
+ }
+}
+
+func TestSearchService_Code(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/search/code", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{
+ "q": "blah",
+ "sort": "forks",
+ "order": "desc",
+ "page": "2",
+ "per_page": "2",
+ })
+
+ fmt.Fprint(w, `{"total_count": 4, "items": [{"name":"1"},{"name":"2"}]}`)
+ })
+
+ opts := &SearchOptions{Sort: "forks", Order: "desc", ListOptions: ListOptions{Page: 2, PerPage: 2}}
+ result, _, err := client.Search.Code("blah", opts)
+ if err != nil {
+ t.Errorf("Search.Code returned error: %v", err)
+ }
+
+ want := &CodeSearchResult{
+ Total: Int(4),
+ CodeResults: []CodeResult{{Name: String("1")}, {Name: String("2")}},
+ }
+ if !reflect.DeepEqual(result, want) {
+ t.Errorf("Search.Code returned %+v, want %+v", result, want)
+ }
+}
+
+func TestSearchService_CodeTextMatch(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/search/code", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+
+ textMatchResponse := `
+ {
+ "total_count": 1,
+ "items": [
+ {
+ "name":"gopher1",
+ "text_matches": [
+ {
+ "fragment": "I'm afraid my friend what you have found\nIs a gopher who lives to feed",
+ "matches": [
+ {
+ "text": "gopher",
+ "indices": [
+ 14,
+ 21
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ `
+
+ fmt.Fprint(w, textMatchResponse)
+ })
+
+ opts := &SearchOptions{Sort: "forks", Order: "desc", ListOptions: ListOptions{Page: 2, PerPage: 2}, TextMatch: true}
+ result, _, err := client.Search.Code("blah", opts)
+ if err != nil {
+ t.Errorf("Search.Code returned error: %v", err)
+ }
+
+ wantedCodeResult := CodeResult{
+ Name: String("gopher1"),
+ TextMatches: []TextMatch{{
+ Fragment: String("I'm afraid my friend what you have found\nIs a gopher who lives to feed"),
+ Matches: []Match{{Text: String("gopher"), Indices: []int{14, 21}}},
+ },
+ },
+ }
+
+ want := &CodeSearchResult{
+ Total: Int(1),
+ CodeResults: []CodeResult{wantedCodeResult},
+ }
+ if !reflect.DeepEqual(result, want) {
+ t.Errorf("Search.Code returned %+v, want %+v", result, want)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/strings_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/strings_test.go
new file mode 100644
index 000000000..a393eb6cf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/strings_test.go
@@ -0,0 +1,137 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "testing"
+ "time"
+		t.Errorf("Repositories.EditReleaseAsset returned %+v, want %+v", asset, want)
+
+func TestStringify(t *testing.T) {
+ var nilPointer *string
+
+ var tests = []struct {
+ in interface{}
+ out string
+ }{
+ // basic types
+ {"foo", `"foo"`},
+ {123, `123`},
+ {1.5, `1.5`},
+ {false, `false`},
+ {
+ []string{"a", "b"},
+ `["a" "b"]`,
+ },
+ {
+ struct {
+ A []string
+ }{nil},
+ // nil slice is skipped
+ `{}`,
+ },
+ {
+ struct {
+ A string
+ }{"foo"},
+ // structs not of a named type get no prefix
+ `{A:"foo"}`,
+ },
+
+ // pointers
+ {nilPointer, ``},
+ {String("foo"), `"foo"`},
+ {Int(123), `123`},
+ {Bool(false), `false`},
+ {
+ []*string{String("a"), String("b")},
+ `["a" "b"]`,
+ },
+
+ // actual GitHub structs
+ {
+ Timestamp{time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)},
+ `github.Timestamp{2006-01-02 15:04:05 +0000 UTC}`,
+ },
+ {
+ &Timestamp{time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)},
+ `github.Timestamp{2006-01-02 15:04:05 +0000 UTC}`,
+ },
+ {
+ User{ID: Int(123), Name: String("n")},
+ `github.User{ID:123, Name:"n"}`,
+ },
+ {
+ Repository{Owner: &User{ID: Int(123)}},
+ `github.Repository{Owner:github.User{ID:123}}`,
+ },
+ }
+
+ for i, tt := range tests {
+ s := Stringify(tt.in)
+ if s != tt.out {
+ t.Errorf("%d. Stringify(%q) => %q, want %q", i, tt.in, s, tt.out)
+ }
+ }
+}
+
+// Directly test the String() methods on various GitHub types. We don't do an
+// exhaustive test of all the various field types, since TestStringify() above
+// takes care of that. Rather, we just make sure that Stringify() is being
+// used to build the strings, which we do by verifying that pointers are
+// stringified as their underlying value.
+func TestString(t *testing.T) {
+ var tests = []struct {
+ in interface{}
+ out string
+ }{
+ {CodeResult{Name: String("n")}, `github.CodeResult{Name:"n"}`},
+ {CommitAuthor{Name: String("n")}, `github.CommitAuthor{Name:"n"}`},
+ {CommitFile{SHA: String("s")}, `github.CommitFile{SHA:"s"}`},
+ {CommitStats{Total: Int(1)}, `github.CommitStats{Total:1}`},
+ {CommitsComparison{TotalCommits: Int(1)}, `github.CommitsComparison{TotalCommits:1}`},
+ {Commit{SHA: String("s")}, `github.Commit{SHA:"s"}`},
+ {Event{ID: String("1")}, `github.Event{ID:"1"}`},
+ {GistComment{ID: Int(1)}, `github.GistComment{ID:1}`},
+ {GistFile{Size: Int(1)}, `github.GistFile{Size:1}`},
+ {Gist{ID: String("1")}, `github.Gist{ID:"1", Files:map[]}`},
+ {GitObject{SHA: String("s")}, `github.GitObject{SHA:"s"}`},
+ {Gitignore{Name: String("n")}, `github.Gitignore{Name:"n"}`},
+ {Hook{ID: Int(1)}, `github.Hook{Config:map[], ID:1}`},
+ {IssueComment{ID: Int(1)}, `github.IssueComment{ID:1}`},
+ {Issue{Number: Int(1)}, `github.Issue{Number:1}`},
+ {Key{ID: Int(1)}, `github.Key{ID:1}`},
+ {Label{Name: String("l")}, "l"},
+ {Organization{ID: Int(1)}, `github.Organization{ID:1}`},
+ {PullRequestComment{ID: Int(1)}, `github.PullRequestComment{ID:1}`},
+ {PullRequest{Number: Int(1)}, `github.PullRequest{Number:1}`},
+ {PushEventCommit{SHA: String("s")}, `github.PushEventCommit{SHA:"s"}`},
+ {PushEvent{PushID: Int(1)}, `github.PushEvent{PushID:1}`},
+ {Reference{Ref: String("r")}, `github.Reference{Ref:"r"}`},
+ {ReleaseAsset{ID: Int(1)}, `github.ReleaseAsset{ID:1}`},
+ {RepoStatus{ID: Int(1)}, `github.RepoStatus{ID:1}`},
+ {RepositoryComment{ID: Int(1)}, `github.RepositoryComment{ID:1}`},
+ {RepositoryCommit{SHA: String("s")}, `github.RepositoryCommit{SHA:"s"}`},
+ {RepositoryContent{Name: String("n")}, `github.RepositoryContent{Name:"n"}`},
+ {RepositoryRelease{ID: Int(1)}, `github.RepositoryRelease{ID:1}`},
+ {Repository{ID: Int(1)}, `github.Repository{ID:1}`},
+ {Team{ID: Int(1)}, `github.Team{ID:1}`},
+ {TreeEntry{SHA: String("s")}, `github.TreeEntry{SHA:"s"}`},
+ {Tree{SHA: String("s")}, `github.Tree{SHA:"s"}`},
+ {User{ID: Int(1)}, `github.User{ID:1}`},
+ {WebHookAuthor{Name: String("n")}, `github.WebHookAuthor{Name:"n"}`},
+ {WebHookCommit{ID: String("1")}, `github.WebHookCommit{ID:"1"}`},
+ {WebHookPayload{Ref: String("r")}, `github.WebHookPayload{Ref:"r"}`},
+ }
+
+ for i, tt := range tests {
+ s := tt.in.(fmt.Stringer).String()
+ if s != tt.out {
+ t.Errorf("%d. String() => %q, want %q", i, tt.in, tt.out)
+			t.Errorf("%d. String() => %q, want %q", i, s, tt.out)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/timestamp_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/timestamp_test.go
new file mode 100644
index 000000000..12376c51a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/timestamp_test.go
@@ -0,0 +1,181 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "testing"
+ "time"
+)
+
+const (
+ emptyTimeStr = `"0001-01-01T00:00:00Z"`
+ referenceTimeStr = `"2006-01-02T15:04:05Z"`
+ referenceUnixTimeStr = `1136214245`
+)
+
+var (
+ referenceTime = time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)
+ unixOrigin = time.Unix(0, 0).In(time.UTC)
+)
+
+func TestTimestamp_Marshal(t *testing.T) {
+ testCases := []struct {
+ desc string
+ data Timestamp
+ want string
+ wantErr bool
+ equal bool
+ }{
+ {"Reference", Timestamp{referenceTime}, referenceTimeStr, false, true},
+ {"Empty", Timestamp{}, emptyTimeStr, false, true},
+ {"Mismatch", Timestamp{}, referenceTimeStr, false, false},
+ }
+ for _, tc := range testCases {
+ out, err := json.Marshal(tc.data)
+ if gotErr := err != nil; gotErr != tc.wantErr {
+ t.Errorf("%s: gotErr=%v, wantErr=%v, err=%v", tc.desc, gotErr, tc.wantErr, err)
+ }
+ got := string(out)
+ equal := got == tc.want
+		if equal != tc.equal {
+ t.Errorf("%s: got=%s, want=%s, equal=%v, want=%v", tc.desc, got, tc.want, equal, tc.equal)
+ }
+ }
+}
+
+func TestTimestamp_Unmarshal(t *testing.T) {
+ testCases := []struct {
+ desc string
+ data string
+ want Timestamp
+ wantErr bool
+ equal bool
+ }{
+ {"Reference", referenceTimeStr, Timestamp{referenceTime}, false, true},
+ {"ReferenceUnix", `1136214245`, Timestamp{referenceTime}, false, true},
+ {"Empty", emptyTimeStr, Timestamp{}, false, true},
+ {"UnixStart", `0`, Timestamp{unixOrigin}, false, true},
+ {"Mismatch", referenceTimeStr, Timestamp{}, false, false},
+ {"MismatchUnix", `0`, Timestamp{}, false, false},
+ {"Invalid", `"asdf"`, Timestamp{referenceTime}, true, false},
+ }
+ for _, tc := range testCases {
+ var got Timestamp
+ err := json.Unmarshal([]byte(tc.data), &got)
+ if gotErr := err != nil; gotErr != tc.wantErr {
+ t.Errorf("%s: gotErr=%v, wantErr=%v, err=%v", tc.desc, gotErr, tc.wantErr, err)
+ continue
+ }
+ equal := got.Equal(tc.want)
+ if equal != tc.equal {
+ t.Errorf("%s: got=%#v, want=%#v, equal=%v, want=%v", tc.desc, got, tc.want, equal, tc.equal)
+ }
+ }
+}
+
+func TestTimestamp_MarshalReflexivity(t *testing.T) {
+ testCases := []struct {
+ desc string
+ data Timestamp
+ }{
+ {"Reference", Timestamp{referenceTime}},
+ {"Empty", Timestamp{}},
+ }
+ for _, tc := range testCases {
+ data, err := json.Marshal(tc.data)
+ if err != nil {
+ t.Errorf("%s: Marshal err=%v", tc.desc, err)
+ }
+ var got Timestamp
+ err = json.Unmarshal(data, &got)
+ if !got.Equal(tc.data) {
+			t.Errorf("%s: %+v != %+v", tc.desc, got, tc.data)
+ }
+ }
+}
+
+type WrappedTimestamp struct {
+ A int
+ Time Timestamp
+}
+
+func TestWrappedTimestamp_Marshal(t *testing.T) {
+ testCases := []struct {
+ desc string
+ data WrappedTimestamp
+ want string
+ wantErr bool
+ equal bool
+ }{
+ {"Reference", WrappedTimestamp{0, Timestamp{referenceTime}}, fmt.Sprintf(`{"A":0,"Time":%s}`, referenceTimeStr), false, true},
+ {"Empty", WrappedTimestamp{}, fmt.Sprintf(`{"A":0,"Time":%s}`, emptyTimeStr), false, true},
+ {"Mismatch", WrappedTimestamp{}, fmt.Sprintf(`{"A":0,"Time":%s}`, referenceTimeStr), false, false},
+ }
+ for _, tc := range testCases {
+ out, err := json.Marshal(tc.data)
+ if gotErr := err != nil; gotErr != tc.wantErr {
+ t.Errorf("%s: gotErr=%v, wantErr=%v, err=%v", tc.desc, gotErr, tc.wantErr, err)
+ }
+ got := string(out)
+ equal := got == tc.want
+ if equal != tc.equal {
+ t.Errorf("%s: got=%s, want=%s, equal=%v, want=%v", tc.desc, got, tc.want, equal, tc.equal)
+ }
+ }
+}
+
+func TestWrappedTimestamp_Unmarshal(t *testing.T) {
+ testCases := []struct {
+ desc string
+ data string
+ want WrappedTimestamp
+ wantErr bool
+ equal bool
+ }{
+ {"Reference", referenceTimeStr, WrappedTimestamp{0, Timestamp{referenceTime}}, false, true},
+ {"ReferenceUnix", referenceUnixTimeStr, WrappedTimestamp{0, Timestamp{referenceTime}}, false, true},
+ {"Empty", emptyTimeStr, WrappedTimestamp{0, Timestamp{}}, false, true},
+ {"UnixStart", `0`, WrappedTimestamp{0, Timestamp{unixOrigin}}, false, true},
+ {"Mismatch", referenceTimeStr, WrappedTimestamp{0, Timestamp{}}, false, false},
+ {"MismatchUnix", `0`, WrappedTimestamp{0, Timestamp{}}, false, false},
+ {"Invalid", `"asdf"`, WrappedTimestamp{0, Timestamp{referenceTime}}, true, false},
+ }
+ for _, tc := range testCases {
+ var got Timestamp
+ err := json.Unmarshal([]byte(tc.data), &got)
+ if gotErr := err != nil; gotErr != tc.wantErr {
+ t.Errorf("%s: gotErr=%v, wantErr=%v, err=%v", tc.desc, gotErr, tc.wantErr, err)
+ continue
+ }
+ equal := got.Time.Equal(tc.want.Time.Time)
+ if equal != tc.equal {
+ t.Errorf("%s: got=%#v, want=%#v, equal=%v, want=%v", tc.desc, got, tc.want, equal, tc.equal)
+ }
+ }
+}
+
+func TestWrappedTimestamp_MarshalReflexivity(t *testing.T) {
+ testCases := []struct {
+ desc string
+ data WrappedTimestamp
+ }{
+ {"Reference", WrappedTimestamp{0, Timestamp{referenceTime}}},
+ {"Empty", WrappedTimestamp{0, Timestamp{}}},
+ }
+ for _, tc := range testCases {
+ bytes, err := json.Marshal(tc.data)
+ if err != nil {
+ t.Errorf("%s: Marshal err=%v", tc.desc, err)
+ }
+ var got WrappedTimestamp
+ err = json.Unmarshal(bytes, &got)
+ if !got.Time.Equal(tc.data.Time) {
+ t.Errorf("%s: %+v != %+v", tc.desc, got, tc.data)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/users_administration_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/users_administration_test.go
new file mode 100644
index 000000000..d415f4d4a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/users_administration_test.go
@@ -0,0 +1,71 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "net/http"
+ "testing"
+)
+
+func TestUsersService_PromoteSiteAdmin(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/site_admin", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "PUT")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ _, err := client.Users.PromoteSiteAdmin("u")
+ if err != nil {
+ t.Errorf("Users.PromoteSiteAdmin returned error: %v", err)
+ }
+}
+
+func TestUsersService_DemoteSiteAdmin(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/site_admin", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ _, err := client.Users.DemoteSiteAdmin("u")
+ if err != nil {
+ t.Errorf("Users.DemoteSiteAdmin returned error: %v", err)
+ }
+}
+
+func TestUsersService_Suspend(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/suspended", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "PUT")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ _, err := client.Users.Suspend("u")
+ if err != nil {
+ t.Errorf("Users.Suspend returned error: %v", err)
+ }
+}
+
+func TestUsersService_Unsuspend(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/suspended", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ _, err := client.Users.Unsuspend("u")
+ if err != nil {
+ t.Errorf("Users.Unsuspend returned error: %v", err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/users_emails_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/users_emails_test.go
new file mode 100644
index 000000000..7eb650860
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/users_emails_test.go
@@ -0,0 +1,94 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestUsersService_ListEmails(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/emails", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{
+ "email": "user@example.com",
+ "verified": false,
+ "primary": true
+ }]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ emails, _, err := client.Users.ListEmails(opt)
+ if err != nil {
+ t.Errorf("Users.ListEmails returned error: %v", err)
+ }
+
+ want := []UserEmail{{Email: String("user@example.com"), Verified: Bool(false), Primary: Bool(true)}}
+ if !reflect.DeepEqual(emails, want) {
+ t.Errorf("Users.ListEmails returned %+v, want %+v", emails, want)
+ }
+}
+
+func TestUsersService_AddEmails(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := []string{"new@example.com"}
+
+ mux.HandleFunc("/user/emails", func(w http.ResponseWriter, r *http.Request) {
+ v := new([]string)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(*v, input) {
+ t.Errorf("Request body = %+v, want %+v", *v, input)
+ }
+
+ fmt.Fprint(w, `[{"email":"old@example.com"}, {"email":"new@example.com"}]`)
+ })
+
+ emails, _, err := client.Users.AddEmails(input)
+ if err != nil {
+ t.Errorf("Users.AddEmails returned error: %v", err)
+ }
+
+ want := []UserEmail{
+ {Email: String("old@example.com")},
+ {Email: String("new@example.com")},
+ }
+ if !reflect.DeepEqual(emails, want) {
+ t.Errorf("Users.AddEmails returned %+v, want %+v", emails, want)
+ }
+}
+
+func TestUsersService_DeleteEmails(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := []string{"user@example.com"}
+
+ mux.HandleFunc("/user/emails", func(w http.ResponseWriter, r *http.Request) {
+ v := new([]string)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "DELETE")
+ if !reflect.DeepEqual(*v, input) {
+ t.Errorf("Request body = %+v, want %+v", *v, input)
+ }
+ })
+
+ _, err := client.Users.DeleteEmails(input)
+ if err != nil {
+ t.Errorf("Users.DeleteEmails returned error: %v", err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/users_followers_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/users_followers_test.go
new file mode 100644
index 000000000..f4d24578e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/users_followers_test.go
@@ -0,0 +1,222 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestUsersService_ListFollowers_authenticatedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/followers", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ users, _, err := client.Users.ListFollowers("", opt)
+ if err != nil {
+ t.Errorf("Users.ListFollowers returned error: %v", err)
+ }
+
+ want := []User{{ID: Int(1)}}
+ if !reflect.DeepEqual(users, want) {
+ t.Errorf("Users.ListFollowers returned %+v, want %+v", users, want)
+ }
+}
+
+func TestUsersService_ListFollowers_specifiedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/followers", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ users, _, err := client.Users.ListFollowers("u", nil)
+ if err != nil {
+ t.Errorf("Users.ListFollowers returned error: %v", err)
+ }
+
+ want := []User{{ID: Int(1)}}
+ if !reflect.DeepEqual(users, want) {
+ t.Errorf("Users.ListFollowers returned %+v, want %+v", users, want)
+ }
+}
+
+func TestUsersService_ListFollowers_invalidUser(t *testing.T) {
+ _, _, err := client.Users.ListFollowers("%", nil)
+ testURLParseError(t, err)
+}
+
+func TestUsersService_ListFollowing_authenticatedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/following", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opts := &ListOptions{Page: 2}
+ users, _, err := client.Users.ListFollowing("", opts)
+ if err != nil {
+ t.Errorf("Users.ListFollowing returned error: %v", err)
+ }
+
+ want := []User{{ID: Int(1)}}
+ if !reflect.DeepEqual(users, want) {
+ t.Errorf("Users.ListFollowing returned %+v, want %+v", users, want)
+ }
+}
+
+func TestUsersService_ListFollowing_specifiedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/following", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ users, _, err := client.Users.ListFollowing("u", nil)
+ if err != nil {
+ t.Errorf("Users.ListFollowing returned error: %v", err)
+ }
+
+ want := []User{{ID: Int(1)}}
+ if !reflect.DeepEqual(users, want) {
+ t.Errorf("Users.ListFollowing returned %+v, want %+v", users, want)
+ }
+}
+
+func TestUsersService_ListFollowing_invalidUser(t *testing.T) {
+ _, _, err := client.Users.ListFollowing("%", nil)
+ testURLParseError(t, err)
+}
+
+func TestUsersService_IsFollowing_authenticatedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/following/t", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ following, _, err := client.Users.IsFollowing("", "t")
+ if err != nil {
+ t.Errorf("Users.IsFollowing returned error: %v", err)
+ }
+ if want := true; following != want {
+ t.Errorf("Users.IsFollowing returned %+v, want %+v", following, want)
+ }
+}
+
+func TestUsersService_IsFollowing_specifiedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/following/t", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+ following, _, err := client.Users.IsFollowing("u", "t")
+ if err != nil {
+ t.Errorf("Users.IsFollowing returned error: %v", err)
+ }
+ if want := true; following != want {
+ t.Errorf("Users.IsFollowing returned %+v, want %+v", following, want)
+ }
+}
+
+func TestUsersService_IsFollowing_false(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/following/t", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ w.WriteHeader(http.StatusNotFound)
+ })
+
+ following, _, err := client.Users.IsFollowing("u", "t")
+ if err != nil {
+ t.Errorf("Users.IsFollowing returned error: %v", err)
+ }
+ if want := false; following != want {
+ t.Errorf("Users.IsFollowing returned %+v, want %+v", following, want)
+ }
+}
+
+func TestUsersService_IsFollowing_error(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/following/t", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ http.Error(w, "BadRequest", http.StatusBadRequest)
+ })
+
+ following, _, err := client.Users.IsFollowing("u", "t")
+ if err == nil {
+ t.Errorf("Expected HTTP 400 response")
+ }
+ if want := false; following != want {
+ t.Errorf("Users.IsFollowing returned %+v, want %+v", following, want)
+ }
+}
+
+func TestUsersService_IsFollowing_invalidUser(t *testing.T) {
+ _, _, err := client.Users.IsFollowing("%", "%")
+ testURLParseError(t, err)
+}
+
+func TestUsersService_Follow(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/following/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "PUT")
+ })
+
+ _, err := client.Users.Follow("u")
+ if err != nil {
+ t.Errorf("Users.Follow returned error: %v", err)
+ }
+}
+
+func TestUsersService_Follow_invalidUser(t *testing.T) {
+ _, err := client.Users.Follow("%")
+ testURLParseError(t, err)
+}
+
+func TestUsersService_Unfollow(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/following/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Users.Unfollow("u")
+ if err != nil {
+		t.Errorf("Users.Unfollow returned error: %v", err)
+ }
+}
+
+func TestUsersService_Unfollow_invalidUser(t *testing.T) {
+ _, err := client.Users.Unfollow("%")
+ testURLParseError(t, err)
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/users_keys_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/users_keys_test.go
new file mode 100644
index 000000000..e47afd71d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/users_keys_test.go
@@ -0,0 +1,124 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestUsersService_ListKeys_authenticatedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/keys", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"page": "2"})
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ opt := &ListOptions{Page: 2}
+ keys, _, err := client.Users.ListKeys("", opt)
+ if err != nil {
+ t.Errorf("Users.ListKeys returned error: %v", err)
+ }
+
+ want := []Key{{ID: Int(1)}}
+ if !reflect.DeepEqual(keys, want) {
+ t.Errorf("Users.ListKeys returned %+v, want %+v", keys, want)
+ }
+}
+
+func TestUsersService_ListKeys_specifiedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u/keys", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `[{"id":1}]`)
+ })
+
+ keys, _, err := client.Users.ListKeys("u", nil)
+ if err != nil {
+ t.Errorf("Users.ListKeys returned error: %v", err)
+ }
+
+ want := []Key{{ID: Int(1)}}
+ if !reflect.DeepEqual(keys, want) {
+ t.Errorf("Users.ListKeys returned %+v, want %+v", keys, want)
+ }
+}
+
+func TestUsersService_ListKeys_invalidUser(t *testing.T) {
+ _, _, err := client.Users.ListKeys("%", nil)
+ testURLParseError(t, err)
+}
+
+func TestUsersService_GetKey(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/keys/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ key, _, err := client.Users.GetKey(1)
+ if err != nil {
+ t.Errorf("Users.GetKey returned error: %v", err)
+ }
+
+ want := &Key{ID: Int(1)}
+ if !reflect.DeepEqual(key, want) {
+ t.Errorf("Users.GetKey returned %+v, want %+v", key, want)
+ }
+}
+
+func TestUsersService_CreateKey(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &Key{Key: String("k"), Title: String("t")}
+
+ mux.HandleFunc("/user/keys", func(w http.ResponseWriter, r *http.Request) {
+ v := new(Key)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "POST")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ key, _, err := client.Users.CreateKey(input)
+ if err != nil {
+		t.Errorf("Users.CreateKey returned error: %v", err)
+ }
+
+ want := &Key{ID: Int(1)}
+ if !reflect.DeepEqual(key, want) {
+		t.Errorf("Users.CreateKey returned %+v, want %+v", key, want)
+ }
+}
+
+func TestUsersService_DeleteKey(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user/keys/1", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "DELETE")
+ })
+
+ _, err := client.Users.DeleteKey(1)
+ if err != nil {
+ t.Errorf("Users.DeleteKey returned error: %v", err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-github/github/users_test.go b/Godeps/_workspace/src/github.com/google/go-github/github/users_test.go
new file mode 100644
index 000000000..15ea3e83a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-github/github/users_test.go
@@ -0,0 +1,150 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+func TestUser_marshall(t *testing.T) {
+ testJSONMarshal(t, &User{}, "{}")
+
+ u := &User{
+ Login: String("l"),
+ ID: Int(1),
+ URL: String("u"),
+ AvatarURL: String("a"),
+ GravatarID: String("g"),
+ Name: String("n"),
+ Company: String("c"),
+ Blog: String("b"),
+ Location: String("l"),
+ Email: String("e"),
+ Hireable: Bool(true),
+ PublicRepos: Int(1),
+ Followers: Int(1),
+ Following: Int(1),
+ CreatedAt: &Timestamp{referenceTime},
+ }
+ want := `{
+ "login": "l",
+ "id": 1,
+ "avatar_url": "a",
+ "gravatar_id": "g",
+ "name": "n",
+ "company": "c",
+ "blog": "b",
+ "location": "l",
+ "email": "e",
+ "hireable": true,
+ "public_repos": 1,
+ "followers": 1,
+ "following": 1,
+ "created_at": ` + referenceTimeStr + `,
+ "url": "u"
+ }`
+ testJSONMarshal(t, u, want)
+}
+
+func TestUsersService_Get_authenticatedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/user", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ user, _, err := client.Users.Get("")
+ if err != nil {
+ t.Errorf("Users.Get returned error: %v", err)
+ }
+
+ want := &User{ID: Int(1)}
+ if !reflect.DeepEqual(user, want) {
+ t.Errorf("Users.Get returned %+v, want %+v", user, want)
+ }
+}
+
+func TestUsersService_Get_specifiedUser(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users/u", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ user, _, err := client.Users.Get("u")
+ if err != nil {
+ t.Errorf("Users.Get returned error: %v", err)
+ }
+
+ want := &User{ID: Int(1)}
+ if !reflect.DeepEqual(user, want) {
+ t.Errorf("Users.Get returned %+v, want %+v", user, want)
+ }
+}
+
+func TestUsersService_Get_invalidUser(t *testing.T) {
+ _, _, err := client.Users.Get("%")
+ testURLParseError(t, err)
+}
+
+func TestUsersService_Edit(t *testing.T) {
+ setup()
+ defer teardown()
+
+ input := &User{Name: String("n")}
+
+ mux.HandleFunc("/user", func(w http.ResponseWriter, r *http.Request) {
+ v := new(User)
+ json.NewDecoder(r.Body).Decode(v)
+
+ testMethod(t, r, "PATCH")
+ if !reflect.DeepEqual(v, input) {
+ t.Errorf("Request body = %+v, want %+v", v, input)
+ }
+
+ fmt.Fprint(w, `{"id":1}`)
+ })
+
+ user, _, err := client.Users.Edit(input)
+ if err != nil {
+ t.Errorf("Users.Edit returned error: %v", err)
+ }
+
+ want := &User{ID: Int(1)}
+ if !reflect.DeepEqual(user, want) {
+ t.Errorf("Users.Edit returned %+v, want %+v", user, want)
+ }
+}
+
+func TestUsersService_ListAll(t *testing.T) {
+ setup()
+ defer teardown()
+
+ mux.HandleFunc("/users", func(w http.ResponseWriter, r *http.Request) {
+ testMethod(t, r, "GET")
+ testFormValues(t, r, values{"since": "1"})
+ fmt.Fprint(w, `[{"id":2}]`)
+ })
+
+ opt := &UserListOptions{1}
+ users, _, err := client.Users.ListAll(opt)
+ if err != nil {
+ t.Errorf("Users.Get returned error: %v", err)
+ }
+
+ want := []User{{ID: Int(2)}}
+ if !reflect.DeepEqual(users, want) {
+ t.Errorf("Users.ListAll returned %+v, want %+v", users, want)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/google/go-querystring/query/encode.go b/Godeps/_workspace/src/github.com/google/go-querystring/query/encode.go
index 396d804f3..90dcabb80 100644
--- a/Godeps/_workspace/src/github.com/google/go-querystring/query/encode.go
+++ b/Godeps/_workspace/src/github.com/google/go-querystring/query/encode.go
@@ -167,6 +167,10 @@ func reflectValue(values url.Values, val reflect.Value, scope string) error {
}
if sv.Type().Implements(encoderType) {
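+ // sv is a nil pointer that implements Encoder: substitute a freshly
+ // allocated zero value so EncodeValues can be called without panicking.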
+ if !reflect.Indirect(sv).IsValid() {
+ sv = reflect.New(sv.Type().Elem())
+ }
+
m := sv.Interface().(Encoder)
if err := m.EncodeValues(name, &values); err != nil {
return err
diff --git a/Godeps/_workspace/src/github.com/google/go-querystring/query/encode_test.go b/Godeps/_workspace/src/github.com/google/go-querystring/query/encode_test.go
new file mode 100644
index 000000000..e0b2a365a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/google/go-querystring/query/encode_test.go
@@ -0,0 +1,301 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package query
+
+import (
+ "fmt"
+ "net/url"
+ "reflect"
+ "testing"
+ "time"
+)
+
+type Nested struct {
+ A SubNested `url:"a"`
+ B *SubNested `url:"b"`
+ Ptr *SubNested `url:"ptr,omitempty"`
+}
+
+type SubNested struct {
+ Value string `url:"value"`
+}
+
+func TestValues_types(t *testing.T) {
+ str := "string"
+ strPtr := &str
+
+ tests := []struct {
+ in interface{}
+ want url.Values
+ }{
+ {
+ // basic primitives
+ struct {
+ A string
+ B int
+ C uint
+ D float32
+ E bool
+ }{},
+ url.Values{
+ "A": {""},
+ "B": {"0"},
+ "C": {"0"},
+ "D": {"0"},
+ "E": {"false"},
+ },
+ },
+ {
+ // pointers
+ struct {
+ A *string
+ B *int
+ C **string
+ }{A: strPtr, C: &strPtr},
+ url.Values{
+ "A": {str},
+ "B": {""},
+ "C": {str},
+ },
+ },
+ {
+ // slices and arrays
+ struct {
+ A []string
+ B []string `url:",comma"`
+ C []string `url:",space"`
+ D [2]string
+ E [2]string `url:",comma"`
+ F [2]string `url:",space"`
+ G []*string `url:",space"`
+ H []bool `url:",int,space"`
+ I []string `url:",brackets"`
+ }{
+ A: []string{"a", "b"},
+ B: []string{"a", "b"},
+ C: []string{"a", "b"},
+ D: [2]string{"a", "b"},
+ E: [2]string{"a", "b"},
+ F: [2]string{"a", "b"},
+ G: []*string{&str, &str},
+ H: []bool{true, false},
+ I: []string{"a", "b"},
+ },
+ url.Values{
+ "A": {"a", "b"},
+ "B": {"a,b"},
+ "C": {"a b"},
+ "D": {"a", "b"},
+ "E": {"a,b"},
+ "F": {"a b"},
+ "G": {"string string"},
+ "H": {"1 0"},
+ "I[]": {"a", "b"},
+ },
+ },
+ {
+ // other types
+ struct {
+ A time.Time
+ B time.Time `url:",unix"`
+ C bool `url:",int"`
+ D bool `url:",int"`
+ }{
+ A: time.Date(2000, 1, 1, 12, 34, 56, 0, time.UTC),
+ B: time.Date(2000, 1, 1, 12, 34, 56, 0, time.UTC),
+ C: true,
+ D: false,
+ },
+ url.Values{
+ "A": {"2000-01-01T12:34:56Z"},
+ "B": {"946730096"},
+ "C": {"1"},
+ "D": {"0"},
+ },
+ },
+ {
+ struct {
+ Nest Nested `url:"nest"`
+ }{
+ Nested{
+ A: SubNested{
+ Value: "that",
+ },
+ },
+ },
+ url.Values{
+ "nest[a][value]": {"that"},
+ "nest[b]": {""},
+ },
+ },
+ {
+ struct {
+ Nest Nested `url:"nest"`
+ }{
+ Nested{
+ Ptr: &SubNested{
+ Value: "that",
+ },
+ },
+ },
+ url.Values{
+ "nest[a][value]": {""},
+ "nest[b]": {""},
+ "nest[ptr][value]": {"that"},
+ },
+ },
+ {
+ nil,
+ url.Values{},
+ },
+ }
+
+ for i, tt := range tests {
+ v, err := Values(tt.in)
+ if err != nil {
+ t.Errorf("%d. Values(%q) returned error: %v", i, tt.in, err)
+ }
+
+ if !reflect.DeepEqual(tt.want, v) {
+ t.Errorf("%d. Values(%q) returned %v, want %v", i, tt.in, v, tt.want)
+ }
+ }
+}
+
+func TestValues_omitEmpty(t *testing.T) {
+ str := ""
+ s := struct {
+ a string
+ A string
+ B string `url:",omitempty"`
+ C string `url:"-"`
+ D string `url:"omitempty"` // actually named omitempty, not an option
+ E *string `url:",omitempty"`
+ }{E: &str}
+
+ v, err := Values(s)
+ if err != nil {
+ t.Errorf("Values(%q) returned error: %v", s, err)
+ }
+
+ want := url.Values{
+ "A": {""},
+ "omitempty": {""},
+ "E": {""}, // E is included because the pointer is not empty, even though the string being pointed to is
+ }
+ if !reflect.DeepEqual(want, v) {
+ t.Errorf("Values(%q) returned %v, want %v", s, v, want)
+ }
+}
+
+type A struct {
+ B
+}
+
+type B struct {
+ C string
+}
+
+type D struct {
+ B
+ C string
+}
+
+func TestValues_embeddedStructs(t *testing.T) {
+ tests := []struct {
+ in interface{}
+ want url.Values
+ }{
+ {
+ A{B{C: "foo"}},
+ url.Values{"C": {"foo"}},
+ },
+ {
+ D{B: B{C: "bar"}, C: "foo"},
+ url.Values{"C": {"foo", "bar"}},
+ },
+ }
+
+ for i, tt := range tests {
+ v, err := Values(tt.in)
+ if err != nil {
+ t.Errorf("%d. Values(%q) returned error: %v", i, tt.in, err)
+ }
+
+ if !reflect.DeepEqual(tt.want, v) {
+ t.Errorf("%d. Values(%q) returned %v, want %v", i, tt.in, v, tt.want)
+ }
+ }
+}
+
+func TestValues_invalidInput(t *testing.T) {
+ _, err := Values("")
+ if err == nil {
+ t.Errorf("expected Values() to return an error on invalid input")
+ }
+}
+
+type EncodedArgs []string
+
+func (m EncodedArgs) EncodeValues(key string, v *url.Values) error {
+ for i, arg := range m {
+ v.Set(fmt.Sprintf("%s.%d", key, i), arg)
+ }
+ return nil
+}
+
+func TestValues_Marshaler(t *testing.T) {
+ s := struct {
+ Args EncodedArgs `url:"arg"`
+ }{[]string{"a", "b", "c"}}
+ v, err := Values(s)
+ if err != nil {
+ t.Errorf("Values(%q) returned error: %v", s, err)
+ }
+
+ want := url.Values{
+ "arg.0": {"a"},
+ "arg.1": {"b"},
+ "arg.2": {"c"},
+ }
+ if !reflect.DeepEqual(want, v) {
+ t.Errorf("Values(%q) returned %v, want %v", s, v, want)
+ }
+}
+
+func TestValues_MarshalerWithNilPointer(t *testing.T) {
+ s := struct {
+ Args *EncodedArgs `url:"arg"`
+ }{}
+ v, err := Values(s)
+ if err != nil {
+ t.Errorf("Values(%q) returned error: %v", s, err)
+ }
+
+ want := url.Values{}
+ if !reflect.DeepEqual(want, v) {
+ t.Errorf("Values(%q) returned %v, want %v", s, v, want)
+ }
+}
+
+func TestTagParsing(t *testing.T) {
+ name, opts := parseTag("field,foobar,foo")
+ if name != "field" {
+ t.Fatalf("name = %q, want field", name)
+ }
+ for _, tt := range []struct {
+ opt string
+ want bool
+ }{
+ {"foobar", true},
+ {"foo", true},
+ {"bar", false},
+ {"field", false},
+ } {
+ if opts.Contains(tt.opt) != tt.want {
+ t.Errorf("Contains(%q) = %v", tt.opt, !tt.want)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hailocab/go-hostpool/.gitignore b/Godeps/_workspace/src/github.com/hailocab/go-hostpool/.gitignore
new file mode 100644
index 000000000..00268614f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hailocab/go-hostpool/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/Godeps/_workspace/src/github.com/hailocab/go-hostpool/.travis.yml b/Godeps/_workspace/src/github.com/hailocab/go-hostpool/.travis.yml
new file mode 100644
index 000000000..e69de29bb
diff --git a/Godeps/_workspace/src/github.com/hailocab/go-hostpool/README.md b/Godeps/_workspace/src/github.com/hailocab/go-hostpool/README.md
new file mode 100644
index 000000000..7f4437277
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hailocab/go-hostpool/README.md
@@ -0,0 +1,17 @@
+go-hostpool
+===========
+
+A Go package to intelligently and flexibly pool among multiple hosts from your Go application.
+Host selection can operate in round robin or epsilon greedy mode, and unresponsive hosts are
+avoided.
+
+Usage example:
+
+```go
+hp := hostpool.NewEpsilonGreedy([]string{"a", "b"}, 0, &hostpool.LinearEpsilonValueCalculator{})
+hostResponse := hp.Get()
+hostname := hostResponse.Host()
+var err error // the error from making a request to hostname
+hostResponse.Mark(err)
+```
+
+View more detailed documentation on [godoc.org](http://godoc.org/github.com/bitly/go-hostpool)
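+
+If you only need failover without response-time weighting, a plain round robin
+pool works too. A minimal sketch (hostnames are placeholders):
+
+```go
+hp := hostpool.New([]string{"a", "b", "c"})
+resp := hp.Get()
+// make a request against resp.Host() here
+var err error // the error from that request
+resp.Mark(err) // nil marks success; non-nil marks the host dead until its retry window passes
+```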
diff --git a/Godeps/_workspace/src/github.com/hailocab/go-hostpool/epsilon_greedy.go b/Godeps/_workspace/src/github.com/hailocab/go-hostpool/epsilon_greedy.go
new file mode 100644
index 000000000..e6fe9a79c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hailocab/go-hostpool/epsilon_greedy.go
@@ -0,0 +1,204 @@
+package hostpool
+
+import (
+ "log"
+ "math/rand"
+ "time"
+)
+
+type epsilonHostPoolResponse struct {
+ standardHostPoolResponse
+ started time.Time
+ ended time.Time
+}
+
+func (r *epsilonHostPoolResponse) Mark(err error) {
+ r.Do(func() {
+ r.ended = time.Now()
+ doMark(err, r)
+ })
+
+}
+
+type epsilonGreedyHostPool struct {
+ standardHostPool // TODO - would be nifty if we could embed HostPool and Locker interfaces
+ epsilon float32 // this is our exploration factor
+ decayDuration time.Duration
+ EpsilonValueCalculator // embed the epsilonValueCalculator
+ timer
+}
+
+// Construct an Epsilon Greedy HostPool
+//
+// Epsilon Greedy is an algorithm that allows HostPool not only to track failure state,
+// but also to learn about "better" options in terms of speed, and to pick from available hosts
+// based on how well they perform. This gives a weighted request rate to better
+// performing hosts, while still distributing requests to all hosts (proportionate to their performance).
+// The interface is the same as the standard HostPool, but be sure to mark the HostResponse immediately
+// after executing the request to the host, as that will stop the implicitly running request timer.
+//
+// A good overview of Epsilon Greedy is here http://stevehanov.ca/blog/index.php?id=132
+//
+// To compute the weighting scores, we perform a weighted average of recent response times, over the course of
+// `decayDuration`. decayDuration may be set to 0 to use the default value of 5 minutes.
+// We then use the supplied EpsilonValueCalculator to calculate a score from that weighted average response time.
+func NewEpsilonGreedy(hosts []string, decayDuration time.Duration, calc EpsilonValueCalculator) HostPool {
+
+ if decayDuration <= 0 {
+ decayDuration = defaultDecayDuration
+ }
+ stdHP := New(hosts).(*standardHostPool)
+ p := &epsilonGreedyHostPool{
+ standardHostPool: *stdHP,
+ epsilon: float32(initialEpsilon),
+ decayDuration: decayDuration,
+ EpsilonValueCalculator: calc,
+ timer: &realTimer{},
+ }
+
+ // allocate structures
+ for _, h := range p.hostList {
+ h.epsilonCounts = make([]int64, epsilonBuckets)
+ h.epsilonValues = make([]int64, epsilonBuckets)
+ }
+ go p.epsilonGreedyDecay()
+ return p
+}
+
+func (p *epsilonGreedyHostPool) SetEpsilon(newEpsilon float32) {
+ p.Lock()
+ defer p.Unlock()
+ p.epsilon = newEpsilon
+}
+
+func (p *epsilonGreedyHostPool) SetHosts(hosts []string) {
+ p.Lock()
+ defer p.Unlock()
+ p.standardHostPool.setHosts(hosts)
+ for _, h := range p.hostList {
+ h.epsilonCounts = make([]int64, epsilonBuckets)
+ h.epsilonValues = make([]int64, epsilonBuckets)
+ }
+}
+
+func (p *epsilonGreedyHostPool) epsilonGreedyDecay() {
+ durationPerBucket := p.decayDuration / epsilonBuckets
+ ticker := time.Tick(durationPerBucket)
+ for {
+ <-ticker
+ p.performEpsilonGreedyDecay()
+ }
+}
+
+func (p *epsilonGreedyHostPool) performEpsilonGreedyDecay() {
+ p.Lock()
+ for _, h := range p.hostList {
+ h.epsilonIndex += 1
+ h.epsilonIndex = h.epsilonIndex % epsilonBuckets
+ h.epsilonCounts[h.epsilonIndex] = 0
+ h.epsilonValues[h.epsilonIndex] = 0
+ }
+ p.Unlock()
+}
+
+func (p *epsilonGreedyHostPool) Get() HostPoolResponse {
+ p.Lock()
+ defer p.Unlock()
+ host := p.getEpsilonGreedy()
+ started := time.Now()
+ return &epsilonHostPoolResponse{
+ standardHostPoolResponse: standardHostPoolResponse{host: host, pool: p},
+ started: started,
+ }
+}
+
+func (p *epsilonGreedyHostPool) getEpsilonGreedy() string {
+ var hostToUse *hostEntry
+
+ // this is our exploration phase
+ if rand.Float32() < p.epsilon {
+ p.epsilon = p.epsilon * epsilonDecay
+ if p.epsilon < minEpsilon {
+ p.epsilon = minEpsilon
+ }
+ return p.getRoundRobin()
+ }
+
+ // calculate values for each host in the 0..1 range (but not normalized)
+ var possibleHosts []*hostEntry
+ now := time.Now()
+ var sumValues float64
+ for _, h := range p.hostList {
+ if h.canTryHost(now) {
+ v := h.getWeightedAverageResponseTime()
+ if v > 0 {
+ ev := p.CalcValueFromAvgResponseTime(v)
+ h.epsilonValue = ev
+ sumValues += ev
+ possibleHosts = append(possibleHosts, h)
+ }
+ }
+ }
+
+ if len(possibleHosts) != 0 {
+ // now normalize to the 0..1 range to get a percentage
+ for _, h := range possibleHosts {
+ h.epsilonPercentage = h.epsilonValue / sumValues
+ }
+
+ // do a weighted random choice among hosts
+ ceiling := 0.0
+ pickPercentage := rand.Float64()
+ for _, h := range possibleHosts {
+ ceiling += h.epsilonPercentage
+ if pickPercentage <= ceiling {
+ hostToUse = h
+ break
+ }
+ }
+ }
+
+ if hostToUse == nil {
+ if len(possibleHosts) != 0 {
+ log.Println("Failed to randomly choose a host, Dan loses")
+ }
+ return p.getRoundRobin()
+ }
+
+ if hostToUse.dead {
+ hostToUse.willRetryHost(p.maxRetryInterval)
+ }
+ return hostToUse.host
+}
+
+func (p *epsilonGreedyHostPool) markSuccess(hostR HostPoolResponse) {
+ // first do the base markSuccess - a little redundant with host lookup but cleaner than repeating logic
+ p.standardHostPool.markSuccess(hostR)
+ eHostR, ok := hostR.(*epsilonHostPoolResponse)
+ if !ok {
+ log.Printf("Incorrect type in eps markSuccess!") // TODO reflection to print out offending type
+ return
+ }
+ host := eHostR.host
+ duration := p.between(eHostR.started, eHostR.ended)
+
+ p.Lock()
+ defer p.Unlock()
+ h, ok := p.hosts[host]
+ if !ok {
+ log.Fatalf("host %s not in HostPool %v", host, p.Hosts())
+ }
+ h.epsilonCounts[h.epsilonIndex]++
+ h.epsilonValues[h.epsilonIndex] += int64(duration.Seconds() * 1000)
+}
+
+// --- timer: this just exists for testing
+
+type timer interface {
+ between(time.Time, time.Time) time.Duration
+}
+
+type realTimer struct{}
+
+func (rt *realTimer) between(start time.Time, end time.Time) time.Duration {
+ return end.Sub(start)
+}
diff --git a/Godeps/_workspace/src/github.com/hailocab/go-hostpool/epsilon_value_calculators.go b/Godeps/_workspace/src/github.com/hailocab/go-hostpool/epsilon_value_calculators.go
new file mode 100644
index 000000000..9bc3102a9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hailocab/go-hostpool/epsilon_value_calculators.go
@@ -0,0 +1,40 @@
+package hostpool
+
+// --- Value Calculators -----------------
+
+import (
+ "math"
+)
+
+// --- Definitions -----------------------
+
+// Structs implementing this interface are used to convert the average response time for a host
+// into a score that can be used to weight hosts in the epsilon greedy hostpool. Lower response
+// times should yield higher scores (we want to select the faster hosts more often) The default
+// LinearEpsilonValueCalculator just uses the reciprocal of the response time. In practice, any
+// decreasing function from the positive reals to the positive reals should work.
+type EpsilonValueCalculator interface {
+ CalcValueFromAvgResponseTime(float64) float64
+}
+
+type LinearEpsilonValueCalculator struct{}
+type LogEpsilonValueCalculator struct{ LinearEpsilonValueCalculator }
+type PolynomialEpsilonValueCalculator struct {
+ LinearEpsilonValueCalculator
+ Exp float64 // the exponent to which we will raise the value to reweight
+}
+
+// -------- Methods -----------------------
+
+func (c *LinearEpsilonValueCalculator) CalcValueFromAvgResponseTime(v float64) float64 {
+ return 1.0 / v
+}
+
+func (c *LogEpsilonValueCalculator) CalcValueFromAvgResponseTime(v float64) float64 {
+ // we need to add 1 to v so that this will be defined on all positive floats
+ return c.LinearEpsilonValueCalculator.CalcValueFromAvgResponseTime(math.Log(v + 1.0))
+}
+
+func (c *PolynomialEpsilonValueCalculator) CalcValueFromAvgResponseTime(v float64) float64 {
+ return c.LinearEpsilonValueCalculator.CalcValueFromAvgResponseTime(math.Pow(v, c.Exp))
+}
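+
+// Illustrative comparison (a sketch, not upstream code; the 250ms figure is an
+// assumed weighted-average response time chosen only for demonstration):
+//
+//   linear := (&LinearEpsilonValueCalculator{}).CalcValueFromAvgResponseTime(250)         // 1/250 = 0.004
+//   logv := (&LogEpsilonValueCalculator{}).CalcValueFromAvgResponseTime(250)              // 1/ln(251) ~= 0.181
+//   poly := (&PolynomialEpsilonValueCalculator{Exp: 2}).CalcValueFromAvgResponseTime(250) // 1/250^2 = 0.000016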
diff --git a/Godeps/_workspace/src/github.com/hailocab/go-hostpool/example_test.go b/Godeps/_workspace/src/github.com/hailocab/go-hostpool/example_test.go
new file mode 100644
index 000000000..88d0e558c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hailocab/go-hostpool/example_test.go
@@ -0,0 +1,13 @@
+package hostpool
+
+import (
+ "github.com/bitly/go-hostpool"
+)
+
+func ExampleNewEpsilonGreedy() {
+ hp := hostpool.NewEpsilonGreedy([]string{"a", "b"}, 0, &hostpool.LinearEpsilonValueCalculator{})
+ hostResponse := hp.Get()
+ hostname := hostResponse.Host()
+ _ = hostname // make a request with hostname here
+ var err error // the error from that request
+ hostResponse.Mark(err)
+}
diff --git a/Godeps/_workspace/src/github.com/hailocab/go-hostpool/host_entry.go b/Godeps/_workspace/src/github.com/hailocab/go-hostpool/host_entry.go
new file mode 100644
index 000000000..dcec9a0b7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hailocab/go-hostpool/host_entry.go
@@ -0,0 +1,62 @@
+package hostpool
+
+import (
+ "time"
+)
+
+// --- hostEntry - this is due to get upgraded
+
+type hostEntry struct {
+ host string
+ nextRetry time.Time
+ retryCount int16
+ retryDelay time.Duration
+ dead bool
+ epsilonCounts []int64
+ epsilonValues []int64
+ epsilonIndex int
+ epsilonValue float64
+ epsilonPercentage float64
+}
+
+func (h *hostEntry) canTryHost(now time.Time) bool {
+ if !h.dead {
+ return true
+ }
+ if h.nextRetry.Before(now) {
+ return true
+ }
+ return false
+}
+
+func (h *hostEntry) willRetryHost(maxRetryInterval time.Duration) {
+ h.retryCount += 1
+ newDelay := h.retryDelay * 2
+ if newDelay < maxRetryInterval {
+ h.retryDelay = newDelay
+ } else {
+ h.retryDelay = maxRetryInterval
+ }
+ h.nextRetry = time.Now().Add(h.retryDelay)
+}
+
+func (h *hostEntry) getWeightedAverageResponseTime() float64 {
+ var value float64
+ var lastValue float64
+
+ // start at 1 so we start with the oldest entry
+ for i := 1; i <= epsilonBuckets; i += 1 {
+ pos := (h.epsilonIndex + i) % epsilonBuckets
+ bucketCount := h.epsilonCounts[pos]
+ // weight each bucket by recency: the oldest bucket counts 1/epsilonBuckets, the most recent counts fully
+ weight := float64(i) / float64(epsilonBuckets)
+ if bucketCount > 0 {
+ currentValue := float64(h.epsilonValues[pos]) / float64(bucketCount)
+ value += currentValue * weight
+ lastValue = currentValue
+ } else {
+ value += lastValue * weight
+ }
+ }
+ return value
+}
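+
+// Illustrative note on the weighting above (assumed numbers): with
+// epsilonBuckets = 120, the oldest populated bucket is weighted 1/120 and the
+// most recent 120/120, so recent response times dominate the average.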
diff --git a/Godeps/_workspace/src/github.com/hailocab/go-hostpool/hostpool.go b/Godeps/_workspace/src/github.com/hailocab/go-hostpool/hostpool.go
new file mode 100644
index 000000000..93ed1c7f1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hailocab/go-hostpool/hostpool.go
@@ -0,0 +1,213 @@
+// A Go package to intelligently and flexibly pool among multiple hosts from your Go application.
+// Host selection can operate in round robin or epsilon greedy mode, and unresponsive hosts are
+// avoided. A good overview of Epsilon Greedy is here http://stevehanov.ca/blog/index.php?id=132
+package hostpool
+
+import (
+ "log"
+ "sync"
+ "time"
+)
+
+// Version returns the current version of the hostpool package
+func Version() string {
+ return "0.1"
+}
+
+// --- Response interfaces and structs ----
+
+// This interface represents the response from HostPool. You can retrieve the
+// hostname by calling Host(), and after making a request to the host you should
+// call Mark with any error encountered, which will inform the HostPool issuing
+// the HostPoolResponse of what happened to the request and allow it to update.
+type HostPoolResponse interface {
+ Host() string
+ Mark(error)
+ hostPool() HostPool
+}
+
+type standardHostPoolResponse struct {
+ host string
+ sync.Once
+ pool HostPool
+}
+
+// --- HostPool structs and interfaces ----
+
+// This is the main HostPool interface. Structs implementing this interface
+// allow you to Get a HostPoolResponse (which includes a hostname to use),
+// get the list of all Hosts, and use ResetAll to reset state.
+type HostPool interface {
+ Get() HostPoolResponse
+ // keep the marks separate so we can override independently
+ markSuccess(HostPoolResponse)
+ markFailed(HostPoolResponse)
+
+ ResetAll()
+ Hosts() []string
+ SetHosts([]string)
+}
+
+type standardHostPool struct {
+ sync.RWMutex
+ hosts map[string]*hostEntry
+ hostList []*hostEntry
+ initialRetryDelay time.Duration
+ maxRetryInterval time.Duration
+ nextHostIndex int
+}
+
+// ------ constants -------------------
+
+const epsilonBuckets = 120
+const epsilonDecay = 0.90 // decay the exploration rate
+const minEpsilon = 0.01 // explore one percent of the time
+const initialEpsilon = 0.3
+const defaultDecayDuration = time.Duration(5) * time.Minute
+
+// Construct a basic HostPool using the hostnames provided
+func New(hosts []string) HostPool {
+ p := &standardHostPool{
+ hosts: make(map[string]*hostEntry, len(hosts)),
+ hostList: make([]*hostEntry, len(hosts)),
+ initialRetryDelay: time.Duration(30) * time.Second,
+ maxRetryInterval: time.Duration(900) * time.Second,
+ }
+
+ for i, h := range hosts {
+ e := &hostEntry{
+ host: h,
+ retryDelay: p.initialRetryDelay,
+ }
+ p.hosts[h] = e
+ p.hostList[i] = e
+ }
+
+ return p
+}
+
+func (r *standardHostPoolResponse) Host() string {
+ return r.host
+}
+
+func (r *standardHostPoolResponse) hostPool() HostPool {
+ return r.pool
+}
+
+func (r *standardHostPoolResponse) Mark(err error) {
+ r.Do(func() {
+ doMark(err, r)
+ })
+}
+
+func doMark(err error, r HostPoolResponse) {
+ if err == nil {
+ r.hostPool().markSuccess(r)
+ } else {
+ r.hostPool().markFailed(r)
+ }
+}
+
+// return an entry from the HostPool
+func (p *standardHostPool) Get() HostPoolResponse {
+ p.Lock()
+ defer p.Unlock()
+ host := p.getRoundRobin()
+ return &standardHostPoolResponse{host: host, pool: p}
+}
+
+func (p *standardHostPool) getRoundRobin() string {
+ now := time.Now()
+ hostCount := len(p.hostList)
+ for i := range p.hostList {
+ // iterate in sequence from where we last left off
+ currentIndex := (i + p.nextHostIndex) % hostCount
+
+ h := p.hostList[currentIndex]
+ if !h.dead {
+ p.nextHostIndex = currentIndex + 1
+ return h.host
+ }
+ if h.nextRetry.Before(now) {
+ h.willRetryHost(p.maxRetryInterval)
+ p.nextHostIndex = currentIndex + 1
+ return h.host
+ }
+ }
+
+ // all hosts are down. re-add them
+ p.doResetAll()
+ p.nextHostIndex = 0
+ return p.hostList[0].host
+}
+
+func (p *standardHostPool) ResetAll() {
+ p.Lock()
+ defer p.Unlock()
+ p.doResetAll()
+}
+
+func (p *standardHostPool) SetHosts(hosts []string) {
+ p.Lock()
+ defer p.Unlock()
+ p.setHosts(hosts)
+}
+
+func (p *standardHostPool) setHosts(hosts []string) {
+ p.hosts = make(map[string]*hostEntry, len(hosts))
+ p.hostList = make([]*hostEntry, len(hosts))
+
+ for i, h := range hosts {
+ e := &hostEntry{
+ host: h,
+ retryDelay: p.initialRetryDelay,
+ }
+ p.hosts[h] = e
+ p.hostList[i] = e
+ }
+}
+
+// this actually performs the logic to reset,
+// and should only be called when the lock has
+// already been acquired
+func (p *standardHostPool) doResetAll() {
+ for _, h := range p.hosts {
+ h.dead = false
+ }
+}
+
+func (p *standardHostPool) markSuccess(hostR HostPoolResponse) {
+ host := hostR.Host()
+ p.Lock()
+ defer p.Unlock()
+
+ h, ok := p.hosts[host]
+ if !ok {
+ log.Fatalf("host %s not in HostPool %v", host, p.Hosts())
+ }
+ h.dead = false
+}
+
+func (p *standardHostPool) markFailed(hostR HostPoolResponse) {
+ host := hostR.Host()
+ p.Lock()
+ defer p.Unlock()
+ h, ok := p.hosts[host]
+ if !ok {
+ log.Fatalf("host %s not in HostPool %v", host, p.Hosts())
+ }
+ if !h.dead {
+ h.dead = true
+ h.retryCount = 0
+ h.retryDelay = p.initialRetryDelay
+ h.nextRetry = time.Now().Add(h.retryDelay)
+ }
+}
+
+func (p *standardHostPool) Hosts() []string {
+ hosts := make([]string, 0, len(p.hosts))
+ for host := range p.hosts {
+ hosts = append(hosts, host)
+ }
+ return hosts
+}
diff --git a/Godeps/_workspace/src/github.com/hailocab/go-hostpool/hostpool_test.go b/Godeps/_workspace/src/github.com/hailocab/go-hostpool/hostpool_test.go
new file mode 100644
index 000000000..e974aa74c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hailocab/go-hostpool/hostpool_test.go
@@ -0,0 +1,145 @@
+package hostpool
+
+import (
+ "errors"
+ "github.com/bmizerany/assert"
+ "io/ioutil"
+ "log"
+ "math/rand"
+ "os"
+ "testing"
+ "time"
+)
+
+func TestHostPool(t *testing.T) {
+ log.SetOutput(ioutil.Discard)
+ defer log.SetOutput(os.Stdout)
+
+ dummyErr := errors.New("Dummy Error")
+
+ p := New([]string{"a", "b", "c"})
+ assert.Equal(t, p.Get().Host(), "a")
+ assert.Equal(t, p.Get().Host(), "b")
+ assert.Equal(t, p.Get().Host(), "c")
+ respA := p.Get()
+ assert.Equal(t, respA.Host(), "a")
+
+ respA.Mark(dummyErr)
+ respB := p.Get()
+ respB.Mark(dummyErr)
+ respC := p.Get()
+ assert.Equal(t, respC.Host(), "c")
+ respC.Mark(nil)
+ // get again, and verify that it's still c
+ assert.Equal(t, p.Get().Host(), "c")
+ // now try to mark b as success; should fail because already marked
+ respB.Mark(nil)
+ assert.Equal(t, p.Get().Host(), "c") // would be b if it were not dead
+ // now restore a
+ respA = &standardHostPoolResponse{host: "a", pool: p}
+ respA.Mark(nil)
+ assert.Equal(t, p.Get().Host(), "a")
+ assert.Equal(t, p.Get().Host(), "c")
+
+ // ensure that we get *something* back when all hosts fail
+ for _, host := range []string{"a", "b", "c"} {
+ response := &standardHostPoolResponse{host: host, pool: p}
+ response.Mark(dummyErr)
+ }
+ resp := p.Get()
+ assert.NotEqual(t, resp, nil)
+}
+
+type mockTimer struct {
+ t int // the time it will always return
+}
+
+func (t *mockTimer) between(start time.Time, end time.Time) time.Duration {
+ return time.Duration(t.t) * time.Millisecond
+}
+
+func TestEpsilonGreedy(t *testing.T) {
+ log.SetOutput(ioutil.Discard)
+ defer log.SetOutput(os.Stdout)
+
+ rand.Seed(10)
+
+ iterations := 12000
+ p := NewEpsilonGreedy([]string{"a", "b"}, 0, &LinearEpsilonValueCalculator{}).(*epsilonGreedyHostPool)
+
+ timings := make(map[string]int64)
+ timings["a"] = 200
+ timings["b"] = 300
+
+ hitCounts := make(map[string]int)
+ hitCounts["a"] = 0
+ hitCounts["b"] = 0
+
+ log.Printf("starting first run (a, b)")
+
+ for i := 0; i < iterations; i += 1 {
+ if i != 0 && i%100 == 0 {
+ p.performEpsilonGreedyDecay()
+ }
+ hostR := p.Get()
+ host := hostR.Host()
+ hitCounts[host]++
+ timing := timings[host]
+ p.timer = &mockTimer{t: int(timing)}
+ hostR.Mark(nil)
+ }
+
+ for host := range hitCounts {
+ log.Printf("host %s hit %d times (%0.2f percent)", host, hitCounts[host], (float64(hitCounts[host])/float64(iterations))*100.0)
+ }
+
+ assert.Equal(t, hitCounts["a"] > hitCounts["b"], true)
+
+ hitCounts["a"] = 0
+ hitCounts["b"] = 0
+ log.Printf("starting second run (b, a)")
+ timings["a"] = 500
+ timings["b"] = 100
+
+ for i := 0; i < iterations; i += 1 {
+ if i != 0 && i%100 == 0 {
+ p.performEpsilonGreedyDecay()
+ }
+ hostR := p.Get()
+ host := hostR.Host()
+ hitCounts[host]++
+ timing := timings[host]
+ p.timer = &mockTimer{t: int(timing)}
+ hostR.Mark(nil)
+ }
+
+ for host := range hitCounts {
+ log.Printf("host %s hit %d times (%0.2f percent)", host, hitCounts[host], (float64(hitCounts[host])/float64(iterations))*100.0)
+ }
+
+ assert.Equal(t, hitCounts["b"] > hitCounts["a"], true)
+}
+
+func BenchmarkEpsilonGreedy(b *testing.B) {
+ b.StopTimer()
+
+ // Make up some response times
+ zipfDist := rand.NewZipf(rand.New(rand.NewSource(0)), 1.1, 5, 5000)
+ timings := make([]uint64, b.N)
+ for i := 0; i < b.N; i++ {
+ timings[i] = zipfDist.Uint64()
+ }
+
+ // Make the hostpool with a few hosts
+ p := NewEpsilonGreedy([]string{"a", "b"}, 0, &LinearEpsilonValueCalculator{}).(*epsilonGreedyHostPool)
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ if i != 0 && i%100 == 0 {
+ p.performEpsilonGreedyDecay()
+ }
+ hostR := p.Get()
+ p.timer = &mockTimer{t: int(timings[i])}
+ hostR.Mark(nil)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md b/Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md
index bce2ebb51..3b7c89ddb 100644
--- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md
@@ -17,13 +17,18 @@ Usage
Below is an example of using the Consul client:
```go
-// Get a new client, with KV endpoints
-client, _ := api.NewClient(api.DefaultConfig())
+// Get a new client
+client, err := api.NewClient(api.DefaultConfig())
+if err != nil {
+ panic(err)
+}
+
+// Get a handle to the KV API
kv := client.KV()
// PUT a new KV pair
p := &api.KVPair{Key: "foo", Value: []byte("test")}
-_, err := kv.Put(p, nil)
+_, err = kv.Put(p, nil)
if err != nil {
panic(err)
}
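+
+// Lookup the pair we just wrote (a short follow-on sketch; kv.Get returns the
+// KVPair, query metadata, and an error)
+pair, _, err := kv.Get("foo", nil)
+if err != nil {
+panic(err)
+}
+fmt.Printf("KV: %v %s\n", pair.Key, pair.Value)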
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go
new file mode 100644
index 000000000..2a5207a6e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go
@@ -0,0 +1,128 @@
+package api
+
+import (
+ "testing"
+)
+
+func TestACL_CreateDestroy(t *testing.T) {
+ t.Parallel()
+ c, s := makeACLClient(t)
+ defer s.Stop()
+
+ acl := c.ACL()
+
+ ae := ACLEntry{
+ Name: "API test",
+ Type: ACLClientType,
+ Rules: `key "" { policy = "deny" }`,
+ }
+
+ id, wm, err := acl.Create(&ae, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if wm.RequestTime == 0 {
+ t.Fatalf("bad: %v", wm)
+ }
+
+ if id == "" {
+ t.Fatalf("invalid: %v", id)
+ }
+
+ ae2, _, err := acl.Info(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if ae2.Name != ae.Name || ae2.Type != ae.Type || ae2.Rules != ae.Rules {
+ t.Fatalf("Bad: %#v", ae2)
+ }
+
+ wm, err = acl.Destroy(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if wm.RequestTime == 0 {
+ t.Fatalf("bad: %v", wm)
+ }
+}
+
+func TestACL_CloneDestroy(t *testing.T) {
+ t.Parallel()
+ c, s := makeACLClient(t)
+ defer s.Stop()
+
+ acl := c.ACL()
+
+ id, wm, err := acl.Clone(c.config.Token, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if wm.RequestTime == 0 {
+ t.Fatalf("bad: %v", wm)
+ }
+
+ if id == "" {
+ t.Fatalf("invalid: %v", id)
+ }
+
+ wm, err = acl.Destroy(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if wm.RequestTime == 0 {
+ t.Fatalf("bad: %v", wm)
+ }
+}
+
+func TestACL_Info(t *testing.T) {
+ t.Parallel()
+ c, s := makeACLClient(t)
+ defer s.Stop()
+
+ acl := c.ACL()
+
+ ae, qm, err := acl.Info(c.config.Token, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if qm.LastIndex == 0 {
+ t.Fatalf("bad: %v", qm)
+ }
+ if !qm.KnownLeader {
+ t.Fatalf("bad: %v", qm)
+ }
+
+ if ae == nil || ae.ID != c.config.Token || ae.Type != ACLManagementType {
+ t.Fatalf("bad: %#v", ae)
+ }
+}
+
+func TestACL_List(t *testing.T) {
+ t.Parallel()
+ c, s := makeACLClient(t)
+ defer s.Stop()
+
+ acl := c.ACL()
+
+ acls, qm, err := acl.List(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if len(acls) < 2 {
+ t.Fatalf("bad: %v", acls)
+ }
+
+ if qm.LastIndex == 0 {
+ t.Fatalf("bad: %v", qm)
+ }
+ if !qm.KnownLeader {
+ t.Fatalf("bad: %v", qm)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go
new file mode 100644
index 000000000..358c12a6c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go
@@ -0,0 +1,524 @@
+package api
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestAgent_Self(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ agent := c.Agent()
+
+ info, err := agent.Self()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ name := info["Config"]["NodeName"]
+ if name == "" {
+ t.Fatalf("bad: %v", info)
+ }
+}
+
+func TestAgent_Members(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ agent := c.Agent()
+
+ members, err := agent.Members(false)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if len(members) != 1 {
+ t.Fatalf("bad: %v", members)
+ }
+}
+
+func TestAgent_Services(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ agent := c.Agent()
+
+ reg := &AgentServiceRegistration{
+ Name: "foo",
+ Tags: []string{"bar", "baz"},
+ Port: 8000,
+ Check: &AgentServiceCheck{
+ TTL: "15s",
+ },
+ }
+ if err := agent.ServiceRegister(reg); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ services, err := agent.Services()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if _, ok := services["foo"]; !ok {
+ t.Fatalf("missing service: %v", services)
+ }
+ checks, err := agent.Checks()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ chk, ok := checks["service:foo"]
+ if !ok {
+ t.Fatalf("missing check: %v", checks)
+ }
+
+ // Checks should default to critical
+ if chk.Status != "critical" {
+ t.Fatalf("Bad: %#v", chk)
+ }
+
+ if err := agent.ServiceDeregister("foo"); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestAgent_Services_CheckPassing(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ agent := c.Agent()
+ reg := &AgentServiceRegistration{
+ Name: "foo",
+ Tags: []string{"bar", "baz"},
+ Port: 8000,
+ Check: &AgentServiceCheck{
+ TTL: "15s",
+ Status: "passing",
+ },
+ }
+ if err := agent.ServiceRegister(reg); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ services, err := agent.Services()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if _, ok := services["foo"]; !ok {
+ t.Fatalf("missing service: %v", services)
+ }
+
+ checks, err := agent.Checks()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ chk, ok := checks["service:foo"]
+ if !ok {
+ t.Fatalf("missing check: %v", checks)
+ }
+
+ if chk.Status != "passing" {
+ t.Fatalf("Bad: %#v", chk)
+ }
+ if err := agent.ServiceDeregister("foo"); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestAgent_Services_CheckBadStatus(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ agent := c.Agent()
+ reg := &AgentServiceRegistration{
+ Name: "foo",
+ Tags: []string{"bar", "baz"},
+ Port: 8000,
+ Check: &AgentServiceCheck{
+ TTL: "15s",
+ Status: "fluffy",
+ },
+ }
+ if err := agent.ServiceRegister(reg); err == nil {
+ t.Fatalf("bad status accepted")
+ }
+}
+
+func TestAgent_ServiceAddress(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ agent := c.Agent()
+
+ reg1 := &AgentServiceRegistration{
+ Name: "foo1",
+ Port: 8000,
+ Address: "192.168.0.42",
+ }
+ reg2 := &AgentServiceRegistration{
+ Name: "foo2",
+ Port: 8000,
+ }
+ if err := agent.ServiceRegister(reg1); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if err := agent.ServiceRegister(reg2); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ services, err := agent.Services()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if _, ok := services["foo1"]; !ok {
+ t.Fatalf("missing service: %v", services)
+ }
+ if _, ok := services["foo2"]; !ok {
+ t.Fatalf("missing service: %v", services)
+ }
+
+ if services["foo1"].Address != "192.168.0.42" {
+ t.Fatalf("missing Address field in service foo1: %v", services)
+ }
+ if services["foo2"].Address != "" {
+ t.Fatalf("missing Address field in service foo2: %v", services)
+ }
+
+ if err := agent.ServiceDeregister("foo"); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestAgent_Services_MultipleChecks(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ agent := c.Agent()
+
+ reg := &AgentServiceRegistration{
+ Name: "foo",
+ Tags: []string{"bar", "baz"},
+ Port: 8000,
+ Checks: AgentServiceChecks{
+ &AgentServiceCheck{
+ TTL: "15s",
+ },
+ &AgentServiceCheck{
+ TTL: "30s",
+ },
+ },
+ }
+ if err := agent.ServiceRegister(reg); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ services, err := agent.Services()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if _, ok := services["foo"]; !ok {
+ t.Fatalf("missing service: %v", services)
+ }
+
+ checks, err := agent.Checks()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if _, ok := checks["service:foo:1"]; !ok {
+ t.Fatalf("missing check: %v", checks)
+ }
+ if _, ok := checks["service:foo:2"]; !ok {
+ t.Fatalf("missing check: %v", checks)
+ }
+}
+
+func TestAgent_SetTTLStatus(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ agent := c.Agent()
+
+ reg := &AgentServiceRegistration{
+ Name: "foo",
+ Check: &AgentServiceCheck{
+ TTL: "15s",
+ },
+ }
+ if err := agent.ServiceRegister(reg); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if err := agent.WarnTTL("service:foo", "test"); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ checks, err := agent.Checks()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ chk, ok := checks["service:foo"]
+ if !ok {
+ t.Fatalf("missing check: %v", checks)
+ }
+ if chk.Status != "warning" {
+ t.Fatalf("Bad: %#v", chk)
+ }
+ if chk.Output != "test" {
+ t.Fatalf("Bad: %#v", chk)
+ }
+
+ if err := agent.ServiceDeregister("foo"); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestAgent_Checks(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ agent := c.Agent()
+
+ reg := &AgentCheckRegistration{
+ Name: "foo",
+ }
+ reg.TTL = "15s"
+ if err := agent.CheckRegister(reg); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ checks, err := agent.Checks()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ chk, ok := checks["foo"]
+ if !ok {
+ t.Fatalf("missing check: %v", checks)
+ }
+ if chk.Status != "critical" {
+ t.Fatalf("check not critical: %v", chk)
+ }
+
+ if err := agent.CheckDeregister("foo"); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestAgent_CheckStartPassing(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ agent := c.Agent()
+
+ reg := &AgentCheckRegistration{
+ Name: "foo",
+ AgentServiceCheck: AgentServiceCheck{
+ Status: "passing",
+ },
+ }
+ reg.TTL = "15s"
+ if err := agent.CheckRegister(reg); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ checks, err := agent.Checks()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ chk, ok := checks["foo"]
+ if !ok {
+ t.Fatalf("missing check: %v", checks)
+ }
+ if chk.Status != "passing" {
+ t.Fatalf("check not passing: %v", chk)
+ }
+
+ if err := agent.CheckDeregister("foo"); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestAgent_Checks_serviceBound(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ agent := c.Agent()
+
+ // First register a service
+ serviceReg := &AgentServiceRegistration{
+ Name: "redis",
+ }
+ if err := agent.ServiceRegister(serviceReg); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Register a check bound to the service
+ reg := &AgentCheckRegistration{
+ Name: "redischeck",
+ ServiceID: "redis",
+ }
+ reg.TTL = "15s"
+ if err := agent.CheckRegister(reg); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ checks, err := agent.Checks()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ check, ok := checks["redischeck"]
+ if !ok {
+ t.Fatalf("missing check: %v", checks)
+ }
+ if check.ServiceID != "redis" {
+ t.Fatalf("missing service association for check: %v", check)
+ }
+}
+
+func TestAgent_Join(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ agent := c.Agent()
+
+ info, err := agent.Self()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Join ourself
+ addr := info["Config"]["AdvertiseAddr"].(string)
+ err = agent.Join(addr, false)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestAgent_ForceLeave(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ agent := c.Agent()
+
+ // Eject somebody
+ err := agent.ForceLeave("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestServiceMaintenance(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ agent := c.Agent()
+
+ // First register a service
+ serviceReg := &AgentServiceRegistration{
+ Name: "redis",
+ }
+ if err := agent.ServiceRegister(serviceReg); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Enable maintenance mode
+ if err := agent.EnableServiceMaintenance("redis", "broken"); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Ensure a critical check was added
+ checks, err := agent.Checks()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ found := false
+ for _, check := range checks {
+ if strings.Contains(check.CheckID, "maintenance") {
+ found = true
+ if check.Status != "critical" || check.Notes != "broken" {
+ t.Fatalf("bad: %#v", checks)
+ }
+ }
+ }
+ if !found {
+ t.Fatalf("bad: %#v", checks)
+ }
+
+ // Disable maintenance mode
+ if err := agent.DisableServiceMaintenance("redis"); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Ensure the critical health check was removed
+ checks, err = agent.Checks()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ for _, check := range checks {
+ if strings.Contains(check.CheckID, "maintenance") {
+ t.Fatalf("should have removed health check")
+ }
+ }
+}
+
+func TestNodeMaintenance(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ agent := c.Agent()
+
+ // Enable maintenance mode
+ if err := agent.EnableNodeMaintenance("broken"); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Check that a critical check was added
+ checks, err := agent.Checks()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ found := false
+ for _, check := range checks {
+ if strings.Contains(check.CheckID, "maintenance") {
+ found = true
+ if check.Status != "critical" || check.Notes != "broken" {
+ t.Fatalf("bad: %#v", checks)
+ }
+ }
+ }
+ if !found {
+ t.Fatalf("bad: %#v", checks)
+ }
+
+ // Disable maintenance mode
+ if err := agent.DisableNodeMaintenance(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Ensure the check was removed
+ checks, err = agent.Checks()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ for _, check := range checks {
+ if strings.Contains(check.CheckID, "maintenance") {
+ t.Fatalf("should have removed health check")
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go
index 7123c8a68..6736aecd2 100644
--- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go
@@ -44,6 +44,12 @@ type QueryOptions struct {
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
+
+ // Near is used to provide a node name that will sort the results
+ // in ascending order based on the estimated round trip time from
+ // that node. Setting this to "_agent" will use the agent's node
+ // for the sort.
+ Near string
}
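+
+// Illustrative use of Near (a sketch, not part of the upstream file): sort health
+// query results by estimated round trip time from the local agent, e.g.
+//
+//   entries, _, err := client.Health().Service("redis", "", true, &QueryOptions{Near: "_agent"})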
// WriteOptions are used to parameterize a write
@@ -250,6 +256,9 @@ func (r *request) setQueryOptions(q *QueryOptions) {
if q.Token != "" {
r.params.Set("token", q.Token)
}
+ if q.Near != "" {
+ r.params.Set("near", q.Near)
+ }
}
// durToMsec converts a duration to a millisecond specified string
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go
new file mode 100644
index 000000000..314a89b14
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go
@@ -0,0 +1,256 @@
+package api
+
+import (
+ crand "crypto/rand"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/consul/testutil"
+)
+
+type configCallback func(c *Config)
+
+func makeClient(t *testing.T) (*Client, *testutil.TestServer) {
+ return makeClientWithConfig(t, nil, nil)
+}
+
+func makeACLClient(t *testing.T) (*Client, *testutil.TestServer) {
+ return makeClientWithConfig(t, func(clientConfig *Config) {
+ clientConfig.Token = "root"
+ }, func(serverConfig *testutil.TestServerConfig) {
+ serverConfig.ACLMasterToken = "root"
+ serverConfig.ACLDatacenter = "dc1"
+ serverConfig.ACLDefaultPolicy = "deny"
+ })
+}
+
+func makeClientWithConfig(
+ t *testing.T,
+ cb1 configCallback,
+ cb2 testutil.ServerConfigCallback) (*Client, *testutil.TestServer) {
+
+ // Make client config
+ conf := DefaultConfig()
+ if cb1 != nil {
+ cb1(conf)
+ }
+
+ // Create server
+ server := testutil.NewTestServerConfig(t, cb2)
+ conf.Address = server.HTTPAddr
+
+ // Create client
+ client, err := NewClient(conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ return client, server
+}
+
+func testKey() string {
+ buf := make([]byte, 16)
+ if _, err := crand.Read(buf); err != nil {
+ panic(fmt.Errorf("Failed to read random bytes: %v", err))
+ }
+
+ return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
+ buf[0:4],
+ buf[4:6],
+ buf[6:8],
+ buf[8:10],
+ buf[10:16])
+}
+
+func TestDefaultConfig_env(t *testing.T) {
+ t.Parallel()
+ addr := "1.2.3.4:5678"
+ token := "abcd1234"
+ auth := "username:password"
+
+ os.Setenv("CONSUL_HTTP_ADDR", addr)
+ defer os.Setenv("CONSUL_HTTP_ADDR", "")
+ os.Setenv("CONSUL_HTTP_TOKEN", token)
+ defer os.Setenv("CONSUL_HTTP_TOKEN", "")
+ os.Setenv("CONSUL_HTTP_AUTH", auth)
+ defer os.Setenv("CONSUL_HTTP_AUTH", "")
+ os.Setenv("CONSUL_HTTP_SSL", "1")
+ defer os.Setenv("CONSUL_HTTP_SSL", "")
+ os.Setenv("CONSUL_HTTP_SSL_VERIFY", "0")
+ defer os.Setenv("CONSUL_HTTP_SSL_VERIFY", "")
+
+ config := DefaultConfig()
+
+ if config.Address != addr {
+ t.Errorf("expected %q to be %q", config.Address, addr)
+ }
+
+ if config.Token != token {
+ t.Errorf("expected %q to be %q", config.Token, token)
+ }
+
+ if config.HttpAuth == nil {
+ t.Fatalf("expected HttpAuth to be enabled")
+ }
+ if config.HttpAuth.Username != "username" {
+ t.Errorf("expected %q to be %q", config.HttpAuth.Username, "username")
+ }
+ if config.HttpAuth.Password != "password" {
+ t.Errorf("expected %q to be %q", config.HttpAuth.Password, "password")
+ }
+
+ if config.Scheme != "https" {
+ t.Errorf("expected %q to be %q", config.Scheme, "https")
+ }
+
+ if !config.HttpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify {
+ t.Errorf("expected SSL verification to be off")
+ }
+}
+
+func TestSetQueryOptions(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ r := c.newRequest("GET", "/v1/kv/foo")
+ q := &QueryOptions{
+ Datacenter: "foo",
+ AllowStale: true,
+ RequireConsistent: true,
+ WaitIndex: 1000,
+ WaitTime: 100 * time.Second,
+ Token: "12345",
+ Near: "nodex",
+ }
+ r.setQueryOptions(q)
+
+ if r.params.Get("dc") != "foo" {
+ t.Fatalf("bad: %v", r.params)
+ }
+ if _, ok := r.params["stale"]; !ok {
+ t.Fatalf("bad: %v", r.params)
+ }
+ if _, ok := r.params["consistent"]; !ok {
+ t.Fatalf("bad: %v", r.params)
+ }
+ if r.params.Get("index") != "1000" {
+ t.Fatalf("bad: %v", r.params)
+ }
+ if r.params.Get("wait") != "100000ms" {
+ t.Fatalf("bad: %v", r.params)
+ }
+ if r.params.Get("token") != "12345" {
+ t.Fatalf("bad: %v", r.params)
+ }
+ if r.params.Get("near") != "nodex" {
+ t.Fatalf("bad: %v", r.params)
+ }
+}
+
+func TestSetWriteOptions(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ r := c.newRequest("GET", "/v1/kv/foo")
+ q := &WriteOptions{
+ Datacenter: "foo",
+ Token: "23456",
+ }
+ r.setWriteOptions(q)
+
+ if r.params.Get("dc") != "foo" {
+ t.Fatalf("bad: %v", r.params)
+ }
+ if r.params.Get("token") != "23456" {
+ t.Fatalf("bad: %v", r.params)
+ }
+}
+
+func TestRequestToHTTP(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ r := c.newRequest("DELETE", "/v1/kv/foo")
+ q := &QueryOptions{
+ Datacenter: "foo",
+ }
+ r.setQueryOptions(q)
+ req, err := r.toHTTP()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if req.Method != "DELETE" {
+ t.Fatalf("bad: %v", req)
+ }
+ if req.URL.RequestURI() != "/v1/kv/foo?dc=foo" {
+ t.Fatalf("bad: %v", req)
+ }
+}
+
+func TestParseQueryMeta(t *testing.T) {
+ t.Parallel()
+ resp := &http.Response{
+ Header: make(map[string][]string),
+ }
+ resp.Header.Set("X-Consul-Index", "12345")
+ resp.Header.Set("X-Consul-LastContact", "80")
+ resp.Header.Set("X-Consul-KnownLeader", "true")
+
+ qm := &QueryMeta{}
+ if err := parseQueryMeta(resp, qm); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if qm.LastIndex != 12345 {
+ t.Fatalf("Bad: %v", qm)
+ }
+ if qm.LastContact != 80*time.Millisecond {
+ t.Fatalf("Bad: %v", qm)
+ }
+ if !qm.KnownLeader {
+ t.Fatalf("Bad: %v", qm)
+ }
+}
+
+func TestAPI_UnixSocket(t *testing.T) {
+ t.Parallel()
+ if runtime.GOOS == "windows" {
+ t.SkipNow()
+ }
+
+ tempDir, err := ioutil.TempDir("", "consul")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ defer os.RemoveAll(tempDir)
+ socket := filepath.Join(tempDir, "test.sock")
+
+ c, s := makeClientWithConfig(t, func(c *Config) {
+ c.Address = "unix://" + socket
+ }, func(c *testutil.TestServerConfig) {
+ c.Addresses = &testutil.TestAddressConfig{
+ HTTP: "unix://" + socket,
+ }
+ })
+ defer s.Stop()
+
+ agent := c.Agent()
+
+ info, err := agent.Self()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if info["Config"]["NodeName"] == "" {
+ t.Fatalf("bad: %v", info)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go
new file mode 100644
index 000000000..bb8be25b0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go
@@ -0,0 +1,279 @@
+package api
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/consul/testutil"
+)
+
+func TestCatalog_Datacenters(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ catalog := c.Catalog()
+
+ testutil.WaitForResult(func() (bool, error) {
+ datacenters, err := catalog.Datacenters()
+ if err != nil {
+ return false, err
+ }
+
+ if len(datacenters) == 0 {
+ return false, fmt.Errorf("Bad: %v", datacenters)
+ }
+
+ return true, nil
+ }, func(err error) {
+ t.Fatalf("err: %s", err)
+ })
+}
+
+func TestCatalog_Nodes(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ catalog := c.Catalog()
+
+ testutil.WaitForResult(func() (bool, error) {
+ nodes, meta, err := catalog.Nodes(nil)
+ if err != nil {
+ return false, err
+ }
+
+ if meta.LastIndex == 0 {
+ return false, fmt.Errorf("Bad: %v", meta)
+ }
+
+ if len(nodes) == 0 {
+ return false, fmt.Errorf("Bad: %v", nodes)
+ }
+
+ return true, nil
+ }, func(err error) {
+ t.Fatalf("err: %s", err)
+ })
+}
+
+func TestCatalog_Services(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ catalog := c.Catalog()
+
+ testutil.WaitForResult(func() (bool, error) {
+ services, meta, err := catalog.Services(nil)
+ if err != nil {
+ return false, err
+ }
+
+ if meta.LastIndex == 0 {
+ return false, fmt.Errorf("Bad: %v", meta)
+ }
+
+ if len(services) == 0 {
+ return false, fmt.Errorf("Bad: %v", services)
+ }
+
+ return true, nil
+ }, func(err error) {
+ t.Fatalf("err: %s", err)
+ })
+}
+
+func TestCatalog_Service(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ catalog := c.Catalog()
+
+ testutil.WaitForResult(func() (bool, error) {
+ services, meta, err := catalog.Service("consul", "", nil)
+ if err != nil {
+ return false, err
+ }
+
+ if meta.LastIndex == 0 {
+ return false, fmt.Errorf("Bad: %v", meta)
+ }
+
+ if len(services) == 0 {
+ return false, fmt.Errorf("Bad: %v", services)
+ }
+
+ return true, nil
+ }, func(err error) {
+ t.Fatalf("err: %s", err)
+ })
+}
+
+func TestCatalog_Node(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ catalog := c.Catalog()
+ name, _ := c.Agent().NodeName()
+
+ testutil.WaitForResult(func() (bool, error) {
+ info, meta, err := catalog.Node(name, nil)
+ if err != nil {
+ return false, err
+ }
+
+ if meta.LastIndex == 0 {
+ return false, fmt.Errorf("Bad: %v", meta)
+ }
+ if len(info.Services) == 0 {
+ return false, fmt.Errorf("Bad: %v", info)
+ }
+
+ return true, nil
+ }, func(err error) {
+ t.Fatalf("err: %s", err)
+ })
+}
+
+func TestCatalog_Registration(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ catalog := c.Catalog()
+
+ service := &AgentService{
+ ID: "redis1",
+ Service: "redis",
+ Tags: []string{"master", "v1"},
+ Port: 8000,
+ }
+
+ check := &AgentCheck{
+ Node: "foobar",
+ CheckID: "service:redis1",
+ Name: "Redis health check",
+ Notes: "Script based health check",
+ Status: "passing",
+ ServiceID: "redis1",
+ }
+
+ reg := &CatalogRegistration{
+ Datacenter: "dc1",
+ Node: "foobar",
+ Address: "192.168.10.10",
+ Service: service,
+ Check: check,
+ }
+
+ testutil.WaitForResult(func() (bool, error) {
+ if _, err := catalog.Register(reg, nil); err != nil {
+ return false, err
+ }
+
+ node, _, err := catalog.Node("foobar", nil)
+ if err != nil {
+ return false, err
+ }
+
+ if _, ok := node.Services["redis1"]; !ok {
+ return false, fmt.Errorf("missing service: redis1")
+ }
+
+ health, _, err := c.Health().Node("foobar", nil)
+ if err != nil {
+ return false, err
+ }
+
+ if health[0].CheckID != "service:redis1" {
+ return false, fmt.Errorf("missing checkid service:redis1")
+ }
+
+ return true, nil
+ }, func(err error) {
+ t.Fatalf("err: %s", err)
+ })
+
+ // Test catalog deregistration of the previously registered service
+ dereg := &CatalogDeregistration{
+ Datacenter: "dc1",
+ Node: "foobar",
+ Address: "192.168.10.10",
+ ServiceID: "redis1",
+ }
+
+ if _, err := catalog.Deregister(dereg, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ testutil.WaitForResult(func() (bool, error) {
+ node, _, err := catalog.Node("foobar", nil)
+ if err != nil {
+ return false, err
+ }
+
+ if _, ok := node.Services["redis1"]; ok {
+ return false, fmt.Errorf("ServiceID:redis1 is not deregistered")
+ }
+
+ return true, nil
+ }, func(err error) {
+ t.Fatalf("err: %s", err)
+ })
+
+ // Test deregistration of the previously registered check
+ dereg = &CatalogDeregistration{
+ Datacenter: "dc1",
+ Node: "foobar",
+ Address: "192.168.10.10",
+ CheckID: "service:redis1",
+ }
+
+ if _, err := catalog.Deregister(dereg, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ testutil.WaitForResult(func() (bool, error) {
+ health, _, err := c.Health().Node("foobar", nil)
+ if err != nil {
+ return false, err
+ }
+
+ if len(health) != 0 {
+ return false, fmt.Errorf("CheckID:service:redis1 is not deregistered")
+ }
+
+ return true, nil
+ }, func(err error) {
+ t.Fatalf("err: %s", err)
+ })
+
+ // Test node deregistration of the previously registered node
+ dereg = &CatalogDeregistration{
+ Datacenter: "dc1",
+ Node: "foobar",
+ Address: "192.168.10.10",
+ }
+
+ if _, err := catalog.Deregister(dereg, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ testutil.WaitForResult(func() (bool, error) {
+ node, _, err := catalog.Node("foobar", nil)
+ if err != nil {
+ return false, err
+ }
+
+ if node != nil {
+ return false, fmt.Errorf("node is not deregistered: %v", node)
+ }
+
+ return true, nil
+ }, func(err error) {
+ t.Fatalf("err: %s", err)
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/coordinate.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/coordinate.go
new file mode 100644
index 000000000..fdff2075c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/coordinate.go
@@ -0,0 +1,66 @@
+package api
+
+import (
+ "github.com/hashicorp/serf/coordinate"
+)
+
+// CoordinateEntry represents a node and its associated network coordinate.
+type CoordinateEntry struct {
+ Node string
+ Coord *coordinate.Coordinate
+}
+
+// CoordinateDatacenterMap represents a datacenter and its associated WAN
+// nodes and their associated coordinates.
+type CoordinateDatacenterMap struct {
+ Datacenter string
+ Coordinates []CoordinateEntry
+}
+
+// Coordinate can be used to query the coordinate endpoints
+type Coordinate struct {
+ c *Client
+}
+
+// Coordinate returns a handle to the coordinate endpoints
+func (c *Client) Coordinate() *Coordinate {
+ return &Coordinate{c}
+}
+
+// Datacenters is used to return the coordinates of all the servers in the WAN
+// pool.
+func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) {
+ r := c.c.newRequest("GET", "/v1/coordinate/datacenters")
+ _, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out []*CoordinateDatacenterMap
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Nodes is used to return the coordinates of all the nodes in the LAN pool.
+func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
+ r := c.c.newRequest("GET", "/v1/coordinate/nodes")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*CoordinateEntry
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/coordinate_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/coordinate_test.go
new file mode 100644
index 000000000..9d13d1c39
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/coordinate_test.go
@@ -0,0 +1,54 @@
+package api
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/consul/testutil"
+)
+
+func TestCoordinate_Datacenters(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ coordinate := c.Coordinate()
+
+ testutil.WaitForResult(func() (bool, error) {
+ datacenters, err := coordinate.Datacenters()
+ if err != nil {
+ return false, err
+ }
+
+ if len(datacenters) == 0 {
+ return false, fmt.Errorf("Bad: %v", datacenters)
+ }
+
+ return true, nil
+ }, func(err error) {
+ t.Fatalf("err: %s", err)
+ })
+}
+
+func TestCoordinate_Nodes(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ coordinate := c.Coordinate()
+
+ testutil.WaitForResult(func() (bool, error) {
+ _, _, err := coordinate.Nodes(nil)
+ if err != nil {
+ return false, err
+ }
+
+ // There's not a good way to populate coordinates without
+ // waiting for them to calculate and update, so the best
+ // we can do is call the endpoint and make sure we don't
+ // get an error.
+ return true, nil
+ }, func(err error) {
+ t.Fatalf("err: %s", err)
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go
new file mode 100644
index 000000000..1ca92e233
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go
@@ -0,0 +1,49 @@
+package api
+
+import (
+ "testing"
+
+ "github.com/hashicorp/consul/testutil"
+)
+
+func TestEvent_FireList(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ event := c.Event()
+
+ params := &UserEvent{Name: "foo"}
+ id, meta, err := event.Fire(params, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+
+ if id == "" {
+ t.Fatalf("invalid: %v", id)
+ }
+
+ var events []*UserEvent
+ var qm *QueryMeta
+ testutil.WaitForResult(func() (bool, error) {
+ events, qm, err = event.List("", nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ return len(events) > 0, err
+ }, func(err error) {
+ t.Fatalf("err: %#v", err)
+ })
+
+ if events[len(events)-1].ID != id {
+ t.Fatalf("bad: %#v", events)
+ }
+
+ if qm.LastIndex != event.IDToIndex(id) {
+ t.Fatalf("Bad: %#v", qm)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/health_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/health_test.go
new file mode 100644
index 000000000..d80a4693a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/health_test.go
@@ -0,0 +1,125 @@
+package api
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/consul/testutil"
+)
+
+func TestHealth_Node(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ agent := c.Agent()
+ health := c.Health()
+
+ info, err := agent.Self()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ name := info["Config"]["NodeName"].(string)
+
+ testutil.WaitForResult(func() (bool, error) {
+ checks, meta, err := health.Node(name, nil)
+ if err != nil {
+ return false, err
+ }
+ if meta.LastIndex == 0 {
+ return false, fmt.Errorf("bad: %v", meta)
+ }
+ if len(checks) == 0 {
+ return false, fmt.Errorf("bad: %v", checks)
+ }
+ return true, nil
+ }, func(err error) {
+ t.Fatalf("err: %s", err)
+ })
+}
+
+func TestHealth_Checks(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ agent := c.Agent()
+ health := c.Health()
+
+ // Make a service with a check
+ reg := &AgentServiceRegistration{
+ Name: "foo",
+ Check: &AgentServiceCheck{
+ TTL: "15s",
+ },
+ }
+ if err := agent.ServiceRegister(reg); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer agent.ServiceDeregister("foo")
+
+ testutil.WaitForResult(func() (bool, error) {
+ checks, meta, err := health.Checks("foo", nil)
+ if err != nil {
+ return false, err
+ }
+ if meta.LastIndex == 0 {
+ return false, fmt.Errorf("bad: %v", meta)
+ }
+ if len(checks) == 0 {
+ return false, fmt.Errorf("Bad: %v", checks)
+ }
+ return true, nil
+ }, func(err error) {
+ t.Fatalf("err: %s", err)
+ })
+}
+
+func TestHealth_Service(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ health := c.Health()
+
+ testutil.WaitForResult(func() (bool, error) {
+ // consul service should always exist...
+ checks, meta, err := health.Service("consul", "", true, nil)
+ if err != nil {
+ return false, err
+ }
+ if meta.LastIndex == 0 {
+ return false, fmt.Errorf("bad: %v", meta)
+ }
+ if len(checks) == 0 {
+ return false, fmt.Errorf("Bad: %v", checks)
+ }
+ return true, nil
+ }, func(err error) {
+ t.Fatalf("err: %s", err)
+ })
+}
+
+func TestHealth_State(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ health := c.Health()
+
+ testutil.WaitForResult(func() (bool, error) {
+ checks, meta, err := health.State("any", nil)
+ if err != nil {
+ return false, err
+ }
+ if meta.LastIndex == 0 {
+ return false, fmt.Errorf("bad: %v", meta)
+ }
+ if len(checks) == 0 {
+ return false, fmt.Errorf("Bad: %v", checks)
+ }
+ return true, nil
+ }, func(err error) {
+ t.Fatalf("err: %s", err)
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv_test.go
new file mode 100644
index 000000000..758595d89
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv_test.go
@@ -0,0 +1,447 @@
+package api
+
+import (
+ "bytes"
+ "path"
+ "testing"
+ "time"
+)
+
+func TestClientPutGetDelete(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ kv := c.KV()
+
+ // Get a key that doesn't exist yet
+ key := testKey()
+ pair, _, err := kv.Get(key, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair != nil {
+ t.Fatalf("unexpected value: %#v", pair)
+ }
+
+ value := []byte("test")
+
+ // Put a key that begins with a '/'; this should fail
+ invalidKey := "/test"
+ p := &KVPair{Key: invalidKey, Flags: 42, Value: value}
+ if _, err := kv.Put(p, nil); err == nil {
+ t.Fatalf("Invalid key not detected: %s", invalidKey)
+ }
+
+ // Put the key
+ p = &KVPair{Key: key, Flags: 42, Value: value}
+ if _, err := kv.Put(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Get should work
+ pair, meta, err := kv.Get(key, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair == nil {
+ t.Fatalf("expected value: %#v", pair)
+ }
+ if !bytes.Equal(pair.Value, value) {
+ t.Fatalf("unexpected value: %#v", pair)
+ }
+ if pair.Flags != 42 {
+ t.Fatalf("unexpected value: %#v", pair)
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+
+ // Delete
+ if _, err := kv.Delete(key, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Get should now return no value
+ pair, _, err = kv.Get(key, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair != nil {
+ t.Fatalf("unexpected value: %#v", pair)
+ }
+}
+
+func TestClient_List_DeleteRecurse(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ kv := c.KV()
+
+ // Generate some test keys
+ prefix := testKey()
+ var keys []string
+ for i := 0; i < 100; i++ {
+ keys = append(keys, path.Join(prefix, testKey()))
+ }
+
+ // Set values
+ value := []byte("test")
+ for _, key := range keys {
+ p := &KVPair{Key: key, Value: value}
+ if _, err := kv.Put(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }
+
+ // List the values
+ pairs, meta, err := kv.List(prefix, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(pairs) != len(keys) {
+ t.Fatalf("got %d keys", len(pairs))
+ }
+ for _, pair := range pairs {
+ if !bytes.Equal(pair.Value, value) {
+ t.Fatalf("unexpected value: %#v", pair)
+ }
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+
+ // Delete all
+ if _, err := kv.DeleteTree(prefix, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // List the values
+ pairs, _, err = kv.List(prefix, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(pairs) != 0 {
+ t.Fatalf("got %d keys", len(pairs))
+ }
+}
+
+func TestClient_DeleteCAS(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ kv := c.KV()
+
+ // Put the key
+ key := testKey()
+ value := []byte("test")
+ p := &KVPair{Key: key, Value: value}
+ if work, _, err := kv.CAS(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ } else if !work {
+ t.Fatalf("CAS failure")
+ }
+
+ // Get should work
+ pair, meta, err := kv.Get(key, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair == nil {
+ t.Fatalf("expected value: %#v", pair)
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+
+ // CAS update with bad index
+ p.ModifyIndex = 1
+ if work, _, err := kv.DeleteCAS(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ } else if work {
+ t.Fatalf("unexpected CAS")
+ }
+
+ // CAS update with valid index
+ p.ModifyIndex = meta.LastIndex
+ if work, _, err := kv.DeleteCAS(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ } else if !work {
+ t.Fatalf("unexpected CAS failure")
+ }
+}
+
+func TestClient_CAS(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ kv := c.KV()
+
+ // Put the key
+ key := testKey()
+ value := []byte("test")
+ p := &KVPair{Key: key, Value: value}
+ if work, _, err := kv.CAS(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ } else if !work {
+ t.Fatalf("CAS failure")
+ }
+
+ // Get should work
+ pair, meta, err := kv.Get(key, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair == nil {
+ t.Fatalf("expected value: %#v", pair)
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+
+ // CAS update with bad index
+ newVal := []byte("foo")
+ p.Value = newVal
+ p.ModifyIndex = 1
+ if work, _, err := kv.CAS(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ } else if work {
+ t.Fatalf("unexpected CAS")
+ }
+
+ // CAS update with valid index
+ p.ModifyIndex = meta.LastIndex
+ if work, _, err := kv.CAS(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ } else if !work {
+ t.Fatalf("unexpected CAS failure")
+ }
+}
+
+func TestClient_WatchGet(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ kv := c.KV()
+
+ // Get a key that doesn't exist yet
+ key := testKey()
+ pair, meta, err := kv.Get(key, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair != nil {
+ t.Fatalf("unexpected value: %#v", pair)
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+
+ // Put the key
+ value := []byte("test")
+ go func() {
+ kv := c.KV()
+
+ time.Sleep(100 * time.Millisecond)
+ p := &KVPair{Key: key, Flags: 42, Value: value}
+ if _, err := kv.Put(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }()
+
+ // Get should work
+ options := &QueryOptions{WaitIndex: meta.LastIndex}
+ pair, meta2, err := kv.Get(key, options)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair == nil {
+ t.Fatalf("expected value: %#v", pair)
+ }
+ if !bytes.Equal(pair.Value, value) {
+ t.Fatalf("unexpected value: %#v", pair)
+ }
+ if pair.Flags != 42 {
+ t.Fatalf("unexpected value: %#v", pair)
+ }
+ if meta2.LastIndex <= meta.LastIndex {
+ t.Fatalf("unexpected value: %#v", meta2)
+ }
+}
+
+func TestClient_WatchList(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ kv := c.KV()
+
+ // List a prefix with no keys yet
+ prefix := testKey()
+ key := path.Join(prefix, testKey())
+ pairs, meta, err := kv.List(prefix, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(pairs) != 0 {
+ t.Fatalf("unexpected value: %#v", pairs)
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+
+ // Put the key
+ value := []byte("test")
+ go func() {
+ kv := c.KV()
+
+ time.Sleep(100 * time.Millisecond)
+ p := &KVPair{Key: key, Flags: 42, Value: value}
+ if _, err := kv.Put(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }()
+
+ // Get should work
+ options := &QueryOptions{WaitIndex: meta.LastIndex}
+ pairs, meta2, err := kv.List(prefix, options)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(pairs) != 1 {
+ t.Fatalf("expected value: %#v", pairs)
+ }
+ if !bytes.Equal(pairs[0].Value, value) {
+ t.Fatalf("unexpected value: %#v", pairs)
+ }
+ if pairs[0].Flags != 42 {
+ t.Fatalf("unexpected value: %#v", pairs)
+ }
+ if meta2.LastIndex <= meta.LastIndex {
+ t.Fatalf("unexpected value: %#v", meta2)
+ }
+
+}
+
+func TestClient_Keys_DeleteRecurse(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ kv := c.KV()
+
+ // Generate some test keys
+ prefix := testKey()
+ var keys []string
+ for i := 0; i < 100; i++ {
+ keys = append(keys, path.Join(prefix, testKey()))
+ }
+
+ // Set values
+ value := []byte("test")
+ for _, key := range keys {
+ p := &KVPair{Key: key, Value: value}
+ if _, err := kv.Put(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }
+
+ // List the values
+ out, meta, err := kv.Keys(prefix, "", nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(out) != len(keys) {
+ t.Fatalf("got %d keys", len(out))
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+
+ // Delete all
+ if _, err := kv.DeleteTree(prefix, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // List the values
+ out, _, err = kv.Keys(prefix, "", nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(out) != 0 {
+ t.Fatalf("got %d keys", len(out))
+ }
+}
+
+func TestClient_AcquireRelease(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ session := c.Session()
+ kv := c.KV()
+
+ // Make a session
+ id, _, err := session.CreateNoChecks(nil, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer session.Destroy(id, nil)
+
+ // Acquire the key
+ key := testKey()
+ value := []byte("test")
+ p := &KVPair{Key: key, Value: value, Session: id}
+ if work, _, err := kv.Acquire(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ } else if !work {
+ t.Fatalf("Lock failure")
+ }
+
+ // Get should work
+ pair, meta, err := kv.Get(key, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair == nil {
+ t.Fatalf("expected value: %#v", pair)
+ }
+ if pair.LockIndex != 1 {
+ t.Fatalf("Expected lock: %v", pair)
+ }
+ if pair.Session != id {
+ t.Fatalf("Expected lock: %v", pair)
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+
+ // Release
+ if work, _, err := kv.Release(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ } else if !work {
+ t.Fatalf("Release fail")
+ }
+
+ // Get should work
+ pair, meta, err = kv.Get(key, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair == nil {
+ t.Fatalf("expected value: %#v", pair)
+ }
+ if pair.LockIndex != 1 {
+ t.Fatalf("Expected lock: %v", pair)
+ }
+ if pair.Session != "" {
+ t.Fatalf("Expected unlock: %v", pair)
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go
new file mode 100644
index 000000000..f4bad9e6b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go
@@ -0,0 +1,371 @@
+package api
+
+import (
+ "log"
+ "sync"
+ "testing"
+ "time"
+)
+
+func TestLock_LockUnlock(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ lock, err := c.LockKey("test/lock")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Initial unlock should fail
+ err = lock.Unlock()
+ if err != ErrLockNotHeld {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should work
+ leaderCh, err := lock.Lock(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if leaderCh == nil {
+ t.Fatalf("not leader")
+ }
+
+ // Double lock should fail
+ _, err = lock.Lock(nil)
+ if err != ErrLockHeld {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should be leader
+ select {
+ case <-leaderCh:
+ t.Fatalf("should be leader")
+ default:
+ }
+
+ // Initial unlock should work
+ err = lock.Unlock()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Double unlock should fail
+ err = lock.Unlock()
+ if err != ErrLockNotHeld {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should lose leadership
+ select {
+ case <-leaderCh:
+ case <-time.After(time.Second):
+ t.Fatalf("should not be leader")
+ }
+}
+
+func TestLock_ForceInvalidate(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ lock, err := c.LockKey("test/lock")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should work
+ leaderCh, err := lock.Lock(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if leaderCh == nil {
+ t.Fatalf("not leader")
+ }
+ defer lock.Unlock()
+
+ go func() {
+ // Nuke the session, simulating an operator invalidation
+ // or a health check failure
+ session := c.Session()
+ session.Destroy(lock.lockSession, nil)
+ }()
+
+ // Should lose leadership
+ select {
+ case <-leaderCh:
+ case <-time.After(time.Second):
+ t.Fatalf("should not be leader")
+ }
+}
+
+func TestLock_DeleteKey(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ // This uncovered some issues around special-case handling of low index
+ // numbers where it would work with a low number but fail for higher
+ // ones, so we loop this a bit to sweep the index up out of that
+ // territory.
+ for i := 0; i < 10; i++ {
+ func() {
+ lock, err := c.LockKey("test/lock")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should work
+ leaderCh, err := lock.Lock(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if leaderCh == nil {
+ t.Fatalf("not leader")
+ }
+ defer lock.Unlock()
+
+ go func() {
+ // Nuke the key, simulate an operator intervention
+ kv := c.KV()
+ kv.Delete("test/lock", nil)
+ }()
+
+ // Should lose leadership
+ select {
+ case <-leaderCh:
+ case <-time.After(time.Second):
+ t.Fatalf("should not be leader")
+ }
+ }()
+ }
+}
+
+func TestLock_Contend(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ wg := &sync.WaitGroup{}
+ acquired := make([]bool, 3)
+ for idx := range acquired {
+ wg.Add(1)
+ go func(idx int) {
+ defer wg.Done()
+ lock, err := c.LockKey("test/lock")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should work eventually, will contend
+ leaderCh, err := lock.Lock(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if leaderCh == nil {
+ t.Fatalf("not leader")
+ }
+ defer lock.Unlock()
+ log.Printf("Contender %d acquired", idx)
+
+ // Set acquired and then leave
+ acquired[idx] = true
+ }(idx)
+ }
+
+ // Wait for termination
+ doneCh := make(chan struct{})
+ go func() {
+ wg.Wait()
+ close(doneCh)
+ }()
+
+ // Wait for everybody to get a turn
+ select {
+ case <-doneCh:
+ case <-time.After(3 * DefaultLockRetryTime):
+ t.Fatalf("timeout")
+ }
+
+ for idx, did := range acquired {
+ if !did {
+ t.Fatalf("contender %d never acquired", idx)
+ }
+ }
+}
+
+func TestLock_Destroy(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ lock, err := c.LockKey("test/lock")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should work
+ leaderCh, err := lock.Lock(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if leaderCh == nil {
+ t.Fatalf("not leader")
+ }
+
+ // Destroy should fail
+ if err := lock.Destroy(); err != ErrLockHeld {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should be able to release
+ err = lock.Unlock()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Acquire with a different lock
+ l2, err := c.LockKey("test/lock")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should work
+ leaderCh, err = l2.Lock(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if leaderCh == nil {
+ t.Fatalf("not leader")
+ }
+
+ // Destroy should still fail
+ if err := lock.Destroy(); err != ErrLockInUse {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should release
+ err = l2.Unlock()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Destroy should work
+ err = lock.Destroy()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Double destroy should work
+ err = l2.Destroy()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestLock_Conflict(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ sema, err := c.SemaphorePrefix("test/lock/", 2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should work
+ lockCh, err := sema.Acquire(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if lockCh == nil {
+ t.Fatalf("not hold")
+ }
+ defer sema.Release()
+
+ lock, err := c.LockKey("test/lock/.lock")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should conflict with semaphore
+ _, err = lock.Lock(nil)
+ if err != ErrLockConflict {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should conflict with semaphore
+ err = lock.Destroy()
+ if err != ErrLockConflict {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestLock_ReclaimLock(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ session, _, err := c.Session().Create(&SessionEntry{}, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ lock, err := c.LockOpts(&LockOptions{Key: "test/lock", Session: session})
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should work
+ leaderCh, err := lock.Lock(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if leaderCh == nil {
+ t.Fatalf("not leader")
+ }
+ defer lock.Unlock()
+
+ l2, err := c.LockOpts(&LockOptions{Key: "test/lock", Session: session})
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ reclaimed := make(chan (<-chan struct{}), 1)
+ go func() {
+ l2Ch, err := l2.Lock(nil)
+ if err != nil {
+ t.Fatalf("not locked: %v", err)
+ }
+ reclaimed <- l2Ch
+ }()
+
+ // Should reclaim the lock
+ var leader2Ch <-chan struct{}
+
+ select {
+ case leader2Ch = <-reclaimed:
+ case <-time.After(time.Second):
+ t.Fatalf("should have locked")
+ }
+
+ // unlock should work
+ err = l2.Unlock()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Both locks should see the unlock
+ select {
+ case <-leader2Ch:
+ case <-time.After(time.Second):
+ t.Fatalf("should not be leader")
+ }
+
+ select {
+ case <-leaderCh:
+ case <-time.After(time.Second):
+ t.Fatalf("should not be leader")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore_test.go
new file mode 100644
index 000000000..5e5e53588
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore_test.go
@@ -0,0 +1,313 @@
+package api
+
+import (
+ "log"
+ "sync"
+ "testing"
+ "time"
+)
+
+func TestSemaphore_AcquireRelease(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ sema, err := c.SemaphorePrefix("test/semaphore", 2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Initial release should fail
+ err = sema.Release()
+ if err != ErrSemaphoreNotHeld {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should work
+ lockCh, err := sema.Acquire(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if lockCh == nil {
+ t.Fatalf("not hold")
+ }
+
+ // Double lock should fail
+ _, err = sema.Acquire(nil)
+ if err != ErrSemaphoreHeld {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should be held
+ select {
+ case <-lockCh:
+ t.Fatalf("should be held")
+ default:
+ }
+
+ // Initial release should work
+ err = sema.Release()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Double unlock should fail
+ err = sema.Release()
+ if err != ErrSemaphoreNotHeld {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should lose resource
+ select {
+ case <-lockCh:
+ case <-time.After(time.Second):
+ t.Fatalf("should not be held")
+ }
+}
+
+func TestSemaphore_ForceInvalidate(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ sema, err := c.SemaphorePrefix("test/semaphore", 2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should work
+ lockCh, err := sema.Acquire(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if lockCh == nil {
+ t.Fatalf("not acquired")
+ }
+ defer sema.Release()
+
+ go func() {
+ // Nuke the session, simulating an operator invalidation
+ // or a health check failure
+ session := c.Session()
+ session.Destroy(sema.lockSession, nil)
+ }()
+
+ // Should lose the slot
+ select {
+ case <-lockCh:
+ case <-time.After(time.Second):
+ t.Fatalf("should not be locked")
+ }
+}
+
+func TestSemaphore_DeleteKey(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ sema, err := c.SemaphorePrefix("test/semaphore", 2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should work
+ lockCh, err := sema.Acquire(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if lockCh == nil {
+ t.Fatalf("not locked")
+ }
+ defer sema.Release()
+
+ go func() {
+ // Nuke the key, simulate an operator intervention
+ kv := c.KV()
+ kv.DeleteTree("test/semaphore", nil)
+ }()
+
+ // Should lose the slot
+ select {
+ case <-lockCh:
+ case <-time.After(time.Second):
+ t.Fatalf("should not be locked")
+ }
+}
+
+func TestSemaphore_Contend(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ wg := &sync.WaitGroup{}
+ acquired := make([]bool, 4)
+ for idx := range acquired {
+ wg.Add(1)
+ go func(idx int) {
+ defer wg.Done()
+ sema, err := c.SemaphorePrefix("test/semaphore", 2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should work eventually, will contend
+ lockCh, err := sema.Acquire(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if lockCh == nil {
+ t.Fatalf("not locked")
+ }
+ defer sema.Release()
+ log.Printf("Contender %d acquired", idx)
+
+ // Set acquired and then leave
+ acquired[idx] = true
+ }(idx)
+ }
+
+ // Wait for termination
+ doneCh := make(chan struct{})
+ go func() {
+ wg.Wait()
+ close(doneCh)
+ }()
+
+ // Wait for everybody to get a turn
+ select {
+ case <-doneCh:
+ case <-time.After(3 * DefaultLockRetryTime):
+ t.Fatalf("timeout")
+ }
+
+ for idx, did := range acquired {
+ if !did {
+ t.Fatalf("contender %d never acquired", idx)
+ }
+ }
+}
+
+func TestSemaphore_BadLimit(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ sema, err := c.SemaphorePrefix("test/semaphore", 0)
+ if err == nil {
+ t.Fatalf("should error")
+ }
+
+ sema, err = c.SemaphorePrefix("test/semaphore", 1)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ _, err = sema.Acquire(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ sema2, err := c.SemaphorePrefix("test/semaphore", 2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ _, err = sema2.Acquire(nil)
+ if err.Error() != "semaphore limit conflict (lock: 1, local: 2)" {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestSemaphore_Destroy(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ sema, err := c.SemaphorePrefix("test/semaphore", 2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ sema2, err := c.SemaphorePrefix("test/semaphore", 2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ _, err = sema.Acquire(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ _, err = sema2.Acquire(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Destroy should fail, still held
+ if err := sema.Destroy(); err != ErrSemaphoreHeld {
+ t.Fatalf("err: %v", err)
+ }
+
+ err = sema.Release()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Destroy should fail, still in use
+ if err := sema.Destroy(); err != ErrSemaphoreInUse {
+ t.Fatalf("err: %v", err)
+ }
+
+ err = sema2.Release()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Destroy should work
+ if err := sema.Destroy(); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Destroy should work
+ if err := sema2.Destroy(); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestSemaphore_Conflict(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ lock, err := c.LockKey("test/sema/.lock")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should work
+ leaderCh, err := lock.Lock(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if leaderCh == nil {
+ t.Fatalf("not leader")
+ }
+ defer lock.Unlock()
+
+ sema, err := c.SemaphorePrefix("test/sema/", 2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should conflict with lock
+ _, err = sema.Acquire(nil)
+ if err != ErrSemaphoreConflict {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should conflict with lock
+ err = sema.Destroy()
+ if err != ErrSemaphoreConflict {
+ t.Fatalf("err: %v", err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/session_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/session_test.go
new file mode 100644
index 000000000..85bea228e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/session_test.go
@@ -0,0 +1,314 @@
+package api
+
+import (
+ "testing"
+ "time"
+)
+
+func TestSession_CreateDestroy(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ session := c.Session()
+
+ id, meta, err := session.Create(nil, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+
+ if id == "" {
+ t.Fatalf("invalid: %v", id)
+ }
+
+ meta, err = session.Destroy(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+}
+
+func TestSession_CreateRenewDestroy(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ session := c.Session()
+
+ se := &SessionEntry{
+ TTL: "10s",
+ }
+
+ id, meta, err := session.Create(se, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer session.Destroy(id, nil)
+
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+
+ if id == "" {
+ t.Fatalf("invalid: %v", id)
+ }
+
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+
+ renew, meta, err := session.Renew(id, nil)
+
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+
+ if renew == nil {
+ t.Fatalf("should get session")
+ }
+
+ if renew.ID != id {
+ t.Fatalf("should have matching id")
+ }
+
+ if renew.TTL != "10s" {
+ t.Fatalf("should get session with TTL")
+ }
+}
+
+func TestSession_CreateRenewDestroyRenew(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ session := c.Session()
+
+ entry := &SessionEntry{
+ Behavior: SessionBehaviorDelete,
+ TTL: "500s", // disable ttl
+ }
+
+ id, meta, err := session.Create(entry, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+
+ if id == "" {
+ t.Fatalf("invalid: %v", id)
+ }
+
+ // Extend right after create. Everything should be fine.
+ entry, _, err = session.Renew(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if entry == nil {
+ t.Fatal("session unexpectedly vanished")
+ }
+
+ // Simulate TTL loss by manually destroying the session.
+ meta, err = session.Destroy(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+
+ // Extend right after delete. The 404 should proxy as a nil.
+ entry, _, err = session.Renew(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if entry != nil {
+ t.Fatal("session still exists")
+ }
+}
+
+func TestSession_CreateDestroyRenewPeriodic(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ session := c.Session()
+
+ entry := &SessionEntry{
+ Behavior: SessionBehaviorDelete,
+ TTL: "500s", // disable ttl
+ }
+
+ id, meta, err := session.Create(entry, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+
+ if id == "" {
+ t.Fatalf("invalid: %v", id)
+ }
+
+ // This only tests Create/Destroy/RenewPeriodic to avoid the more
+ // difficult case of testing all of the timing code.
+
+ // Simulate TTL loss by manually destroying the session.
+ meta, err = session.Destroy(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+
+ // Extend right after delete. The 404 should terminate the loop quickly and return ErrSessionExpired.
+ errCh := make(chan error, 1)
+ doneCh := make(chan struct{})
+ go func() { errCh <- session.RenewPeriodic("1s", id, nil, doneCh) }()
+ defer close(doneCh)
+
+ select {
+ case <-time.After(1 * time.Second):
+ t.Fatal("timedout: missing session did not terminate renewal loop")
+ case err = <-errCh:
+ if err != ErrSessionExpired {
+ t.Fatalf("err: %v", err)
+ }
+ }
+}
+
+func TestSession_Info(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ session := c.Session()
+
+ id, _, err := session.Create(nil, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer session.Destroy(id, nil)
+
+ info, qm, err := session.Info(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if qm.LastIndex == 0 {
+ t.Fatalf("bad: %v", qm)
+ }
+ if !qm.KnownLeader {
+ t.Fatalf("bad: %v", qm)
+ }
+
+ if info == nil {
+ t.Fatalf("should get session")
+ }
+ if info.CreateIndex == 0 {
+ t.Fatalf("bad: %v", info)
+ }
+ if info.ID != id {
+ t.Fatalf("bad: %v", info)
+ }
+ if info.Name != "" {
+ t.Fatalf("bad: %v", info)
+ }
+ if info.Node == "" {
+ t.Fatalf("bad: %v", info)
+ }
+ if len(info.Checks) == 0 {
+ t.Fatalf("bad: %v", info)
+ }
+ if info.LockDelay == 0 {
+ t.Fatalf("bad: %v", info)
+ }
+ if info.Behavior != "release" {
+ t.Fatalf("bad: %v", info)
+ }
+ if info.TTL != "" {
+ t.Fatalf("bad: %v", info)
+ }
+}
+
+func TestSession_Node(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ session := c.Session()
+
+ id, _, err := session.Create(nil, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer session.Destroy(id, nil)
+
+ info, qm, err := session.Info(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ sessions, qm, err := session.Node(info.Node, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if len(sessions) != 1 {
+ t.Fatalf("bad: %v", sessions)
+ }
+
+ if qm.LastIndex == 0 {
+ t.Fatalf("bad: %v", qm)
+ }
+ if !qm.KnownLeader {
+ t.Fatalf("bad: %v", qm)
+ }
+}
+
+func TestSession_List(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ session := c.Session()
+
+ id, _, err := session.Create(nil, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer session.Destroy(id, nil)
+
+ sessions, qm, err := session.List(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if len(sessions) != 1 {
+ t.Fatalf("bad: %v", sessions)
+ }
+
+ if qm.LastIndex == 0 {
+ t.Fatalf("bad: %v", qm)
+ }
+ if !qm.KnownLeader {
+ t.Fatalf("bad: %v", qm)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/status_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/status_test.go
new file mode 100644
index 000000000..62dc1550f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/status_test.go
@@ -0,0 +1,37 @@
+package api
+
+import (
+ "testing"
+)
+
+func TestStatusLeader(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ status := c.Status()
+
+ leader, err := status.Leader()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if leader == "" {
+ t.Fatalf("Expected leader")
+ }
+}
+
+func TestStatusPeers(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ status := c.Status()
+
+ peers, err := status.Peers()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(peers) == 0 {
+ t.Fatalf("Expected peers ")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/errwrap/errwrap_test.go b/Godeps/_workspace/src/github.com/hashicorp/errwrap/errwrap_test.go
new file mode 100644
index 000000000..5ae5f8e3c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/errwrap/errwrap_test.go
@@ -0,0 +1,94 @@
+package errwrap
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestWrappedError_impl(t *testing.T) {
+ var _ error = new(wrappedError)
+}
+
+func TestGetAll(t *testing.T) {
+ cases := []struct {
+ Err error
+ Msg string
+ Len int
+ }{
+ {},
+ {
+ fmt.Errorf("foo"),
+ "foo",
+ 1,
+ },
+ {
+ fmt.Errorf("bar"),
+ "foo",
+ 0,
+ },
+ {
+ Wrapf("bar", fmt.Errorf("foo")),
+ "foo",
+ 1,
+ },
+ {
+ Wrapf("{{err}}", fmt.Errorf("foo")),
+ "foo",
+ 2,
+ },
+ {
+ Wrapf("bar", Wrapf("baz", fmt.Errorf("foo"))),
+ "foo",
+ 1,
+ },
+ }
+
+ for i, tc := range cases {
+ actual := GetAll(tc.Err, tc.Msg)
+ if len(actual) != tc.Len {
+ t.Fatalf("%d: bad: %#v", i, actual)
+ }
+ for _, v := range actual {
+ if v.Error() != tc.Msg {
+ t.Fatalf("%d: bad: %#v", i, actual)
+ }
+ }
+ }
+}
+
+func TestGetAllType(t *testing.T) {
+ cases := []struct {
+ Err error
+ Type interface{}
+ Len int
+ }{
+ {},
+ {
+ fmt.Errorf("foo"),
+ "foo",
+ 0,
+ },
+ {
+ fmt.Errorf("bar"),
+ fmt.Errorf("foo"),
+ 1,
+ },
+ {
+ Wrapf("bar", fmt.Errorf("foo")),
+ fmt.Errorf("baz"),
+ 2,
+ },
+ {
+ Wrapf("bar", Wrapf("baz", fmt.Errorf("foo"))),
+ Wrapf("", nil),
+ 0,
+ },
+ }
+
+ for i, tc := range cases {
+ actual := GetAllType(tc.Err, tc.Type)
+ if len(actual) != tc.Len {
+ t.Fatalf("%d: bad: %#v", i, actual)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-multierror/append_test.go b/Godeps/_workspace/src/github.com/hashicorp/go-multierror/append_test.go
new file mode 100644
index 000000000..dfa79e289
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/go-multierror/append_test.go
@@ -0,0 +1,64 @@
+package multierror
+
+import (
+ "errors"
+ "testing"
+)
+
+func TestAppend_Error(t *testing.T) {
+ original := &Error{
+ Errors: []error{errors.New("foo")},
+ }
+
+ result := Append(original, errors.New("bar"))
+ if len(result.Errors) != 2 {
+ t.Fatalf("wrong len: %d", len(result.Errors))
+ }
+
+ original = &Error{}
+ result = Append(original, errors.New("bar"))
+ if len(result.Errors) != 1 {
+ t.Fatalf("wrong len: %d", len(result.Errors))
+ }
+
+ // Test when a typed nil is passed
+ var e *Error
+ result = Append(e, errors.New("baz"))
+ if len(result.Errors) != 1 {
+ t.Fatalf("wrong len: %d", len(result.Errors))
+ }
+
+ // Test flattening
+ original = &Error{
+ Errors: []error{errors.New("foo")},
+ }
+
+ result = Append(original, Append(nil, errors.New("foo"), errors.New("bar")))
+ if len(result.Errors) != 3 {
+ t.Fatalf("wrong len: %d", len(result.Errors))
+ }
+}
+
+func TestAppend_NilError(t *testing.T) {
+ var err error
+ result := Append(err, errors.New("bar"))
+ if len(result.Errors) != 1 {
+ t.Fatalf("wrong len: %d", len(result.Errors))
+ }
+}
+
+func TestAppend_NonError(t *testing.T) {
+ original := errors.New("foo")
+ result := Append(original, errors.New("bar"))
+ if len(result.Errors) != 2 {
+ t.Fatalf("wrong len: %d", len(result.Errors))
+ }
+}
+
+func TestAppend_NonError_Error(t *testing.T) {
+ original := errors.New("foo")
+ result := Append(original, Append(nil, errors.New("bar")))
+ if len(result.Errors) != 2 {
+ t.Fatalf("wrong len: %d", len(result.Errors))
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-multierror/flatten_test.go b/Godeps/_workspace/src/github.com/hashicorp/go-multierror/flatten_test.go
new file mode 100644
index 000000000..75218f103
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/go-multierror/flatten_test.go
@@ -0,0 +1,48 @@
+package multierror
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func TestFlatten(t *testing.T) {
+ original := &Error{
+ Errors: []error{
+ errors.New("one"),
+ &Error{
+ Errors: []error{
+ errors.New("two"),
+ &Error{
+ Errors: []error{
+ errors.New("three"),
+ },
+ },
+ },
+ },
+ },
+ }
+
+ expected := strings.TrimSpace(`
+3 error(s) occurred:
+
+* one
+* two
+* three
+ `)
+ actual := fmt.Sprintf("%s", Flatten(original))
+
+ if expected != actual {
+ t.Fatalf("expected: %s, got: %s", expected, actual)
+ }
+}
+
+func TestFlatten_nonError(t *testing.T) {
+ err := errors.New("foo")
+ actual := Flatten(err)
+ if !reflect.DeepEqual(actual, err) {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-multierror/format_test.go b/Godeps/_workspace/src/github.com/hashicorp/go-multierror/format_test.go
new file mode 100644
index 000000000..d7cee5d7d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/go-multierror/format_test.go
@@ -0,0 +1,23 @@
+package multierror
+
+import (
+ "errors"
+ "testing"
+)
+
+func TestListFormatFunc(t *testing.T) {
+ expected := `2 error(s) occurred:
+
+* foo
+* bar`
+
+ errors := []error{
+ errors.New("foo"),
+ errors.New("bar"),
+ }
+
+ actual := ListFormatFunc(errors)
+ if actual != expected {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-multierror/multierror_test.go b/Godeps/_workspace/src/github.com/hashicorp/go-multierror/multierror_test.go
new file mode 100644
index 000000000..3e78079c0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/go-multierror/multierror_test.go
@@ -0,0 +1,70 @@
+package multierror
+
+import (
+ "errors"
+ "reflect"
+ "testing"
+)
+
+func TestError_Impl(t *testing.T) {
+ var _ error = new(Error)
+}
+
+func TestErrorError_custom(t *testing.T) {
+ errors := []error{
+ errors.New("foo"),
+ errors.New("bar"),
+ }
+
+ fn := func(es []error) string {
+ return "foo"
+ }
+
+ multi := &Error{Errors: errors, ErrorFormat: fn}
+ if multi.Error() != "foo" {
+ t.Fatalf("bad: %s", multi.Error())
+ }
+}
+
+func TestErrorError_default(t *testing.T) {
+ expected := `2 error(s) occurred:
+
+* foo
+* bar`
+
+ errors := []error{
+ errors.New("foo"),
+ errors.New("bar"),
+ }
+
+ multi := &Error{Errors: errors}
+ if multi.Error() != expected {
+ t.Fatalf("bad: %s", multi.Error())
+ }
+}
+
+func TestErrorErrorOrNil(t *testing.T) {
+ err := new(Error)
+ if err.ErrorOrNil() != nil {
+ t.Fatalf("bad: %#v", err.ErrorOrNil())
+ }
+
+ err.Errors = []error{errors.New("foo")}
+ if v := err.ErrorOrNil(); v == nil {
+ t.Fatal("should not be nil")
+ } else if !reflect.DeepEqual(v, err) {
+ t.Fatalf("bad: %#v", v)
+ }
+}
+
+func TestErrorWrappedErrors(t *testing.T) {
+ errors := []error{
+ errors.New("foo"),
+ errors.New("bar"),
+ }
+
+ multi := &Error{Errors: errors}
+ if !reflect.DeepEqual(multi.Errors, multi.WrappedErrors()) {
+ t.Fatalf("bad: %s", multi.WrappedErrors())
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-multierror/prefix_test.go b/Godeps/_workspace/src/github.com/hashicorp/go-multierror/prefix_test.go
new file mode 100644
index 000000000..1d4a6f6d3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/go-multierror/prefix_test.go
@@ -0,0 +1,33 @@
+package multierror
+
+import (
+ "errors"
+ "testing"
+)
+
+func TestPrefix_Error(t *testing.T) {
+ original := &Error{
+ Errors: []error{errors.New("foo")},
+ }
+
+ result := Prefix(original, "bar")
+ if result.(*Error).Errors[0].Error() != "bar foo" {
+ t.Fatalf("bad: %s", result)
+ }
+}
+
+func TestPrefix_NilError(t *testing.T) {
+ var err error
+ result := Prefix(err, "bar")
+ if result != nil {
+ t.Fatalf("bad: %#v", result)
+ }
+}
+
+func TestPrefix_NonError(t *testing.T) {
+ original := errors.New("foo")
+ result := Prefix(original, "bar")
+ if result.Error() != "bar foo" {
+ t.Fatalf("bad: %s", result)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/golang-lru/lru.go b/Godeps/_workspace/src/github.com/hashicorp/golang-lru/lru.go
index 32341c3e4..694ea36fc 100644
--- a/Godeps/_workspace/src/github.com/hashicorp/golang-lru/lru.go
+++ b/Godeps/_workspace/src/github.com/hashicorp/golang-lru/lru.go
@@ -36,7 +36,7 @@ func NewWithEvict(size int, onEvicted func(key interface{}, value interface{}))
c := &Cache{
size: size,
evictList: list.New(),
- items: make(map[interface{}]*list.Element, size),
+ items: make(map[interface{}]*list.Element),
onEvicted: onEvicted,
}
return c, nil
diff --git a/Godeps/_workspace/src/github.com/hashicorp/golang-lru/lru_test.go b/Godeps/_workspace/src/github.com/hashicorp/golang-lru/lru_test.go
new file mode 100644
index 000000000..aeca7b7c4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/golang-lru/lru_test.go
@@ -0,0 +1,157 @@
+package lru
+
+import "testing"
+
+func TestLRU(t *testing.T) {
+ evictCounter := 0
+ onEvicted := func(k interface{}, v interface{}) {
+ if k != v {
+ t.Fatalf("Evict values not equal (%v!=%v)", k, v)
+ }
+ evictCounter += 1
+ }
+ l, err := NewWithEvict(128, onEvicted)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ for i := 0; i < 256; i++ {
+ l.Add(i, i)
+ }
+ if l.Len() != 128 {
+ t.Fatalf("bad len: %v", l.Len())
+ }
+
+ if evictCounter != 128 {
+ t.Fatalf("bad evict count: %v", evictCounter)
+ }
+
+ for i, k := range l.Keys() {
+ if v, ok := l.Get(k); !ok || v != k || v != i+128 {
+ t.Fatalf("bad key: %v", k)
+ }
+ }
+ for i := 0; i < 128; i++ {
+ _, ok := l.Get(i)
+ if ok {
+ t.Fatalf("should be evicted")
+ }
+ }
+ for i := 128; i < 256; i++ {
+ _, ok := l.Get(i)
+ if !ok {
+ t.Fatalf("should not be evicted")
+ }
+ }
+ for i := 128; i < 192; i++ {
+ l.Remove(i)
+ _, ok := l.Get(i)
+ if ok {
+ t.Fatalf("should be deleted")
+ }
+ }
+
+ l.Get(192) // expect 192 to be last key in l.Keys()
+
+ for i, k := range l.Keys() {
+ if (i < 63 && k != i+193) || (i == 63 && k != 192) {
+ t.Fatalf("out of order key: %v", k)
+ }
+ }
+
+ l.Purge()
+ if l.Len() != 0 {
+ t.Fatalf("bad len: %v", l.Len())
+ }
+ if _, ok := l.Get(200); ok {
+ t.Fatalf("should contain nothing")
+ }
+}
+
+// test that Add returns true/false if an eviction occurred
+func TestLRUAdd(t *testing.T) {
+ evictCounter := 0
+ onEvicted := func(k interface{}, v interface{}) {
+ evictCounter += 1
+ }
+
+ l, err := NewWithEvict(1, onEvicted)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if l.Add(1, 1) || evictCounter != 0 {
+ t.Errorf("should not have an eviction")
+ }
+ if !l.Add(2, 2) || evictCounter != 1 {
+ t.Errorf("should have an eviction")
+ }
+}
+
+// test that Contains doesn't update recent-ness
+func TestLRUContains(t *testing.T) {
+ l, err := New(2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ l.Add(1, 1)
+ l.Add(2, 2)
+ if !l.Contains(1) {
+ t.Errorf("1 should be contained")
+ }
+
+ l.Add(3, 3)
+ if l.Contains(1) {
+ t.Errorf("Contains should not have updated recent-ness of 1")
+ }
+}
+
+// test that ContainsOrAdd doesn't update recent-ness
+func TestLRUContainsOrAdd(t *testing.T) {
+ l, err := New(2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ l.Add(1, 1)
+ l.Add(2, 2)
+ contains, evict := l.ContainsOrAdd(1, 1)
+ if !contains {
+ t.Errorf("1 should be contained")
+ }
+ if evict {
+ t.Errorf("nothing should be evicted here")
+ }
+
+ l.Add(3, 3)
+ contains, evict = l.ContainsOrAdd(1, 1)
+ if contains {
+ t.Errorf("1 should not have been contained")
+ }
+ if !evict {
+ t.Errorf("an eviction should have occurred")
+ }
+ if !l.Contains(1) {
+ t.Errorf("now 1 should be contained")
+ }
+}
+
+// test that Peek doesn't update recent-ness
+func TestLRUPeek(t *testing.T) {
+ l, err := New(2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ l.Add(1, 1)
+ l.Add(2, 2)
+ if v, ok := l.Peek(1); !ok || v != 1 {
+ t.Errorf("1 should be set to 1: %v, %v", v, ok)
+ }
+
+ l.Add(3, 3)
+ if l.Contains(1) {
+ t.Errorf("should not have updated recent-ness of 1")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/hcl/decoder_test.go b/Godeps/_workspace/src/github.com/hashicorp/hcl/decoder_test.go
new file mode 100644
index 000000000..a71163b19
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/hcl/decoder_test.go
@@ -0,0 +1,481 @@
+package hcl
+
+import (
+ "io/ioutil"
+ "path/filepath"
+ "reflect"
+ "testing"
+)
+
+func TestDecode_interface(t *testing.T) {
+ cases := []struct {
+ File string
+ Err bool
+ Out interface{}
+ }{
+ {
+ "basic.hcl",
+ false,
+ map[string]interface{}{
+ "foo": "bar",
+ "bar": "${file(\"bing/bong.txt\")}",
+ },
+ },
+ {
+ "basic_squish.hcl",
+ false,
+ map[string]interface{}{
+ "foo": "bar",
+ "bar": "${file(\"bing/bong.txt\")}",
+ "foo-bar": "baz",
+ },
+ },
+ {
+ "empty.hcl",
+ false,
+ map[string]interface{}{
+ "resource": []map[string]interface{}{
+ map[string]interface{}{
+ "foo": []map[string]interface{}{
+ map[string]interface{}{},
+ },
+ },
+ },
+ },
+ },
+ {
+ "escape.hcl",
+ false,
+ map[string]interface{}{
+ "foo": "bar\"baz\\n",
+ },
+ },
+ {
+ "float.hcl",
+ false,
+ map[string]interface{}{
+ "a": 1.02,
+ },
+ },
+ {
+ "multiline_bad.hcl",
+ false,
+ map[string]interface{}{"foo": "bar\nbaz\n"},
+ },
+ {
+ "multiline.json",
+ false,
+ map[string]interface{}{"foo": "bar\nbaz"},
+ },
+ {
+ "scientific.json",
+ false,
+ map[string]interface{}{
+ "a": 1e-10,
+ "b": 1e+10,
+ "c": 1e10,
+ "d": 1.2e-10,
+ "e": 1.2e+10,
+ "f": 1.2e10,
+ },
+ },
+ {
+ "scientific.hcl",
+ false,
+ map[string]interface{}{
+ "a": 1e-10,
+ "b": 1e+10,
+ "c": 1e10,
+ "d": 1.2e-10,
+ "e": 1.2e+10,
+ "f": 1.2e10,
+ },
+ },
+ {
+ "terraform_heroku.hcl",
+ false,
+ map[string]interface{}{
+ "name": "terraform-test-app",
+ "config_vars": []map[string]interface{}{
+ map[string]interface{}{
+ "FOO": "bar",
+ },
+ },
+ },
+ },
+ {
+ "structure_multi.hcl",
+ false,
+ map[string]interface{}{
+ "foo": []map[string]interface{}{
+ map[string]interface{}{
+ "baz": []map[string]interface{}{
+ map[string]interface{}{"key": 7},
+ },
+ },
+ map[string]interface{}{
+ "bar": []map[string]interface{}{
+ map[string]interface{}{"key": 12},
+ },
+ },
+ },
+ },
+ },
+ {
+ "structure_multi.json",
+ false,
+ map[string]interface{}{
+ "foo": []map[string]interface{}{
+ map[string]interface{}{
+ "baz": []map[string]interface{}{
+ map[string]interface{}{"key": 7},
+ },
+ "bar": []map[string]interface{}{
+ map[string]interface{}{"key": 12},
+ },
+ },
+ },
+ },
+ },
+ {
+ "structure_list.hcl",
+ false,
+ map[string]interface{}{
+ "foo": []map[string]interface{}{
+ map[string]interface{}{
+ "key": 7,
+ },
+ map[string]interface{}{
+ "key": 12,
+ },
+ },
+ },
+ },
+ {
+ "structure_list.json",
+ false,
+ map[string]interface{}{
+ "foo": []interface{}{
+ map[string]interface{}{
+ "key": 7,
+ },
+ map[string]interface{}{
+ "key": 12,
+ },
+ },
+ },
+ },
+ {
+ "structure_list_deep.json",
+ false,
+ map[string]interface{}{
+ "bar": []map[string]interface{}{
+ map[string]interface{}{
+ "foo": []map[string]interface{}{
+ map[string]interface{}{
+ "name": "terraform_example",
+ "ingress": []interface{}{
+ map[string]interface{}{
+ "from_port": 22,
+ },
+ map[string]interface{}{
+ "from_port": 80,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+
+ {
+ "nested_block_comment.hcl",
+ false,
+ map[string]interface{}{
+ "bar": "value",
+ },
+ },
+
+ {
+ "unterminated_block_comment.hcl",
+ true,
+ nil,
+ },
+ }
+
+ for _, tc := range cases {
+ d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.File))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var out interface{}
+ err = Decode(&out, string(d))
+ if (err != nil) != tc.Err {
+ t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
+ }
+
+ if !reflect.DeepEqual(out, tc.Out) {
+ t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", tc.File, out, tc.Out)
+ }
+ }
+}
+
+func TestDecode_equal(t *testing.T) {
+ cases := []struct {
+ One, Two string
+ }{
+ {
+ "basic.hcl",
+ "basic.json",
+ },
+ {
+ "float.hcl",
+ "float.json",
+ },
+ /*
+ {
+ "structure.hcl",
+ "structure.json",
+ },
+ */
+ {
+ "structure.hcl",
+ "structure_flat.json",
+ },
+ {
+ "terraform_heroku.hcl",
+ "terraform_heroku.json",
+ },
+ }
+
+ for _, tc := range cases {
+ p1 := filepath.Join(fixtureDir, tc.One)
+ p2 := filepath.Join(fixtureDir, tc.Two)
+
+ d1, err := ioutil.ReadFile(p1)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ d2, err := ioutil.ReadFile(p2)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var i1, i2 interface{}
+ err = Decode(&i1, string(d1))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = Decode(&i2, string(d2))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !reflect.DeepEqual(i1, i2) {
+ t.Fatalf(
+ "%s != %s\n\n%#v\n\n%#v",
+ tc.One, tc.Two,
+ i1, i2)
+ }
+ }
+}
+
+func TestDecode_flatMap(t *testing.T) {
+ var val map[string]map[string]string
+
+ err := Decode(&val, testReadFile(t, "structure_flatmap.hcl"))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := map[string]map[string]string{
+ "foo": map[string]string{
+ "foo": "bar",
+ "key": "7",
+ },
+ }
+
+ if !reflect.DeepEqual(val, expected) {
+ t.Fatalf("Actual: %#v\n\nExpected: %#v", val, expected)
+ }
+}
+
+func TestDecode_structure(t *testing.T) {
+ type V struct {
+ Key int
+ Foo string
+ }
+
+ var actual V
+
+ err := Decode(&actual, testReadFile(t, "flat.hcl"))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := V{
+ Key: 7,
+ Foo: "bar",
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected)
+ }
+}
+
+func TestDecode_structurePtr(t *testing.T) {
+ type V struct {
+ Key int
+ Foo string
+ }
+
+ var actual *V
+
+ err := Decode(&actual, testReadFile(t, "flat.hcl"))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := &V{
+ Key: 7,
+ Foo: "bar",
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected)
+ }
+}
+
+func TestDecode_structureArray(t *testing.T) {
+ // This test is extracted from a failure in Consul (consul.io),
+ // hence the interesting structure naming.
+
+ type KeyPolicyType string
+
+ type KeyPolicy struct {
+ Prefix string `hcl:",key"`
+ Policy KeyPolicyType
+ }
+
+ type Policy struct {
+ Keys []KeyPolicy `hcl:"key,expand"`
+ }
+
+ expected := Policy{
+ Keys: []KeyPolicy{
+ KeyPolicy{
+ Prefix: "",
+ Policy: "read",
+ },
+ KeyPolicy{
+ Prefix: "foo/",
+ Policy: "write",
+ },
+ KeyPolicy{
+ Prefix: "foo/bar/",
+ Policy: "read",
+ },
+ KeyPolicy{
+ Prefix: "foo/bar/baz",
+ Policy: "deny",
+ },
+ },
+ }
+
+ files := []string{
+ "decode_policy.hcl",
+ "decode_policy.json",
+ }
+
+ for _, f := range files {
+ var actual Policy
+
+ err := Decode(&actual, testReadFile(t, f))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
+ }
+ }
+}
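+
+// Note (illustrative only; the decode_policy.* fixtures themselves are not
+// shown in this change): given the struct tags above, the HCL input is assumed
+// to look roughly like
+//
+//	key "" {
+//		policy = "read"
+//	}
+//	key "foo/" {
+//		policy = "write"
+//	}
+//
+// where `hcl:",key"` maps each block label onto Prefix and `hcl:"key,expand"`
+// collects the repeated key blocks into the Keys slice.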
+
+func TestDecode_structureMap(t *testing.T) {
+ // This test is extracted from a failure in Terraform (terraform.io),
+ // hence the interesting structure naming.
+
+ type hclVariable struct {
+ Default interface{}
+ Description string
+ Fields []string `hcl:",decodedFields"`
+ }
+
+ type rawConfig struct {
+ Variable map[string]hclVariable
+ }
+
+ expected := rawConfig{
+ Variable: map[string]hclVariable{
+ "foo": hclVariable{
+ Default: "bar",
+ Description: "bar",
+ Fields: []string{"Default", "Description"},
+ },
+
+ "amis": hclVariable{
+ Default: []map[string]interface{}{
+ map[string]interface{}{
+ "east": "foo",
+ },
+ },
+ Fields: []string{"Default"},
+ },
+ },
+ }
+
+ files := []string{
+ "decode_tf_variable.hcl",
+ "decode_tf_variable.json",
+ }
+
+ for _, f := range files {
+ var actual rawConfig
+
+ err := Decode(&actual, testReadFile(t, f))
+ if err != nil {
+ t.Fatalf("Input: %s\n\nerr: %s", f, err)
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
+ }
+ }
+}
+
+func TestDecode_interfaceNonPointer(t *testing.T) {
+ var value interface{}
+ err := Decode(value, testReadFile(t, "basic_int_string.hcl"))
+ if err == nil {
+ t.Fatal("should error")
+ }
+}
+
+func TestDecode_intString(t *testing.T) {
+ var value struct {
+ Count int
+ }
+
+ err := Decode(&value, testReadFile(t, "basic_int_string.hcl"))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if value.Count != 3 {
+ t.Fatalf("bad: %#v", value.Count)
+ }
+}
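+
+// For reference, basic_int_string.hcl is assumed to contain something like
+// `count = "3"`; the point of the test above is that Decode performs the
+// weakly-typed string-to-int conversion for the Count field.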
diff --git a/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/hcl_test.go b/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/hcl_test.go
new file mode 100644
index 000000000..dfefd28b3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/hcl_test.go
@@ -0,0 +1,4 @@
+package hcl
+
+// This is the directory where our test fixtures are.
+const fixtureDir = "./test-fixtures"
diff --git a/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/lex_test.go b/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/lex_test.go
new file mode 100644
index 000000000..909c0e2fc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/lex_test.go
@@ -0,0 +1,104 @@
+package hcl
+
+import (
+ "io/ioutil"
+ "path/filepath"
+ "reflect"
+ "testing"
+)
+
+func TestLex(t *testing.T) {
+ cases := []struct {
+ Input string
+ Output []int
+ }{
+ {
+ "comment.hcl",
+ []int{IDENTIFIER, EQUAL, STRING, lexEOF},
+ },
+ {
+ "comment_single.hcl",
+ []int{lexEOF},
+ },
+ {
+ "complex_key.hcl",
+ []int{IDENTIFIER, EQUAL, STRING, lexEOF},
+ },
+ {
+ "multiple.hcl",
+ []int{
+ IDENTIFIER, EQUAL, STRING,
+ IDENTIFIER, EQUAL, NUMBER,
+ lexEOF,
+ },
+ },
+ {
+ "list.hcl",
+ []int{
+ IDENTIFIER, EQUAL, LEFTBRACKET,
+ NUMBER, COMMA, NUMBER, COMMA, STRING,
+ RIGHTBRACKET, lexEOF,
+ },
+ },
+ {
+ "old.hcl",
+ []int{IDENTIFIER, EQUAL, LEFTBRACE, STRING, lexEOF},
+ },
+ {
+ "structure_basic.hcl",
+ []int{
+ IDENTIFIER, LEFTBRACE,
+ IDENTIFIER, EQUAL, NUMBER,
+ STRING, EQUAL, NUMBER,
+ STRING, EQUAL, NUMBER,
+ RIGHTBRACE, lexEOF,
+ },
+ },
+ {
+ "structure.hcl",
+ []int{
+ IDENTIFIER, IDENTIFIER, STRING, LEFTBRACE,
+ IDENTIFIER, EQUAL, NUMBER,
+ IDENTIFIER, EQUAL, STRING,
+ RIGHTBRACE, lexEOF,
+ },
+ },
+ {
+ "array_comment.hcl",
+ []int{
+ IDENTIFIER, EQUAL, LEFTBRACKET,
+ STRING, COMMA,
+ STRING, COMMA,
+ RIGHTBRACKET, lexEOF,
+ },
+ },
+ }
+
+ for _, tc := range cases {
+ d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Input))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ l := &hclLex{Input: string(d)}
+ var actual []int
+ for {
+ token := l.Lex(new(hclSymType))
+ actual = append(actual, token)
+
+ if token == lexEOF {
+ break
+ }
+
+ if len(actual) > 500 {
+				t.Fatalf("Input: %s\n\nExhausted.", tc.Input)
+ }
+ }
+
+ if !reflect.DeepEqual(actual, tc.Output) {
+ t.Fatalf(
+ "Input: %s\n\nBad: %#v\n\nExpected: %#v",
+ tc.Input, actual, tc.Output)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/parse_test.go b/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/parse_test.go
new file mode 100644
index 000000000..1666bb544
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl/parse_test.go
@@ -0,0 +1,79 @@
+package hcl
+
+import (
+ "io/ioutil"
+ "path/filepath"
+ "testing"
+)
+
+func TestParse(t *testing.T) {
+ cases := []struct {
+ Name string
+ Err bool
+ }{
+ {
+ "assign_colon.hcl",
+ true,
+ },
+ {
+ "comment.hcl",
+ false,
+ },
+ {
+ "comment_single.hcl",
+ false,
+ },
+ {
+ "empty.hcl",
+ false,
+ },
+ {
+ "list_comma.hcl",
+ false,
+ },
+ {
+ "multiple.hcl",
+ false,
+ },
+ {
+ "structure.hcl",
+ false,
+ },
+ {
+ "structure_basic.hcl",
+ false,
+ },
+ {
+ "structure_empty.hcl",
+ false,
+ },
+ {
+ "complex.hcl",
+ false,
+ },
+ {
+ "assign_deep.hcl",
+ true,
+ },
+ {
+ "types.hcl",
+ false,
+ },
+ {
+ "array_comment.hcl",
+ false,
+ },
+ }
+
+ for _, tc := range cases {
+ d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ _, err = Parse(string(d))
+ if (err != nil) != tc.Err {
+ t.Fatalf("Input: %s\n\nError: %s", tc.Name, err)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl_test.go b/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl_test.go
new file mode 100644
index 000000000..31dff7c9e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/hcl/hcl_test.go
@@ -0,0 +1,19 @@
+package hcl
+
+import (
+ "io/ioutil"
+ "path/filepath"
+ "testing"
+)
+
+// This is the directory where our test fixtures are.
+const fixtureDir = "./test-fixtures"
+
+func testReadFile(t *testing.T, n string) string {
+ d, err := ioutil.ReadFile(filepath.Join(fixtureDir, n))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ return string(d)
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/hcl/json/json_test.go b/Godeps/_workspace/src/github.com/hashicorp/hcl/json/json_test.go
new file mode 100644
index 000000000..418582b4c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/hcl/json/json_test.go
@@ -0,0 +1,4 @@
+package json
+
+// This is the directory where our test fixtures are.
+const fixtureDir = "./test-fixtures"
diff --git a/Godeps/_workspace/src/github.com/hashicorp/hcl/json/lex_test.go b/Godeps/_workspace/src/github.com/hashicorp/hcl/json/lex_test.go
new file mode 100644
index 000000000..f573fba1b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/hcl/json/lex_test.go
@@ -0,0 +1,78 @@
+package json
+
+import (
+ "io/ioutil"
+ "path/filepath"
+ "reflect"
+ "testing"
+)
+
+func TestLexJson(t *testing.T) {
+ cases := []struct {
+ Input string
+ Output []int
+ }{
+ {
+ "basic.json",
+ []int{
+ LEFTBRACE,
+ STRING, COLON, STRING,
+ RIGHTBRACE,
+ lexEOF,
+ },
+ },
+ {
+ "array.json",
+ []int{
+ LEFTBRACE,
+ STRING, COLON, LEFTBRACKET,
+ NUMBER, COMMA, NUMBER, COMMA, STRING,
+ RIGHTBRACKET, COMMA,
+ STRING, COLON, STRING,
+ RIGHTBRACE,
+ lexEOF,
+ },
+ },
+ {
+ "object.json",
+ []int{
+ LEFTBRACE,
+ STRING, COLON, LEFTBRACE,
+ STRING, COLON, LEFTBRACKET,
+ NUMBER, COMMA, NUMBER,
+ RIGHTBRACKET,
+ RIGHTBRACE,
+ RIGHTBRACE,
+ lexEOF,
+ },
+ },
+ }
+
+ for _, tc := range cases {
+ d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Input))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ l := &jsonLex{Input: string(d)}
+ var actual []int
+ for {
+ token := l.Lex(new(jsonSymType))
+ actual = append(actual, token)
+
+ if token == lexEOF {
+ break
+ }
+
+ if len(actual) > 500 {
+				t.Fatalf("Input: %s\n\nExhausted.", tc.Input)
+ }
+ }
+
+ if !reflect.DeepEqual(actual, tc.Output) {
+ t.Fatalf(
+ "Input: %s\n\nBad: %#v\n\nExpected: %#v",
+ tc.Input, actual, tc.Output)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/hcl/json/parse_test.go b/Godeps/_workspace/src/github.com/hashicorp/hcl/json/parse_test.go
new file mode 100644
index 000000000..806acb9ab
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/hcl/json/parse_test.go
@@ -0,0 +1,43 @@
+package json
+
+import (
+ "io/ioutil"
+ "path/filepath"
+ "testing"
+)
+
+func TestParse(t *testing.T) {
+ cases := []struct {
+ Name string
+ Err bool
+ }{
+ {
+ "basic.json",
+ false,
+ },
+ {
+ "object.json",
+ false,
+ },
+ {
+ "array.json",
+ false,
+ },
+ {
+ "types.json",
+ false,
+ },
+ }
+
+ for _, tc := range cases {
+ d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ _, err = Parse(string(d))
+ if (err != nil) != tc.Err {
+ t.Fatalf("Input: %s\n\nError: %s", tc.Name, err)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/hcl/lex_test.go b/Godeps/_workspace/src/github.com/hashicorp/hcl/lex_test.go
new file mode 100644
index 000000000..f7ee37886
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/hcl/lex_test.go
@@ -0,0 +1,37 @@
+package hcl
+
+import (
+ "testing"
+)
+
+func TestLexMode(t *testing.T) {
+ cases := []struct {
+ Input string
+ Mode lexModeValue
+ }{
+ {
+ "",
+ lexModeHcl,
+ },
+ {
+ "foo",
+ lexModeHcl,
+ },
+ {
+ "{}",
+ lexModeJson,
+ },
+ {
+ " {}",
+ lexModeJson,
+ },
+ }
+
+ for i, tc := range cases {
+ actual := lexMode(tc.Input)
+
+ if actual != tc.Mode {
+ t.Fatalf("%d: %#v", i, actual)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/logutils/level_benchmark_test.go b/Godeps/_workspace/src/github.com/hashicorp/logutils/level_benchmark_test.go
new file mode 100644
index 000000000..3c2caf70e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/logutils/level_benchmark_test.go
@@ -0,0 +1,37 @@
+package logutils
+
+import (
+ "io/ioutil"
+ "testing"
+)
+
+var messages [][]byte
+
+func init() {
+ messages = [][]byte{
+ []byte("[TRACE] foo"),
+ []byte("[DEBUG] foo"),
+ []byte("[INFO] foo"),
+ []byte("[WARN] foo"),
+ []byte("[ERROR] foo"),
+ }
+}
+
+func BenchmarkDiscard(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ ioutil.Discard.Write(messages[i%len(messages)])
+ }
+}
+
+func BenchmarkLevelFilter(b *testing.B) {
+ filter := &LevelFilter{
+ Levels: []LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"},
+ MinLevel: "WARN",
+ Writer: ioutil.Discard,
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ filter.Write(messages[i%len(messages)])
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/logutils/level_test.go b/Godeps/_workspace/src/github.com/hashicorp/logutils/level_test.go
new file mode 100644
index 000000000..f6b6ac3c3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/logutils/level_test.go
@@ -0,0 +1,94 @@
+package logutils
+
+import (
+ "bytes"
+ "io"
+ "log"
+ "testing"
+)
+
+func TestLevelFilter_impl(t *testing.T) {
+ var _ io.Writer = new(LevelFilter)
+}
+
+func TestLevelFilter(t *testing.T) {
+ buf := new(bytes.Buffer)
+ filter := &LevelFilter{
+ Levels: []LogLevel{"DEBUG", "WARN", "ERROR"},
+ MinLevel: "WARN",
+ Writer: buf,
+ }
+
+ logger := log.New(filter, "", 0)
+ logger.Print("[WARN] foo")
+ logger.Println("[ERROR] bar")
+ logger.Println("[DEBUG] baz")
+ logger.Println("[WARN] buzz")
+
+ result := buf.String()
+ expected := "[WARN] foo\n[ERROR] bar\n[WARN] buzz\n"
+ if result != expected {
+ t.Fatalf("bad: %#v", result)
+ }
+}
+
+func TestLevelFilterCheck(t *testing.T) {
+ filter := &LevelFilter{
+ Levels: []LogLevel{"DEBUG", "WARN", "ERROR"},
+ MinLevel: "WARN",
+ Writer: nil,
+ }
+
+ testCases := []struct {
+ line string
+ check bool
+ }{
+ {"[WARN] foo\n", true},
+ {"[ERROR] bar\n", true},
+ {"[DEBUG] baz\n", false},
+ {"[WARN] buzz\n", true},
+ }
+
+ for _, testCase := range testCases {
+ result := filter.Check([]byte(testCase.line))
+ if result != testCase.check {
+ t.Errorf("Fail: %s", testCase.line)
+ }
+ }
+}
+
+func TestLevelFilter_SetMinLevel(t *testing.T) {
+ filter := &LevelFilter{
+ Levels: []LogLevel{"DEBUG", "WARN", "ERROR"},
+ MinLevel: "ERROR",
+ Writer: nil,
+ }
+
+ testCases := []struct {
+ line string
+ checkBefore bool
+ checkAfter bool
+ }{
+ {"[WARN] foo\n", false, true},
+ {"[ERROR] bar\n", true, true},
+ {"[DEBUG] baz\n", false, false},
+ {"[WARN] buzz\n", false, true},
+ }
+
+ for _, testCase := range testCases {
+ result := filter.Check([]byte(testCase.line))
+ if result != testCase.checkBefore {
+ t.Errorf("Fail: %s", testCase.line)
+ }
+ }
+
+ // Update the minimum level to WARN
+ filter.SetMinLevel("WARN")
+
+ for _, testCase := range testCases {
+ result := filter.Check([]byte(testCase.line))
+ if result != testCase.checkAfter {
+ t.Errorf("Fail: %s", testCase.line)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/README.md b/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/README.md
new file mode 100644
index 000000000..0a96fd3eb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/README.md
@@ -0,0 +1 @@
+# TODO - I'll beef this up as I implement each of the enhancements.
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/client.go b/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/client.go
new file mode 100644
index 000000000..613bfff89
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/client.go
@@ -0,0 +1,180 @@
+package coordinate
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync"
+ "time"
+)
+
+// Client manages the estimated network coordinate for a given node, and adjusts
+// it as the node observes round trip times and estimated coordinates from other
+// nodes. The core algorithm is based on Vivaldi, see the documentation for Config
+// for more details.
+type Client struct {
+ // coord is the current estimate of the client's network coordinate.
+ coord *Coordinate
+
+ // origin is a coordinate sitting at the origin.
+ origin *Coordinate
+
+ // config contains the tuning parameters that govern the performance of
+ // the algorithm.
+ config *Config
+
+ // adjustmentIndex is the current index into the adjustmentSamples slice.
+ adjustmentIndex uint
+
+ // adjustment is used to store samples for the adjustment calculation.
+ adjustmentSamples []float64
+
+ // latencyFilterSamples is used to store the last several RTT samples,
+ // keyed by node name. We will use the config's LatencyFilterSamples
+ // value to determine how many samples we keep, per node.
+ latencyFilterSamples map[string][]float64
+
+ // mutex enables safe concurrent access to the client.
+ mutex sync.RWMutex
+}
+
+// NewClient creates a new Client and verifies the configuration is valid.
+func NewClient(config *Config) (*Client, error) {
+ if !(config.Dimensionality > 0) {
+ return nil, fmt.Errorf("dimensionality must be >0")
+ }
+
+ return &Client{
+ coord: NewCoordinate(config),
+ origin: NewCoordinate(config),
+ config: config,
+ adjustmentIndex: 0,
+ adjustmentSamples: make([]float64, config.AdjustmentWindowSize),
+ latencyFilterSamples: make(map[string][]float64),
+ }, nil
+}
+
+// GetCoordinate returns a copy of the coordinate for this client.
+func (c *Client) GetCoordinate() *Coordinate {
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+
+ return c.coord.Clone()
+}
+
+// SetCoordinate forces the client's coordinate to a known state.
+func (c *Client) SetCoordinate(coord *Coordinate) {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ c.coord = coord.Clone()
+}
+
+// ForgetNode removes any client state for the given node.
+func (c *Client) ForgetNode(node string) {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ delete(c.latencyFilterSamples, node)
+}
+
+// latencyFilter applies a simple moving median filter with a new sample for
+// a node. This assumes that the mutex has been locked already.
+func (c *Client) latencyFilter(node string, rttSeconds float64) float64 {
+ samples, ok := c.latencyFilterSamples[node]
+ if !ok {
+ samples = make([]float64, 0, c.config.LatencyFilterSize)
+ }
+
+ // Add the new sample and trim the list, if needed.
+ samples = append(samples, rttSeconds)
+ if len(samples) > int(c.config.LatencyFilterSize) {
+ samples = samples[1:]
+ }
+ c.latencyFilterSamples[node] = samples
+
+ // Sort a copy of the samples and return the median.
+ sorted := make([]float64, len(samples))
+ copy(sorted, samples)
+ sort.Float64s(sorted)
+ return sorted[len(sorted)/2]
+}
+
+// updateVivaldi updates the Vivaldi portion of the client's coordinate. This
+// assumes that the mutex has been locked already.
+func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) {
+ const zeroThreshold = 1.0e-6
+
+ dist := c.coord.DistanceTo(other).Seconds()
+ if rttSeconds < zeroThreshold {
+ rttSeconds = zeroThreshold
+ }
+ wrongness := math.Abs(dist-rttSeconds) / rttSeconds
+
+ totalError := c.coord.Error + other.Error
+ if totalError < zeroThreshold {
+ totalError = zeroThreshold
+ }
+ weight := c.coord.Error / totalError
+
+ c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight)
+ if c.coord.Error > c.config.VivaldiErrorMax {
+ c.coord.Error = c.config.VivaldiErrorMax
+ }
+
+ delta := c.config.VivaldiCC * weight
+ force := delta * (rttSeconds - dist)
+ c.coord = c.coord.ApplyForce(c.config, force, other)
+}
+
+// updateAdjustment updates the adjustment portion of the client's coordinate, if
+// the feature is enabled. This assumes that the mutex has been locked already.
+func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) {
+ if c.config.AdjustmentWindowSize == 0 {
+ return
+ }
+
+ // Note that the existing adjustment factors don't figure in to this
+ // calculation so we use the raw distance here.
+ dist := c.coord.rawDistanceTo(other)
+ c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist
+ c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize
+
+ sum := 0.0
+ for _, sample := range c.adjustmentSamples {
+ sum += sample
+ }
+ c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize))
+}
+
+// updateGravity applies a small amount of gravity to pull coordinates towards
+// the center of the coordinate system to combat drift. This assumes that the
+// mutex is locked already.
+func (c *Client) updateGravity() {
+ dist := c.origin.DistanceTo(c.coord).Seconds()
+ force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0)
+ c.coord = c.coord.ApplyForce(c.config, force, c.origin)
+}
+
+// Update takes other, a coordinate for another node, and rtt, a round trip
+// time observation for a ping to that node, and updates the estimated position of
+// the client's coordinate. Returns the updated coordinate.
+func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) *Coordinate {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ rttSeconds := c.latencyFilter(node, rtt.Seconds())
+ c.updateVivaldi(other, rttSeconds)
+ c.updateAdjustment(other, rttSeconds)
+ c.updateGravity()
+ return c.coord.Clone()
+}
+
+// DistanceTo returns the estimated RTT from the client's coordinate to other, the
+// coordinate for another node.
+func (c *Client) DistanceTo(other *Coordinate) time.Duration {
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+
+ return c.coord.DistanceTo(other)
+}
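+
+// Illustrative usage sketch (comments only; names such as remoteCoord and rtt
+// are placeholders, not part of this package):
+//
+//	client, err := NewClient(DefaultConfig())
+//	if err != nil {
+//		// handle the error
+//	}
+//
+//	// After pinging a peer and measuring rtt, fold the observation in:
+//	client.Update("node-a", remoteCoord, rtt)
+//
+//	// Later, estimate the RTT to any node whose coordinate is known:
+//	estimate := client.DistanceTo(remoteCoord)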
diff --git a/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/client_test.go b/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/client_test.go
new file mode 100644
index 000000000..ae7d58c0d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/client_test.go
@@ -0,0 +1,109 @@
+package coordinate
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestClient_NewClient(t *testing.T) {
+ config := DefaultConfig()
+
+ config.Dimensionality = 0
+ client, err := NewClient(config)
+ if err == nil || !strings.Contains(err.Error(), "dimensionality") {
+ t.Fatal(err)
+ }
+
+ config.Dimensionality = 7
+ client, err = NewClient(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ origin := NewCoordinate(config)
+ if !reflect.DeepEqual(client.GetCoordinate(), origin) {
+ t.Fatalf("fresh client should be located at the origin")
+ }
+}
+
+func TestClient_Update(t *testing.T) {
+ config := DefaultConfig()
+ config.Dimensionality = 3
+
+ client, err := NewClient(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Make sure the Euclidean part of our coordinate is what we expect.
+ c := client.GetCoordinate()
+ verifyEqualVectors(t, c.Vec, []float64{0.0, 0.0, 0.0})
+
+ // Place a node right above the client and observe an RTT longer than the
+ // client expects, given its distance.
+ other := NewCoordinate(config)
+ other.Vec[2] = 0.001
+ rtt := time.Duration(2.0 * other.Vec[2] * secondsToNanoseconds)
+ c = client.Update("node", other, rtt)
+
+ // The client should have scooted down to get away from it.
+ if !(c.Vec[2] < 0.0) {
+ t.Fatalf("client z coordinate %9.6f should be < 0.0", c.Vec[2])
+ }
+
+ // Set the coordinate to a known state.
+ c.Vec[2] = 99.0
+ client.SetCoordinate(c)
+ c = client.GetCoordinate()
+ verifyEqualFloats(t, c.Vec[2], 99.0)
+}
+
+func TestClient_DistanceTo(t *testing.T) {
+ config := DefaultConfig()
+ config.Dimensionality = 3
+ config.HeightMin = 0
+
+ client, err := NewClient(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Fiddle a raw coordinate to put it a specific number of seconds away.
+ other := NewCoordinate(config)
+ other.Vec[2] = 12.345
+ expected := time.Duration(other.Vec[2] * secondsToNanoseconds)
+ dist := client.DistanceTo(other)
+ if dist != expected {
+ t.Fatalf("distance doesn't match %9.6f != %9.6f", dist.Seconds(), expected.Seconds())
+ }
+}
+
+func TestClient_latencyFilter(t *testing.T) {
+ config := DefaultConfig()
+ config.LatencyFilterSize = 3
+
+ client, err := NewClient(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Make sure we get the median, and that things age properly.
+ verifyEqualFloats(t, client.latencyFilter("alice", 0.201), 0.201)
+ verifyEqualFloats(t, client.latencyFilter("alice", 0.200), 0.201)
+ verifyEqualFloats(t, client.latencyFilter("alice", 0.207), 0.201)
+
+	// This glitch will get median-ed out and never be seen by Vivaldi.
+ verifyEqualFloats(t, client.latencyFilter("alice", 1.9), 0.207)
+ verifyEqualFloats(t, client.latencyFilter("alice", 0.203), 0.207)
+ verifyEqualFloats(t, client.latencyFilter("alice", 0.199), 0.203)
+ verifyEqualFloats(t, client.latencyFilter("alice", 0.211), 0.203)
+
+ // Make sure different nodes are not coupled.
+ verifyEqualFloats(t, client.latencyFilter("bob", 0.310), 0.310)
+
+ // Make sure we don't leak coordinates for nodes that leave.
+ client.ForgetNode("alice")
+ verifyEqualFloats(t, client.latencyFilter("alice", 0.888), 0.888)
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/config.go b/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/config.go
new file mode 100644
index 000000000..a5b3aadfe
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/config.go
@@ -0,0 +1,70 @@
+package coordinate
+
+// Config is used to set the parameters of the Vivaldi-based coordinate mapping
+// algorithm.
+//
+// The following references are called out at various points in the documentation
+// here:
+//
+// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system."
+// ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004.
+// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates
+// in the Wild." NSDI. Vol. 7. 2007.
+// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for
+// host-based network coordinate systems." Networking, IEEE/ACM Transactions
+// on 18.1 (2010): 27-40.
+type Config struct {
+	// The dimensionality of the coordinate system. As discussed in [2], more
+	// dimensions improve the accuracy of the estimates up to a point. Per [2]
+	// we chose 8 dimensions plus a non-Euclidean height.
+ Dimensionality uint
+
+ // VivaldiErrorMax is the default error value when a node hasn't yet made
+ // any observations. It also serves as an upper limit on the error value in
+ // case observations cause the error value to increase without bound.
+ VivaldiErrorMax float64
+
+ // VivaldiCE is a tuning factor that controls the maximum impact an
+ // observation can have on a node's confidence. See [1] for more details.
+ VivaldiCE float64
+
+ // VivaldiCC is a tuning factor that controls the maximum impact an
+ // observation can have on a node's coordinate. See [1] for more details.
+ VivaldiCC float64
+
+ // AdjustmentWindowSize is a tuning factor that determines how many samples
+ // we retain to calculate the adjustment factor as discussed in [3]. Setting
+ // this to zero disables this feature.
+ AdjustmentWindowSize uint
+
+ // HeightMin is the minimum value of the height parameter. Since this
+	// must always be positive, it will introduce a small amount of error, so
+ // the chosen value should be relatively small compared to "normal"
+ // coordinates.
+ HeightMin float64
+
+	// LatencyFilterSize is the maximum number of RTT samples that are retained
+	// per node, in order to compute a median. The intent is to ride out blips
+	// but still keep the delay low, since we probe any given node fairly
+	// infrequently. See [2] for more details.
+ LatencyFilterSize uint
+
+	// GravityRho is a tuning factor that sets how strong an effect gravity has
+	// when pulling coordinates back toward the origin to combat drift. See [2]
+	// for more details.
+ GravityRho float64
+}
+
+// DefaultConfig returns a Config that has some default values suitable for
+// basic testing of the algorithm, but not tuned to any particular type of cluster.
+func DefaultConfig() *Config {
+ return &Config{
+ Dimensionality: 8,
+ VivaldiErrorMax: 1.5,
+ VivaldiCE: 0.25,
+ VivaldiCC: 0.25,
+ AdjustmentWindowSize: 20,
+ HeightMin: 10.0e-6,
+ LatencyFilterSize: 3,
+ GravityRho: 150.0,
+ }
+}
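+
+// A minimal tuning sketch (values are illustrative only): start from
+// DefaultConfig and override the fields you care about before building a
+// client, e.g.
+//
+//	config := DefaultConfig()
+//	config.Dimensionality = 4
+//	config.AdjustmentWindowSize = 0 // disables the adjustment term
+//	client, err := NewClient(config)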
diff --git a/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/coordinate.go b/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/coordinate.go
new file mode 100644
index 000000000..c9194e048
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/coordinate.go
@@ -0,0 +1,183 @@
+package coordinate
+
+import (
+ "math"
+ "math/rand"
+ "time"
+)
+
+// Coordinate is a specialized structure for holding network coordinates for the
+// Vivaldi-based coordinate mapping algorithm. All of the fields should be public
+// to enable this to be serialized. All values in here are in units of seconds.
+type Coordinate struct {
+ // Vec is the Euclidean portion of the coordinate. This is used along
+ // with the other fields to provide an overall distance estimate. The
+ // units here are seconds.
+ Vec []float64
+
+	// Error reflects the confidence in the given coordinate and is updated
+ // dynamically by the Vivaldi Client. This is dimensionless.
+ Error float64
+
+ // Adjustment is a distance offset computed based on a calculation over
+ // observations from all other nodes over a fixed window and is updated
+ // dynamically by the Vivaldi Client. The units here are seconds.
+ Adjustment float64
+
+ // Height is a distance offset that accounts for non-Euclidean effects
+ // which model the access links from nodes to the core Internet. The access
+ // links are usually set by bandwidth and congestion, and the core links
+ // usually follow distance based on geography.
+ Height float64
+}
+
+const (
+ // secondsToNanoseconds is used to convert float seconds to nanoseconds.
+ secondsToNanoseconds = 1.0e9
+
+ // zeroThreshold is used to decide if two coordinates are on top of each
+ // other.
+ zeroThreshold = 1.0e-6
+)
+
+// DimensionalityConflictError is the panic value used when you try to perform
+// operations on coordinates with incompatible dimensions.
+type DimensionalityConflictError struct{}
+
+// Error implements the error interface.
+func (e DimensionalityConflictError) Error() string {
+ return "coordinate dimensionality does not match"
+}
+
+// NewCoordinate creates a new coordinate at the origin, using the given config
+// to supply key initial values.
+func NewCoordinate(config *Config) *Coordinate {
+ return &Coordinate{
+ Vec: make([]float64, config.Dimensionality),
+ Error: config.VivaldiErrorMax,
+ Adjustment: 0.0,
+ Height: config.HeightMin,
+ }
+}
+
+// Clone creates an independent copy of this coordinate.
+func (c *Coordinate) Clone() *Coordinate {
+ vec := make([]float64, len(c.Vec))
+ copy(vec, c.Vec)
+ return &Coordinate{
+ Vec: vec,
+ Error: c.Error,
+ Adjustment: c.Adjustment,
+ Height: c.Height,
+ }
+}
+
+// IsCompatibleWith checks to see if the two coordinates are compatible
+// dimensionally. If this returns true then you are guaranteed to not get
+// any runtime errors operating on them.
+func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool {
+ return len(c.Vec) == len(other.Vec)
+}
+
+// ApplyForce returns the result of applying the force from the direction of the
+// other coordinate.
+func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate {
+ if !c.IsCompatibleWith(other) {
+ panic(DimensionalityConflictError{})
+ }
+
+ ret := c.Clone()
+ unit, mag := unitVectorAt(c.Vec, other.Vec)
+ ret.Vec = add(ret.Vec, mul(unit, force))
+ if mag > zeroThreshold {
+ ret.Height = (ret.Height+other.Height)*force/mag + ret.Height
+ ret.Height = math.Max(ret.Height, config.HeightMin)
+ }
+ return ret
+}
+
+// DistanceTo returns the distance between this coordinate and the other
+// coordinate, including adjustments.
+func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration {
+ if !c.IsCompatibleWith(other) {
+ panic(DimensionalityConflictError{})
+ }
+
+ dist := c.rawDistanceTo(other)
+ adjustedDist := dist + c.Adjustment + other.Adjustment
+ if adjustedDist > 0.0 {
+ dist = adjustedDist
+ }
+ return time.Duration(dist * secondsToNanoseconds)
+}
+
+// rawDistanceTo returns the Vivaldi distance between this coordinate and the
+// other coordinate in seconds, not including adjustments. This assumes the
+// dimensions have already been checked to be compatible.
+func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 {
+ return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height
+}
+
+// add returns the sum of vec1 and vec2. This assumes the dimensions have
+// already been checked to be compatible.
+func add(vec1 []float64, vec2 []float64) []float64 {
+ ret := make([]float64, len(vec1))
+ for i, _ := range ret {
+ ret[i] = vec1[i] + vec2[i]
+ }
+ return ret
+}
+
+// diff returns the difference between the vec1 and vec2. This assumes the
+// dimensions have already been checked to be compatible.
+func diff(vec1 []float64, vec2 []float64) []float64 {
+ ret := make([]float64, len(vec1))
+ for i, _ := range ret {
+ ret[i] = vec1[i] - vec2[i]
+ }
+ return ret
+}
+
+// mul returns vec multiplied by a scalar factor.
+func mul(vec []float64, factor float64) []float64 {
+ ret := make([]float64, len(vec))
+ for i, _ := range vec {
+ ret[i] = vec[i] * factor
+ }
+ return ret
+}
+
+// magnitude computes the magnitude of the vec.
+func magnitude(vec []float64) float64 {
+ sum := 0.0
+ for i, _ := range vec {
+ sum += vec[i] * vec[i]
+ }
+ return math.Sqrt(sum)
+}
+
+// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two
+// positions are the same then a random unit vector is returned. We also return
+// the distance between the points for use in the later height calculation.
+func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) {
+ ret := diff(vec1, vec2)
+
+ // If the coordinates aren't on top of each other we can normalize.
+ if mag := magnitude(ret); mag > zeroThreshold {
+ return mul(ret, 1.0/mag), mag
+ }
+
+ // Otherwise, just return a random unit vector.
+ for i, _ := range ret {
+ ret[i] = rand.Float64() - 0.5
+ }
+ if mag := magnitude(ret); mag > zeroThreshold {
+ return mul(ret, 1.0/mag), 0.0
+ }
+
+ // And finally just give up and make a unit vector along the first
+ // dimension. This should be exceedingly rare.
+ ret = make([]float64, len(ret))
+ ret[0] = 1.0
+ return ret, 0.0
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/coordinate_test.go b/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/coordinate_test.go
new file mode 100644
index 000000000..17f68ac51
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/coordinate_test.go
@@ -0,0 +1,260 @@
+package coordinate
+
+import (
+ "math"
+ "reflect"
+ "testing"
+ "time"
+)
+
+// verifyDimensionPanic will run the supplied func and make sure it panics with
+// the expected error type.
+func verifyDimensionPanic(t *testing.T, f func()) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(DimensionalityConflictError); !ok {
+ t.Fatalf("panic isn't the right type")
+ }
+ } else {
+ t.Fatalf("didn't get expected panic")
+ }
+ }()
+ f()
+}
+
+func TestCoordinate_NewCoordinate(t *testing.T) {
+ config := DefaultConfig()
+ c := NewCoordinate(config)
+ if uint(len(c.Vec)) != config.Dimensionality {
+ t.Fatalf("dimensionality not set correctly %d != %d",
+ len(c.Vec), config.Dimensionality)
+ }
+}
+
+func TestCoordinate_Clone(t *testing.T) {
+ c := NewCoordinate(DefaultConfig())
+ c.Vec[0], c.Vec[1], c.Vec[2] = 1.0, 2.0, 3.0
+ c.Error = 5.0
+ c.Adjustment = 10.0
+ c.Height = 4.2
+
+ other := c.Clone()
+ if !reflect.DeepEqual(c, other) {
+ t.Fatalf("coordinate clone didn't make a proper copy")
+ }
+
+ other.Vec[0] = c.Vec[0] + 0.5
+ if reflect.DeepEqual(c, other) {
+ t.Fatalf("cloned coordinate is still pointing at its ancestor")
+ }
+}
+
+func TestCoordinate_IsCompatibleWith(t *testing.T) {
+ config := DefaultConfig()
+
+ config.Dimensionality = 3
+ c1 := NewCoordinate(config)
+ c2 := NewCoordinate(config)
+
+ config.Dimensionality = 2
+ alien := NewCoordinate(config)
+
+ if !c1.IsCompatibleWith(c1) || !c2.IsCompatibleWith(c2) ||
+ !alien.IsCompatibleWith(alien) {
+ t.Fatalf("coordinates should be compatible with themselves")
+ }
+
+ if !c1.IsCompatibleWith(c2) || !c2.IsCompatibleWith(c1) {
+ t.Fatalf("coordinates should be compatible with each other")
+ }
+
+ if c1.IsCompatibleWith(alien) || c2.IsCompatibleWith(alien) ||
+ alien.IsCompatibleWith(c1) || alien.IsCompatibleWith(c2) {
+ t.Fatalf("alien should not be compatible with the other coordinates")
+ }
+}
+
+func TestCoordinate_ApplyForce(t *testing.T) {
+ config := DefaultConfig()
+ config.Dimensionality = 3
+ config.HeightMin = 0
+
+ origin := NewCoordinate(config)
+
+ // This proves that we normalize, get the direction right, and apply the
+ // force multiplier correctly.
+ above := NewCoordinate(config)
+ above.Vec = []float64{0.0, 0.0, 2.9}
+ c := origin.ApplyForce(config, 5.3, above)
+ verifyEqualVectors(t, c.Vec, []float64{0.0, 0.0, -5.3})
+
+ // Scoot a point not starting at the origin to make sure there's nothing
+ // special there.
+ right := NewCoordinate(config)
+ right.Vec = []float64{3.4, 0.0, -5.3}
+ c = c.ApplyForce(config, 2.0, right)
+ verifyEqualVectors(t, c.Vec, []float64{-2.0, 0.0, -5.3})
+
+ // If the points are right on top of each other, then we should end up
+ // in a random direction, one unit away. This makes sure the unit vector
+ // build up doesn't divide by zero.
+ c = origin.ApplyForce(config, 1.0, origin)
+ verifyEqualFloats(t, origin.DistanceTo(c).Seconds(), 1.0)
+
+ // Enable a minimum height and make sure that gets factored in properly.
+ config.HeightMin = 10.0e-6
+ origin = NewCoordinate(config)
+ c = origin.ApplyForce(config, 5.3, above)
+ verifyEqualVectors(t, c.Vec, []float64{0.0, 0.0, -5.3})
+ verifyEqualFloats(t, c.Height, config.HeightMin+5.3*config.HeightMin/2.9)
+
+ // Make sure the height minimum is enforced.
+ c = origin.ApplyForce(config, -5.3, above)
+ verifyEqualVectors(t, c.Vec, []float64{0.0, 0.0, 5.3})
+ verifyEqualFloats(t, c.Height, config.HeightMin)
+
+ // Shenanigans should get called if the dimensions don't match.
+ bad := c.Clone()
+ bad.Vec = make([]float64, len(bad.Vec)+1)
+ verifyDimensionPanic(t, func() { c.ApplyForce(config, 1.0, bad) })
+}
+
+func TestCoordinate_DistanceTo(t *testing.T) {
+ config := DefaultConfig()
+ config.Dimensionality = 3
+ config.HeightMin = 0
+
+ c1, c2 := NewCoordinate(config), NewCoordinate(config)
+ c1.Vec = []float64{-0.5, 1.3, 2.4}
+ c2.Vec = []float64{1.2, -2.3, 3.4}
+
+ verifyEqualFloats(t, c1.DistanceTo(c1).Seconds(), 0.0)
+ verifyEqualFloats(t, c1.DistanceTo(c2).Seconds(), c2.DistanceTo(c1).Seconds())
+ verifyEqualFloats(t, c1.DistanceTo(c2).Seconds(), 4.104875150354758)
+
+ // Make sure negative adjustment factors are ignored.
+ c1.Adjustment = -1.0e6
+ verifyEqualFloats(t, c1.DistanceTo(c2).Seconds(), 4.104875150354758)
+
+ // Make sure positive adjustment factors affect the distance.
+ c1.Adjustment = 0.1
+ c2.Adjustment = 0.2
+ verifyEqualFloats(t, c1.DistanceTo(c2).Seconds(), 4.104875150354758+0.3)
+
+ // Make sure the heights affect the distance.
+ c1.Height = 0.7
+ c2.Height = 0.1
+ verifyEqualFloats(t, c1.DistanceTo(c2).Seconds(), 4.104875150354758+0.3+0.8)
+
+ // Shenanigans should get called if the dimensions don't match.
+ bad := c1.Clone()
+ bad.Vec = make([]float64, len(bad.Vec)+1)
+ verifyDimensionPanic(t, func() { _ = c1.DistanceTo(bad) })
+}
+
+// dist is a self-contained example that appears in documentation.
+func dist(a *Coordinate, b *Coordinate) time.Duration {
+ // Coordinates will always have the same dimensionality, so this is
+ // just a sanity check.
+ if len(a.Vec) != len(b.Vec) {
+ panic("dimensions aren't compatible")
+ }
+
+ // Calculate the Euclidean distance plus the heights.
+ sumsq := 0.0
+ for i := 0; i < len(a.Vec); i++ {
+ diff := a.Vec[i] - b.Vec[i]
+ sumsq += diff * diff
+ }
+ rtt := math.Sqrt(sumsq) + a.Height + b.Height
+
+ // Apply the adjustment components, guarding against negatives.
+ adjusted := rtt + a.Adjustment + b.Adjustment
+ if adjusted > 0.0 {
+ rtt = adjusted
+ }
+
+ // Go's times are natively nanoseconds, so we convert from seconds.
+ const secondsToNanoseconds = 1.0e9
+ return time.Duration(rtt * secondsToNanoseconds)
+}
+
+func TestCoordinate_dist_Example(t *testing.T) {
+ config := DefaultConfig()
+ c1, c2 := NewCoordinate(config), NewCoordinate(config)
+ c1.Vec = []float64{-0.5, 1.3, 2.4}
+ c2.Vec = []float64{1.2, -2.3, 3.4}
+ c1.Adjustment = 0.1
+ c2.Adjustment = 0.2
+ c1.Height = 0.7
+ c2.Height = 0.1
+ verifyEqualFloats(t, c1.DistanceTo(c2).Seconds(), dist(c1, c2).Seconds())
+}
+
+func TestCoordinate_rawDistanceTo(t *testing.T) {
+ config := DefaultConfig()
+ config.Dimensionality = 3
+ config.HeightMin = 0
+
+ c1, c2 := NewCoordinate(config), NewCoordinate(config)
+ c1.Vec = []float64{-0.5, 1.3, 2.4}
+ c2.Vec = []float64{1.2, -2.3, 3.4}
+
+ verifyEqualFloats(t, c1.rawDistanceTo(c1), 0.0)
+ verifyEqualFloats(t, c1.rawDistanceTo(c2), c2.rawDistanceTo(c1))
+ verifyEqualFloats(t, c1.rawDistanceTo(c2), 4.104875150354758)
+
+ // Make sure that the adjustment doesn't factor into the raw
+ // distance.
+ c1.Adjustment = 1.0e6
+ verifyEqualFloats(t, c1.rawDistanceTo(c2), 4.104875150354758)
+
+ // Make sure the heights affect the distance.
+ c1.Height = 0.7
+ c2.Height = 0.1
+ verifyEqualFloats(t, c1.rawDistanceTo(c2), 4.104875150354758+0.8)
+}
+
+func TestCoordinate_add(t *testing.T) {
+ vec1 := []float64{1.0, -3.0, 3.0}
+ vec2 := []float64{-4.0, 5.0, 6.0}
+ verifyEqualVectors(t, add(vec1, vec2), []float64{-3.0, 2.0, 9.0})
+
+ zero := []float64{0.0, 0.0, 0.0}
+ verifyEqualVectors(t, add(vec1, zero), vec1)
+}
+
+func TestCoordinate_diff(t *testing.T) {
+ vec1 := []float64{1.0, -3.0, 3.0}
+ vec2 := []float64{-4.0, 5.0, 6.0}
+ verifyEqualVectors(t, diff(vec1, vec2), []float64{5.0, -8.0, -3.0})
+
+ zero := []float64{0.0, 0.0, 0.0}
+ verifyEqualVectors(t, diff(vec1, zero), vec1)
+}
+
+func TestCoordinate_magnitude(t *testing.T) {
+ zero := []float64{0.0, 0.0, 0.0}
+ verifyEqualFloats(t, magnitude(zero), 0.0)
+
+ vec := []float64{1.0, -2.0, 3.0}
+ verifyEqualFloats(t, magnitude(vec), 3.7416573867739413)
+}
+
+func TestCoordinate_unitVectorAt(t *testing.T) {
+ vec1 := []float64{1.0, 2.0, 3.0}
+ vec2 := []float64{0.5, 0.6, 0.7}
+ u, mag := unitVectorAt(vec1, vec2)
+ verifyEqualVectors(t, u, []float64{0.18257418583505536, 0.511207720338155, 0.8398412548412546})
+ verifyEqualFloats(t, magnitude(u), 1.0)
+ verifyEqualFloats(t, mag, magnitude(diff(vec1, vec2)))
+
+ // If we give positions that are equal we should get a random unit vector
+ // returned to us, rather than a divide by zero.
+ u, mag = unitVectorAt(vec1, vec1)
+ verifyEqualFloats(t, magnitude(u), 1.0)
+ verifyEqualFloats(t, mag, 0.0)
+
+ // We can't hit the final clause without heroics so I manually forced it
+ // there to verify it works.
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/performance_test.go b/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/performance_test.go
new file mode 100644
index 000000000..fc676e20f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/performance_test.go
@@ -0,0 +1,182 @@
+package coordinate
+
+import (
+ "math"
+ "testing"
+ "time"
+)
+
+func TestPerformance_Line(t *testing.T) {
+ const spacing = 10 * time.Millisecond
+ const nodes, cycles = 10, 1000
+ config := DefaultConfig()
+ clients, err := GenerateClients(nodes, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ truth := GenerateLine(nodes, spacing)
+ Simulate(clients, truth, cycles)
+ stats := Evaluate(clients, truth)
+ if stats.ErrorAvg > 0.0018 || stats.ErrorMax > 0.0092 {
+ t.Fatalf("performance stats are out of spec: %v", stats)
+ }
+}
+
+func TestPerformance_Grid(t *testing.T) {
+ const spacing = 10 * time.Millisecond
+ const nodes, cycles = 25, 1000
+ config := DefaultConfig()
+ clients, err := GenerateClients(nodes, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ truth := GenerateGrid(nodes, spacing)
+ Simulate(clients, truth, cycles)
+ stats := Evaluate(clients, truth)
+ if stats.ErrorAvg > 0.0015 || stats.ErrorMax > 0.022 {
+ t.Fatalf("performance stats are out of spec: %v", stats)
+ }
+}
+
+func TestPerformance_Split(t *testing.T) {
+ const lan, wan = 1 * time.Millisecond, 10 * time.Millisecond
+ const nodes, cycles = 25, 1000
+ config := DefaultConfig()
+ clients, err := GenerateClients(nodes, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ truth := GenerateSplit(nodes, lan, wan)
+ Simulate(clients, truth, cycles)
+ stats := Evaluate(clients, truth)
+ if stats.ErrorAvg > 0.000060 || stats.ErrorMax > 0.00048 {
+ t.Fatalf("performance stats are out of spec: %v", stats)
+ }
+}
+
+func TestPerformance_Height(t *testing.T) {
+ const radius = 100 * time.Millisecond
+ const nodes, cycles = 25, 1000
+
+ // Constrain us to two dimensions so that we can just exactly represent
+ // the circle.
+ config := DefaultConfig()
+ config.Dimensionality = 2
+ clients, err := GenerateClients(nodes, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Generate truth where the first coordinate is in the "middle" because
+ // it's equidistant from all the nodes, but it will have an extra radius
+ // added to the distance, so it should come out above all the others.
+ truth := GenerateCircle(nodes, radius)
+ Simulate(clients, truth, cycles)
+
+ // Make sure the height looks reasonable with the regular nodes all in a
+ // plane, and the center node up above.
+ for i, _ := range clients {
+ coord := clients[i].GetCoordinate()
+ if i == 0 {
+ if coord.Height < 0.97*radius.Seconds() {
+ t.Fatalf("height is out of spec: %9.6f", coord.Height)
+ }
+ } else {
+ if coord.Height > 0.03*radius.Seconds() {
+ t.Fatalf("height is out of spec: %9.6f", coord.Height)
+ }
+ }
+ }
+ stats := Evaluate(clients, truth)
+ if stats.ErrorAvg > 0.0025 || stats.ErrorMax > 0.064 {
+ t.Fatalf("performance stats are out of spec: %v", stats)
+ }
+}
+
+func TestPerformance_Drift(t *testing.T) {
+ const dist = 500 * time.Millisecond
+ const nodes = 4
+ config := DefaultConfig()
+ config.Dimensionality = 2
+ clients, err := GenerateClients(nodes, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Do some icky surgery on the clients to put them into a square, up in
+ // the first quadrant.
+ clients[0].coord.Vec = []float64{0.0, 0.0}
+ clients[1].coord.Vec = []float64{0.0, dist.Seconds()}
+ clients[2].coord.Vec = []float64{dist.Seconds(), dist.Seconds()}
+	clients[3].coord.Vec = []float64{dist.Seconds(), 0.0}
+
+ // Make a corresponding truth matrix. The nodes are laid out like this
+ // so the distances are all equal, except for the diagonal:
+ //
+ // (1) <- dist -> (2)
+ //
+ // | <- dist |
+ // | |
+ // | dist -> |
+ //
+ // (0) <- dist -> (3)
+ //
+ truth := make([][]time.Duration, nodes)
+ for i := range truth {
+ truth[i] = make([]time.Duration, nodes)
+ }
+ for i := 0; i < nodes; i++ {
+ for j := i + 1; j < nodes; j++ {
+ rtt := dist
+ if (i%2 == 0) && (j%2 == 0) {
+ rtt = time.Duration(math.Sqrt2 * float64(rtt))
+ }
+ truth[i][j], truth[j][i] = rtt, rtt
+ }
+ }
+
+ calcCenterError := func() float64 {
+ min, max := clients[0].GetCoordinate(), clients[0].GetCoordinate()
+ for i := 1; i < nodes; i++ {
+ coord := clients[i].GetCoordinate()
+ for j, v := range coord.Vec {
+ min.Vec[j] = math.Min(min.Vec[j], v)
+ max.Vec[j] = math.Max(max.Vec[j], v)
+ }
+ }
+
+ mid := make([]float64, config.Dimensionality)
+ for i, _ := range mid {
+ mid[i] = min.Vec[i] + (max.Vec[i]-min.Vec[i])/2
+ }
+ return magnitude(mid)
+ }
+
+ // Let the simulation run for a while to stabilize, then snap a baseline
+ // for the center error.
+ Simulate(clients, truth, 1000)
+ baseline := calcCenterError()
+
+ // Now run for a bunch more cycles and see if gravity pulls the center
+ // in the right direction.
+ Simulate(clients, truth, 10000)
+ if error := calcCenterError(); error > 0.8*baseline {
+ t.Fatalf("drift performance out of spec: %9.6f -> %9.6f", baseline, error)
+ }
+}
+
+func TestPerformance_Random(t *testing.T) {
+ const mean, deviation = 100 * time.Millisecond, 10 * time.Millisecond
+ const nodes, cycles = 25, 1000
+ config := DefaultConfig()
+ clients, err := GenerateClients(nodes, config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ truth := GenerateRandom(nodes, mean, deviation)
+ Simulate(clients, truth, cycles)
+ stats := Evaluate(clients, truth)
+ if stats.ErrorAvg > 0.075 || stats.ErrorMax > 0.33 {
+ t.Fatalf("performance stats are out of spec: %v", stats)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/phantom.go b/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/phantom.go
new file mode 100644
index 000000000..6fb033c0c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/phantom.go
@@ -0,0 +1,187 @@
+package coordinate
+
+import (
+ "fmt"
+ "math"
+ "math/rand"
+ "time"
+)
+
+// GenerateClients returns a slice containing the given number of clients, all
+// created with the given config.
+func GenerateClients(nodes int, config *Config) ([]*Client, error) {
+ clients := make([]*Client, nodes)
+ for i, _ := range clients {
+ client, err := NewClient(config)
+ if err != nil {
+ return nil, err
+ }
+
+ clients[i] = client
+ }
+ return clients, nil
+}
+
+// GenerateLine returns a truth matrix as if all the nodes are in a straight line
+// with the given spacing between them.
+func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration {
+ truth := make([][]time.Duration, nodes)
+ for i := range truth {
+ truth[i] = make([]time.Duration, nodes)
+ }
+
+ for i := 0; i < nodes; i++ {
+ for j := i + 1; j < nodes; j++ {
+ rtt := time.Duration(j-i) * spacing
+ truth[i][j], truth[j][i] = rtt, rtt
+ }
+ }
+ return truth
+}
+
+// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional
+// grid with the given spacing between them.
+func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration {
+ truth := make([][]time.Duration, nodes)
+ for i := range truth {
+ truth[i] = make([]time.Duration, nodes)
+ }
+
+ n := int(math.Sqrt(float64(nodes)))
+ for i := 0; i < nodes; i++ {
+ for j := i + 1; j < nodes; j++ {
+ x1, y1 := float64(i%n), float64(i/n)
+ x2, y2 := float64(j%n), float64(j/n)
+ dx, dy := x2-x1, y2-y1
+ dist := math.Sqrt(dx*dx + dy*dy)
+ rtt := time.Duration(dist * float64(spacing))
+ truth[i][j], truth[j][i] = rtt, rtt
+ }
+ }
+ return truth
+}
+
+// GenerateSplit returns a truth matrix as if half the nodes are close together in
+// one location and half the nodes are close together in another. The lan factor
+// is used to separate the nodes locally and the wan factor represents the split
+// between the two sides.
+func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration {
+ truth := make([][]time.Duration, nodes)
+ for i := range truth {
+ truth[i] = make([]time.Duration, nodes)
+ }
+
+ split := nodes / 2
+ for i := 0; i < nodes; i++ {
+ for j := i + 1; j < nodes; j++ {
+ rtt := lan
+ if (i <= split && j > split) || (i > split && j <= split) {
+ rtt += wan
+ }
+ truth[i][j], truth[j][i] = rtt, rtt
+ }
+ }
+ return truth
+}
+
+// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed
+// around a circle with the given radius. The first node is at the "center" of the
+// circle because it's equidistant from all the other nodes, but we place it at
+// double the radius, so it should show up above all the other nodes in height.
+func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration {
+ truth := make([][]time.Duration, nodes)
+ for i := range truth {
+ truth[i] = make([]time.Duration, nodes)
+ }
+
+ for i := 0; i < nodes; i++ {
+ for j := i + 1; j < nodes; j++ {
+ var rtt time.Duration
+ if i == 0 {
+ rtt = 2 * radius
+ } else {
+ t1 := 2.0 * math.Pi * float64(i) / float64(nodes)
+ x1, y1 := math.Cos(t1), math.Sin(t1)
+ t2 := 2.0 * math.Pi * float64(j) / float64(nodes)
+ x2, y2 := math.Cos(t2), math.Sin(t2)
+ dx, dy := x2-x1, y2-y1
+ dist := math.Sqrt(dx*dx + dy*dy)
+ rtt = time.Duration(dist * float64(radius))
+ }
+ truth[i][j], truth[j][i] = rtt, rtt
+ }
+ }
+ return truth
+}
+
+// GenerateRandom returns a truth matrix for a set of nodes with normally
+// distributed delays, with the given mean and deviation. The RNG is re-seeded
+// so you always get the same matrix for a given size.
+func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration {
+ rand.Seed(1)
+
+ truth := make([][]time.Duration, nodes)
+ for i := range truth {
+ truth[i] = make([]time.Duration, nodes)
+ }
+
+ for i := 0; i < nodes; i++ {
+ for j := i + 1; j < nodes; j++ {
+ rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds()
+ rtt := time.Duration(rttSeconds * secondsToNanoseconds)
+ truth[i][j], truth[j][i] = rtt, rtt
+ }
+ }
+ return truth
+}
+
+// Simulate runs the given number of cycles using the given list of clients and
+// truth matrix. On each cycle, each client will pick a random node and observe
+// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for
+// each simulation run to get deterministic results (for this algorithm and the
+// underlying algorithm which will use random numbers for position vectors when
+// starting out with everything at the origin).
+func Simulate(clients []*Client, truth [][]time.Duration, cycles int) {
+ rand.Seed(1)
+
+ nodes := len(clients)
+ for cycle := 0; cycle < cycles; cycle++ {
+ for i, _ := range clients {
+ if j := rand.Intn(nodes); j != i {
+ c := clients[j].GetCoordinate()
+ rtt := truth[i][j]
+ node := fmt.Sprintf("node_%d", j)
+ clients[i].Update(node, c, rtt)
+ }
+ }
+ }
+}
+
+// Stats is returned from the Evaluate function with a summary of the algorithm
+// performance.
+type Stats struct {
+ ErrorMax float64
+ ErrorAvg float64
+}
+
+// Evaluate uses the coordinates of the given clients to calculate estimated
+// distances and compares them with the given truth matrix, returning summary
+// stats.
+func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) {
+ nodes := len(clients)
+ count := 0
+ for i := 0; i < nodes; i++ {
+ for j := i + 1; j < nodes; j++ {
+ est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds()
+ actual := truth[i][j].Seconds()
+ error := math.Abs(est-actual) / actual
+ stats.ErrorMax = math.Max(stats.ErrorMax, error)
+ stats.ErrorAvg += error
+ count += 1
+ }
+ }
+
+ stats.ErrorAvg /= float64(count)
+ fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax)
+ return
+}
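+
+// Putting the helpers above together (an illustrative sketch; the node count,
+// spacing, and cycle count are arbitrary):
+//
+//	config := DefaultConfig()
+//	clients, err := GenerateClients(10, config)
+//	if err != nil {
+//		// handle the error
+//	}
+//	truth := GenerateLine(10, 10*time.Millisecond)
+//	Simulate(clients, truth, 1000)
+//	stats := Evaluate(clients, truth)
+//	// stats.ErrorAvg and stats.ErrorMax summarize how closely the estimated
+//	// distances track the truth matrix.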
diff --git a/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/test_util.go b/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/test_util.go
new file mode 100644
index 000000000..116e94933
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/test_util.go
@@ -0,0 +1,27 @@
+package coordinate
+
+import (
+ "math"
+ "testing"
+)
+
+// verifyEqualFloats will compare f1 and f2 and fail if they are not
+// "equal" within a threshold.
+func verifyEqualFloats(t *testing.T, f1 float64, f2 float64) {
+ const zeroThreshold = 1.0e-6
+ if math.Abs(f1-f2) > zeroThreshold {
+ t.Fatalf("equal assertion fail, %9.6f != %9.6f", f1, f2)
+ }
+}
+
+// verifyEqualVectors will compare vec1 and vec2 and fail if they are not
+// "equal" within a threshold.
+func verifyEqualVectors(t *testing.T, vec1 []float64, vec2 []float64) {
+ if len(vec1) != len(vec2) {
+ t.Fatalf("vector length mismatch, %d != %d", len(vec1), len(vec2))
+ }
+
+ for i, _ := range vec1 {
+ verifyEqualFloats(t, vec1[i], vec2[i])
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/uuid/uuid_test.go b/Godeps/_workspace/src/github.com/hashicorp/uuid/uuid_test.go
new file mode 100644
index 000000000..bd3781ef6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/uuid/uuid_test.go
@@ -0,0 +1,22 @@
+package uuid
+
+import (
+ "regexp"
+ "testing"
+)
+
+func TestGenerateUUID(t *testing.T) {
+ prev := GenerateUUID()
+ for i := 0; i < 100; i++ {
+ id := GenerateUUID()
+ if prev == id {
+ t.Fatalf("Should get a new ID!")
+ }
+
+ matched, err := regexp.MatchString(
+ "[\\da-f]{8}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{12}", id)
+ if !matched || err != nil {
+ t.Fatalf("expected match %s %v %s", id, matched, err)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/kardianos/osext/osext_test.go b/Godeps/_workspace/src/github.com/kardianos/osext/osext_test.go
new file mode 100644
index 000000000..77ccc28e9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/kardianos/osext/osext_test.go
@@ -0,0 +1,203 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin linux freebsd netbsd windows
+
+package osext
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "testing"
+)
+
+const (
+ executableEnvVar = "OSTEST_OUTPUT_EXECUTABLE"
+
+ executableEnvValueMatch = "match"
+ executableEnvValueDelete = "delete"
+)
+
+func TestPrintExecutable(t *testing.T) {
+ ef, err := Executable()
+ if err != nil {
+ t.Fatalf("Executable failed: %v", err)
+ }
+ t.Log("Executable:", ef)
+}
+func TestPrintExecutableFolder(t *testing.T) {
+ ef, err := ExecutableFolder()
+ if err != nil {
+ t.Fatalf("ExecutableFolder failed: %v", err)
+ }
+ t.Log("Executable Folder:", ef)
+}
+func TestExecutableFolder(t *testing.T) {
+ ef, err := ExecutableFolder()
+ if err != nil {
+ t.Fatalf("ExecutableFolder failed: %v", err)
+ }
+ if ef[len(ef)-1] == filepath.Separator {
+ t.Fatal("ExecutableFolder ends with a trailing slash.")
+ }
+}
+func TestExecutableMatch(t *testing.T) {
+ ep, err := Executable()
+ if err != nil {
+ t.Fatalf("Executable failed: %v", err)
+ }
+
+ // fullpath to be of the form "dir/prog".
+ dir := filepath.Dir(filepath.Dir(ep))
+ fullpath, err := filepath.Rel(dir, ep)
+ if err != nil {
+ t.Fatalf("filepath.Rel: %v", err)
+ }
+ // Make child start with a relative program path.
+ // Alter argv[0] for child to verify getting real path without argv[0].
+ cmd := &exec.Cmd{
+ Dir: dir,
+ Path: fullpath,
+ Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueMatch)},
+ }
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("exec(self) failed: %v", err)
+ }
+ outs := string(out)
+ if !filepath.IsAbs(outs) {
+ t.Fatalf("Child returned %q, want an absolute path", out)
+ }
+ if !sameFile(outs, ep) {
+ t.Fatalf("Child returned %q, not the same file as %q", out, ep)
+ }
+}
+
+func TestExecutableDelete(t *testing.T) {
+ if runtime.GOOS != "linux" {
+ t.Skip()
+ }
+ fpath, err := Executable()
+ if err != nil {
+ t.Fatalf("Executable failed: %v", err)
+ }
+
+ r, w := io.Pipe()
+ stderrBuff := &bytes.Buffer{}
+ stdoutBuff := &bytes.Buffer{}
+ cmd := &exec.Cmd{
+ Path: fpath,
+ Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueDelete)},
+ Stdin: r,
+ Stderr: stderrBuff,
+ Stdout: stdoutBuff,
+ }
+ err = cmd.Start()
+ if err != nil {
+ t.Fatalf("exec(self) start failed: %v", err)
+ }
+
+ tempPath := fpath + "_copy"
+ _ = os.Remove(tempPath)
+
+ err = copyFile(tempPath, fpath)
+ if err != nil {
+ t.Fatalf("copy file failed: %v", err)
+ }
+ err = os.Remove(fpath)
+ if err != nil {
+ t.Fatalf("remove running test file failed: %v", err)
+ }
+ err = os.Rename(tempPath, fpath)
+ if err != nil {
+ t.Fatalf("rename copy to previous name failed: %v", err)
+ }
+
+ w.Write([]byte{0})
+ w.Close()
+
+ err = cmd.Wait()
+ if err != nil {
+ t.Fatalf("exec wait failed: %v", err)
+ }
+
+ childPath := stderrBuff.String()
+ if !filepath.IsAbs(childPath) {
+ t.Fatalf("Child returned %q, want an absolute path", childPath)
+ }
+ if !sameFile(childPath, fpath) {
+ t.Fatalf("Child returned %q, not the same file as %q", childPath, fpath)
+ }
+}
+
+func sameFile(fn1, fn2 string) bool {
+ fi1, err := os.Stat(fn1)
+ if err != nil {
+ return false
+ }
+ fi2, err := os.Stat(fn2)
+ if err != nil {
+ return false
+ }
+ return os.SameFile(fi1, fi2)
+}
+func copyFile(dest, src string) error {
+ df, err := os.Create(dest)
+ if err != nil {
+ return err
+ }
+ defer df.Close()
+
+ sf, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer sf.Close()
+
+ _, err = io.Copy(df, sf)
+ return err
+}
+
+func TestMain(m *testing.M) {
+ env := os.Getenv(executableEnvVar)
+ switch env {
+ case "":
+ os.Exit(m.Run())
+ case executableEnvValueMatch:
+ // First chdir to another path.
+ dir := "/"
+ if runtime.GOOS == "windows" {
+ dir = filepath.VolumeName(".")
+ }
+ os.Chdir(dir)
+ if ep, err := Executable(); err != nil {
+ fmt.Fprint(os.Stderr, "ERROR: ", err)
+ } else {
+ fmt.Fprint(os.Stderr, ep)
+ }
+ case executableEnvValueDelete:
+ bb := make([]byte, 1)
+ var err error
+ n, err := os.Stdin.Read(bb)
+ if err != nil {
+ fmt.Fprint(os.Stderr, "ERROR: ", err)
+ os.Exit(2)
+ }
+ if n != 1 {
+ fmt.Fprint(os.Stderr, "ERROR: n != 1, n == ", n)
+ os.Exit(2)
+ }
+ if ep, err := Executable(); err != nil {
+ fmt.Fprint(os.Stderr, "ERROR: ", err)
+ } else {
+ fmt.Fprint(os.Stderr, ep)
+ }
+ }
+ os.Exit(0)
+}
diff --git a/Godeps/_workspace/src/github.com/lib/pq/bench_test.go b/Godeps/_workspace/src/github.com/lib/pq/bench_test.go
new file mode 100644
index 000000000..e71f41d06
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/lib/pq/bench_test.go
@@ -0,0 +1,435 @@
+// +build go1.1
+
+package pq
+
+import (
+ "bufio"
+ "bytes"
+ "database/sql"
+ "database/sql/driver"
+ "io"
+ "math/rand"
+ "net"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/lib/pq/oid"
+)
+
+var (
+ selectStringQuery = "SELECT '" + strings.Repeat("0123456789", 10) + "'"
+ selectSeriesQuery = "SELECT generate_series(1, 100)"
+)
+
+func BenchmarkSelectString(b *testing.B) {
+ var result string
+ benchQuery(b, selectStringQuery, &result)
+}
+
+func BenchmarkSelectSeries(b *testing.B) {
+ var result int
+ benchQuery(b, selectSeriesQuery, &result)
+}
+
+func benchQuery(b *testing.B, query string, result interface{}) {
+ b.StopTimer()
+ db := openTestConn(b)
+ defer db.Close()
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ benchQueryLoop(b, db, query, result)
+ }
+}
+
+func benchQueryLoop(b *testing.B, db *sql.DB, query string, result interface{}) {
+ rows, err := db.Query(query)
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer rows.Close()
+ for rows.Next() {
+ err = rows.Scan(result)
+ if err != nil {
+ b.Fatal("failed to scan", err)
+ }
+ }
+}
+
+// reading from circularConn yields content[:prefixLen] once, followed by
+// content[prefixLen:] over and over again. It never returns EOF.
+type circularConn struct {
+ content string
+ prefixLen int
+ pos int
+ net.Conn // for all other net.Conn methods that will never be called
+}
+
+func (r *circularConn) Read(b []byte) (n int, err error) {
+ n = copy(b, r.content[r.pos:])
+ r.pos += n
+ if r.pos >= len(r.content) {
+ r.pos = r.prefixLen
+ }
+ return
+}
+
+func (r *circularConn) Write(b []byte) (n int, err error) { return len(b), nil }
+
+func (r *circularConn) Close() error { return nil }
+
+func fakeConn(content string, prefixLen int) *conn {
+ c := &circularConn{content: content, prefixLen: prefixLen}
+ return &conn{buf: bufio.NewReader(c), c: c}
+}
+
+// This benchmark is meant to be the same as BenchmarkSelectString, but takes
+// out some of the factors this package can't control. The numbers are less noisy,
+// but also the costs of network communication aren't accurately represented.
+func BenchmarkMockSelectString(b *testing.B) {
+ b.StopTimer()
+ // taken from a recorded run of BenchmarkSelectString
+ // See: http://www.postgresql.org/docs/current/static/protocol-message-formats.html
+ const response = "1\x00\x00\x00\x04" +
+ "t\x00\x00\x00\x06\x00\x00" +
+ "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" +
+ "Z\x00\x00\x00\x05I" +
+ "2\x00\x00\x00\x04" +
+ "D\x00\x00\x00n\x00\x01\x00\x00\x00d0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
+ "C\x00\x00\x00\rSELECT 1\x00" +
+ "Z\x00\x00\x00\x05I" +
+ "3\x00\x00\x00\x04" +
+ "Z\x00\x00\x00\x05I"
+ c := fakeConn(response, 0)
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ benchMockQuery(b, c, selectStringQuery)
+ }
+}
+
+var seriesRowData = func() string {
+ var buf bytes.Buffer
+ for i := 1; i <= 100; i++ {
+ digits := byte(2)
+ if i >= 100 {
+ digits = 3
+ } else if i < 10 {
+ digits = 1
+ }
+ buf.WriteString("D\x00\x00\x00")
+ buf.WriteByte(10 + digits)
+ buf.WriteString("\x00\x01\x00\x00\x00")
+ buf.WriteByte(digits)
+ buf.WriteString(strconv.Itoa(i))
+ }
+ return buf.String()
+}()
+
+func BenchmarkMockSelectSeries(b *testing.B) {
+ b.StopTimer()
+ var response = "1\x00\x00\x00\x04" +
+ "t\x00\x00\x00\x06\x00\x00" +
+ "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" +
+ "Z\x00\x00\x00\x05I" +
+ "2\x00\x00\x00\x04" +
+ seriesRowData +
+ "C\x00\x00\x00\x0fSELECT 100\x00" +
+ "Z\x00\x00\x00\x05I" +
+ "3\x00\x00\x00\x04" +
+ "Z\x00\x00\x00\x05I"
+ c := fakeConn(response, 0)
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ benchMockQuery(b, c, selectSeriesQuery)
+ }
+}
+
+func benchMockQuery(b *testing.B, c *conn, query string) {
+ stmt, err := c.Prepare(query)
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer stmt.Close()
+ rows, err := stmt.Query(nil)
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer rows.Close()
+ var dest [1]driver.Value
+ for {
+ if err := rows.Next(dest[:]); err != nil {
+ if err == io.EOF {
+ break
+ }
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkPreparedSelectString(b *testing.B) {
+ var result string
+ benchPreparedQuery(b, selectStringQuery, &result)
+}
+
+func BenchmarkPreparedSelectSeries(b *testing.B) {
+ var result int
+ benchPreparedQuery(b, selectSeriesQuery, &result)
+}
+
+func benchPreparedQuery(b *testing.B, query string, result interface{}) {
+ b.StopTimer()
+ db := openTestConn(b)
+ defer db.Close()
+ stmt, err := db.Prepare(query)
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer stmt.Close()
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ benchPreparedQueryLoop(b, db, stmt, result)
+ }
+}
+
+func benchPreparedQueryLoop(b *testing.B, db *sql.DB, stmt *sql.Stmt, result interface{}) {
+ rows, err := stmt.Query()
+ if err != nil {
+ b.Fatal(err)
+ }
+ if !rows.Next() {
+ rows.Close()
+ b.Fatal("no rows")
+ }
+ defer rows.Close()
+ for rows.Next() {
+ err = rows.Scan(&result)
+ if err != nil {
+ b.Fatal("failed to scan")
+ }
+ }
+}
+
+// See the comment for BenchmarkMockSelectString.
+func BenchmarkMockPreparedSelectString(b *testing.B) {
+ b.StopTimer()
+ const parseResponse = "1\x00\x00\x00\x04" +
+ "t\x00\x00\x00\x06\x00\x00" +
+ "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" +
+ "Z\x00\x00\x00\x05I"
+ const responses = parseResponse +
+ "2\x00\x00\x00\x04" +
+ "D\x00\x00\x00n\x00\x01\x00\x00\x00d0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
+ "C\x00\x00\x00\rSELECT 1\x00" +
+ "Z\x00\x00\x00\x05I"
+ c := fakeConn(responses, len(parseResponse))
+
+ stmt, err := c.Prepare(selectStringQuery)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ benchPreparedMockQuery(b, c, stmt)
+ }
+}
+
+func BenchmarkMockPreparedSelectSeries(b *testing.B) {
+ b.StopTimer()
+ const parseResponse = "1\x00\x00\x00\x04" +
+ "t\x00\x00\x00\x06\x00\x00" +
+ "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" +
+ "Z\x00\x00\x00\x05I"
+ var responses = parseResponse +
+ "2\x00\x00\x00\x04" +
+ seriesRowData +
+ "C\x00\x00\x00\x0fSELECT 100\x00" +
+ "Z\x00\x00\x00\x05I"
+ c := fakeConn(responses, len(parseResponse))
+
+ stmt, err := c.Prepare(selectSeriesQuery)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ benchPreparedMockQuery(b, c, stmt)
+ }
+}
+
+func benchPreparedMockQuery(b *testing.B, c *conn, stmt driver.Stmt) {
+ rows, err := stmt.Query(nil)
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer rows.Close()
+ var dest [1]driver.Value
+ for {
+ if err := rows.Next(dest[:]); err != nil {
+ if err == io.EOF {
+ break
+ }
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkEncodeInt64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ encode(&parameterStatus{}, int64(1234), oid.T_int8)
+ }
+}
+
+func BenchmarkEncodeFloat64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ encode(&parameterStatus{}, 3.14159, oid.T_float8)
+ }
+}
+
+var testByteString = []byte("abcdefghijklmnopqrstuvwxyz")
+
+func BenchmarkEncodeByteaHex(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ encode(&parameterStatus{serverVersion: 90000}, testByteString, oid.T_bytea)
+ }
+}
+func BenchmarkEncodeByteaEscape(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ encode(&parameterStatus{serverVersion: 84000}, testByteString, oid.T_bytea)
+ }
+}
+
+func BenchmarkEncodeBool(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ encode(&parameterStatus{}, true, oid.T_bool)
+ }
+}
+
+var testTimestamptz = time.Date(2001, time.January, 1, 0, 0, 0, 0, time.Local)
+
+func BenchmarkEncodeTimestamptz(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ encode(&parameterStatus{}, testTimestamptz, oid.T_timestamptz)
+ }
+}
+
+var testIntBytes = []byte("1234")
+
+func BenchmarkDecodeInt64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ decode(&parameterStatus{}, testIntBytes, oid.T_int8, formatText)
+ }
+}
+
+var testFloatBytes = []byte("3.14159")
+
+func BenchmarkDecodeFloat64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ decode(&parameterStatus{}, testFloatBytes, oid.T_float8, formatText)
+ }
+}
+
+var testBoolBytes = []byte{'t'}
+
+func BenchmarkDecodeBool(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ decode(&parameterStatus{}, testBoolBytes, oid.T_bool, formatText)
+ }
+}
+
+func TestDecodeBool(t *testing.T) {
+ db := openTestConn(t)
+ rows, err := db.Query("select true")
+ if err != nil {
+ t.Fatal(err)
+ }
+ rows.Close()
+}
+
+var testTimestamptzBytes = []byte("2013-09-17 22:15:32.360754-07")
+
+func BenchmarkDecodeTimestamptz(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ decode(&parameterStatus{}, testTimestamptzBytes, oid.T_timestamptz, formatText)
+ }
+}
+
+func BenchmarkDecodeTimestamptzMultiThread(b *testing.B) {
+ oldProcs := runtime.GOMAXPROCS(0)
+ defer runtime.GOMAXPROCS(oldProcs)
+ runtime.GOMAXPROCS(runtime.NumCPU())
+ globalLocationCache = newLocationCache()
+
+ f := func(wg *sync.WaitGroup, loops int) {
+ defer wg.Done()
+ for i := 0; i < loops; i++ {
+ decode(&parameterStatus{}, testTimestamptzBytes, oid.T_timestamptz, formatText)
+ }
+ }
+
+ wg := &sync.WaitGroup{}
+ b.ResetTimer()
+ for j := 0; j < 10; j++ {
+ wg.Add(1)
+ go f(wg, b.N/10)
+ }
+ wg.Wait()
+}
+
+func BenchmarkLocationCache(b *testing.B) {
+ globalLocationCache = newLocationCache()
+ for i := 0; i < b.N; i++ {
+ globalLocationCache.getLocation(rand.Intn(10000))
+ }
+}
+
+func BenchmarkLocationCacheMultiThread(b *testing.B) {
+ oldProcs := runtime.GOMAXPROCS(0)
+ defer runtime.GOMAXPROCS(oldProcs)
+ runtime.GOMAXPROCS(runtime.NumCPU())
+ globalLocationCache = newLocationCache()
+
+ f := func(wg *sync.WaitGroup, loops int) {
+ defer wg.Done()
+ for i := 0; i < loops; i++ {
+ globalLocationCache.getLocation(rand.Intn(10000))
+ }
+ }
+
+ wg := &sync.WaitGroup{}
+ b.ResetTimer()
+ for j := 0; j < 10; j++ {
+ wg.Add(1)
+ go f(wg, b.N/10)
+ }
+ wg.Wait()
+}
+
+// Stress test the performance of parsing results from the wire.
+func BenchmarkResultParsing(b *testing.B) {
+ b.StopTimer()
+
+ db := openTestConn(b)
+ defer db.Close()
+ _, err := db.Exec("BEGIN")
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ res, err := db.Query("SELECT generate_series(1, 50000)")
+ if err != nil {
+ b.Fatal(err)
+ }
+ res.Close()
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/lib/pq/conn_test.go b/Godeps/_workspace/src/github.com/lib/pq/conn_test.go
new file mode 100644
index 000000000..af07e5596
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/lib/pq/conn_test.go
@@ -0,0 +1,1306 @@
+package pq
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+type Fatalistic interface {
+ Fatal(args ...interface{})
+}
+
+func forceBinaryParameters() bool {
+ bp := os.Getenv("PQTEST_BINARY_PARAMETERS")
+ if bp == "yes" {
+ return true
+ } else if bp == "" || bp == "no" {
+ return false
+ } else {
+ panic("unexpected value for PQTEST_BINARY_PARAMETERS")
+ }
+}
+
+func openTestConnConninfo(conninfo string) (*sql.DB, error) {
+ defaultTo := func(envvar string, value string) {
+ if os.Getenv(envvar) == "" {
+ os.Setenv(envvar, value)
+ }
+ }
+ defaultTo("PGDATABASE", "pqgotest")
+ defaultTo("PGSSLMODE", "disable")
+ defaultTo("PGCONNECT_TIMEOUT", "20")
+
+ if forceBinaryParameters() &&
+ !strings.HasPrefix(conninfo, "postgres://") &&
+ !strings.HasPrefix(conninfo, "postgresql://") {
+ conninfo = conninfo + " binary_parameters=yes"
+ }
+
+ return sql.Open("postgres", conninfo)
+}
+
+func openTestConn(t Fatalistic) *sql.DB {
+ conn, err := openTestConnConninfo("")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return conn
+}
+
+func getServerVersion(t *testing.T, db *sql.DB) int {
+ var version int
+ err := db.QueryRow("SHOW server_version_num").Scan(&version)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return version
+}
+
+func TestReconnect(t *testing.T) {
+ db1 := openTestConn(t)
+ defer db1.Close()
+ tx, err := db1.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var pid1 int
+ err = tx.QueryRow("SELECT pg_backend_pid()").Scan(&pid1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ db2 := openTestConn(t)
+ defer db2.Close()
+ _, err = db2.Exec("SELECT pg_terminate_backend($1)", pid1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // The rollback will probably "fail" because we just killed
+ // its connection above
+ _ = tx.Rollback()
+
+ const expected int = 42
+ var result int
+ err = db1.QueryRow(fmt.Sprintf("SELECT %d", expected)).Scan(&result)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if result != expected {
+ t.Errorf("got %v; expected %v", result, expected)
+ }
+}
+
+func TestCommitInFailedTransaction(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ txn, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ rows, err := txn.Query("SELECT error")
+ if err == nil {
+ rows.Close()
+ t.Fatal("expected failure")
+ }
+ err = txn.Commit()
+ if err != ErrInFailedTransaction {
+ t.Fatalf("expected ErrInFailedTransaction; got %#v", err)
+ }
+}
+
+func TestOpenURL(t *testing.T) {
+ testURL := func(url string) {
+ db, err := openTestConnConninfo(url)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer db.Close()
+ // database/sql might not call our Open at all unless we do something with
+ // the connection
+ txn, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ txn.Rollback()
+ }
+ testURL("postgres://")
+ testURL("postgresql://")
+}
+
+func TestExec(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ _, err := db.Exec("CREATE TEMP TABLE temp (a int)")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ r, err := db.Exec("INSERT INTO temp VALUES (1)")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if n, _ := r.RowsAffected(); n != 1 {
+ t.Fatalf("expected 1 row affected, not %d", n)
+ }
+
+ r, err = db.Exec("INSERT INTO temp VALUES ($1), ($2), ($3)", 1, 2, 3)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if n, _ := r.RowsAffected(); n != 3 {
+ t.Fatalf("expected 3 rows affected, not %d", n)
+ }
+
+ // SELECT doesn't send the number of returned rows in the command tag
+ // before 9.0
+ if getServerVersion(t, db) >= 90000 {
+ r, err = db.Exec("SELECT g FROM generate_series(1, 2) g")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n, _ := r.RowsAffected(); n != 2 {
+ t.Fatalf("expected 2 rows affected, not %d", n)
+ }
+
+ r, err = db.Exec("SELECT g FROM generate_series(1, $1) g", 3)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n, _ := r.RowsAffected(); n != 3 {
+ t.Fatalf("expected 3 rows affected, not %d", n)
+ }
+ }
+}
+
+func TestStatment(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ st, err := db.Prepare("SELECT 1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ st1, err := db.Prepare("SELECT 2")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ r, err := st.Query()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer r.Close()
+
+ if !r.Next() {
+ t.Fatal("expected row")
+ }
+
+ var i int
+ err = r.Scan(&i)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if i != 1 {
+ t.Fatalf("expected 1, got %d", i)
+ }
+
+ // st1
+
+ r1, err := st1.Query()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer r1.Close()
+
+ if !r1.Next() {
+ if r1.Err() != nil {
+ t.Fatal(r1.Err())
+ }
+ t.Fatal("expected row")
+ }
+
+ err = r1.Scan(&i)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if i != 2 {
+ t.Fatalf("expected 2, got %d", i)
+ }
+}
+
+func TestRowsCloseBeforeDone(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ r, err := db.Query("SELECT 1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = r.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if r.Next() {
+ t.Fatal("unexpected row")
+ }
+
+ if r.Err() != nil {
+ t.Fatal(r.Err())
+ }
+}
+
+func TestParameterCountMismatch(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ var notused int
+ err := db.QueryRow("SELECT false", 1).Scan(¬used)
+ if err == nil {
+ t.Fatal("expected err")
+ }
+ // make sure we clean up correctly
+ err = db.QueryRow("SELECT 1").Scan(¬used)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = db.QueryRow("SELECT $1").Scan(¬used)
+ if err == nil {
+ t.Fatal("expected err")
+ }
+ // make sure we clean up correctly
+ err = db.QueryRow("SELECT 1").Scan(¬used)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Test that EmptyQueryResponses are handled correctly.
+func TestEmptyQuery(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ _, err := db.Exec("")
+ if err != nil {
+ t.Fatal(err)
+ }
+ rows, err := db.Query("")
+ if err != nil {
+ t.Fatal(err)
+ }
+ cols, err := rows.Columns()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(cols) != 0 {
+ t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols))
+ }
+ if rows.Next() {
+ t.Fatal("unexpected row")
+ }
+ if rows.Err() != nil {
+ t.Fatal(rows.Err())
+ }
+
+ stmt, err := db.Prepare("")
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = stmt.Exec()
+ if err != nil {
+ t.Fatal(err)
+ }
+ rows, err = stmt.Query()
+ if err != nil {
+ t.Fatal(err)
+ }
+ cols, err = rows.Columns()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(cols) != 0 {
+ t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols))
+ }
+ if rows.Next() {
+ t.Fatal("unexpected row")
+ }
+ if rows.Err() != nil {
+ t.Fatal(rows.Err())
+ }
+}
+
+func TestEncodeDecode(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ q := `
+ SELECT
+ E'\\000\\001\\002'::bytea,
+ 'foobar'::text,
+ NULL::integer,
+ '2000-1-1 01:02:03.04-7'::timestamptz,
+ 0::boolean,
+ 123,
+ -321,
+ 3.14::float8
+ WHERE
+ E'\\000\\001\\002'::bytea = $1
+ AND 'foobar'::text = $2
+ AND $3::integer is NULL
+ `
+ // AND '2000-1-1 12:00:00.000000-7'::timestamp = $3
+
+ exp1 := []byte{0, 1, 2}
+ exp2 := "foobar"
+
+ r, err := db.Query(q, exp1, exp2, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer r.Close()
+
+ if !r.Next() {
+ if r.Err() != nil {
+ t.Fatal(r.Err())
+ }
+ t.Fatal("expected row")
+ }
+
+ var got1 []byte
+ var got2 string
+ var got3 = sql.NullInt64{Valid: true}
+ var got4 time.Time
+ var got5, got6, got7, got8 interface{}
+
+ err = r.Scan(&got1, &got2, &got3, &got4, &got5, &got6, &got7, &got8)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(exp1, got1) {
+ t.Errorf("expected %q byte: %q", exp1, got1)
+ }
+
+ if !reflect.DeepEqual(exp2, got2) {
+ t.Errorf("expected %q byte: %q", exp2, got2)
+ }
+
+ if got3.Valid {
+ t.Fatal("expected invalid")
+ }
+
+ if got4.Year() != 2000 {
+ t.Fatal("wrong year")
+ }
+
+ if got5 != false {
+ t.Fatalf("expected false, got %q", got5)
+ }
+
+ if got6 != int64(123) {
+ t.Fatalf("expected 123, got %d", got6)
+ }
+
+ if got7 != int64(-321) {
+ t.Fatalf("expected -321, got %d", got7)
+ }
+
+ if got8 != float64(3.14) {
+ t.Fatalf("expected 3.14, got %f", got8)
+ }
+}
+
+func TestNoData(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ st, err := db.Prepare("SELECT 1 WHERE true = false")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer st.Close()
+
+ r, err := st.Query()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer r.Close()
+
+ if r.Next() {
+ if r.Err() != nil {
+ t.Fatal(r.Err())
+ }
+ t.Fatal("unexpected row")
+ }
+
+ _, err = db.Query("SELECT * FROM nonexistenttable WHERE age=$1", 20)
+ if err == nil {
+ t.Fatal("Should have raised an error on non existent table")
+ }
+
+ _, err = db.Query("SELECT * FROM nonexistenttable")
+ if err == nil {
+ t.Fatal("Should have raised an error on non existent table")
+ }
+}
+
+func TestErrorDuringStartup(t *testing.T) {
+ // Don't use the normal connection setup, this is intended to
+ // blow up in the startup packet from a non-existent user.
+ db, err := openTestConnConninfo("user=thisuserreallydoesntexist")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer db.Close()
+
+ _, err = db.Begin()
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ e, ok := err.(*Error)
+ if !ok {
+ t.Fatalf("expected Error, got %#v", err)
+ } else if e.Code.Name() != "invalid_authorization_specification" && e.Code.Name() != "invalid_password" {
+ t.Fatalf("expected invalid_authorization_specification or invalid_password, got %s (%+v)", e.Code.Name(), err)
+ }
+}
+
+func TestBadConn(t *testing.T) {
+ var err error
+
+ cn := conn{}
+ func() {
+ defer cn.errRecover(&err)
+ panic(io.EOF)
+ }()
+ if err != driver.ErrBadConn {
+ t.Fatalf("expected driver.ErrBadConn, got: %#v", err)
+ }
+ if !cn.bad {
+ t.Fatalf("expected cn.bad")
+ }
+
+ cn = conn{}
+ func() {
+ defer cn.errRecover(&err)
+ e := &Error{Severity: Efatal}
+ panic(e)
+ }()
+ if err != driver.ErrBadConn {
+ t.Fatalf("expected driver.ErrBadConn, got: %#v", err)
+ }
+ if !cn.bad {
+ t.Fatalf("expected cn.bad")
+ }
+}
+
+func TestErrorOnExec(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ txn, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer txn.Rollback()
+
+ _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = txn.Exec("INSERT INTO foo VALUES (0), (0)")
+ if err == nil {
+ t.Fatal("Should have raised error")
+ }
+
+ e, ok := err.(*Error)
+ if !ok {
+ t.Fatalf("expected Error, got %#v", err)
+ } else if e.Code.Name() != "unique_violation" {
+ t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err)
+ }
+}
+
+func TestErrorOnQuery(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ txn, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer txn.Rollback()
+
+ _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = txn.Query("INSERT INTO foo VALUES (0), (0)")
+ if err == nil {
+ t.Fatal("Should have raised error")
+ }
+
+ e, ok := err.(*Error)
+ if !ok {
+ t.Fatalf("expected Error, got %#v", err)
+ } else if e.Code.Name() != "unique_violation" {
+ t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err)
+ }
+}
+
+func TestErrorOnQueryRowSimpleQuery(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ txn, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer txn.Rollback()
+
+ _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var v int
+ err = txn.QueryRow("INSERT INTO foo VALUES (0), (0)").Scan(&v)
+ if err == nil {
+ t.Fatal("Should have raised error")
+ }
+
+ e, ok := err.(*Error)
+ if !ok {
+ t.Fatalf("expected Error, got %#v", err)
+ } else if e.Code.Name() != "unique_violation" {
+ t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err)
+ }
+}
+
+// Test the QueryRow bug workarounds in stmt.exec() and simpleQuery()
+func TestQueryRowBugWorkaround(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ // stmt.exec()
+ _, err := db.Exec("CREATE TEMP TABLE notnulltemp (a varchar(10) not null)")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var a string
+ err = db.QueryRow("INSERT INTO notnulltemp(a) values($1) RETURNING a", nil).Scan(&a)
+ if err == sql.ErrNoRows {
+ t.Fatalf("expected constraint violation error; got: %v", err)
+ }
+ pge, ok := err.(*Error)
+ if !ok {
+ t.Fatalf("expected *Error; got: %#v", err)
+ }
+ if pge.Code.Name() != "not_null_violation" {
+ t.Fatalf("expected not_null_violation; got: %s (%+v)", pge.Code.Name(), err)
+ }
+
+ // Test workaround in simpleQuery()
+ tx, err := db.Begin()
+ if err != nil {
+ t.Fatalf("unexpected error %s in Begin", err)
+ }
+ defer tx.Rollback()
+
+ _, err = tx.Exec("SET LOCAL check_function_bodies TO FALSE")
+ if err != nil {
+ t.Fatalf("could not disable check_function_bodies: %s", err)
+ }
+ _, err = tx.Exec(`
+CREATE OR REPLACE FUNCTION bad_function()
+RETURNS integer
+-- hack to prevent the function from being inlined
+SET check_function_bodies TO TRUE
+AS $$
+ SELECT text 'bad'
+$$ LANGUAGE sql`)
+ if err != nil {
+ t.Fatalf("could not create function: %s", err)
+ }
+
+ err = tx.QueryRow("SELECT * FROM bad_function()").Scan(&a)
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+ pge, ok = err.(*Error)
+ if !ok {
+ t.Fatalf("expected *Error; got: %#v", err)
+ }
+ if pge.Code.Name() != "invalid_function_definition" {
+ t.Fatalf("expected invalid_function_definition; got: %s (%+v)", pge.Code.Name(), err)
+ }
+
+ err = tx.Rollback()
+ if err != nil {
+ t.Fatalf("unexpected error %s in Rollback", err)
+ }
+
+ // Also test that simpleQuery()'s workaround works when the query fails
+ // after a row has been received.
+ rows, err := db.Query(`
+select
+ (select generate_series(1, ss.i))
+from (select gs.i
+ from generate_series(1, 2) gs(i)
+ order by gs.i limit 2) ss`)
+ if err != nil {
+ t.Fatalf("query failed: %s", err)
+ }
+ if !rows.Next() {
+ t.Fatalf("expected at least one result row; got %s", rows.Err())
+ }
+ var i int
+ err = rows.Scan(&i)
+ if err != nil {
+ t.Fatalf("rows.Scan() failed: %s", err)
+ }
+ if i != 1 {
+ t.Fatalf("unexpected value for i: %d", i)
+ }
+ if rows.Next() {
+ t.Fatalf("unexpected row")
+ }
+ pge, ok = rows.Err().(*Error)
+ if !ok {
+ t.Fatalf("expected *Error; got: %#v", err)
+ }
+ if pge.Code.Name() != "cardinality_violation" {
+ t.Fatalf("expected cardinality_violation; got: %s (%+v)", pge.Code.Name(), rows.Err())
+ }
+}
+
+func TestSimpleQuery(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ r, err := db.Query("select 1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer r.Close()
+
+ if !r.Next() {
+ t.Fatal("expected row")
+ }
+}
+
+func TestBindError(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ _, err := db.Exec("create temp table test (i integer)")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.Query("select * from test where i=$1", "hhh")
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+
+ // Should not get error here
+ r, err := db.Query("select * from test where i=$1", 1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer r.Close()
+}
+
+func TestParseErrorInExtendedQuery(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ rows, err := db.Query("PARSE_ERROR $1", 1)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ rows, err = db.Query("SELECT 1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ rows.Close()
+}
+
+// TestReturning tests that an INSERT query using the RETURNING clause returns a row.
+func TestReturning(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ _, err := db.Exec("CREATE TEMP TABLE distributors (did integer default 0, dname text)")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ rows, err := db.Query("INSERT INTO distributors (did, dname) VALUES (DEFAULT, 'XYZ Widgets') " +
+ "RETURNING did;")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !rows.Next() {
+ t.Fatal("no rows")
+ }
+ var did int
+ err = rows.Scan(&did)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if did != 0 {
+ t.Fatalf("bad value for did: got %d, want %d", did, 0)
+ }
+
+ if rows.Next() {
+ t.Fatal("unexpected next row")
+ }
+ err = rows.Err()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestIssue186(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ // Exec() a query which returns results
+ _, err := db.Exec("VALUES (1), (2), (3)")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.Exec("VALUES ($1), ($2), ($3)", 1, 2, 3)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Query() a query which doesn't return any results
+ txn, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer txn.Rollback()
+
+ rows, err := txn.Query("CREATE TEMP TABLE foo(f1 int)")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err = rows.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ // small trick to get NoData from a parameterized query
+ _, err = txn.Exec("CREATE RULE nodata AS ON INSERT TO foo DO INSTEAD NOTHING")
+ if err != nil {
+ t.Fatal(err)
+ }
+ rows, err = txn.Query("INSERT INTO foo VALUES ($1)", 1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err = rows.Close(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestIssue196(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ row := db.QueryRow("SELECT float4 '0.10000122' = $1, float8 '35.03554004971999' = $2",
+ float32(0.10000122), float64(35.03554004971999))
+
+ var float4match, float8match bool
+ err := row.Scan(&float4match, &float8match)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !float4match {
+ t.Errorf("Expected float4 fidelity to be maintained; got no match")
+ }
+ if !float8match {
+ t.Errorf("Expected float8 fidelity to be maintained; got no match")
+ }
+}
+
+// Test that any CommandComplete messages sent before the query results are
+// ignored.
+func TestIssue282(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ var search_path string
+ err := db.QueryRow(`
+ SET LOCAL search_path TO pg_catalog;
+ SET LOCAL search_path TO pg_catalog;
+ SHOW search_path`).Scan(&search_path)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if search_path != "pg_catalog" {
+ t.Fatalf("unexpected search_path %s", search_path)
+ }
+}
+
+func TestReadFloatPrecision(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ row := db.QueryRow("SELECT float4 '0.10000122', float8 '35.03554004971999'")
+ var float4val float32
+ var float8val float64
+ err := row.Scan(&float4val, &float8val)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if float4val != float32(0.10000122) {
+ t.Errorf("Expected float4 fidelity to be maintained; got no match")
+ }
+ if float8val != float64(35.03554004971999) {
+ t.Errorf("Expected float8 fidelity to be maintained; got no match")
+ }
+}
+
+func TestXactMultiStmt(t *testing.T) {
+ // minified test case based on bug reports from
+ // pico303@gmail.com and rangelspam@gmail.com
+ t.Skip("Skipping failing test")
+ db := openTestConn(t)
+ defer db.Close()
+
+ tx, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer tx.Commit()
+
+ rows, err := tx.Query("select 1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if rows.Next() {
+ var val int32
+ if err = rows.Scan(&val); err != nil {
+ t.Fatal(err)
+ }
+ } else {
+ t.Fatal("Expected at least one row in first query in xact")
+ }
+
+ rows2, err := tx.Query("select 2")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if rows2.Next() {
+ var val2 int32
+ if err := rows2.Scan(&val2); err != nil {
+ t.Fatal(err)
+ }
+ } else {
+ t.Fatal("Expected at least one row in second query in xact")
+ }
+
+ if err = rows.Err(); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = rows2.Err(); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = tx.Commit(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+var envParseTests = []struct {
+ Expected map[string]string
+ Env []string
+}{
+ {
+ Env: []string{"PGDATABASE=hello", "PGUSER=goodbye"},
+ Expected: map[string]string{"dbname": "hello", "user": "goodbye"},
+ },
+ {
+ Env: []string{"PGDATESTYLE=ISO, MDY"},
+ Expected: map[string]string{"datestyle": "ISO, MDY"},
+ },
+ {
+ Env: []string{"PGCONNECT_TIMEOUT=30"},
+ Expected: map[string]string{"connect_timeout": "30"},
+ },
+}
+
+func TestParseEnviron(t *testing.T) {
+ for i, tt := range envParseTests {
+ results := parseEnviron(tt.Env)
+ if !reflect.DeepEqual(tt.Expected, results) {
+ t.Errorf("%d: Expected: %#v Got: %#v", i, tt.Expected, results)
+ }
+ }
+}
+
+func TestParseComplete(t *testing.T) {
+ tpc := func(commandTag string, command string, affectedRows int64, shouldFail bool) {
+ defer func() {
+ if p := recover(); p != nil {
+ if !shouldFail {
+ t.Error(p)
+ }
+ }
+ }()
+ cn := &conn{}
+ res, c := cn.parseComplete(commandTag)
+ if c != command {
+ t.Errorf("Expected %v, got %v", command, c)
+ }
+ n, err := res.RowsAffected()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != affectedRows {
+ t.Errorf("Expected %d, got %d", affectedRows, n)
+ }
+ }
+
+ tpc("ALTER TABLE", "ALTER TABLE", 0, false)
+ tpc("INSERT 0 1", "INSERT", 1, false)
+ tpc("UPDATE 100", "UPDATE", 100, false)
+ tpc("SELECT 100", "SELECT", 100, false)
+ tpc("FETCH 100", "FETCH", 100, false)
+ // allow COPY (and others) without row count
+ tpc("COPY", "COPY", 0, false)
+ // don't fail on command tags we don't recognize
+ tpc("UNKNOWNCOMMANDTAG", "UNKNOWNCOMMANDTAG", 0, false)
+
+ // failure cases
+ tpc("INSERT 1", "", 0, true) // missing oid
+ tpc("UPDATE 0 1", "", 0, true) // too many numbers
+ tpc("SELECT foo", "", 0, true) // invalid row count
+}
+
+func TestExecerInterface(t *testing.T) {
+ // Gin up a straw man private struct just for the type check
+ cn := &conn{c: nil}
+ var cni interface{} = cn
+
+ _, ok := cni.(driver.Execer)
+ if !ok {
+ t.Fatal("Driver doesn't implement Execer")
+ }
+}
+
+func TestNullAfterNonNull(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ r, err := db.Query("SELECT 9::integer UNION SELECT NULL::integer")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var n sql.NullInt64
+
+ if !r.Next() {
+ if r.Err() != nil {
+ t.Fatal(r.Err())
+ }
+ t.Fatal("expected row")
+ }
+
+ if err := r.Scan(&n); err != nil {
+ t.Fatal(err)
+ }
+
+ if n.Int64 != 9 {
+ t.Fatalf("expected 2, not %d", n.Int64)
+ }
+
+ if !r.Next() {
+ if r.Err() != nil {
+ t.Fatal(r.Err())
+ }
+ t.Fatal("expected row")
+ }
+
+ if err := r.Scan(&n); err != nil {
+ t.Fatal(err)
+ }
+
+ if n.Valid {
+ t.Fatal("expected n to be invalid")
+ }
+
+ if n.Int64 != 0 {
+ t.Fatalf("expected n to 2, not %d", n.Int64)
+ }
+}
+
+func Test64BitErrorChecking(t *testing.T) {
+ defer func() {
+ if err := recover(); err != nil {
+ t.Fatal("panic due to 0xFFFFFFFF != -1 " +
+ "when int is 64 bits")
+ }
+ }()
+
+ db := openTestConn(t)
+ defer db.Close()
+
+ r, err := db.Query(`SELECT *
+FROM (VALUES (0::integer, NULL::text), (1, 'test string')) AS t;`)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ defer r.Close()
+
+ for r.Next() {
+ }
+}
+
+func TestCommit(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ _, err := db.Exec("CREATE TEMP TABLE temp (a int)")
+ if err != nil {
+ t.Fatal(err)
+ }
+ sqlInsert := "INSERT INTO temp VALUES (1)"
+ sqlSelect := "SELECT * FROM temp"
+ tx, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = tx.Exec(sqlInsert)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = tx.Commit()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var i int
+ err = db.QueryRow(sqlSelect).Scan(&i)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if i != 1 {
+ t.Fatalf("expected 1, got %d", i)
+ }
+}
+
+func TestErrorClass(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ _, err := db.Query("SELECT int 'notint'")
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ pge, ok := err.(*Error)
+ if !ok {
+ t.Fatalf("expected *pq.Error, got %#+v", err)
+ }
+ if pge.Code.Class() != "22" {
+ t.Fatalf("expected class 28, got %v", pge.Code.Class())
+ }
+ if pge.Code.Class().Name() != "data_exception" {
+ t.Fatalf("expected data_exception, got %v", pge.Code.Class().Name())
+ }
+}
+
+func TestParseOpts(t *testing.T) {
+ tests := []struct {
+ in string
+ expected values
+ valid bool
+ }{
+ {"dbname=hello user=goodbye", values{"dbname": "hello", "user": "goodbye"}, true},
+ {"dbname=hello user=goodbye ", values{"dbname": "hello", "user": "goodbye"}, true},
+ {"dbname = hello user=goodbye", values{"dbname": "hello", "user": "goodbye"}, true},
+ {"dbname=hello user =goodbye", values{"dbname": "hello", "user": "goodbye"}, true},
+ {"dbname=hello user= goodbye", values{"dbname": "hello", "user": "goodbye"}, true},
+ {"host=localhost password='correct horse battery staple'", values{"host": "localhost", "password": "correct horse battery staple"}, true},
+ {"dbname=データベース password=パスワード", values{"dbname": "データベース", "password": "パスワード"}, true},
+ {"dbname=hello user=''", values{"dbname": "hello", "user": ""}, true},
+ {"user='' dbname=hello", values{"dbname": "hello", "user": ""}, true},
+ // The last option value is an empty string if there's no non-whitespace after its =
+ {"dbname=hello user= ", values{"dbname": "hello", "user": ""}, true},
+
+ // The parser ignores spaces after = and interprets the next set of non-whitespace characters as the value.
+ {"user= password=foo", values{"user": "password=foo"}, true},
+
+ // Backslash escapes next char
+ {`user=a\ \'\\b`, values{"user": `a '\b`}, true},
+ {`user='a \'b'`, values{"user": `a 'b`}, true},
+
+ // Incomplete escape
+ {`user=x\`, values{}, false},
+
+ // No '=' after the key
+ {"postgre://marko@internet", values{}, false},
+ {"dbname user=goodbye", values{}, false},
+ {"user=foo blah", values{}, false},
+ {"user=foo blah ", values{}, false},
+
+ // Unterminated quoted value
+ {"dbname=hello user='unterminated", values{}, false},
+ }
+
+ for _, test := range tests {
+ o := make(values)
+ err := parseOpts(test.in, o)
+
+ switch {
+ case err != nil && test.valid:
+ t.Errorf("%q got unexpected error: %s", test.in, err)
+ case err == nil && test.valid && !reflect.DeepEqual(test.expected, o):
+ t.Errorf("%q got: %#v want: %#v", test.in, o, test.expected)
+ case err == nil && !test.valid:
+ t.Errorf("%q expected an error", test.in)
+ }
+ }
+}
+
+func TestRuntimeParameters(t *testing.T) {
+ type RuntimeTestResult int
+ const (
+ ResultUnknown RuntimeTestResult = iota
+ ResultSuccess
+ ResultError // other error
+ )
+
+ tests := []struct {
+ conninfo string
+ param string
+ expected string
+ expectedOutcome RuntimeTestResult
+ }{
+ // invalid parameter
+ {"DOESNOTEXIST=foo", "", "", ResultError},
+ // we can only work with a specific value for these two
+ {"client_encoding=SQL_ASCII", "", "", ResultError},
+ {"datestyle='ISO, YDM'", "", "", ResultError},
+ // "options" should work exactly as it does in libpq
+ {"options='-c search_path=pqgotest'", "search_path", "pqgotest", ResultSuccess},
+ // pq should override client_encoding in this case
+ {"options='-c client_encoding=SQL_ASCII'", "client_encoding", "UTF8", ResultSuccess},
+ // allow client_encoding to be set explicitly
+ {"client_encoding=UTF8", "client_encoding", "UTF8", ResultSuccess},
+ // test a runtime parameter not supported by libpq
+ {"work_mem='139kB'", "work_mem", "139kB", ResultSuccess},
+ // test fallback_application_name
+ {"application_name=foo fallback_application_name=bar", "application_name", "foo", ResultSuccess},
+ {"application_name='' fallback_application_name=bar", "application_name", "", ResultSuccess},
+ {"fallback_application_name=bar", "application_name", "bar", ResultSuccess},
+ }
+
+ for _, test := range tests {
+ db, err := openTestConnConninfo(test.conninfo)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // application_name didn't exist before 9.0
+ if test.param == "application_name" && getServerVersion(t, db) < 90000 {
+ db.Close()
+ continue
+ }
+
+ tryGetParameterValue := func() (value string, outcome RuntimeTestResult) {
+ defer db.Close()
+ row := db.QueryRow("SELECT current_setting($1)", test.param)
+ err = row.Scan(&value)
+ if err != nil {
+ return "", ResultError
+ }
+ return value, ResultSuccess
+ }
+
+ value, outcome := tryGetParameterValue()
+ if outcome != test.expectedOutcome && outcome == ResultError {
+ t.Fatalf("%v: unexpected error: %v", test.conninfo, err)
+ }
+ if outcome != test.expectedOutcome {
+ t.Fatalf("unexpected outcome %v (was expecting %v) for conninfo \"%s\"",
+ outcome, test.expectedOutcome, test.conninfo)
+ }
+ if value != test.expected {
+ t.Fatalf("bad value for %s: got %s, want %s with conninfo \"%s\"",
+ test.param, value, test.expected, test.conninfo)
+ }
+ }
+}
+
+func TestIsUTF8(t *testing.T) {
+ var cases = []struct {
+ name string
+ want bool
+ }{
+ {"unicode", true},
+ {"utf-8", true},
+ {"utf_8", true},
+ {"UTF-8", true},
+ {"UTF8", true},
+ {"utf8", true},
+ {"u n ic_ode", true},
+ {"ut_f%8", true},
+ {"ubf8", false},
+ {"punycode", false},
+ }
+
+ for _, test := range cases {
+ if g := isUTF8(test.name); g != test.want {
+ t.Errorf("isUTF8(%q) = %v want %v", test.name, g, test.want)
+ }
+ }
+}
+
+func TestQuoteIdentifier(t *testing.T) {
+ var cases = []struct {
+ input string
+ want string
+ }{
+ {`foo`, `"foo"`},
+ {`foo bar baz`, `"foo bar baz"`},
+ {`foo"bar`, `"foo""bar"`},
+ {"foo\x00bar", `"foo"`},
+ {"\x00foo", `""`},
+ }
+
+ for _, test := range cases {
+ got := QuoteIdentifier(test.input)
+ if got != test.want {
+ t.Errorf("QuoteIdentifier(%q) = %v want %v", test.input, got, test.want)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/lib/pq/copy_test.go b/Godeps/_workspace/src/github.com/lib/pq/copy_test.go
new file mode 100644
index 000000000..6af4c9c76
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/lib/pq/copy_test.go
@@ -0,0 +1,462 @@
+package pq
+
+import (
+ "bytes"
+ "database/sql"
+ "strings"
+ "testing"
+)
+
+func TestCopyInStmt(t *testing.T) {
+ var stmt string
+ stmt = CopyIn("table name")
+ if stmt != `COPY "table name" () FROM STDIN` {
+ t.Fatal(stmt)
+ }
+
+ stmt = CopyIn("table name", "column 1", "column 2")
+ if stmt != `COPY "table name" ("column 1", "column 2") FROM STDIN` {
+ t.Fatal(stmt)
+ }
+
+ stmt = CopyIn(`table " name """`, `co"lumn""`)
+ if stmt != `COPY "table "" name """"""" ("co""lumn""""") FROM STDIN` {
+ t.Fatal(stmt)
+ }
+}
+
+func TestCopyInSchemaStmt(t *testing.T) {
+ var stmt string
+ stmt = CopyInSchema("schema name", "table name")
+ if stmt != `COPY "schema name"."table name" () FROM STDIN` {
+ t.Fatal(stmt)
+ }
+
+ stmt = CopyInSchema("schema name", "table name", "column 1", "column 2")
+ if stmt != `COPY "schema name"."table name" ("column 1", "column 2") FROM STDIN` {
+ t.Fatal(stmt)
+ }
+
+ stmt = CopyInSchema(`schema " name """`, `table " name """`, `co"lumn""`)
+ if stmt != `COPY "schema "" name """"""".`+
+ `"table "" name """"""" ("co""lumn""""") FROM STDIN` {
+ t.Fatal(stmt)
+ }
+}
+
+func TestCopyInMultipleValues(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ txn, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer txn.Rollback()
+
+ _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ stmt, err := txn.Prepare(CopyIn("temp", "a", "b"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ longString := strings.Repeat("#", 500)
+
+ for i := 0; i < 500; i++ {
+ _, err = stmt.Exec(int64(i), longString)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ _, err = stmt.Exec()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = stmt.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var num int
+ err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if num != 500 {
+ t.Fatalf("expected 500 items, not %d", num)
+ }
+}
+
+func TestCopyInRaiseStmtTrigger(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ if getServerVersion(t, db) < 90000 {
+ var exists int
+ err := db.QueryRow("SELECT 1 FROM pg_language WHERE lanname = 'plpgsql'").Scan(&exists)
+ if err == sql.ErrNoRows {
+ t.Skip("language PL/PgSQL does not exist; skipping TestCopyInRaiseStmtTrigger")
+ } else if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ txn, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer txn.Rollback()
+
+ _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = txn.Exec(`
+ CREATE OR REPLACE FUNCTION pg_temp.temptest()
+ RETURNS trigger AS
+ $BODY$ begin
+ raise notice 'Hello world';
+ return new;
+ end $BODY$
+ LANGUAGE plpgsql`)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = txn.Exec(`
+ CREATE TRIGGER temptest_trigger
+ BEFORE INSERT
+ ON temp
+ FOR EACH ROW
+ EXECUTE PROCEDURE pg_temp.temptest()`)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ stmt, err := txn.Prepare(CopyIn("temp", "a", "b"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ longString := strings.Repeat("#", 500)
+
+ _, err = stmt.Exec(int64(1), longString)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = stmt.Exec()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = stmt.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var num int
+ err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if num != 1 {
+ t.Fatalf("expected 1 items, not %d", num)
+ }
+}
+
+func TestCopyInTypes(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ txn, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer txn.Rollback()
+
+ _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER, text VARCHAR, blob BYTEA, nothing VARCHAR)")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ stmt, err := txn.Prepare(CopyIn("temp", "num", "text", "blob", "nothing"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = stmt.Exec(int64(1234567890), "Héllö\n ☃!\r\t\\", []byte{0, 255, 9, 10, 13}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = stmt.Exec()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = stmt.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var num int
+ var text string
+ var blob []byte
+ var nothing sql.NullString
+
+ err = txn.QueryRow("SELECT * FROM temp").Scan(&num, &text, &blob, ¬hing)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if num != 1234567890 {
+ t.Fatal("unexpected result", num)
+ }
+ if text != "Héllö\n ☃!\r\t\\" {
+ t.Fatal("unexpected result", text)
+ }
+ if bytes.Compare(blob, []byte{0, 255, 9, 10, 13}) != 0 {
+ t.Fatal("unexpected result", blob)
+ }
+ if nothing.Valid {
+ t.Fatal("unexpected result", nothing.String)
+ }
+}
+
+func TestCopyInWrongType(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ txn, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer txn.Rollback()
+
+ _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ stmt, err := txn.Prepare(CopyIn("temp", "num"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer stmt.Close()
+
+ _, err = stmt.Exec("Héllö\n ☃!\r\t\\")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = stmt.Exec()
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if pge := err.(*Error); pge.Code.Name() != "invalid_text_representation" {
+ t.Fatalf("expected 'invalid input syntax for integer' error, got %s (%+v)", pge.Code.Name(), pge)
+ }
+}
+
+func TestCopyOutsideOfTxnError(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ _, err := db.Prepare(CopyIn("temp", "num"))
+ if err == nil {
+ t.Fatal("COPY outside of transaction did not return an error")
+ }
+ if err != errCopyNotSupportedOutsideTxn {
+ t.Fatalf("expected %s, got %s", err, err.Error())
+ }
+}
+
+func TestCopyInBinaryError(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ txn, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer txn.Rollback()
+
+ _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)")
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = txn.Prepare("COPY temp (num) FROM STDIN WITH binary")
+ if err != errBinaryCopyNotSupported {
+ t.Fatalf("expected %s, got %+v", errBinaryCopyNotSupported, err)
+ }
+ // check that the protocol is in a valid state
+ err = txn.Rollback()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestCopyFromError(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ txn, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer txn.Rollback()
+
+ _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)")
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = txn.Prepare("COPY temp (num) TO STDOUT")
+ if err != errCopyToNotSupported {
+ t.Fatalf("expected %s, got %+v", errCopyToNotSupported, err)
+ }
+ // check that the protocol is in a valid state
+ err = txn.Rollback()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestCopySyntaxError(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ txn, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer txn.Rollback()
+
+ _, err = txn.Prepare("COPY ")
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if pge := err.(*Error); pge.Code.Name() != "syntax_error" {
+ t.Fatalf("expected syntax error, got %s (%+v)", pge.Code.Name(), pge)
+ }
+ // check that the protocol is in a valid state
+ err = txn.Rollback()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Tests for connection errors in copyin.resploop()
+func TestCopyRespLoopConnectionError(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ txn, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer txn.Rollback()
+
+ var pid int
+ err = txn.QueryRow("SELECT pg_backend_pid()").Scan(&pid)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = txn.Exec("CREATE TEMP TABLE temp (a int)")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ stmt, err := txn.Prepare(CopyIn("temp", "a"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.Exec("SELECT pg_terminate_backend($1)", pid)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if getServerVersion(t, db) < 90500 {
+ // We have to try and send something over, since postgres before
+ // version 9.5 won't process SIGTERMs while it's waiting for
+ // CopyData/CopyEnd messages; see tcop/postgres.c.
+ _, err = stmt.Exec(1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ _, err = stmt.Exec()
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+ pge, ok := err.(*Error)
+ if !ok {
+ t.Fatalf("expected *pq.Error, got %+#v", err)
+ } else if pge.Code.Name() != "admin_shutdown" {
+ t.Fatalf("expected admin_shutdown, got %s", pge.Code.Name())
+ }
+
+ err = stmt.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func BenchmarkCopyIn(b *testing.B) {
+ db := openTestConn(b)
+ defer db.Close()
+
+ txn, err := db.Begin()
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer txn.Rollback()
+
+ _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)")
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ stmt, err := txn.Prepare(CopyIn("temp", "a", "b"))
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ for i := 0; i < b.N; i++ {
+ _, err = stmt.Exec(int64(i), "hello world!")
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+
+ _, err = stmt.Exec()
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ err = stmt.Close()
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ var num int
+ err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ if num != b.N {
+ b.Fatalf("expected %d items, not %d", b.N, num)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/lib/pq/encode_test.go b/Godeps/_workspace/src/github.com/lib/pq/encode_test.go
new file mode 100644
index 000000000..97b663886
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/lib/pq/encode_test.go
@@ -0,0 +1,719 @@
+package pq
+
+import (
+ "bytes"
+ "database/sql"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/lib/pq/oid"
+)
+
+func TestScanTimestamp(t *testing.T) {
+ var nt NullTime
+ tn := time.Now()
+ nt.Scan(tn)
+ if !nt.Valid {
+ t.Errorf("Expected Valid=false")
+ }
+ if nt.Time != tn {
+ t.Errorf("Time value mismatch")
+ }
+}
+
+func TestScanNilTimestamp(t *testing.T) {
+ var nt NullTime
+ nt.Scan(nil)
+ if nt.Valid {
+ t.Errorf("Expected Valid=false")
+ }
+}
+
+var timeTests = []struct {
+ str string
+ timeval time.Time
+}{
+ {"22001-02-03", time.Date(22001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))},
+ {"2001-02-03", time.Date(2001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))},
+ {"2001-02-03 04:05:06", time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))},
+ {"2001-02-03 04:05:06.000001", time.Date(2001, time.February, 3, 4, 5, 6, 1000, time.FixedZone("", 0))},
+ {"2001-02-03 04:05:06.00001", time.Date(2001, time.February, 3, 4, 5, 6, 10000, time.FixedZone("", 0))},
+ {"2001-02-03 04:05:06.0001", time.Date(2001, time.February, 3, 4, 5, 6, 100000, time.FixedZone("", 0))},
+ {"2001-02-03 04:05:06.001", time.Date(2001, time.February, 3, 4, 5, 6, 1000000, time.FixedZone("", 0))},
+ {"2001-02-03 04:05:06.01", time.Date(2001, time.February, 3, 4, 5, 6, 10000000, time.FixedZone("", 0))},
+ {"2001-02-03 04:05:06.1", time.Date(2001, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))},
+ {"2001-02-03 04:05:06.12", time.Date(2001, time.February, 3, 4, 5, 6, 120000000, time.FixedZone("", 0))},
+ {"2001-02-03 04:05:06.123", time.Date(2001, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
+ {"2001-02-03 04:05:06.1234", time.Date(2001, time.February, 3, 4, 5, 6, 123400000, time.FixedZone("", 0))},
+ {"2001-02-03 04:05:06.12345", time.Date(2001, time.February, 3, 4, 5, 6, 123450000, time.FixedZone("", 0))},
+ {"2001-02-03 04:05:06.123456", time.Date(2001, time.February, 3, 4, 5, 6, 123456000, time.FixedZone("", 0))},
+ {"2001-02-03 04:05:06.123-07", time.Date(2001, time.February, 3, 4, 5, 6, 123000000,
+ time.FixedZone("", -7*60*60))},
+ {"2001-02-03 04:05:06-07", time.Date(2001, time.February, 3, 4, 5, 6, 0,
+ time.FixedZone("", -7*60*60))},
+ {"2001-02-03 04:05:06-07:42", time.Date(2001, time.February, 3, 4, 5, 6, 0,
+ time.FixedZone("", -(7*60*60+42*60)))},
+ {"2001-02-03 04:05:06-07:30:09", time.Date(2001, time.February, 3, 4, 5, 6, 0,
+ time.FixedZone("", -(7*60*60+30*60+9)))},
+ {"2001-02-03 04:05:06+07", time.Date(2001, time.February, 3, 4, 5, 6, 0,
+ time.FixedZone("", 7*60*60))},
+ {"0011-02-03 04:05:06 BC", time.Date(-10, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))},
+ {"0011-02-03 04:05:06.123 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
+ {"0011-02-03 04:05:06.123-07 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000,
+ time.FixedZone("", -7*60*60))},
+ {"0001-02-03 04:05:06.123", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
+ {"0001-02-03 04:05:06.123 BC", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)},
+ {"0001-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
+ {"0002-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)},
+ {"0002-02-03 04:05:06.123 BC", time.Date(-1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))},
+ {"12345-02-03 04:05:06.1", time.Date(12345, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))},
+ {"123456-02-03 04:05:06.1", time.Date(123456, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))},
+}
+
+// Helper function for the two tests below
+func tryParse(str string) (t time.Time, err error) {
+ defer func() {
+ if p := recover(); p != nil {
+ err = fmt.Errorf("%v", p)
+ return
+ }
+ }()
+ i := parseTs(nil, str)
+ t, ok := i.(time.Time)
+ if !ok {
+ err = fmt.Errorf("Not a time.Time type, got %#v", i)
+ }
+ return
+}
+
+// Test that parsing the string results in the expected value.
+func TestParseTs(t *testing.T) {
+ for i, tt := range timeTests {
+ val, err := tryParse(tt.str)
+ if err != nil {
+ t.Errorf("%d: got error: %v", i, err)
+ } else if val.String() != tt.timeval.String() {
+ t.Errorf("%d: expected to parse %q into %q; got %q",
+ i, tt.str, tt.timeval, val)
+ }
+ }
+}
+
+// Now test that sending the value into the database and parsing it back
+// returns the same time.Time value.
+func TestEncodeAndParseTs(t *testing.T) {
+ db, err := openTestConnConninfo("timezone='Etc/UTC'")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer db.Close()
+
+ for i, tt := range timeTests {
+ var dbstr string
+ err = db.QueryRow("SELECT ($1::timestamptz)::text", tt.timeval).Scan(&dbstr)
+ if err != nil {
+ t.Errorf("%d: could not send value %q to the database: %s", i, tt.timeval, err)
+ continue
+ }
+
+ val, err := tryParse(dbstr)
+ if err != nil {
+ t.Errorf("%d: could not parse value %q: %s", i, dbstr, err)
+ continue
+ }
+ val = val.In(tt.timeval.Location())
+ if val.String() != tt.timeval.String() {
+ t.Errorf("%d: expected to parse %q into %q; got %q", i, dbstr, tt.timeval, val)
+ }
+ }
+}
+
+var formatTimeTests = []struct {
+ time time.Time
+ expected string
+}{
+ {time.Time{}, "0001-01-01T00:00:00Z"},
+ {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "2001-02-03T04:05:06.123456789Z"},
+ {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "2001-02-03T04:05:06.123456789+02:00"},
+ {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "2001-02-03T04:05:06.123456789-06:00"},
+ {time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "2001-02-03T04:05:06-07:30:09"},
+
+ {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03T04:05:06.123456789Z"},
+ {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03T04:05:06.123456789+02:00"},
+ {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03T04:05:06.123456789-06:00"},
+
+ {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03T04:05:06.123456789Z BC"},
+ {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03T04:05:06.123456789+02:00 BC"},
+ {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03T04:05:06.123456789-06:00 BC"},
+
+ {time.Date(1, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03T04:05:06-07:30:09"},
+ {time.Date(0, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03T04:05:06-07:30:09 BC"},
+}
+
+func TestFormatTs(t *testing.T) {
+ for i, tt := range formatTimeTests {
+ val := string(formatTs(tt.time))
+ if val != tt.expected {
+ t.Errorf("%d: incorrect time format %q, want %q", i, val, tt.expected)
+ }
+ }
+}
+
+func TestTimestampWithTimeZone(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ tx, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer tx.Rollback()
+
+ // try several different locations, all included in Go's zoneinfo.zip
+ for _, locName := range []string{
+ "UTC",
+ "America/Chicago",
+ "America/New_York",
+ "Australia/Darwin",
+ "Australia/Perth",
+ } {
+ loc, err := time.LoadLocation(locName)
+ if err != nil {
+ t.Logf("Could not load time zone %s - skipping", locName)
+ continue
+ }
+
+ // Postgres timestamps have a resolution of 1 microsecond, so don't
+ // use the full range of the Nanosecond argument
+ refTime := time.Date(2012, 11, 6, 10, 23, 42, 123456000, loc)
+
+ for _, pgTimeZone := range []string{"US/Eastern", "Australia/Darwin"} {
+ // Switch Postgres's timezone to test different output timestamp formats
+ _, err = tx.Exec(fmt.Sprintf("set time zone '%s'", pgTimeZone))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var gotTime time.Time
+ row := tx.QueryRow("select $1::timestamp with time zone", refTime)
+ err = row.Scan(&gotTime)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !refTime.Equal(gotTime) {
+ t.Errorf("timestamps not equal: %s != %s", refTime, gotTime)
+ }
+
+ // check that the time zone is set correctly based on TimeZone
+ pgLoc, err := time.LoadLocation(pgTimeZone)
+ if err != nil {
+ t.Logf("Could not load time zone %s - skipping", pgLoc)
+ continue
+ }
+ translated := refTime.In(pgLoc)
+ if translated.String() != gotTime.String() {
+ t.Errorf("timestamps not equal: %s != %s", translated, gotTime)
+ }
+ }
+ }
+}
+
+func TestTimestampWithOutTimezone(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ test := func(ts, pgts string) {
+ r, err := db.Query("SELECT $1::timestamp", pgts)
+ if err != nil {
+ t.Fatalf("Could not run query: %v", err)
+ }
+
+ n := r.Next()
+
+ if n != true {
+ t.Fatal("Expected at least one row")
+ }
+
+ var result time.Time
+ err = r.Scan(&result)
+ if err != nil {
+ t.Fatalf("Did not expect error scanning row: %v", err)
+ }
+
+ expected, err := time.Parse(time.RFC3339, ts)
+ if err != nil {
+ t.Fatalf("Could not parse test time literal: %v", err)
+ }
+
+ if !result.Equal(expected) {
+ t.Fatalf("Expected time to match %v: got mismatch %v",
+ expected, result)
+ }
+
+ n = r.Next()
+ if n != false {
+ t.Fatal("Expected only one row")
+ }
+ }
+
+ test("2000-01-01T00:00:00Z", "2000-01-01T00:00:00")
+
+ // Test higher precision time
+ test("2013-01-04T20:14:58.80033Z", "2013-01-04 20:14:58.80033")
+}
+
+func TestInfinityTimestamp(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+ var err error
+ var resultT time.Time
+
+ expectedError := fmt.Errorf(`sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time`)
+ type testCases []struct {
+ Query string
+ Param string
+ ExpectedErr error
+ ExpectedVal interface{}
+ }
+ tc := testCases{
+ {"SELECT $1::timestamp", "-infinity", expectedError, "-infinity"},
+ {"SELECT $1::timestamptz", "-infinity", expectedError, "-infinity"},
+ {"SELECT $1::timestamp", "infinity", expectedError, "infinity"},
+ {"SELECT $1::timestamptz", "infinity", expectedError, "infinity"},
+ }
+ // try to assert []byte to time.Time
+ for _, q := range tc {
+ err = db.QueryRow(q.Query, q.Param).Scan(&resultT)
+ if err.Error() != q.ExpectedErr.Error() {
+ t.Errorf("Scanning -/+infinity, expected error, %q, got %q", q.ExpectedErr, err)
+ }
+ }
+ // yield []byte
+ for _, q := range tc {
+ var resultI interface{}
+ err = db.QueryRow(q.Query, q.Param).Scan(&resultI)
+ if err != nil {
+ t.Errorf("Scanning -/+infinity, expected no error, got %q", err)
+ }
+ result, ok := resultI.([]byte)
+ if !ok {
+ t.Errorf("Scanning -/+infinity, expected []byte, got %#v", resultI)
+ }
+ if string(result) != q.ExpectedVal {
+ t.Errorf("Scanning -/+infinity, expected %q, got %q", q.ExpectedVal, result)
+ }
+ }
+
+ y1500 := time.Date(1500, time.January, 1, 0, 0, 0, 0, time.UTC)
+ y2500 := time.Date(2500, time.January, 1, 0, 0, 0, 0, time.UTC)
+ EnableInfinityTs(y1500, y2500)
+
+ err = db.QueryRow("SELECT $1::timestamp", "infinity").Scan(&resultT)
+ if err != nil {
+ t.Errorf("Scanning infinity, expected no error, got %q", err)
+ }
+ if !resultT.Equal(y2500) {
+ t.Errorf("Scanning infinity, expected %q, got %q", y2500, resultT)
+ }
+
+ err = db.QueryRow("SELECT $1::timestamptz", "infinity").Scan(&resultT)
+ if err != nil {
+ t.Errorf("Scanning infinity, expected no error, got %q", err)
+ }
+ if !resultT.Equal(y2500) {
+ t.Errorf("Scanning Infinity, expected time %q, got %q", y2500, resultT.String())
+ }
+
+ err = db.QueryRow("SELECT $1::timestamp", "-infinity").Scan(&resultT)
+ if err != nil {
+ t.Errorf("Scanning -infinity, expected no error, got %q", err)
+ }
+ if !resultT.Equal(y1500) {
+ t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String())
+ }
+
+ err = db.QueryRow("SELECT $1::timestamptz", "-infinity").Scan(&resultT)
+ if err != nil {
+ t.Errorf("Scanning -infinity, expected no error, got %q", err)
+ }
+ if !resultT.Equal(y1500) {
+ t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String())
+ }
+
+ y_1500 := time.Date(-1500, time.January, 1, 0, 0, 0, 0, time.UTC)
+ y11500 := time.Date(11500, time.January, 1, 0, 0, 0, 0, time.UTC)
+ var s string
+ err = db.QueryRow("SELECT $1::timestamp::text", y_1500).Scan(&s)
+ if err != nil {
+ t.Errorf("Encoding -infinity, expected no error, got %q", err)
+ }
+ if s != "-infinity" {
+ t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s)
+ }
+ err = db.QueryRow("SELECT $1::timestamptz::text", y_1500).Scan(&s)
+ if err != nil {
+ t.Errorf("Encoding -infinity, expected no error, got %q", err)
+ }
+ if s != "-infinity" {
+ t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s)
+ }
+
+ err = db.QueryRow("SELECT $1::timestamp::text", y11500).Scan(&s)
+ if err != nil {
+ t.Errorf("Encoding infinity, expected no error, got %q", err)
+ }
+ if s != "infinity" {
+ t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s)
+ }
+ err = db.QueryRow("SELECT $1::timestamptz::text", y11500).Scan(&s)
+ if err != nil {
+ t.Errorf("Encoding infinity, expected no error, got %q", err)
+ }
+ if s != "infinity" {
+ t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s)
+ }
+
+ disableInfinityTs()
+
+ var panicErrorString string
+ func() {
+ defer func() {
+ panicErrorString, _ = recover().(string)
+ }()
+ EnableInfinityTs(y2500, y1500)
+ }()
+ if panicErrorString != infinityTsNegativeMustBeSmaller {
+ t.Errorf("Expected error, %q, got %q", infinityTsNegativeMustBeSmaller, panicErrorString)
+ }
+}
+
+func TestStringWithNul(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ hello0world := string("hello\x00world")
+ _, err := db.Query("SELECT $1::text", &hello0world)
+ if err == nil {
+ t.Fatal("Postgres accepts a string with nul in it; " +
+ "injection attacks may be plausible")
+ }
+}
+
+func TestByteSliceToText(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ b := []byte("hello world")
+ row := db.QueryRow("SELECT $1::text", b)
+
+ var result []byte
+ err := row.Scan(&result)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if string(result) != string(b) {
+ t.Fatalf("expected %v but got %v", b, result)
+ }
+}
+
+func TestStringToBytea(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ b := "hello world"
+ row := db.QueryRow("SELECT $1::bytea", b)
+
+ var result []byte
+ err := row.Scan(&result)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(result, []byte(b)) {
+ t.Fatalf("expected %v but got %v", b, result)
+ }
+}
+
+func TestTextByteSliceToUUID(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ b := []byte("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11")
+ row := db.QueryRow("SELECT $1::uuid", b)
+
+ var result string
+ err := row.Scan(&result)
+ if forceBinaryParameters() {
+ pqErr, ok := err.(*Error)
+ if !ok {
+ t.Errorf("Expected to get error")
+ } else if pqErr.Code != "22P03" {
+ t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code)
+ }
+ } else {
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if result != string(b) {
+ t.Fatalf("expected %v but got %v", b, result)
+ }
+ }
+}
+
+func TestBinaryByteSlicetoUUID(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ b := []byte{'\xa0', '\xee', '\xbc', '\x99',
+ '\x9c', '\x0b',
+ '\x4e', '\xf8',
+ '\xbb', '\x00', '\x6b',
+ '\xb9', '\xbd', '\x38', '\x0a', '\x11'}
+ row := db.QueryRow("SELECT $1::uuid", b)
+
+ var result string
+ err := row.Scan(&result)
+ if forceBinaryParameters() {
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if result != string("a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11") {
+ t.Fatalf("expected %v but got %v", b, result)
+ }
+ } else {
+ pqErr, ok := err.(*Error)
+ if !ok {
+ t.Errorf("Expected to get error")
+ } else if pqErr.Code != "22021" {
+ t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code)
+ }
+ }
+}
+
+func TestStringToUUID(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ s := "a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11"
+ row := db.QueryRow("SELECT $1::uuid", s)
+
+ var result string
+ err := row.Scan(&result)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if result != s {
+ t.Fatalf("expected %v but got %v", s, result)
+ }
+}
+
+func TestTextByteSliceToInt(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ expected := 12345678
+ b := []byte(fmt.Sprintf("%d", expected))
+ row := db.QueryRow("SELECT $1::int", b)
+
+ var result int
+ err := row.Scan(&result)
+ if forceBinaryParameters() {
+ pqErr, ok := err.(*Error)
+ if !ok {
+ t.Errorf("Expected to get error")
+ } else if pqErr.Code != "22P03" {
+ t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code)
+ }
+ } else {
+ if err != nil {
+ t.Fatal(err)
+ }
+ if result != expected {
+ t.Fatalf("expected %v but got %v", expected, result)
+ }
+ }
+}
+
+func TestBinaryByteSliceToInt(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ expected := 12345678
+ b := []byte{'\x00', '\xbc', '\x61', '\x4e'}
+ row := db.QueryRow("SELECT $1::int", b)
+
+ var result int
+ err := row.Scan(&result)
+ if forceBinaryParameters() {
+ if err != nil {
+ t.Fatal(err)
+ }
+ if result != expected {
+ t.Fatalf("expected %v but got %v", expected, result)
+ }
+ } else {
+ pqErr, ok := err.(*Error)
+ if !ok {
+ t.Errorf("Expected to get error")
+ } else if pqErr.Code != "22021" {
+ t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code)
+ }
+ }
+}
+
+func TestByteaOutputFormatEncoding(t *testing.T) {
+ input := []byte("\\x\x00\x01\x02\xFF\xFEabcdefg0123")
+ want := []byte("\\x5c78000102fffe6162636465666730313233")
+ got := encode(&parameterStatus{serverVersion: 90000}, input, oid.T_bytea)
+ if !bytes.Equal(want, got) {
+ t.Errorf("invalid hex bytea output, got %v but expected %v", got, want)
+ }
+
+ want = []byte("\\\\x\\000\\001\\002\\377\\376abcdefg0123")
+ got = encode(&parameterStatus{serverVersion: 84000}, input, oid.T_bytea)
+ if !bytes.Equal(want, got) {
+ t.Errorf("invalid escape bytea output, got %v but expected %v", got, want)
+ }
+}
+
+func TestByteaOutputFormats(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ if getServerVersion(t, db) < 90000 {
+ // skip
+ return
+ }
+
+ testByteaOutputFormat := func(f string, usePrepared bool) {
+ expectedData := []byte("\x5c\x78\x00\xff\x61\x62\x63\x01\x08")
+ sqlQuery := "SELECT decode('5c7800ff6162630108', 'hex')"
+
+ var data []byte
+
+ // use a txn to avoid relying on getting the same connection
+ txn, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer txn.Rollback()
+
+ _, err = txn.Exec("SET LOCAL bytea_output TO " + f)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var rows *sql.Rows
+ var stmt *sql.Stmt
+ if usePrepared {
+ stmt, err = txn.Prepare(sqlQuery)
+ if err != nil {
+ t.Fatal(err)
+ }
+ rows, err = stmt.Query()
+ } else {
+ // use Query; QueryRow would hide the actual error
+ rows, err = txn.Query(sqlQuery)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !rows.Next() {
+ if rows.Err() != nil {
+ t.Fatal(rows.Err())
+ }
+ t.Fatal("shouldn't happen")
+ }
+ err = rows.Scan(&data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = rows.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if stmt != nil {
+ err = stmt.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ if !bytes.Equal(data, expectedData) {
+ t.Errorf("unexpected bytea value %v for format %s; expected %v", data, f, expectedData)
+ }
+ }
+
+ testByteaOutputFormat("hex", false)
+ testByteaOutputFormat("escape", false)
+ testByteaOutputFormat("hex", true)
+ testByteaOutputFormat("escape", true)
+}
+
+func TestAppendEncodedText(t *testing.T) {
+ var buf []byte
+
+ buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, int64(10))
+ buf = append(buf, '\t')
+ buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, 42.0000000001)
+ buf = append(buf, '\t')
+ buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, "hello\tworld")
+ buf = append(buf, '\t')
+ buf = appendEncodedText(&parameterStatus{serverVersion: 90000}, buf, []byte{0, 128, 255})
+
+ if string(buf) != "10\t42.0000000001\thello\\tworld\t\\\\x0080ff" {
+ t.Fatal(string(buf))
+ }
+}
+
+func TestAppendEscapedText(t *testing.T) {
+ if esc := appendEscapedText(nil, "hallo\tescape"); string(esc) != "hallo\\tescape" {
+ t.Fatal(string(esc))
+ }
+ if esc := appendEscapedText(nil, "hallo\\tescape\n"); string(esc) != "hallo\\\\tescape\\n" {
+ t.Fatal(string(esc))
+ }
+ if esc := appendEscapedText(nil, "\n\r\t\f"); string(esc) != "\\n\\r\\t\f" {
+ t.Fatal(string(esc))
+ }
+}
+
+func TestAppendEscapedTextExistingBuffer(t *testing.T) {
+ var buf []byte
+ buf = []byte("123\t")
+ if esc := appendEscapedText(buf, "hallo\tescape"); string(esc) != "123\thallo\\tescape" {
+ t.Fatal(string(esc))
+ }
+ buf = []byte("123\t")
+ if esc := appendEscapedText(buf, "hallo\\tescape\n"); string(esc) != "123\thallo\\\\tescape\\n" {
+ t.Fatal(string(esc))
+ }
+ buf = []byte("123\t")
+ if esc := appendEscapedText(buf, "\n\r\t\f"); string(esc) != "123\t\\n\\r\\t\f" {
+ t.Fatal(string(esc))
+ }
+}
+
+func BenchmarkAppendEscapedText(b *testing.B) {
+ longString := ""
+ for i := 0; i < 100; i++ {
+ longString += "123456789\n"
+ }
+ for i := 0; i < b.N; i++ {
+ appendEscapedText(nil, longString)
+ }
+}
+
+func BenchmarkAppendEscapedTextNoEscape(b *testing.B) {
+ longString := ""
+ for i := 0; i < 100; i++ {
+ longString += "1234567890"
+ }
+ for i := 0; i < b.N; i++ {
+ appendEscapedText(nil, longString)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/lib/pq/hstore/hstore_test.go b/Godeps/_workspace/src/github.com/lib/pq/hstore/hstore_test.go
new file mode 100644
index 000000000..c9c108fc3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/lib/pq/hstore/hstore_test.go
@@ -0,0 +1,148 @@
+package hstore
+
+import (
+ "database/sql"
+ "os"
+ "testing"
+
+ _ "github.com/lib/pq"
+)
+
+type Fatalistic interface {
+ Fatal(args ...interface{})
+}
+
+func openTestConn(t Fatalistic) *sql.DB {
+ datname := os.Getenv("PGDATABASE")
+ sslmode := os.Getenv("PGSSLMODE")
+
+ if datname == "" {
+ os.Setenv("PGDATABASE", "pqgotest")
+ }
+
+ if sslmode == "" {
+ os.Setenv("PGSSLMODE", "disable")
+ }
+
+ conn, err := sql.Open("postgres", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return conn
+}
+
+func TestHstore(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ // quietly create hstore if it doesn't exist
+ _, err := db.Exec("CREATE EXTENSION IF NOT EXISTS hstore")
+ if err != nil {
+ t.Skipf("Skipping hstore tests - hstore extension create failed: %s", err.Error())
+ }
+
+ hs := Hstore{}
+
+ // test for null-valued hstores
+ err = db.QueryRow("SELECT NULL::hstore").Scan(&hs)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if hs.Map != nil {
+ t.Fatalf("expected null map")
+ }
+
+ err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs)
+ if err != nil {
+ t.Fatalf("re-query null map failed: %s", err.Error())
+ }
+ if hs.Map != nil {
+ t.Fatalf("expected null map")
+ }
+
+ // test for empty hstores
+ err = db.QueryRow("SELECT ''::hstore").Scan(&hs)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if hs.Map == nil {
+ t.Fatalf("expected empty map, got null map")
+ }
+ if len(hs.Map) != 0 {
+ t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map))
+ }
+
+ err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs)
+ if err != nil {
+ t.Fatalf("re-query empty map failed: %s", err.Error())
+ }
+ if hs.Map == nil {
+ t.Fatalf("expected empty map, got null map")
+ }
+ if len(hs.Map) != 0 {
+ t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map))
+ }
+
+ // a few example maps to test out
+ hsOnePair := Hstore{
+ Map: map[string]sql.NullString{
+ "key1": {"value1", true},
+ },
+ }
+
+ hsThreePairs := Hstore{
+ Map: map[string]sql.NullString{
+ "key1": {"value1", true},
+ "key2": {"value2", true},
+ "key3": {"value3", true},
+ },
+ }
+
+ hsSmorgasbord := Hstore{
+ Map: map[string]sql.NullString{
+ "nullstring": {"NULL", true},
+ "actuallynull": {"", false},
+ "NULL": {"NULL string key", true},
+ "withbracket": {"value>42", true},
+ "withequal": {"value=42", true},
+ `"withquotes1"`: {`this "should" be fine`, true},
+ `"withquotes"2"`: {`this "should\" also be fine`, true},
+ "embedded1": {"value1=>x1", true},
+ "embedded2": {`"value2"=>x2`, true},
+ "withnewlines": {"\n\nvalue\t=>2", true},
+ "<>": {`this, "should,\" also, => be fine`, true},
+ },
+ }
+
+ // test encoding in query params, then decoding during Scan
+ testBidirectional := func(h Hstore) {
+ err = db.QueryRow("SELECT $1::hstore", h).Scan(&hs)
+ if err != nil {
+ t.Fatalf("re-query %d-pair map failed: %s", len(h.Map), err.Error())
+ }
+ if hs.Map == nil {
+ t.Fatalf("expected %d-pair map, got null map", len(h.Map))
+ }
+ if len(hs.Map) != len(h.Map) {
+ t.Fatalf("expected %d-pair map, got len(map)=%d", len(h.Map), len(hs.Map))
+ }
+
+ for key, val := range hs.Map {
+ otherval, found := h.Map[key]
+ if !found {
+ t.Fatalf(" key '%v' not found in %d-pair map", key, len(h.Map))
+ }
+ if otherval.Valid != val.Valid {
+ t.Fatalf(" value %v <> %v in %d-pair map", otherval, val, len(h.Map))
+ }
+ if otherval.String != val.String {
+ t.Fatalf(" value '%v' <> '%v' in %d-pair map", otherval.String, val.String, len(h.Map))
+ }
+ }
+ }
+
+ testBidirectional(hsOnePair)
+ testBidirectional(hsThreePairs)
+ testBidirectional(hsSmorgasbord)
+}
diff --git a/Godeps/_workspace/src/github.com/lib/pq/notify_test.go b/Godeps/_workspace/src/github.com/lib/pq/notify_test.go
new file mode 100644
index 000000000..fe8941a4e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/lib/pq/notify_test.go
@@ -0,0 +1,574 @@
+package pq
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+var errNilNotification = errors.New("nil notification")
+
+func expectNotification(t *testing.T, ch <-chan *Notification, relname string, extra string) error {
+ select {
+ case n := <-ch:
+ if n == nil {
+ return errNilNotification
+ }
+ if n.Channel != relname || n.Extra != extra {
+ return fmt.Errorf("unexpected notification %v", n)
+ }
+ return nil
+ case <-time.After(1500 * time.Millisecond):
+ return fmt.Errorf("timeout")
+ }
+}
+
+func expectNoNotification(t *testing.T, ch <-chan *Notification) error {
+ select {
+ case n := <-ch:
+ return fmt.Errorf("unexpected notification %v", n)
+ case <-time.After(100 * time.Millisecond):
+ return nil
+ }
+}
+
+func expectEvent(t *testing.T, eventch <-chan ListenerEventType, et ListenerEventType) error {
+ select {
+ case e := <-eventch:
+ if e != et {
+ return fmt.Errorf("unexpected event %v", e)
+ }
+ return nil
+ case <-time.After(1500 * time.Millisecond):
+ panic("expectEvent timeout")
+ }
+}
+
+func expectNoEvent(t *testing.T, eventch <-chan ListenerEventType) error {
+ select {
+ case e := <-eventch:
+ return fmt.Errorf("unexpected event %v", e)
+ case <-time.After(100 * time.Millisecond):
+ return nil
+ }
+}
+
+func newTestListenerConn(t *testing.T) (*ListenerConn, <-chan *Notification) {
+ datname := os.Getenv("PGDATABASE")
+ sslmode := os.Getenv("PGSSLMODE")
+
+ if datname == "" {
+ os.Setenv("PGDATABASE", "pqgotest")
+ }
+
+ if sslmode == "" {
+ os.Setenv("PGSSLMODE", "disable")
+ }
+
+ notificationChan := make(chan *Notification)
+ l, err := NewListenerConn("", notificationChan)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return l, notificationChan
+}
+
+func TestNewListenerConn(t *testing.T) {
+ l, _ := newTestListenerConn(t)
+
+ defer l.Close()
+}
+
+func TestConnListen(t *testing.T) {
+ l, channel := newTestListenerConn(t)
+
+ defer l.Close()
+
+ db := openTestConn(t)
+ defer db.Close()
+
+ ok, err := l.Listen("notify_test")
+ if !ok || err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.Exec("NOTIFY notify_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = expectNotification(t, channel, "notify_test", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestConnUnlisten(t *testing.T) {
+ l, channel := newTestListenerConn(t)
+
+ defer l.Close()
+
+ db := openTestConn(t)
+ defer db.Close()
+
+ ok, err := l.Listen("notify_test")
+ if !ok || err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.Exec("NOTIFY notify_test")
+
+ err = expectNotification(t, channel, "notify_test", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ok, err = l.Unlisten("notify_test")
+ if !ok || err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.Exec("NOTIFY notify_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = expectNoNotification(t, channel)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestConnUnlistenAll(t *testing.T) {
+ l, channel := newTestListenerConn(t)
+
+ defer l.Close()
+
+ db := openTestConn(t)
+ defer db.Close()
+
+ ok, err := l.Listen("notify_test")
+ if !ok || err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.Exec("NOTIFY notify_test")
+
+ err = expectNotification(t, channel, "notify_test", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ok, err = l.UnlistenAll()
+ if !ok || err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.Exec("NOTIFY notify_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = expectNoNotification(t, channel)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestConnClose(t *testing.T) {
+ l, _ := newTestListenerConn(t)
+ defer l.Close()
+
+ err := l.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = l.Close()
+ if err != errListenerConnClosed {
+ t.Fatalf("expected errListenerConnClosed; got %v", err)
+ }
+}
+
+func TestConnPing(t *testing.T) {
+ l, _ := newTestListenerConn(t)
+ defer l.Close()
+ err := l.Ping()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = l.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = l.Ping()
+ if err != errListenerConnClosed {
+ t.Fatalf("expected errListenerConnClosed; got %v", err)
+ }
+}
+
+// Test for deadlock where a query fails while another one is queued
+func TestConnExecDeadlock(t *testing.T) {
+ l, _ := newTestListenerConn(t)
+ defer l.Close()
+
+ var wg sync.WaitGroup
+ wg.Add(2)
+
+ go func() {
+ l.ExecSimpleQuery("SELECT pg_sleep(60)")
+ wg.Done()
+ }()
+ runtime.Gosched()
+ go func() {
+ l.ExecSimpleQuery("SELECT 1")
+ wg.Done()
+ }()
+ // give the two goroutines some time to get into position
+ runtime.Gosched()
+ // calls Close on the net.Conn; equivalent to a network failure
+ l.Close()
+
+ var done int32 = 0
+ go func() {
+ time.Sleep(10 * time.Second)
+ if atomic.LoadInt32(&done) != 1 {
+ panic("timed out")
+ }
+ }()
+ wg.Wait()
+ atomic.StoreInt32(&done, 1)
+}
+
+// Test for ListenerConn being closed while a slow query is executing
+func TestListenerConnCloseWhileQueryIsExecuting(t *testing.T) {
+ l, _ := newTestListenerConn(t)
+ defer l.Close()
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ go func() {
+ sent, err := l.ExecSimpleQuery("SELECT pg_sleep(60)")
+ if sent {
+ panic("expected sent=false")
+ }
+ // could be any of a number of errors
+ if err == nil {
+ panic("expected error")
+ }
+ wg.Done()
+ }()
+ // give the above goroutine some time to get into position
+ runtime.Gosched()
+ err := l.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var done int32 = 0
+ go func() {
+ time.Sleep(10 * time.Second)
+ if atomic.LoadInt32(&done) != 1 {
+ panic("timed out")
+ }
+ }()
+ wg.Wait()
+ atomic.StoreInt32(&done, 1)
+}
+
+func TestNotifyExtra(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ if getServerVersion(t, db) < 90000 {
+ t.Skip("skipping NOTIFY payload test since the server does not appear to support it")
+ }
+
+ l, channel := newTestListenerConn(t)
+ defer l.Close()
+
+ ok, err := l.Listen("notify_test")
+ if !ok || err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.Exec("NOTIFY notify_test, 'something'")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = expectNotification(t, channel, "notify_test", "something")
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// create a new test listener and also set the timeouts
+func newTestListenerTimeout(t *testing.T, min time.Duration, max time.Duration) (*Listener, <-chan ListenerEventType) {
+ datname := os.Getenv("PGDATABASE")
+ sslmode := os.Getenv("PGSSLMODE")
+
+ if datname == "" {
+ os.Setenv("PGDATABASE", "pqgotest")
+ }
+
+ if sslmode == "" {
+ os.Setenv("PGSSLMODE", "disable")
+ }
+
+ eventch := make(chan ListenerEventType, 16)
+ l := NewListener("", min, max, func(t ListenerEventType, err error) { eventch <- t })
+ err := expectEvent(t, eventch, ListenerEventConnected)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return l, eventch
+}
+
+func newTestListener(t *testing.T) (*Listener, <-chan ListenerEventType) {
+ return newTestListenerTimeout(t, time.Hour, time.Hour)
+}
+
+func TestListenerListen(t *testing.T) {
+ l, _ := newTestListener(t)
+ defer l.Close()
+
+ db := openTestConn(t)
+ defer db.Close()
+
+ err := l.Listen("notify_listen_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.Exec("NOTIFY notify_listen_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = expectNotification(t, l.Notify, "notify_listen_test", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestListenerUnlisten(t *testing.T) {
+ l, _ := newTestListener(t)
+ defer l.Close()
+
+ db := openTestConn(t)
+ defer db.Close()
+
+ err := l.Listen("notify_listen_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.Exec("NOTIFY notify_listen_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = l.Unlisten("notify_listen_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = expectNotification(t, l.Notify, "notify_listen_test", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.Exec("NOTIFY notify_listen_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = expectNoNotification(t, l.Notify)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestListenerUnlistenAll(t *testing.T) {
+ l, _ := newTestListener(t)
+ defer l.Close()
+
+ db := openTestConn(t)
+ defer db.Close()
+
+ err := l.Listen("notify_listen_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.Exec("NOTIFY notify_listen_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = l.UnlistenAll()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = expectNotification(t, l.Notify, "notify_listen_test", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.Exec("NOTIFY notify_listen_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = expectNoNotification(t, l.Notify)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestListenerFailedQuery(t *testing.T) {
+ l, eventch := newTestListener(t)
+ defer l.Close()
+
+ db := openTestConn(t)
+ defer db.Close()
+
+ err := l.Listen("notify_listen_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.Exec("NOTIFY notify_listen_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = expectNotification(t, l.Notify, "notify_listen_test", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // shouldn't cause a disconnect
+ ok, err := l.cn.ExecSimpleQuery("SELECT error")
+ if !ok {
+ t.Fatalf("could not send query to server: %v", err)
+ }
+ _, ok = err.(PGError)
+ if !ok {
+ t.Fatalf("unexpected error %v", err)
+ }
+ err = expectNoEvent(t, eventch)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // should still work
+ _, err = db.Exec("NOTIFY notify_listen_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = expectNotification(t, l.Notify, "notify_listen_test", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestListenerReconnect(t *testing.T) {
+ l, eventch := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour)
+ defer l.Close()
+
+ db := openTestConn(t)
+ defer db.Close()
+
+ err := l.Listen("notify_listen_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.Exec("NOTIFY notify_listen_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = expectNotification(t, l.Notify, "notify_listen_test", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // kill the connection and make sure it comes back up
+ ok, err := l.cn.ExecSimpleQuery("SELECT pg_terminate_backend(pg_backend_pid())")
+ if ok {
+ t.Fatalf("could not kill the connection: %v", err)
+ }
+ if err != io.EOF {
+ t.Fatalf("unexpected error %v", err)
+ }
+ err = expectEvent(t, eventch, ListenerEventDisconnected)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = expectEvent(t, eventch, ListenerEventReconnected)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // should still work
+ _, err = db.Exec("NOTIFY notify_listen_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // should get nil after Reconnected
+ err = expectNotification(t, l.Notify, "", "")
+ if err != errNilNotification {
+ t.Fatal(err)
+ }
+
+ err = expectNotification(t, l.Notify, "notify_listen_test", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestListenerClose(t *testing.T) {
+ l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour)
+ defer l.Close()
+
+ err := l.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = l.Close()
+ if err != errListenerClosed {
+ t.Fatalf("expected errListenerClosed; got %v", err)
+ }
+}
+
+func TestListenerPing(t *testing.T) {
+ l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour)
+ defer l.Close()
+
+ err := l.Ping()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = l.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = l.Ping()
+ if err != errListenerClosed {
+ t.Fatalf("expected errListenerClosed; got %v", err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/lib/pq/ssl_test.go b/Godeps/_workspace/src/github.com/lib/pq/ssl_test.go
new file mode 100644
index 000000000..932b336f5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/lib/pq/ssl_test.go
@@ -0,0 +1,226 @@
+package pq
+
+// This file contains SSL tests
+
+import (
+ _ "crypto/sha256"
+ "crypto/x509"
+ "database/sql"
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func maybeSkipSSLTests(t *testing.T) {
+ // Require some special variables for testing certificates
+ if os.Getenv("PQSSLCERTTEST_PATH") == "" {
+ t.Skip("PQSSLCERTTEST_PATH not set, skipping SSL tests")
+ }
+
+ value := os.Getenv("PQGOSSLTESTS")
+ if value == "" || value == "0" {
+ t.Skip("PQGOSSLTESTS not enabled, skipping SSL tests")
+ } else if value != "1" {
+ t.Fatalf("unexpected value %q for PQGOSSLTESTS", value)
+ }
+}
+
+func openSSLConn(t *testing.T, conninfo string) (*sql.DB, error) {
+ db, err := openTestConnConninfo(conninfo)
+ if err != nil {
+ // should never fail
+ t.Fatal(err)
+ }
+ // Do something with the connection to see whether it's working or not.
+ tx, err := db.Begin()
+ if err == nil {
+ return db, tx.Rollback()
+ }
+ _ = db.Close()
+ return nil, err
+}
+
+func checkSSLSetup(t *testing.T, conninfo string) {
+ db, err := openSSLConn(t, conninfo)
+ if err == nil {
+ db.Close()
+ t.Fatalf("expected error with conninfo=%q", conninfo)
+ }
+}
+
+// Connect over SSL and run a simple query to test the basics
+func TestSSLConnection(t *testing.T) {
+ maybeSkipSSLTests(t)
+ // Environment sanity check: should fail without SSL
+ checkSSLSetup(t, "sslmode=disable user=pqgossltest")
+
+ db, err := openSSLConn(t, "sslmode=require user=pqgossltest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ rows, err := db.Query("SELECT 1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ rows.Close()
+}
+
+// Test sslmode=verify-full
+func TestSSLVerifyFull(t *testing.T) {
+ maybeSkipSSLTests(t)
+ // Environment sanity check: should fail without SSL
+ checkSSLSetup(t, "sslmode=disable user=pqgossltest")
+
+ // Not OK according to the system CA
+ _, err := openSSLConn(t, "host=postgres sslmode=verify-full user=pqgossltest")
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ _, ok := err.(x509.UnknownAuthorityError)
+ if !ok {
+ t.Fatalf("expected x509.UnknownAuthorityError, got %#+v", err)
+ }
+
+ rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt")
+ rootCert := "sslrootcert=" + rootCertPath + " "
+ // No match on Common Name
+ _, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-full user=pqgossltest")
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ _, ok = err.(x509.HostnameError)
+ if !ok {
+ t.Fatalf("expected x509.HostnameError, got %#+v", err)
+ }
+ // OK
+ _, err = openSSLConn(t, rootCert+"host=postgres sslmode=verify-full user=pqgossltest")
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Test sslmode=verify-ca
+func TestSSLVerifyCA(t *testing.T) {
+ maybeSkipSSLTests(t)
+ // Environment sanity check: should fail without SSL
+ checkSSLSetup(t, "sslmode=disable user=pqgossltest")
+
+ // Not OK according to the system CA
+ _, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest")
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ _, ok := err.(x509.UnknownAuthorityError)
+ if !ok {
+ t.Fatalf("expected x509.UnknownAuthorityError, got %#+v", err)
+ }
+
+ rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt")
+ rootCert := "sslrootcert=" + rootCertPath + " "
+ // No match on Common Name, but that's OK
+ _, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-ca user=pqgossltest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Everything OK
+ _, err = openSSLConn(t, rootCert+"host=postgres sslmode=verify-ca user=pqgossltest")
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func getCertConninfo(t *testing.T, source string) string {
+ var sslkey string
+ var sslcert string
+
+ certpath := os.Getenv("PQSSLCERTTEST_PATH")
+
+ switch source {
+ case "missingkey":
+ sslkey = "/tmp/filedoesnotexist"
+ sslcert = filepath.Join(certpath, "postgresql.crt")
+ case "missingcert":
+ sslkey = filepath.Join(certpath, "postgresql.key")
+ sslcert = "/tmp/filedoesnotexist"
+ case "certtwice":
+ sslkey = filepath.Join(certpath, "postgresql.crt")
+ sslcert = filepath.Join(certpath, "postgresql.crt")
+ case "valid":
+ sslkey = filepath.Join(certpath, "postgresql.key")
+ sslcert = filepath.Join(certpath, "postgresql.crt")
+ default:
+ t.Fatalf("invalid source %q", source)
+ }
+ return fmt.Sprintf("sslmode=require user=pqgosslcert sslkey=%s sslcert=%s", sslkey, sslcert)
+}
+
+// Authenticate over SSL using client certificates
+func TestSSLClientCertificates(t *testing.T) {
+ maybeSkipSSLTests(t)
+ // Environment sanity check: should fail without SSL
+ checkSSLSetup(t, "sslmode=disable user=pqgossltest")
+
+ // Should also fail without a valid certificate
+ db, err := openSSLConn(t, "sslmode=require user=pqgosslcert")
+ if err == nil {
+ db.Close()
+ t.Fatal("expected error")
+ }
+ pge, ok := err.(*Error)
+ if !ok {
+ t.Fatal("expected pq.Error")
+ }
+ if pge.Code.Name() != "invalid_authorization_specification" {
+ t.Fatalf("unexpected error code %q", pge.Code.Name())
+ }
+
+ // Should work
+ db, err = openSSLConn(t, getCertConninfo(t, "valid"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ rows, err := db.Query("SELECT 1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ rows.Close()
+}
+
+// Test errors with ssl certificates
+func TestSSLClientCertificatesMissingFiles(t *testing.T) {
+ maybeSkipSSLTests(t)
+ // Environment sanity check: should fail without SSL
+ checkSSLSetup(t, "sslmode=disable user=pqgossltest")
+
+ // Key missing, should fail
+ _, err := openSSLConn(t, getCertConninfo(t, "missingkey"))
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ // should be a PathError
+ _, ok := err.(*os.PathError)
+ if !ok {
+ t.Fatalf("expected PathError, got %#+v", err)
+ }
+
+ // Cert missing, should fail
+ _, err = openSSLConn(t, getCertConninfo(t, "missingcert"))
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ // should be a PathError
+ _, ok = err.(*os.PathError)
+ if !ok {
+ t.Fatalf("expected PathError, got %#+v", err)
+ }
+
+ // Key has wrong permissions, should fail
+ _, err = openSSLConn(t, getCertConninfo(t, "certtwice"))
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if err != ErrSSLKeyHasWorldPermissions {
+ t.Fatalf("expected ErrSSLKeyHasWorldPermissions, got %#+v", err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/lib/pq/url_test.go b/Godeps/_workspace/src/github.com/lib/pq/url_test.go
new file mode 100644
index 000000000..29f4a7c75
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/lib/pq/url_test.go
@@ -0,0 +1,54 @@
+package pq
+
+import (
+ "testing"
+)
+
+func TestSimpleParseURL(t *testing.T) {
+ expected := "host=hostname.remote"
+ str, err := ParseURL("postgres://hostname.remote")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if str != expected {
+ t.Fatalf("unexpected result from ParseURL:\n+ %v\n- %v", str, expected)
+ }
+}
+
+func TestFullParseURL(t *testing.T) {
+ expected := `dbname=database host=hostname.remote password=top\ secret port=1234 user=username`
+ str, err := ParseURL("postgres://username:top%20secret@hostname.remote:1234/database")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if str != expected {
+ t.Fatalf("unexpected result from ParseURL:\n+ %s\n- %s", str, expected)
+ }
+}
+
+func TestInvalidProtocolParseURL(t *testing.T) {
+ _, err := ParseURL("http://hostname.remote")
+ switch err {
+ case nil:
+ t.Fatal("Expected an error from parsing invalid protocol")
+ default:
+ msg := "invalid connection protocol: http"
+ if err.Error() != msg {
+ t.Fatalf("Unexpected error message:\n+ %s\n- %s",
+ err.Error(), msg)
+ }
+ }
+}
+
+func TestMinimalURL(t *testing.T) {
+ cs, err := ParseURL("postgres://")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if cs != "" {
+ t.Fatalf("expected blank connection string, got: %q", cs)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/mitchellh/cli/cli_test.go b/Godeps/_workspace/src/github.com/mitchellh/cli/cli_test.go
new file mode 100644
index 000000000..8b0af00e0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mitchellh/cli/cli_test.go
@@ -0,0 +1,252 @@
+package cli
+
+import (
+ "bytes"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func TestCLIIsHelp(t *testing.T) {
+ testCases := []struct {
+ args []string
+ isHelp bool
+ }{
+ {[]string{"-h"}, true},
+ {[]string{"-help"}, true},
+ {[]string{"--help"}, true},
+ {[]string{"-h", "foo"}, true},
+ {[]string{"foo", "bar"}, false},
+ {[]string{"-v", "bar"}, false},
+ {[]string{"foo", "-h"}, false},
+ {[]string{"foo", "-help"}, false},
+ {[]string{"foo", "--help"}, false},
+ }
+
+ for _, testCase := range testCases {
+ cli := &CLI{Args: testCase.args}
+ result := cli.IsHelp()
+
+ if result != testCase.isHelp {
+ t.Errorf("Expected '%#v'. Args: %#v", testCase.isHelp, testCase.args)
+ }
+ }
+}
+
+func TestCLIIsVersion(t *testing.T) {
+ testCases := []struct {
+ args []string
+ isVersion bool
+ }{
+ {[]string{"-v"}, true},
+ {[]string{"-version"}, true},
+ {[]string{"--version"}, true},
+ {[]string{"-v", "foo"}, true},
+ {[]string{"foo", "bar"}, false},
+ {[]string{"-h", "bar"}, false},
+ {[]string{"foo", "-v"}, false},
+ {[]string{"foo", "-version"}, false},
+ {[]string{"foo", "--version"}, false},
+ }
+
+ for _, testCase := range testCases {
+ cli := &CLI{Args: testCase.args}
+ result := cli.IsVersion()
+
+ if result != testCase.isVersion {
+ t.Errorf("Expected '%#v'. Args: %#v", testCase.isVersion, testCase.args)
+ }
+ }
+}
+
+func TestCLIRun(t *testing.T) {
+ command := new(MockCommand)
+ cli := &CLI{
+ Args: []string{"foo", "-bar", "-baz"},
+ Commands: map[string]CommandFactory{
+ "foo": func() (Command, error) {
+ return command, nil
+ },
+ },
+ }
+
+ exitCode, err := cli.Run()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if exitCode != command.RunResult {
+ t.Fatalf("bad: %d", exitCode)
+ }
+
+ if !command.RunCalled {
+ t.Fatalf("run should be called")
+ }
+
+ if !reflect.DeepEqual(command.RunArgs, []string{"-bar", "-baz"}) {
+ t.Fatalf("bad args: %#v", command.RunArgs)
+ }
+}
+
+func TestCLIRun_blank(t *testing.T) {
+ command := new(MockCommand)
+ cli := &CLI{
+ Args: []string{"", "foo", "-bar", "-baz"},
+ Commands: map[string]CommandFactory{
+ "foo": func() (Command, error) {
+ return command, nil
+ },
+ },
+ }
+
+ exitCode, err := cli.Run()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if exitCode != command.RunResult {
+ t.Fatalf("bad: %d", exitCode)
+ }
+
+ if !command.RunCalled {
+ t.Fatalf("run should be called")
+ }
+
+ if !reflect.DeepEqual(command.RunArgs, []string{"-bar", "-baz"}) {
+ t.Fatalf("bad args: %#v", command.RunArgs)
+ }
+}
+
+func TestCLIRun_default(t *testing.T) {
+ commandBar := new(MockCommand)
+ commandBar.RunResult = 42
+
+ cli := &CLI{
+ Args: []string{"-bar", "-baz"},
+ Commands: map[string]CommandFactory{
+ "": func() (Command, error) {
+ return commandBar, nil
+ },
+ "foo": func() (Command, error) {
+ return new(MockCommand), nil
+ },
+ },
+ }
+
+ exitCode, err := cli.Run()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if exitCode != commandBar.RunResult {
+ t.Fatalf("bad: %d", exitCode)
+ }
+
+ if !commandBar.RunCalled {
+ t.Fatalf("run should be called")
+ }
+
+ if !reflect.DeepEqual(commandBar.RunArgs, []string{"-bar", "-baz"}) {
+ t.Fatalf("bad args: %#v", commandBar.RunArgs)
+ }
+}
+
+func TestCLIRun_printHelp(t *testing.T) {
+ testCases := [][]string{
+ {},
+ {"-h"},
+ {"i-dont-exist"},
+ {"-bad-flag", "foo"},
+ }
+
+ for _, testCase := range testCases {
+ buf := new(bytes.Buffer)
+ helpText := "foo"
+
+ cli := &CLI{
+ Args: testCase,
+ Commands: map[string]CommandFactory{
+ "foo": func() (Command, error) {
+ return new(MockCommand), nil
+ },
+ },
+ HelpFunc: func(map[string]CommandFactory) string {
+ return helpText
+ },
+ HelpWriter: buf,
+ }
+
+ code, err := cli.Run()
+ if err != nil {
+ t.Errorf("Args: %#v. Error: %s", testCase, err)
+ continue
+ }
+
+ if code != 1 {
+ t.Errorf("Args: %#v. Code: %d", testCase, code)
+ continue
+ }
+
+ if !strings.Contains(buf.String(), helpText) {
+ t.Errorf("Args: %#v. Text: %v", testCase, buf.String())
+ }
+ }
+}
+
+func TestCLIRun_printCommandHelp(t *testing.T) {
+ testCases := [][]string{
+ {"--help", "foo"},
+ {"-h", "foo"},
+ }
+
+ for _, args := range testCases {
+ command := &MockCommand{
+ HelpText: "donuts",
+ }
+
+ buf := new(bytes.Buffer)
+ cli := &CLI{
+ Args: args,
+ Commands: map[string]CommandFactory{
+ "foo": func() (Command, error) {
+ return command, nil
+ },
+ },
+ HelpWriter: buf,
+ }
+
+ exitCode, err := cli.Run()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if exitCode != 1 {
+ t.Fatalf("bad exit code: %d", exitCode)
+ }
+
+ if buf.String() != (command.HelpText + "\n") {
+ t.Fatalf("bad: %#v", buf.String())
+ }
+ }
+}
+
+func TestCLISubcommand(t *testing.T) {
+ testCases := []struct {
+ args []string
+ subcommand string
+ }{
+ {[]string{"bar"}, "bar"},
+ {[]string{"foo", "-h"}, "foo"},
+ {[]string{"-h", "bar"}, "bar"},
+ }
+
+ for _, testCase := range testCases {
+ cli := &CLI{Args: testCase.args}
+ result := cli.Subcommand()
+
+ if result != testCase.subcommand {
+ t.Errorf("Expected %#v, got %#v. Args: %#v",
+ testCase.subcommand, result, testCase.args)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/mitchellh/cli/command_mock_test.go b/Godeps/_workspace/src/github.com/mitchellh/cli/command_mock_test.go
new file mode 100644
index 000000000..241f33939
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mitchellh/cli/command_mock_test.go
@@ -0,0 +1,9 @@
+package cli
+
+import (
+ "testing"
+)
+
+func TestMockCommand_implements(t *testing.T) {
+ var _ Command = new(MockCommand)
+}
diff --git a/Godeps/_workspace/src/github.com/mitchellh/cli/ui_colored_test.go b/Godeps/_workspace/src/github.com/mitchellh/cli/ui_colored_test.go
new file mode 100644
index 000000000..35bbbf589
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mitchellh/cli/ui_colored_test.go
@@ -0,0 +1,74 @@
+package cli
+
+import (
+ "testing"
+)
+
+func TestColoredUi_impl(t *testing.T) {
+ var _ Ui = new(ColoredUi)
+}
+
+func TestColoredUi_noColor(t *testing.T) {
+ mock := new(MockUi)
+ ui := &ColoredUi{
+ ErrorColor: UiColorNone,
+ Ui: mock,
+ }
+ ui.Error("foo")
+
+ if mock.ErrorWriter.String() != "foo\n" {
+ t.Fatalf("bad: %#v", mock.ErrorWriter.String())
+ }
+}
+
+func TestColoredUi_Error(t *testing.T) {
+ mock := new(MockUi)
+ ui := &ColoredUi{
+ ErrorColor: UiColor{Code: 33},
+ Ui: mock,
+ }
+ ui.Error("foo")
+
+ if mock.ErrorWriter.String() != "\033[0;33mfoo\033[0m\n" {
+ t.Fatalf("bad: %#v", mock.ErrorWriter.String())
+ }
+}
+
+func TestColoredUi_Info(t *testing.T) {
+ mock := new(MockUi)
+ ui := &ColoredUi{
+ InfoColor: UiColor{Code: 33},
+ Ui: mock,
+ }
+ ui.Info("foo")
+
+ if mock.OutputWriter.String() != "\033[0;33mfoo\033[0m\n" {
+ t.Fatalf("bad: %#v %#v", mock.OutputWriter.String())
+ }
+}
+
+func TestColoredUi_Output(t *testing.T) {
+ mock := new(MockUi)
+ ui := &ColoredUi{
+ OutputColor: UiColor{Code: 33},
+ Ui: mock,
+ }
+ ui.Output("foo")
+
+ if mock.OutputWriter.String() != "\033[0;33mfoo\033[0m\n" {
+ t.Fatalf("bad: %#v %#v", mock.OutputWriter.String())
+ }
+}
+
+func TestColoredUi_Warn(t *testing.T) {
+ mock := new(MockUi)
+ ui := &ColoredUi{
+ WarnColor: UiColor{Code: 33},
+ Ui: mock,
+ }
+ ui.Warn("foo")
+
+ if mock.ErrorWriter.String() != "\033[0;33mfoo\033[0m\n" {
+ t.Fatalf("bad: %#v %#v", mock.ErrorWriter.String())
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/mitchellh/cli/ui_concurrent_test.go b/Godeps/_workspace/src/github.com/mitchellh/cli/ui_concurrent_test.go
new file mode 100644
index 000000000..d03e49809
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mitchellh/cli/ui_concurrent_test.go
@@ -0,0 +1,9 @@
+package cli
+
+import (
+ "testing"
+)
+
+func TestConcurrentUi_impl(t *testing.T) {
+ var _ Ui = new(ConcurrentUi)
+}
diff --git a/Godeps/_workspace/src/github.com/mitchellh/cli/ui_mock_test.go b/Godeps/_workspace/src/github.com/mitchellh/cli/ui_mock_test.go
new file mode 100644
index 000000000..4cce0bef4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mitchellh/cli/ui_mock_test.go
@@ -0,0 +1,9 @@
+package cli
+
+import (
+ "testing"
+)
+
+func TestMockUi_implements(t *testing.T) {
+ var _ Ui = new(MockUi)
+}
diff --git a/Godeps/_workspace/src/github.com/mitchellh/cli/ui_test.go b/Godeps/_workspace/src/github.com/mitchellh/cli/ui_test.go
new file mode 100644
index 000000000..ac795ba84
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mitchellh/cli/ui_test.go
@@ -0,0 +1,162 @@
+package cli
+
+import (
+ "bytes"
+ "io"
+ "testing"
+)
+
+func TestBasicUi_implements(t *testing.T) {
+ var _ Ui = new(BasicUi)
+}
+
+func TestBasicUi_Ask(t *testing.T) {
+ in_r, in_w := io.Pipe()
+ defer in_r.Close()
+ defer in_w.Close()
+
+ writer := new(bytes.Buffer)
+ ui := &BasicUi{
+ Reader: in_r,
+ Writer: writer,
+ }
+
+ go in_w.Write([]byte("foo bar\nbaz\n"))
+
+ result, err := ui.Ask("Name?")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if writer.String() != "Name? " {
+ t.Fatalf("bad: %#v", writer.String())
+ }
+
+ if result != "foo bar" {
+ t.Fatalf("bad: %#v", result)
+ }
+}
+
+func TestBasicUi_AskSecret(t *testing.T) {
+ in_r, in_w := io.Pipe()
+ defer in_r.Close()
+ defer in_w.Close()
+
+ writer := new(bytes.Buffer)
+ ui := &BasicUi{
+ Reader: in_r,
+ Writer: writer,
+ }
+
+ go in_w.Write([]byte("foo bar\nbaz\n"))
+
+ result, err := ui.AskSecret("Name?")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if writer.String() != "Name? " {
+ t.Fatalf("bad: %#v", writer.String())
+ }
+
+ if result != "foo bar" {
+ t.Fatalf("bad: %#v", result)
+ }
+}
+
+func TestBasicUi_Error(t *testing.T) {
+ writer := new(bytes.Buffer)
+ ui := &BasicUi{Writer: writer}
+ ui.Error("HELLO")
+
+ if writer.String() != "HELLO\n" {
+ t.Fatalf("bad: %s", writer.String())
+ }
+}
+
+func TestBasicUi_Error_ErrorWriter(t *testing.T) {
+ writer := new(bytes.Buffer)
+ ewriter := new(bytes.Buffer)
+ ui := &BasicUi{Writer: writer, ErrorWriter: ewriter}
+ ui.Error("HELLO")
+
+ if ewriter.String() != "HELLO\n" {
+ t.Fatalf("bad: %s", ewriter.String())
+ }
+}
+
+func TestBasicUi_Output(t *testing.T) {
+ writer := new(bytes.Buffer)
+ ui := &BasicUi{Writer: writer}
+ ui.Output("HELLO")
+
+ if writer.String() != "HELLO\n" {
+ t.Fatalf("bad: %s", writer.String())
+ }
+}
+
+func TestBasicUi_Warn(t *testing.T) {
+ writer := new(bytes.Buffer)
+ ui := &BasicUi{Writer: writer}
+ ui.Warn("HELLO")
+
+ if writer.String() != "HELLO\n" {
+ t.Fatalf("bad: %s", writer.String())
+ }
+}
+
+func TestPrefixedUi_implements(t *testing.T) {
+ var _ Ui = new(PrefixedUi)
+}
+
+func TestPrefixedUiError(t *testing.T) {
+ ui := new(MockUi)
+ p := &PrefixedUi{
+ ErrorPrefix: "foo",
+ Ui: ui,
+ }
+
+ p.Error("bar")
+ if ui.ErrorWriter.String() != "foobar\n" {
+ t.Fatalf("bad: %s", ui.ErrorWriter.String())
+ }
+}
+
+func TestPrefixedUiInfo(t *testing.T) {
+ ui := new(MockUi)
+ p := &PrefixedUi{
+ InfoPrefix: "foo",
+ Ui: ui,
+ }
+
+ p.Info("bar")
+ if ui.OutputWriter.String() != "foobar\n" {
+ t.Fatalf("bad: %s", ui.OutputWriter.String())
+ }
+}
+
+func TestPrefixedUiOutput(t *testing.T) {
+ ui := new(MockUi)
+ p := &PrefixedUi{
+ OutputPrefix: "foo",
+ Ui: ui,
+ }
+
+ p.Output("bar")
+ if ui.OutputWriter.String() != "foobar\n" {
+ t.Fatalf("bad: %s", ui.OutputWriter.String())
+ }
+}
+
+func TestPrefixedUiWarn(t *testing.T) {
+ ui := new(MockUi)
+ p := &PrefixedUi{
+ WarnPrefix: "foo",
+ Ui: ui,
+ }
+
+ p.Warn("bar")
+ if ui.ErrorWriter.String() != "foobar\n" {
+ t.Fatalf("bad: %s", ui.ErrorWriter.String())
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/mitchellh/cli/ui_writer_test.go b/Godeps/_workspace/src/github.com/mitchellh/cli/ui_writer_test.go
new file mode 100644
index 000000000..62da6e3a9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mitchellh/cli/ui_writer_test.go
@@ -0,0 +1,24 @@
+package cli
+
+import (
+ "io"
+ "testing"
+)
+
+func TestUiWriter_impl(t *testing.T) {
+ var _ io.Writer = new(UiWriter)
+}
+
+func TestUiWriter(t *testing.T) {
+ ui := new(MockUi)
+ w := &UiWriter{
+ Ui: ui,
+ }
+
+ w.Write([]byte("foo\n"))
+ w.Write([]byte("bar\n"))
+
+ if ui.OutputWriter.String() != "foo\nbar\n" {
+ t.Fatalf("bad: %s", ui.OutputWriter.String())
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/mitchellh/copystructure/copier_time_test.go b/Godeps/_workspace/src/github.com/mitchellh/copystructure/copier_time_test.go
new file mode 100644
index 000000000..5506a0ff1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mitchellh/copystructure/copier_time_test.go
@@ -0,0 +1,17 @@
+package copystructure
+
+import (
+ "testing"
+ "time"
+)
+
+func TestTimeCopier(t *testing.T) {
+ v := time.Now().UTC()
+ result, err := timeCopier(v)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if result.(time.Time) != v {
+ t.Fatalf("bad: %#v\n\n%#v", v, result)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/mitchellh/copystructure/copystructure_examples_test.go b/Godeps/_workspace/src/github.com/mitchellh/copystructure/copystructure_examples_test.go
new file mode 100644
index 000000000..e094b8626
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mitchellh/copystructure/copystructure_examples_test.go
@@ -0,0 +1,22 @@
+package copystructure
+
+import (
+ "fmt"
+)
+
+func ExampleCopy() {
+ input := map[string]interface{}{
+ "bob": map[string]interface{}{
+ "emails": []string{"a", "b"},
+ },
+ }
+
+ dup, err := Copy(input)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("%#v", dup)
+ // Output:
+ // map[string]interface {}{"bob":map[string]interface {}{"emails":[]string{"a", "b"}}}
+}
diff --git a/Godeps/_workspace/src/github.com/mitchellh/copystructure/copystructure_test.go b/Godeps/_workspace/src/github.com/mitchellh/copystructure/copystructure_test.go
new file mode 100644
index 000000000..2d18fab1d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mitchellh/copystructure/copystructure_test.go
@@ -0,0 +1,175 @@
+package copystructure
+
+import (
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestCopy_complex(t *testing.T) {
+ v := map[string]interface{}{
+ "foo": []string{"a", "b"},
+ "bar": "baz",
+ }
+
+ result, err := Copy(v)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !reflect.DeepEqual(result, v) {
+ t.Fatalf("bad: %#v", result)
+ }
+}
+
+func TestCopy_primitive(t *testing.T) {
+ cases := []interface{}{
+ 42,
+ "foo",
+ 1.2,
+ }
+
+ for _, tc := range cases {
+ result, err := Copy(tc)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if result != tc {
+ t.Fatalf("bad: %#v", result)
+ }
+ }
+}
+
+func TestCopy_primitivePtr(t *testing.T) {
+ cases := []interface{}{
+ 42,
+ "foo",
+ 1.2,
+ }
+
+ for _, tc := range cases {
+ result, err := Copy(&tc)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !reflect.DeepEqual(result, &tc) {
+ t.Fatalf("bad: %#v", result)
+ }
+ }
+}
+
+func TestCopy_map(t *testing.T) {
+ v := map[string]interface{}{
+ "bar": "baz",
+ }
+
+ result, err := Copy(v)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !reflect.DeepEqual(result, v) {
+ t.Fatalf("bad: %#v", result)
+ }
+}
+
+func TestCopy_slice(t *testing.T) {
+ v := []string{"bar", "baz"}
+
+ result, err := Copy(v)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !reflect.DeepEqual(result, v) {
+ t.Fatalf("bad: %#v", result)
+ }
+}
+
+func TestCopy_struct(t *testing.T) {
+ type test struct {
+ Value string
+ }
+
+ v := test{Value: "foo"}
+
+ result, err := Copy(v)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !reflect.DeepEqual(result, v) {
+ t.Fatalf("bad: %#v", result)
+ }
+}
+
+func TestCopy_structPtr(t *testing.T) {
+ type test struct {
+ Value string
+ }
+
+ v := &test{Value: "foo"}
+
+ result, err := Copy(v)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !reflect.DeepEqual(result, v) {
+ t.Fatalf("bad: %#v", result)
+ }
+}
+
+func TestCopy_structNil(t *testing.T) {
+ type test struct {
+ Value string
+ }
+
+ var v *test
+ result, err := Copy(v)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if v, ok := result.(*test); !ok {
+ t.Fatalf("bad: %#v", result)
+ } else if v != nil {
+ t.Fatalf("bad: %#v", v)
+ }
+}
+
+func TestCopy_structNested(t *testing.T) {
+ type TestInner struct{}
+
+ type Test struct {
+ Test *TestInner
+ }
+
+ v := Test{}
+
+ result, err := Copy(v)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !reflect.DeepEqual(result, v) {
+ t.Fatalf("bad: %#v", result)
+ }
+}
+
+func TestCopy_time(t *testing.T) {
+ type test struct {
+ Value time.Time
+ }
+
+ v := test{Value: time.Now().UTC()}
+
+ result, err := Copy(v)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !reflect.DeepEqual(result, v) {
+ t.Fatalf("bad: %#v", result)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/mitchellh/go-homedir/homedir.go b/Godeps/_workspace/src/github.com/mitchellh/go-homedir/homedir.go
index ed920dec2..6944957d5 100644
--- a/Godeps/_workspace/src/github.com/mitchellh/go-homedir/homedir.go
+++ b/Godeps/_workspace/src/github.com/mitchellh/go-homedir/homedir.go
@@ -7,6 +7,7 @@ import (
"os/exec"
"path/filepath"
"runtime"
+ "strconv"
"strings"
"sync"
)
@@ -81,9 +82,28 @@ func dirUnix() (string, error) {
return home, nil
}
- // If that fails, try the shell
+ // If that fails, try getent
var stdout bytes.Buffer
- cmd := exec.Command("sh", "-c", "eval echo ~$USER")
+ cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ // If "getent" is missing, ignore it
+ if ee, ok := err.(*exec.Error); !ok || ee.Err != exec.ErrNotFound {
+ return "", err
+ }
+ } else {
+ if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
+ // username:password:uid:gid:gecos:home:shell
+ passwdParts := strings.SplitN(passwd, ":", 7)
+ if len(passwdParts) > 5 {
+ return passwdParts[5], nil
+ }
+ }
+ }
+
+ // If all else fails, try the shell
+ stdout.Reset()
+ cmd = exec.Command("sh", "-c", "cd && pwd")
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
return "", err
diff --git a/Godeps/_workspace/src/github.com/mitchellh/go-homedir/homedir_test.go b/Godeps/_workspace/src/github.com/mitchellh/go-homedir/homedir_test.go
new file mode 100644
index 000000000..c34dbc7f2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mitchellh/go-homedir/homedir_test.go
@@ -0,0 +1,112 @@
+package homedir
+
+import (
+ "fmt"
+ "os"
+ "os/user"
+ "testing"
+)
+
+func patchEnv(key, value string) func() {
+ bck := os.Getenv(key)
+ deferFunc := func() {
+ os.Setenv(key, bck)
+ }
+
+ os.Setenv(key, value)
+ return deferFunc
+}
+
+func BenchmarkDir(b *testing.B) {
+ // We do this for any "warmups"
+ for i := 0; i < 10; i++ {
+ Dir()
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ Dir()
+ }
+}
+
+func TestDir(t *testing.T) {
+ u, err := user.Current()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ dir, err := Dir()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if u.HomeDir != dir {
+ t.Fatalf("%#v != %#v", u.HomeDir, dir)
+ }
+}
+
+func TestExpand(t *testing.T) {
+ u, err := user.Current()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ cases := []struct {
+ Input string
+ Output string
+ Err bool
+ }{
+ {
+ "/foo",
+ "/foo",
+ false,
+ },
+
+ {
+ "~/foo",
+ fmt.Sprintf("%s/foo", u.HomeDir),
+ false,
+ },
+
+ {
+ "",
+ "",
+ false,
+ },
+
+ {
+ "~",
+ u.HomeDir,
+ false,
+ },
+
+ {
+ "~foo/foo",
+ "",
+ true,
+ },
+ }
+
+ for _, tc := range cases {
+ actual, err := Expand(tc.Input)
+ if (err != nil) != tc.Err {
+ t.Fatalf("Input: %#v\n\nErr: %s", tc.Input, err)
+ }
+
+ if actual != tc.Output {
+ t.Fatalf("Input: %#v\n\nOutput: %#v", tc.Input, actual)
+ }
+ }
+
+ DisableCache = true
+ defer func() { DisableCache = false }()
+ defer patchEnv("HOME", "/custom/path/")()
+ expected := "/custom/path/foo/bar"
+ actual, err := Expand("~/foo/bar")
+
+ if err != nil {
+ t.Errorf("No error is expected, got: %v", err)
+ } else if actual != expected {
+ t.Errorf("Expected: %v; actual: %v", expected, actual)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks_test.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks_test.go
new file mode 100644
index 000000000..53289afcf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks_test.go
@@ -0,0 +1,229 @@
+package mapstructure
+
+import (
+ "errors"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestComposeDecodeHookFunc(t *testing.T) {
+ f1 := func(
+ f reflect.Kind,
+ t reflect.Kind,
+ data interface{}) (interface{}, error) {
+ return data.(string) + "foo", nil
+ }
+
+ f2 := func(
+ f reflect.Kind,
+ t reflect.Kind,
+ data interface{}) (interface{}, error) {
+ return data.(string) + "bar", nil
+ }
+
+ f := ComposeDecodeHookFunc(f1, f2)
+
+ result, err := DecodeHookExec(
+ f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), "")
+ if err != nil {
+ t.Fatalf("bad: %s", err)
+ }
+ if result.(string) != "foobar" {
+ t.Fatalf("bad: %#v", result)
+ }
+}
+
+func TestComposeDecodeHookFunc_err(t *testing.T) {
+ f1 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) {
+ return nil, errors.New("foo")
+ }
+
+ f2 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) {
+ panic("NOPE")
+ }
+
+ f := ComposeDecodeHookFunc(f1, f2)
+
+ _, err := DecodeHookExec(
+ f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), 42)
+ if err.Error() != "foo" {
+ t.Fatalf("bad: %s", err)
+ }
+}
+
+func TestComposeDecodeHookFunc_kinds(t *testing.T) {
+ var f2From reflect.Kind
+
+ f1 := func(
+ f reflect.Kind,
+ t reflect.Kind,
+ data interface{}) (interface{}, error) {
+ return int(42), nil
+ }
+
+ f2 := func(
+ f reflect.Kind,
+ t reflect.Kind,
+ data interface{}) (interface{}, error) {
+ f2From = f
+ return data, nil
+ }
+
+ f := ComposeDecodeHookFunc(f1, f2)
+
+ _, err := DecodeHookExec(
+ f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), "")
+ if err != nil {
+ t.Fatalf("bad: %s", err)
+ }
+ if f2From != reflect.Int {
+ t.Fatalf("bad: %#v", f2From)
+ }
+}
+
+func TestStringToSliceHookFunc(t *testing.T) {
+ f := StringToSliceHookFunc(",")
+
+ strType := reflect.TypeOf("")
+ sliceType := reflect.TypeOf([]byte(""))
+ cases := []struct {
+ f, t reflect.Type
+ data interface{}
+ result interface{}
+ err bool
+ }{
+ {sliceType, sliceType, 42, 42, false},
+ {strType, strType, 42, 42, false},
+ {
+ strType,
+ sliceType,
+ "foo,bar,baz",
+ []string{"foo", "bar", "baz"},
+ false,
+ },
+ {
+ strType,
+ sliceType,
+ "",
+ []string{},
+ false,
+ },
+ }
+
+ for i, tc := range cases {
+ actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data)
+ if tc.err != (err != nil) {
+ t.Fatalf("case %d: expected err %#v", i, tc.err)
+ }
+ if !reflect.DeepEqual(actual, tc.result) {
+ t.Fatalf(
+ "case %d: expected %#v, got %#v",
+ i, tc.result, actual)
+ }
+ }
+}
+
+func TestStringToTimeDurationHookFunc(t *testing.T) {
+ f := StringToTimeDurationHookFunc()
+
+ strType := reflect.TypeOf("")
+ timeType := reflect.TypeOf(time.Duration(5))
+ cases := []struct {
+ f, t reflect.Type
+ data interface{}
+ result interface{}
+ err bool
+ }{
+ {strType, timeType, "5s", 5 * time.Second, false},
+ {strType, timeType, "5", time.Duration(0), true},
+ {strType, strType, "5", "5", false},
+ }
+
+ for i, tc := range cases {
+ actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data)
+ if tc.err != (err != nil) {
+ t.Fatalf("case %d: expected err %#v", i, tc.err)
+ }
+ if !reflect.DeepEqual(actual, tc.result) {
+ t.Fatalf(
+ "case %d: expected %#v, got %#v",
+ i, tc.result, actual)
+ }
+ }
+}
+
+func TestWeaklyTypedHook(t *testing.T) {
+ var f DecodeHookFunc = WeaklyTypedHook
+
+ boolType := reflect.TypeOf(true)
+ strType := reflect.TypeOf("")
+ sliceType := reflect.TypeOf([]byte(""))
+ cases := []struct {
+ f, t reflect.Type
+ data interface{}
+ result interface{}
+ err bool
+ }{
+ // TO STRING
+ {
+ boolType,
+ strType,
+ false,
+ "0",
+ false,
+ },
+
+ {
+ boolType,
+ strType,
+ true,
+ "1",
+ false,
+ },
+
+ {
+ reflect.TypeOf(float32(1)),
+ strType,
+ float32(7),
+ "7",
+ false,
+ },
+
+ {
+ reflect.TypeOf(int(1)),
+ strType,
+ int(7),
+ "7",
+ false,
+ },
+
+ {
+ sliceType,
+ strType,
+ []uint8("foo"),
+ "foo",
+ false,
+ },
+
+ {
+ reflect.TypeOf(uint(1)),
+ strType,
+ uint(7),
+ "7",
+ false,
+ },
+ }
+
+ for i, tc := range cases {
+ actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data)
+ if tc.err != (err != nil) {
+ t.Fatalf("case %d: expected err %#v", i, tc.err)
+ }
+ if !reflect.DeepEqual(actual, tc.result) {
+ t.Fatalf(
+ "case %d: expected %#v, got %#v",
+ i, tc.result, actual)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go
new file mode 100644
index 000000000..41d2a41f7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go
@@ -0,0 +1,279 @@
+package mapstructure
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func Benchmark_Decode(b *testing.B) {
+ type Person struct {
+ Name string
+ Age int
+ Emails []string
+ Extra map[string]string
+ }
+
+ input := map[string]interface{}{
+ "name": "Mitchell",
+ "age": 91,
+ "emails": []string{"one", "two", "three"},
+ "extra": map[string]string{
+ "twitter": "mitchellh",
+ },
+ }
+
+ var result Person
+ for i := 0; i < b.N; i++ {
+ Decode(input, &result)
+ }
+}
+
+// decodeViaJSON takes the map data and passes it through encoding/json to convert it into the
+// given Go native structure pointed to by v. v must be a pointer to a struct.
+func decodeViaJSON(data interface{}, v interface{}) error {
+ // Perform the task by simply marshalling the input into JSON,
+ // then unmarshalling it into target native Go struct.
+ b, err := json.Marshal(data)
+ if err != nil {
+ return err
+ }
+ return json.Unmarshal(b, v)
+}
+
+func Benchmark_DecodeViaJSON(b *testing.B) {
+ type Person struct {
+ Name string
+ Age int
+ Emails []string
+ Extra map[string]string
+ }
+
+ input := map[string]interface{}{
+ "name": "Mitchell",
+ "age": 91,
+ "emails": []string{"one", "two", "three"},
+ "extra": map[string]string{
+ "twitter": "mitchellh",
+ },
+ }
+
+ var result Person
+ for i := 0; i < b.N; i++ {
+ decodeViaJSON(input, &result)
+ }
+}
+
+func Benchmark_DecodeBasic(b *testing.B) {
+ input := map[string]interface{}{
+ "vstring": "foo",
+ "vint": 42,
+ "Vuint": 42,
+ "vbool": true,
+ "Vfloat": 42.42,
+ "vsilent": true,
+ "vdata": 42,
+ }
+
+ var result Basic
+ for i := 0; i < b.N; i++ {
+ Decode(input, &result)
+ }
+}
+
+func Benchmark_DecodeEmbedded(b *testing.B) {
+ input := map[string]interface{}{
+ "vstring": "foo",
+ "Basic": map[string]interface{}{
+ "vstring": "innerfoo",
+ },
+ "vunique": "bar",
+ }
+
+ var result Embedded
+ for i := 0; i < b.N; i++ {
+ Decode(input, &result)
+ }
+}
+
+func Benchmark_DecodeTypeConversion(b *testing.B) {
+ input := map[string]interface{}{
+ "IntToFloat": 42,
+ "IntToUint": 42,
+ "IntToBool": 1,
+ "IntToString": 42,
+ "UintToInt": 42,
+ "UintToFloat": 42,
+ "UintToBool": 42,
+ "UintToString": 42,
+ "BoolToInt": true,
+ "BoolToUint": true,
+ "BoolToFloat": true,
+ "BoolToString": true,
+ "FloatToInt": 42.42,
+ "FloatToUint": 42.42,
+ "FloatToBool": 42.42,
+ "FloatToString": 42.42,
+ "StringToInt": "42",
+ "StringToUint": "42",
+ "StringToBool": "1",
+ "StringToFloat": "42.42",
+ "SliceToMap": []interface{}{},
+ "MapToSlice": map[string]interface{}{},
+ }
+
+ var resultStrict TypeConversionResult
+ for i := 0; i < b.N; i++ {
+ Decode(input, &resultStrict)
+ }
+}
+
+func Benchmark_DecodeMap(b *testing.B) {
+ input := map[string]interface{}{
+ "vfoo": "foo",
+ "vother": map[interface{}]interface{}{
+ "foo": "foo",
+ "bar": "bar",
+ },
+ }
+
+ var result Map
+ for i := 0; i < b.N; i++ {
+ Decode(input, &result)
+ }
+}
+
+func Benchmark_DecodeMapOfStruct(b *testing.B) {
+ input := map[string]interface{}{
+ "value": map[string]interface{}{
+ "foo": map[string]string{"vstring": "one"},
+ "bar": map[string]string{"vstring": "two"},
+ },
+ }
+
+ var result MapOfStruct
+ for i := 0; i < b.N; i++ {
+ Decode(input, &result)
+ }
+}
+
+func Benchmark_DecodeSlice(b *testing.B) {
+ input := map[string]interface{}{
+ "vfoo": "foo",
+ "vbar": []string{"foo", "bar", "baz"},
+ }
+
+ var result Slice
+ for i := 0; i < b.N; i++ {
+ Decode(input, &result)
+ }
+}
+
+func Benchmark_DecodeSliceOfStruct(b *testing.B) {
+ input := map[string]interface{}{
+ "value": []map[string]interface{}{
+ {"vstring": "one"},
+ {"vstring": "two"},
+ },
+ }
+
+ var result SliceOfStruct
+ for i := 0; i < b.N; i++ {
+ Decode(input, &result)
+ }
+}
+
+func Benchmark_DecodeWeaklyTypedInput(b *testing.B) {
+ type Person struct {
+ Name string
+ Age int
+ Emails []string
+ }
+
+ // This input can come from anywhere, but typically comes from
+ // something like decoding JSON, generated by a weakly typed language
+ // such as PHP.
+ input := map[string]interface{}{
+ "name": 123, // number => string
+ "age": "42", // string => number
+ "emails": map[string]interface{}{}, // empty map => empty array
+ }
+
+ var result Person
+ config := &DecoderConfig{
+ WeaklyTypedInput: true,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ panic(err)
+ }
+
+ for i := 0; i < b.N; i++ {
+ decoder.Decode(input)
+ }
+}
+
+func Benchmark_DecodeMetadata(b *testing.B) {
+ type Person struct {
+ Name string
+ Age int
+ }
+
+ input := map[string]interface{}{
+ "name": "Mitchell",
+ "age": 91,
+ "email": "foo@bar.com",
+ }
+
+ var md Metadata
+ var result Person
+ config := &DecoderConfig{
+ Metadata: &md,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ panic(err)
+ }
+
+ for i := 0; i < b.N; i++ {
+ decoder.Decode(input)
+ }
+}
+
+func Benchmark_DecodeMetadataEmbedded(b *testing.B) {
+ input := map[string]interface{}{
+ "vstring": "foo",
+ "vunique": "bar",
+ }
+
+ var md Metadata
+ var result EmbeddedSquash
+ config := &DecoderConfig{
+ Metadata: &md,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ b.Fatalf("err: %s", err)
+ }
+
+ for i := 0; i < b.N; i++ {
+ decoder.Decode(input)
+ }
+}
+
+func Benchmark_DecodeTagged(b *testing.B) {
+ input := map[string]interface{}{
+ "foo": "bar",
+ "bar": "value",
+ }
+
+ var result Tagged
+ for i := 0; i < b.N; i++ {
+ Decode(input, &result)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go
new file mode 100644
index 000000000..7054f1ac9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go
@@ -0,0 +1,47 @@
+package mapstructure
+
+import "testing"
+
+// GH-1
+func TestDecode_NilValue(t *testing.T) {
+ input := map[string]interface{}{
+ "vfoo": nil,
+ "vother": nil,
+ }
+
+ var result Map
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("should not error: %s", err)
+ }
+
+ if result.Vfoo != "" {
+ t.Fatalf("value should be default: %s", result.Vfoo)
+ }
+
+ if result.Vother != nil {
+ t.Fatalf("Vother should be nil: %s", result.Vother)
+ }
+}
+
+// GH-10
+func TestDecode_mapInterfaceInterface(t *testing.T) {
+ input := map[interface{}]interface{}{
+ "vfoo": nil,
+ "vother": nil,
+ }
+
+ var result Map
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("should not error: %s", err)
+ }
+
+ if result.Vfoo != "" {
+ t.Fatalf("value should be default: %s", result.Vfoo)
+ }
+
+ if result.Vother != nil {
+ t.Fatalf("Vother should be nil: %s", result.Vother)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_examples_test.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_examples_test.go
new file mode 100644
index 000000000..f17c214a8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_examples_test.go
@@ -0,0 +1,203 @@
+package mapstructure
+
+import (
+ "fmt"
+)
+
+func ExampleDecode() {
+ type Person struct {
+ Name string
+ Age int
+ Emails []string
+ Extra map[string]string
+ }
+
+ // This input can come from anywhere, but typically comes from
+ // something like decoding JSON where we're not quite sure of the
+ // struct initially.
+ input := map[string]interface{}{
+ "name": "Mitchell",
+ "age": 91,
+ "emails": []string{"one", "two", "three"},
+ "extra": map[string]string{
+ "twitter": "mitchellh",
+ },
+ }
+
+ var result Person
+ err := Decode(input, &result)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("%#v", result)
+ // Output:
+ // mapstructure.Person{Name:"Mitchell", Age:91, Emails:[]string{"one", "two", "three"}, Extra:map[string]string{"twitter":"mitchellh"}}
+}
+
+func ExampleDecode_errors() {
+ type Person struct {
+ Name string
+ Age int
+ Emails []string
+ Extra map[string]string
+ }
+
+ // This input can come from anywhere, but typically comes from
+ // something like decoding JSON where we're not quite sure of the
+ // struct initially.
+ input := map[string]interface{}{
+ "name": 123,
+ "age": "bad value",
+ "emails": []int{1, 2, 3},
+ }
+
+ var result Person
+ err := Decode(input, &result)
+ if err == nil {
+ panic("should have an error")
+ }
+
+ fmt.Println(err.Error())
+ // Output:
+ // 5 error(s) decoding:
+ //
+ // * 'Age' expected type 'int', got unconvertible type 'string'
+ // * 'Emails[0]' expected type 'string', got unconvertible type 'int'
+ // * 'Emails[1]' expected type 'string', got unconvertible type 'int'
+ // * 'Emails[2]' expected type 'string', got unconvertible type 'int'
+ // * 'Name' expected type 'string', got unconvertible type 'int'
+}
+
+func ExampleDecode_metadata() {
+ type Person struct {
+ Name string
+ Age int
+ }
+
+ // This input can come from anywhere, but typically comes from
+ // something like decoding JSON where we're not quite sure of the
+ // struct initially.
+ input := map[string]interface{}{
+ "name": "Mitchell",
+ "age": 91,
+ "email": "foo@bar.com",
+ }
+
+ // For metadata, we make a more advanced DecoderConfig so we can
+ // more finely configure the decoder that is used. In this case, we
+ // just tell the decoder we want to track metadata.
+ var md Metadata
+ var result Person
+ config := &DecoderConfig{
+ Metadata: &md,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ panic(err)
+ }
+
+ if err := decoder.Decode(input); err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("Unused keys: %#v", md.Unused)
+ // Output:
+ // Unused keys: []string{"email"}
+}
+
+func ExampleDecode_weaklyTypedInput() {
+ type Person struct {
+ Name string
+ Age int
+ Emails []string
+ }
+
+ // This input can come from anywhere, but typically comes from
+ // something like decoding JSON, generated by a weakly typed language
+ // such as PHP.
+ input := map[string]interface{}{
+ "name": 123, // number => string
+ "age": "42", // string => number
+ "emails": map[string]interface{}{}, // empty map => empty array
+ }
+
+ var result Person
+ config := &DecoderConfig{
+ WeaklyTypedInput: true,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ panic(err)
+ }
+
+ err = decoder.Decode(input)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("%#v", result)
+ // Output: mapstructure.Person{Name:"123", Age:42, Emails:[]string{}}
+}
+
+func ExampleDecode_tags() {
+ // Note that the mapstructure tags defined in the struct type
+ // can indicate which fields the values are mapped to.
+ type Person struct {
+ Name string `mapstructure:"person_name"`
+ Age int `mapstructure:"person_age"`
+ }
+
+ input := map[string]interface{}{
+ "person_name": "Mitchell",
+ "person_age": 91,
+ }
+
+ var result Person
+ err := Decode(input, &result)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("%#v", result)
+ // Output:
+ // mapstructure.Person{Name:"Mitchell", Age:91}
+}
+
+func ExampleDecode_embeddedStruct() {
+ // Squashing multiple embedded structs is allowed using the squash tag.
+ // This is demonstrated by creating a composite struct of multiple types
+ // and decoding into it. In this case, a person can carry with it both
+ // a Family and a Location, as well as their own FirstName.
+ type Family struct {
+ LastName string
+ }
+ type Location struct {
+ City string
+ }
+ type Person struct {
+ Family `mapstructure:",squash"`
+ Location `mapstructure:",squash"`
+ FirstName string
+ }
+
+ input := map[string]interface{}{
+ "FirstName": "Mitchell",
+ "LastName": "Hashimoto",
+ "City": "San Francisco",
+ }
+
+ var result Person
+ err := Decode(input, &result)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("%s %s, %s", result.FirstName, result.LastName, result.City)
+ // Output:
+ // Mitchell Hashimoto, San Francisco
+}
diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_test.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_test.go
new file mode 100644
index 000000000..8a27647b5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_test.go
@@ -0,0 +1,999 @@
+package mapstructure
+
+import (
+ "reflect"
+ "sort"
+ "testing"
+)
+
+type Basic struct {
+ Vstring string
+ Vint int
+ Vuint uint
+ Vbool bool
+ Vfloat float64
+ Vextra string
+ vsilent bool
+ Vdata interface{}
+}
+
+type BasicSquash struct {
+ Test Basic `mapstructure:",squash"`
+}
+
+type Embedded struct {
+ Basic
+ Vunique string
+}
+
+type EmbeddedPointer struct {
+ *Basic
+ Vunique string
+}
+
+type EmbeddedSquash struct {
+ Basic `mapstructure:",squash"`
+ Vunique string
+}
+
+type Map struct {
+ Vfoo string
+ Vother map[string]string
+}
+
+type MapOfStruct struct {
+ Value map[string]Basic
+}
+
+type Nested struct {
+ Vfoo string
+ Vbar Basic
+}
+
+type NestedPointer struct {
+ Vfoo string
+ Vbar *Basic
+}
+
+type Slice struct {
+ Vfoo string
+ Vbar []string
+}
+
+type SliceOfStruct struct {
+ Value []Basic
+}
+
+type Tagged struct {
+ Extra string `mapstructure:"bar,what,what"`
+ Value string `mapstructure:"foo"`
+}
+
+type TypeConversionResult struct {
+ IntToFloat float32
+ IntToUint uint
+ IntToBool bool
+ IntToString string
+ UintToInt int
+ UintToFloat float32
+ UintToBool bool
+ UintToString string
+ BoolToInt int
+ BoolToUint uint
+ BoolToFloat float32
+ BoolToString string
+ FloatToInt int
+ FloatToUint uint
+ FloatToBool bool
+ FloatToString string
+ SliceUint8ToString string
+ StringToInt int
+ StringToUint uint
+ StringToBool bool
+ StringToFloat float32
+ SliceToMap map[string]interface{}
+ MapToSlice []interface{}
+}
+
+func TestBasicTypes(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vstring": "foo",
+ "vint": 42,
+ "Vuint": 42,
+ "vbool": true,
+ "Vfloat": 42.42,
+ "vsilent": true,
+ "vdata": 42,
+ }
+
+ var result Basic
+ err := Decode(input, &result)
+ if err != nil {
+ t.Errorf("got an err: %s", err.Error())
+ t.FailNow()
+ }
+
+ if result.Vstring != "foo" {
+ t.Errorf("vstring value should be 'foo': %#v", result.Vstring)
+ }
+
+ if result.Vint != 42 {
+ t.Errorf("vint value should be 42: %#v", result.Vint)
+ }
+
+ if result.Vuint != 42 {
+ t.Errorf("vuint value should be 42: %#v", result.Vuint)
+ }
+
+ if result.Vbool != true {
+ t.Errorf("vbool value should be true: %#v", result.Vbool)
+ }
+
+ if result.Vfloat != 42.42 {
+ t.Errorf("vfloat value should be 42.42: %#v", result.Vfloat)
+ }
+
+ if result.Vextra != "" {
+ t.Errorf("vextra value should be empty: %#v", result.Vextra)
+ }
+
+ if result.vsilent != false {
+ t.Error("vsilent should not be set, it is unexported")
+ }
+
+ if result.Vdata != 42 {
+ t.Error("vdata should be valid")
+ }
+}
+
+func TestBasic_IntWithFloat(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vint": float64(42),
+ }
+
+ var result Basic
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err)
+ }
+}
+
+func TestBasic_Merge(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vint": 42,
+ }
+
+ var result Basic
+ result.Vuint = 100
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err)
+ }
+
+ expected := Basic{
+ Vint: 42,
+ Vuint: 100,
+ }
+ if !reflect.DeepEqual(result, expected) {
+ t.Fatalf("bad: %#v", result)
+ }
+}
+
+func TestDecode_BasicSquash(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vstring": "foo",
+ }
+
+ var result BasicSquash
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err.Error())
+ }
+
+ if result.Test.Vstring != "foo" {
+ t.Errorf("vstring value should be 'foo': %#v", result.Test.Vstring)
+ }
+}
+
+func TestDecode_Embedded(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vstring": "foo",
+ "Basic": map[string]interface{}{
+ "vstring": "innerfoo",
+ },
+ "vunique": "bar",
+ }
+
+ var result Embedded
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err.Error())
+ }
+
+ if result.Vstring != "innerfoo" {
+ t.Errorf("vstring value should be 'innerfoo': %#v", result.Vstring)
+ }
+
+ if result.Vunique != "bar" {
+ t.Errorf("vunique value should be 'bar': %#v", result.Vunique)
+ }
+}
+
+func TestDecode_EmbeddedPointer(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vstring": "foo",
+ "Basic": map[string]interface{}{
+ "vstring": "innerfoo",
+ },
+ "vunique": "bar",
+ }
+
+ var result EmbeddedPointer
+ err := Decode(input, &result)
+ if err == nil {
+ t.Fatal("should get error")
+ }
+}
+
+func TestDecode_EmbeddedSquash(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vstring": "foo",
+ "vunique": "bar",
+ }
+
+ var result EmbeddedSquash
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err.Error())
+ }
+
+ if result.Vstring != "foo" {
+ t.Errorf("vstring value should be 'foo': %#v", result.Vstring)
+ }
+
+ if result.Vunique != "bar" {
+ t.Errorf("vunique value should be 'bar': %#v", result.Vunique)
+ }
+}
+
+func TestDecode_DecodeHook(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vint": "WHAT",
+ }
+
+ decodeHook := func(from reflect.Kind, to reflect.Kind, v interface{}) (interface{}, error) {
+ if from == reflect.String && to != reflect.String {
+ return 5, nil
+ }
+
+ return v, nil
+ }
+
+ var result Basic
+ config := &DecoderConfig{
+ DecodeHook: decodeHook,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = decoder.Decode(input)
+ if err != nil {
+ t.Fatalf("got an err: %s", err)
+ }
+
+ if result.Vint != 5 {
+ t.Errorf("vint should be 5: %#v", result.Vint)
+ }
+}
+
+func TestDecode_DecodeHookType(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vint": "WHAT",
+ }
+
+ decodeHook := func(from reflect.Type, to reflect.Type, v interface{}) (interface{}, error) {
+ if from.Kind() == reflect.String &&
+ to.Kind() != reflect.String {
+ return 5, nil
+ }
+
+ return v, nil
+ }
+
+ var result Basic
+ config := &DecoderConfig{
+ DecodeHook: decodeHook,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = decoder.Decode(input)
+ if err != nil {
+ t.Fatalf("got an err: %s", err)
+ }
+
+ if result.Vint != 5 {
+ t.Errorf("vint should be 5: %#v", result.Vint)
+ }
+}
+
+func TestDecode_Nil(t *testing.T) {
+ t.Parallel()
+
+ var input interface{} = nil
+ result := Basic{
+ Vstring: "foo",
+ }
+
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if result.Vstring != "foo" {
+ t.Fatalf("bad: %#v", result.Vstring)
+ }
+}
+
+func TestDecode_NonStruct(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "foo": "bar",
+ "bar": "baz",
+ }
+
+ var result map[string]string
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if result["foo"] != "bar" {
+ t.Fatal("foo is not bar")
+ }
+}
+
+func TestDecode_StructMatch(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vbar": Basic{
+ Vstring: "foo",
+ },
+ }
+
+ var result Nested
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err.Error())
+ }
+
+ if result.Vbar.Vstring != "foo" {
+ t.Errorf("bad: %#v", result)
+ }
+}
+
+func TestDecode_TypeConversion(t *testing.T) {
+ input := map[string]interface{}{
+ "IntToFloat": 42,
+ "IntToUint": 42,
+ "IntToBool": 1,
+ "IntToString": 42,
+ "UintToInt": 42,
+ "UintToFloat": 42,
+ "UintToBool": 42,
+ "UintToString": 42,
+ "BoolToInt": true,
+ "BoolToUint": true,
+ "BoolToFloat": true,
+ "BoolToString": true,
+ "FloatToInt": 42.42,
+ "FloatToUint": 42.42,
+ "FloatToBool": 42.42,
+ "FloatToString": 42.42,
+ "SliceUint8ToString": []uint8("foo"),
+ "StringToInt": "42",
+ "StringToUint": "42",
+ "StringToBool": "1",
+ "StringToFloat": "42.42",
+ "SliceToMap": []interface{}{},
+ "MapToSlice": map[string]interface{}{},
+ }
+
+ expectedResultStrict := TypeConversionResult{
+ IntToFloat: 42.0,
+ IntToUint: 42,
+ UintToInt: 42,
+ UintToFloat: 42,
+ BoolToInt: 0,
+ BoolToUint: 0,
+ BoolToFloat: 0,
+ FloatToInt: 42,
+ FloatToUint: 42,
+ }
+
+ expectedResultWeak := TypeConversionResult{
+ IntToFloat: 42.0,
+ IntToUint: 42,
+ IntToBool: true,
+ IntToString: "42",
+ UintToInt: 42,
+ UintToFloat: 42,
+ UintToBool: true,
+ UintToString: "42",
+ BoolToInt: 1,
+ BoolToUint: 1,
+ BoolToFloat: 1,
+ BoolToString: "1",
+ FloatToInt: 42,
+ FloatToUint: 42,
+ FloatToBool: true,
+ FloatToString: "42.42",
+ SliceUint8ToString: "foo",
+ StringToInt: 42,
+ StringToUint: 42,
+ StringToBool: true,
+ StringToFloat: 42.42,
+ SliceToMap: map[string]interface{}{},
+ MapToSlice: []interface{}{},
+ }
+
+ // Test strict type conversion
+ var resultStrict TypeConversionResult
+ err := Decode(input, &resultStrict)
+ if err == nil {
+ t.Errorf("should return an error")
+ }
+ if !reflect.DeepEqual(resultStrict, expectedResultStrict) {
+ t.Errorf("expected %v, got: %v", expectedResultStrict, resultStrict)
+ }
+
+ // Test weak type conversion
+ var decoder *Decoder
+ var resultWeak TypeConversionResult
+
+ config := &DecoderConfig{
+ WeaklyTypedInput: true,
+ Result: &resultWeak,
+ }
+
+ decoder, err = NewDecoder(config)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = decoder.Decode(input)
+ if err != nil {
+ t.Fatalf("got an err: %s", err)
+ }
+
+ if !reflect.DeepEqual(resultWeak, expectedResultWeak) {
+ t.Errorf("expected \n%#v, got: \n%#v", expectedResultWeak, resultWeak)
+ }
+}
+
+func TestDecoder_ErrorUnused(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vstring": "hello",
+ "foo": "bar",
+ }
+
+ var result Basic
+ config := &DecoderConfig{
+ ErrorUnused: true,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = decoder.Decode(input)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+}
+
+func TestMap(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vfoo": "foo",
+ "vother": map[interface{}]interface{}{
+ "foo": "foo",
+ "bar": "bar",
+ },
+ }
+
+ var result Map
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an error: %s", err)
+ }
+
+ if result.Vfoo != "foo" {
+ t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo)
+ }
+
+ if result.Vother == nil {
+ t.Fatal("vother should not be nil")
+ }
+
+ if len(result.Vother) != 2 {
+ t.Error("vother should have two items")
+ }
+
+ if result.Vother["foo"] != "foo" {
+ t.Errorf("'foo' key should be foo, got: %#v", result.Vother["foo"])
+ }
+
+ if result.Vother["bar"] != "bar" {
+ t.Errorf("'bar' key should be bar, got: %#v", result.Vother["bar"])
+ }
+}
+
+func TestMapMerge(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vfoo": "foo",
+ "vother": map[interface{}]interface{}{
+ "foo": "foo",
+ "bar": "bar",
+ },
+ }
+
+ var result Map
+ result.Vother = map[string]string{"hello": "world"}
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an error: %s", err)
+ }
+
+ if result.Vfoo != "foo" {
+ t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo)
+ }
+
+ expected := map[string]string{
+ "foo": "foo",
+ "bar": "bar",
+ "hello": "world",
+ }
+ if !reflect.DeepEqual(result.Vother, expected) {
+ t.Errorf("bad: %#v", result.Vother)
+ }
+}
+
+func TestMapOfStruct(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "value": map[string]interface{}{
+ "foo": map[string]string{"vstring": "one"},
+ "bar": map[string]string{"vstring": "two"},
+ },
+ }
+
+ var result MapOfStruct
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err)
+ }
+
+ if result.Value == nil {
+ t.Fatal("value should not be nil")
+ }
+
+ if len(result.Value) != 2 {
+ t.Error("value should have two items")
+ }
+
+ if result.Value["foo"].Vstring != "one" {
+ t.Errorf("foo value should be 'one', got: %s", result.Value["foo"].Vstring)
+ }
+
+ if result.Value["bar"].Vstring != "two" {
+ t.Errorf("bar value should be 'two', got: %s", result.Value["bar"].Vstring)
+ }
+}
+
+func TestNestedType(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vfoo": "foo",
+ "vbar": map[string]interface{}{
+ "vstring": "foo",
+ "vint": 42,
+ "vbool": true,
+ },
+ }
+
+ var result Nested
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err.Error())
+ }
+
+ if result.Vfoo != "foo" {
+ t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo)
+ }
+
+ if result.Vbar.Vstring != "foo" {
+ t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring)
+ }
+
+ if result.Vbar.Vint != 42 {
+ t.Errorf("vint value should be 42: %#v", result.Vbar.Vint)
+ }
+
+ if result.Vbar.Vbool != true {
+ t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool)
+ }
+
+ if result.Vbar.Vextra != "" {
+ t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra)
+ }
+}
+
+func TestNestedTypePointer(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vfoo": "foo",
+ "vbar": &map[string]interface{}{
+ "vstring": "foo",
+ "vint": 42,
+ "vbool": true,
+ },
+ }
+
+ var result NestedPointer
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err.Error())
+ }
+
+ if result.Vfoo != "foo" {
+ t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo)
+ }
+
+ if result.Vbar.Vstring != "foo" {
+ t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring)
+ }
+
+ if result.Vbar.Vint != 42 {
+ t.Errorf("vint value should be 42: %#v", result.Vbar.Vint)
+ }
+
+ if result.Vbar.Vbool != true {
+ t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool)
+ }
+
+ if result.Vbar.Vextra != "" {
+ t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra)
+ }
+}
+
+func TestSlice(t *testing.T) {
+ t.Parallel()
+
+ inputStringSlice := map[string]interface{}{
+ "vfoo": "foo",
+ "vbar": []string{"foo", "bar", "baz"},
+ }
+
+ inputStringSlicePointer := map[string]interface{}{
+ "vfoo": "foo",
+ "vbar": &[]string{"foo", "bar", "baz"},
+ }
+
+ outputStringSlice := &Slice{
+ "foo",
+ []string{"foo", "bar", "baz"},
+ }
+
+ testSliceInput(t, inputStringSlice, outputStringSlice)
+ testSliceInput(t, inputStringSlicePointer, outputStringSlice)
+}
+
+func TestInvalidSlice(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vfoo": "foo",
+ "vbar": 42,
+ }
+
+ result := Slice{}
+ err := Decode(input, &result)
+ if err == nil {
+ t.Errorf("expected failure")
+ }
+}
+
+func TestSliceOfStruct(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "value": []map[string]interface{}{
+ {"vstring": "one"},
+ {"vstring": "two"},
+ },
+ }
+
+ var result SliceOfStruct
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got unexpected error: %s", err)
+ }
+
+ if len(result.Value) != 2 {
+ t.Fatalf("expected two values, got %d", len(result.Value))
+ }
+
+ if result.Value[0].Vstring != "one" {
+ t.Errorf("first value should be 'one', got: %s", result.Value[0].Vstring)
+ }
+
+ if result.Value[1].Vstring != "two" {
+ t.Errorf("second value should be 'two', got: %s", result.Value[1].Vstring)
+ }
+}
+
+func TestInvalidType(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vstring": 42,
+ }
+
+ var result Basic
+ err := Decode(input, &result)
+ if err == nil {
+ t.Fatal("error should exist")
+ }
+
+ derr, ok := err.(*Error)
+ if !ok {
+ t.Fatalf("error should be kind of Error, instead: %#v", err)
+ }
+
+ if derr.Errors[0] != "'Vstring' expected type 'string', got unconvertible type 'int'" {
+ t.Errorf("got unexpected error: %s", err)
+ }
+
+ inputNegIntUint := map[string]interface{}{
+ "vuint": -42,
+ }
+
+ err = Decode(inputNegIntUint, &result)
+ if err == nil {
+ t.Fatal("error should exist")
+ }
+
+ derr, ok = err.(*Error)
+ if !ok {
+ t.Fatalf("error should be kind of Error, instead: %#v", err)
+ }
+
+ if derr.Errors[0] != "cannot parse 'Vuint', -42 overflows uint" {
+ t.Errorf("got unexpected error: %s", err)
+ }
+
+ inputNegFloatUint := map[string]interface{}{
+ "vuint": -42.0,
+ }
+
+ err = Decode(inputNegFloatUint, &result)
+ if err == nil {
+ t.Fatal("error should exist")
+ }
+
+ derr, ok = err.(*Error)
+ if !ok {
+ t.Fatalf("error should be kind of Error, instead: %#v", err)
+ }
+
+ if derr.Errors[0] != "cannot parse 'Vuint', -42.000000 overflows uint" {
+ t.Errorf("got unexpected error: %s", err)
+ }
+}
+
+func TestMetadata(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vfoo": "foo",
+ "vbar": map[string]interface{}{
+ "vstring": "foo",
+ "Vuint": 42,
+ "foo": "bar",
+ },
+ "bar": "nil",
+ }
+
+ var md Metadata
+ var result Nested
+ config := &DecoderConfig{
+ Metadata: &md,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = decoder.Decode(input)
+ if err != nil {
+ t.Fatalf("err: %s", err.Error())
+ }
+
+ expectedKeys := []string{"Vbar", "Vbar.Vstring", "Vbar.Vuint", "Vfoo"}
+ sort.Strings(md.Keys)
+ if !reflect.DeepEqual(md.Keys, expectedKeys) {
+ t.Fatalf("bad keys: %#v", md.Keys)
+ }
+
+ expectedUnused := []string{"Vbar.foo", "bar"}
+ if !reflect.DeepEqual(md.Unused, expectedUnused) {
+ t.Fatalf("bad unused: %#v", md.Unused)
+ }
+}
+
+func TestMetadata_Embedded(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vstring": "foo",
+ "vunique": "bar",
+ }
+
+ var md Metadata
+ var result EmbeddedSquash
+ config := &DecoderConfig{
+ Metadata: &md,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = decoder.Decode(input)
+ if err != nil {
+ t.Fatalf("err: %s", err.Error())
+ }
+
+ expectedKeys := []string{"Vstring", "Vunique"}
+
+ sort.Strings(md.Keys)
+ if !reflect.DeepEqual(md.Keys, expectedKeys) {
+ t.Fatalf("bad keys: %#v", md.Keys)
+ }
+
+ expectedUnused := []string{}
+ if !reflect.DeepEqual(md.Unused, expectedUnused) {
+ t.Fatalf("bad unused: %#v", md.Unused)
+ }
+}
+
+func TestNonPtrValue(t *testing.T) {
+ t.Parallel()
+
+ err := Decode(map[string]interface{}{}, Basic{})
+ if err == nil {
+ t.Fatal("error should exist")
+ }
+
+ if err.Error() != "result must be a pointer" {
+ t.Errorf("got unexpected error: %s", err)
+ }
+}
+
+func TestTagged(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "foo": "bar",
+ "bar": "value",
+ }
+
+ var result Tagged
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ if result.Value != "bar" {
+ t.Errorf("value should be 'bar', got: %#v", result.Value)
+ }
+
+ if result.Extra != "value" {
+ t.Errorf("extra should be 'value', got: %#v", result.Extra)
+ }
+}
+
+func TestWeakDecode(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "foo": "4",
+ "bar": "value",
+ }
+
+ var result struct {
+ Foo int
+ Bar string
+ }
+
+ if err := WeakDecode(input, &result); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if result.Foo != 4 {
+ t.Fatalf("bad: %#v", result)
+ }
+ if result.Bar != "value" {
+ t.Fatalf("bad: %#v", result)
+ }
+}
+
+func testSliceInput(t *testing.T, input map[string]interface{}, expected *Slice) {
+ var result Slice
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got error: %s", err)
+ }
+
+ if result.Vfoo != expected.Vfoo {
+ t.Errorf("Vfoo expected '%s', got '%s'", expected.Vfoo, result.Vfoo)
+ }
+
+ if result.Vbar == nil {
+ t.Fatalf("Vbar a slice, got '%#v'", result.Vbar)
+ }
+
+ if len(result.Vbar) != len(expected.Vbar) {
+ t.Errorf("Vbar length should be %d, got %d", len(expected.Vbar), len(result.Vbar))
+ }
+
+ for i, v := range result.Vbar {
+ if v != expected.Vbar[i] {
+ t.Errorf(
+ "Vbar[%d] should be '%#v', got '%#v'",
+ i, expected.Vbar[i], v)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/mitchellh/reflectwalk/reflectwalk_test.go b/Godeps/_workspace/src/github.com/mitchellh/reflectwalk/reflectwalk_test.go
new file mode 100644
index 000000000..4ec1066e7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mitchellh/reflectwalk/reflectwalk_test.go
@@ -0,0 +1,377 @@
+package reflectwalk
+
+import (
+ "reflect"
+ "testing"
+)
+
+type TestEnterExitWalker struct {
+ Locs []Location
+}
+
+func (t *TestEnterExitWalker) Enter(l Location) error {
+ if t.Locs == nil {
+ t.Locs = make([]Location, 0, 5)
+ }
+
+ t.Locs = append(t.Locs, l)
+ return nil
+}
+
+func (t *TestEnterExitWalker) Exit(l Location) error {
+ t.Locs = append(t.Locs, l)
+ return nil
+}
+
+type TestPointerWalker struct {
+ Ps []bool
+}
+
+func (t *TestPointerWalker) PointerEnter(v bool) error {
+ t.Ps = append(t.Ps, v)
+ return nil
+}
+
+func (t *TestPointerWalker) PointerExit(v bool) error {
+ return nil
+}
+
+type TestPrimitiveWalker struct {
+ Value reflect.Value
+}
+
+func (t *TestPrimitiveWalker) Primitive(v reflect.Value) error {
+ t.Value = v
+ return nil
+}
+
+type TestPrimitiveCountWalker struct {
+ Count int
+}
+
+func (t *TestPrimitiveCountWalker) Primitive(v reflect.Value) error {
+ t.Count++
+ return nil
+}
+
+type TestPrimitiveReplaceWalker struct {
+ Value reflect.Value
+}
+
+func (t *TestPrimitiveReplaceWalker) Primitive(v reflect.Value) error {
+ v.Set(reflect.ValueOf("bar"))
+ return nil
+}
+
+type TestMapWalker struct {
+ MapVal reflect.Value
+ Keys []string
+ Values []string
+}
+
+func (t *TestMapWalker) Map(m reflect.Value) error {
+ t.MapVal = m
+ return nil
+}
+
+func (t *TestMapWalker) MapElem(m, k, v reflect.Value) error {
+ if t.Keys == nil {
+ t.Keys = make([]string, 0, 1)
+ t.Values = make([]string, 0, 1)
+ }
+
+ t.Keys = append(t.Keys, k.Interface().(string))
+ t.Values = append(t.Values, v.Interface().(string))
+ return nil
+}
+
+type TestSliceWalker struct {
+ Count int
+ SliceVal reflect.Value
+}
+
+func (t *TestSliceWalker) Slice(v reflect.Value) error {
+ t.SliceVal = v
+ return nil
+}
+
+func (t *TestSliceWalker) SliceElem(int, reflect.Value) error {
+ t.Count++
+ return nil
+}
+
+type TestStructWalker struct {
+ Fields []string
+}
+
+func (t *TestStructWalker) Struct(v reflect.Value) error {
+ return nil
+}
+
+func (t *TestStructWalker) StructField(sf reflect.StructField, v reflect.Value) error {
+ if t.Fields == nil {
+ t.Fields = make([]string, 0, 1)
+ }
+
+ t.Fields = append(t.Fields, sf.Name)
+ return nil
+}
+
+func TestTestStructs(t *testing.T) {
+ var raw interface{}
+ raw = new(TestEnterExitWalker)
+ if _, ok := raw.(EnterExitWalker); !ok {
+ t.Fatal("EnterExitWalker is bad")
+ }
+
+ raw = new(TestPrimitiveWalker)
+ if _, ok := raw.(PrimitiveWalker); !ok {
+ t.Fatal("PrimitiveWalker is bad")
+ }
+
+ raw = new(TestMapWalker)
+ if _, ok := raw.(MapWalker); !ok {
+ t.Fatal("MapWalker is bad")
+ }
+
+ raw = new(TestSliceWalker)
+ if _, ok := raw.(SliceWalker); !ok {
+ t.Fatal("SliceWalker is bad")
+ }
+
+ raw = new(TestStructWalker)
+ if _, ok := raw.(StructWalker); !ok {
+ t.Fatal("StructWalker is bad")
+ }
+}
+
+func TestWalk_Basic(t *testing.T) {
+ w := new(TestPrimitiveWalker)
+
+ type S struct {
+ Foo string
+ }
+
+ data := &S{
+ Foo: "foo",
+ }
+
+ err := Walk(data, w)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if w.Value.Kind() != reflect.String {
+ t.Fatalf("bad: %#v", w.Value)
+ }
+}
+
+func TestWalk_Basic_Replace(t *testing.T) {
+ w := new(TestPrimitiveReplaceWalker)
+
+ type S struct {
+ Foo string
+ Bar []interface{}
+ }
+
+ data := &S{
+ Foo: "foo",
+ Bar: []interface{}{[]string{"what"}},
+ }
+
+ err := Walk(data, w)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if data.Foo != "bar" {
+ t.Fatalf("bad: %#v", data.Foo)
+ }
+ if data.Bar[0].([]string)[0] != "bar" {
+ t.Fatalf("bad: %#v", data.Bar)
+ }
+}
+
+func TestWalk_EnterExit(t *testing.T) {
+ w := new(TestEnterExitWalker)
+
+ type S struct {
+ A string
+ M map[string]string
+ }
+
+ data := &S{
+ A: "foo",
+ M: map[string]string{
+ "a": "b",
+ },
+ }
+
+ err := Walk(data, w)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := []Location{
+ WalkLoc,
+ Struct,
+ StructField,
+ StructField,
+ StructField,
+ Map,
+ MapKey,
+ MapKey,
+ MapValue,
+ MapValue,
+ Map,
+ StructField,
+ Struct,
+ WalkLoc,
+ }
+ if !reflect.DeepEqual(w.Locs, expected) {
+ t.Fatalf("Bad: %#v", w.Locs)
+ }
+}
+
+func TestWalk_Interface(t *testing.T) {
+ w := new(TestPrimitiveCountWalker)
+
+ type S struct {
+ Foo string
+ Bar []interface{}
+ }
+
+ var data interface{} = &S{
+ Foo: "foo",
+ Bar: []interface{}{[]string{"bar", "what"}, "baz"},
+ }
+
+ err := Walk(data, w)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if w.Count != 4 {
+ t.Fatalf("bad: %#v", w.Count)
+ }
+}
+
+func TestWalk_Interface_nil(t *testing.T) {
+ w := new(TestPrimitiveCountWalker)
+
+ type S struct {
+ Bar interface{}
+ }
+
+ var data interface{} = &S{}
+
+ err := Walk(data, w)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestWalk_Map(t *testing.T) {
+ w := new(TestMapWalker)
+
+ type S struct {
+ Foo map[string]string
+ }
+
+ data := &S{
+ Foo: map[string]string{
+ "foo": "foov",
+ "bar": "barv",
+ },
+ }
+
+ err := Walk(data, w)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !reflect.DeepEqual(w.MapVal.Interface(), data.Foo) {
+ t.Fatalf("Bad: %#v", w.MapVal.Interface())
+ }
+
+ expectedK := []string{"foo", "bar"}
+ if !reflect.DeepEqual(w.Keys, expectedK) {
+ t.Fatalf("Bad keys: %#v", w.Keys)
+ }
+
+ expectedV := []string{"foov", "barv"}
+ if !reflect.DeepEqual(w.Values, expectedV) {
+ t.Fatalf("Bad values: %#v", w.Values)
+ }
+}
+
+func TestWalk_Pointer(t *testing.T) {
+ w := new(TestPointerWalker)
+
+ type S struct {
+ Foo string
+ }
+
+ data := &S{
+ Foo: "foo",
+ }
+
+ err := Walk(data, w)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := []bool{true, false}
+ if !reflect.DeepEqual(w.Ps, expected) {
+ t.Fatalf("bad: %#v", w.Ps)
+ }
+}
+
+func TestWalk_Slice(t *testing.T) {
+ w := new(TestSliceWalker)
+
+ type S struct {
+ Foo []string
+ }
+
+ data := &S{
+ Foo: []string{"a", "b", "c"},
+ }
+
+ err := Walk(data, w)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !reflect.DeepEqual(w.SliceVal.Interface(), data.Foo) {
+ t.Fatalf("bad: %#v", w.SliceVal.Interface())
+ }
+
+ if w.Count != 3 {
+ t.Fatalf("Bad count: %d", w.Count)
+ }
+}
+
+func TestWalk_Struct(t *testing.T) {
+ w := new(TestStructWalker)
+
+ type S struct {
+ Foo string
+ Bar string
+ }
+
+ data := &S{
+ Foo: "foo",
+ Bar: "bar",
+ }
+
+ err := Walk(data, w)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := []string{"Foo", "Bar"}
+ if !reflect.DeepEqual(w.Fields, expected) {
+ t.Fatalf("bad: %#v", w.Fields)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/ryanuber/columnize/columnize_test.go b/Godeps/_workspace/src/github.com/ryanuber/columnize/columnize_test.go
new file mode 100644
index 000000000..7bec3901f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/ryanuber/columnize/columnize_test.go
@@ -0,0 +1,242 @@
+package columnize
+
+import "testing"
+
+func TestListOfStringsInput(t *testing.T) {
+ input := []string{
+ "Column A | Column B | Column C",
+ "x | y | z",
+ }
+
+ config := DefaultConfig()
+ output := Format(input, config)
+
+ expected := "Column A Column B Column C\n"
+ expected += "x y z"
+
+ if output != expected {
+ t.Fatalf("\nexpected:\n%s\n\ngot:\n%s", expected, output)
+ }
+}
+
+func TestEmptyLinesOutput(t *testing.T) {
+ input := []string{
+ "Column A | Column B | Column C",
+ "",
+ "x | y | z",
+ }
+
+ config := DefaultConfig()
+ output := Format(input, config)
+
+ expected := "Column A Column B Column C\n"
+ expected += "\n"
+ expected += "x y z"
+
+ if output != expected {
+ t.Fatalf("\nexpected:\n%s\n\ngot:\n%s", expected, output)
+ }
+}
+
+func TestLeadingSpacePreserved(t *testing.T) {
+ input := []string{
+ "| Column B | Column C",
+ "x | y | z",
+ }
+
+ config := DefaultConfig()
+ output := Format(input, config)
+
+ expected := " Column B Column C\n"
+ expected += "x y z"
+
+ if output != expected {
+ t.Fatalf("\nexpected:\n%s\n\ngot:\n%s", expected, output)
+ }
+}
+
+func TestColumnWidthCalculator(t *testing.T) {
+ input := []string{
+ "Column A | Column B | Column C",
+ "Longer than A | Longer than B | Longer than C",
+ "short | short | short",
+ }
+
+ config := DefaultConfig()
+ output := Format(input, config)
+
+ expected := "Column A Column B Column C\n"
+ expected += "Longer than A Longer than B Longer than C\n"
+ expected += "short short short"
+
+ if output != expected {
+ t.Fatalf("\nexpected:\n%s\n\ngot:\n%s", expected, output)
+ }
+}
+
+func TestVariedInputSpacing(t *testing.T) {
+ input := []string{
+ "Column A |Column B| Column C",
+ "x|y| z",
+ }
+
+ config := DefaultConfig()
+ output := Format(input, config)
+
+ expected := "Column A Column B Column C\n"
+ expected += "x y z"
+
+ if output != expected {
+ t.Fatalf("\nexpected:\n%s\n\ngot:\n%s", expected, output)
+ }
+}
+
+func TestUnmatchedColumnCounts(t *testing.T) {
+ input := []string{
+ "Column A | Column B | Column C",
+ "Value A | Value B",
+ "Value A | Value B | Value C | Value D",
+ }
+
+ config := DefaultConfig()
+ output := Format(input, config)
+
+ expected := "Column A Column B Column C\n"
+ expected += "Value A Value B\n"
+ expected += "Value A Value B Value C Value D"
+
+ if output != expected {
+ t.Fatalf("\nexpected:\n%s\n\ngot:\n%s", expected, output)
+ }
+}
+
+func TestAlternateDelimiter(t *testing.T) {
+ input := []string{
+ "Column | A % Column | B % Column | C",
+ "Value A % Value B % Value C",
+ }
+
+ config := DefaultConfig()
+ config.Delim = "%"
+ output := Format(input, config)
+
+ expected := "Column | A Column | B Column | C\n"
+ expected += "Value A Value B Value C"
+
+ if output != expected {
+ t.Fatalf("\nexpected:\n%s\n\ngot:\n%s", expected, output)
+ }
+}
+
+func TestAlternateSpacingString(t *testing.T) {
+ input := []string{
+ "Column A | Column B | Column C",
+ "x | y | z",
+ }
+
+ config := DefaultConfig()
+ config.Glue = " "
+ output := Format(input, config)
+
+ expected := "Column A Column B Column C\n"
+ expected += "x y z"
+
+ if output != expected {
+ t.Fatalf("\nexpected:\n%s\n\ngot:\n%s", expected, output)
+ }
+}
+
+func TestSimpleFormat(t *testing.T) {
+ input := []string{
+ "Column A | Column B | Column C",
+ "x | y | z",
+ }
+
+ output := SimpleFormat(input)
+
+ expected := "Column A Column B Column C\n"
+ expected += "x y z"
+
+ if output != expected {
+ t.Fatalf("\nexpected:\n%s\n\ngot:\n%s", expected, output)
+ }
+}
+
+func TestAlternatePrefixString(t *testing.T) {
+ input := []string{
+ "Column A | Column B | Column C",
+ "x | y | z",
+ }
+
+ config := DefaultConfig()
+ config.Prefix = " "
+ output := Format(input, config)
+
+ expected := " Column A Column B Column C\n"
+ expected += " x y z"
+
+ if output != expected {
+ t.Fatalf("\nexpected:\n%s\n\ngot:\n%s", expected, output)
+ }
+}
+
+func TestEmptyFieldReplacement(t *testing.T) {
+ input := []string{
+ "Column A | Column B | Column C",
+ "x | | z",
+ }
+
+ config := DefaultConfig()
+ config.Empty = ""
+ output := Format(input, config)
+
+ expected := "Column A Column B Column C\n"
+ expected += "x z"
+
+ if output != expected {
+ t.Fatalf("\nexpected:\n%s\n\ngot:\n%s", expected, output)
+ }
+}
+
+func TestEmptyConfigValues(t *testing.T) {
+ input := []string{
+ "Column A | Column B | Column C",
+ "x | y | z",
+ }
+
+ config := Config{}
+ output := Format(input, &config)
+
+ expected := "Column A Column B Column C\n"
+ expected += "x y z"
+
+ if output != expected {
+ t.Fatalf("\nexpected:\n%s\n\ngot:\n%s", expected, output)
+ }
+}
+
+func TestMergeConfig(t *testing.T) {
+ conf1 := &Config{Delim: "a", Glue: "a", Prefix: "a", Empty: "a"}
+ conf2 := &Config{Delim: "b", Glue: "b", Prefix: "b", Empty: "b"}
+ conf3 := &Config{Delim: "c", Prefix: "c"}
+
+ m := MergeConfig(conf1, conf2)
+ if m.Delim != "b" || m.Glue != "b" || m.Prefix != "b" || m.Empty != "b" {
+ t.Fatalf("bad: %#v", m)
+ }
+
+ m = MergeConfig(conf1, conf3)
+ if m.Delim != "c" || m.Glue != "a" || m.Prefix != "c" || m.Empty != "a" {
+ t.Fatalf("bad: %#v", m)
+ }
+
+ m = MergeConfig(conf1, nil)
+ if m.Delim != "a" || m.Glue != "a" || m.Prefix != "a" || m.Empty != "a" {
+ t.Fatalf("bad: %#v", m)
+ }
+
+ m = MergeConfig(conf1, &Config{})
+ if m.Delim != "a" || m.Glue != "a" || m.Prefix != "a" || m.Empty != "a" {
+ t.Fatalf("bad: %#v", m)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/cluster_test.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/cluster_test.go
new file mode 100644
index 000000000..445f305c8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/cluster_test.go
@@ -0,0 +1,294 @@
+package zk
+
+import (
+ "sync"
+ "testing"
+ "time"
+)
+
+type logWriter struct {
+ t *testing.T
+ p string
+}
+
+func (lw logWriter) Write(b []byte) (int, error) {
+ lw.t.Logf("%s%s", lw.p, string(b))
+ return len(b), nil
+}
+
+func TestBasicCluster(t *testing.T) {
+ ts, err := StartTestCluster(3, nil, logWriter{t: t, p: "[ZKERR] "})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ts.Stop()
+ zk1, err := ts.Connect(0)
+ if err != nil {
+ t.Fatalf("Connect returned error: %+v", err)
+ }
+ defer zk1.Close()
+ zk2, err := ts.Connect(1)
+ if err != nil {
+ t.Fatalf("Connect returned error: %+v", err)
+ }
+ defer zk2.Close()
+
+ time.Sleep(time.Second * 5)
+
+ if _, err := zk1.Create("/gozk-test", []byte("foo-cluster"), 0, WorldACL(PermAll)); err != nil {
+ t.Fatalf("Create failed on node 1: %+v", err)
+ }
+ if by, _, err := zk2.Get("/gozk-test"); err != nil {
+ t.Fatalf("Get failed on node 2: %+v", err)
+ } else if string(by) != "foo-cluster" {
+ t.Fatal("Wrong data for node 2")
+ }
+}
+
+// If the current leader dies, then the session is reestablished with the new one.
+func TestClientClusterFailover(t *testing.T) {
+ tc, err := StartTestCluster(3, nil, logWriter{t: t, p: "[ZKERR] "})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer tc.Stop()
+ zk, evCh, err := tc.ConnectAll()
+ if err != nil {
+ t.Fatalf("Connect returned error: %+v", err)
+ }
+ defer zk.Close()
+
+ sl := NewStateLogger(evCh)
+
+ hasSessionEvent1 := sl.NewWatcher(sessionStateMatcher(StateHasSession)).Wait(8 * time.Second)
+ if hasSessionEvent1 == nil {
+ t.Fatalf("Failed to connect and get session")
+ }
+
+ if _, err := zk.Create("/gozk-test", []byte("foo-cluster"), 0, WorldACL(PermAll)); err != nil {
+ t.Fatalf("Create failed on node 1: %+v", err)
+ }
+
+ hasSessionWatcher2 := sl.NewWatcher(sessionStateMatcher(StateHasSession))
+
+ // Kill the current leader
+ tc.StopServer(hasSessionEvent1.Server)
+
+ // Wait for the session to be reconnected with the new leader.
+ hasSessionEvent2 := hasSessionWatcher2.Wait(8 * time.Second)
+ if hasSessionEvent2 == nil {
+ t.Fatalf("Failover failed")
+ }
+
+ if by, _, err := zk.Get("/gozk-test"); err != nil {
+ t.Fatalf("Get failed on node 2: %+v", err)
+ } else if string(by) != "foo-cluster" {
+ t.Fatal("Wrong data for node 2")
+ }
+}
+
+// If a ZooKeeper cluster loses quorum then a session is reconnected as soon
+// as the quorum is restored.
+func TestNoQuorum(t *testing.T) {
+ tc, err := StartTestCluster(3, nil, logWriter{t: t, p: "[ZKERR] "})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer tc.Stop()
+ zk, evCh, err := tc.ConnectAllTimeout(4 * time.Second)
+ if err != nil {
+ t.Fatalf("Connect returned error: %+v", err)
+ }
+ defer zk.Close()
+ sl := NewStateLogger(evCh)
+
+ // Wait for initial session to be established
+ hasSessionEvent1 := sl.NewWatcher(sessionStateMatcher(StateHasSession)).Wait(8 * time.Second)
+ if hasSessionEvent1 == nil {
+ t.Fatalf("Failed to connect and get session")
+ }
+ initialSessionID := zk.sessionID
+ DefaultLogger.Printf(" Session established: id=%d, timeout=%d", zk.sessionID, zk.sessionTimeoutMs)
+
+ // Kill the ZooKeeper leader and wait for the session to reconnect.
+ DefaultLogger.Printf(" Kill the leader")
+ hasSessionWatcher2 := sl.NewWatcher(sessionStateMatcher(StateHasSession))
+ tc.StopServer(hasSessionEvent1.Server)
+ hasSessionEvent2 := hasSessionWatcher2.Wait(8 * time.Second)
+ if hasSessionEvent2 == nil {
+ t.Fatalf("Failover failed")
+ }
+
+ // Kill the ZooKeeper leader leaving the cluster without quorum.
+ DefaultLogger.Printf(" Kill the leader")
+ tc.StopServer(hasSessionEvent2.Server)
+
+ // Make sure that we keep retrying to connect to the only remaining
+ // ZooKeeper server, but the attempts are dropped because there is
+ // no quorum.
+ DefaultLogger.Printf(" Retrying no luck...")
+ var firstDisconnect *Event
+ begin := time.Now()
+ for time.Now().Sub(begin) < 6*time.Second {
+ disconnectedEvent := sl.NewWatcher(sessionStateMatcher(StateDisconnected)).Wait(4 * time.Second)
+ if disconnectedEvent == nil {
+ t.Fatalf("Disconnected event expected")
+ }
+ if firstDisconnect == nil {
+ firstDisconnect = disconnectedEvent
+ continue
+ }
+ if disconnectedEvent.Server != firstDisconnect.Server {
+ t.Fatalf("Disconnect from wrong server: expected=%s, actual=%s",
+ firstDisconnect.Server, disconnectedEvent.Server)
+ }
+ }
+
+ // Start a ZooKeeper node to restore quorum.
+ hasSessionWatcher3 := sl.NewWatcher(sessionStateMatcher(StateHasSession))
+ tc.StartServer(hasSessionEvent1.Server)
+
+ // Make sure that the session is reconnected with the same ID.
+ hasSessionEvent3 := hasSessionWatcher3.Wait(8 * time.Second)
+ if hasSessionEvent3 == nil {
+ t.Fatalf("Session has not been reconnected")
+ }
+ if zk.sessionID != initialSessionID {
+ t.Fatalf("Wrong session ID: expected=%d, actual=%d", initialSessionID, zk.sessionID)
+ }
+
+ // Make sure that the session is not dropped soon after reconnect
+ e := sl.NewWatcher(sessionStateMatcher(StateDisconnected)).Wait(6 * time.Second)
+ if e != nil {
+ t.Fatalf("Unexpected disconnect")
+ }
+}
+
+func TestWaitForClose(t *testing.T) {
+ ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ts.Stop()
+ zk, err := ts.Connect(0)
+ if err != nil {
+ t.Fatalf("Connect returned error: %+v", err)
+ }
+ timeout := time.After(30 * time.Second)
+CONNECTED:
+ for {
+ select {
+ case ev := <-zk.eventChan:
+ if ev.State == StateConnected {
+ break CONNECTED
+ }
+ case <-timeout:
+ zk.Close()
+ t.Fatal("Timeout")
+ }
+ }
+ zk.Close()
+ for {
+ select {
+ case _, ok := <-zk.eventChan:
+ if !ok {
+ return
+ }
+ case <-timeout:
+ t.Fatal("Timeout")
+ }
+ }
+}
+
+func TestBadSession(t *testing.T) {
+ ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ts.Stop()
+ zk, _, err := ts.ConnectAll()
+ if err != nil {
+ t.Fatalf("Connect returned error: %+v", err)
+ }
+ defer zk.Close()
+
+ if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode {
+ t.Fatalf("Delete returned error: %+v", err)
+ }
+
+ zk.conn.Close()
+ time.Sleep(time.Millisecond * 100)
+
+ if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode {
+ t.Fatalf("Delete returned error: %+v", err)
+ }
+}
+
+type EventLogger struct {
+ events []Event
+ watchers []*EventWatcher
+ lock sync.Mutex
+ wg sync.WaitGroup
+}
+
+func NewStateLogger(eventCh <-chan Event) *EventLogger {
+ el := &EventLogger{}
+ el.wg.Add(1)
+ go func() {
+ defer el.wg.Done()
+ for event := range eventCh {
+ el.lock.Lock()
+ for _, sw := range el.watchers {
+ if !sw.triggered && sw.matcher(event) {
+ sw.triggered = true
+ sw.matchCh <- event
+ }
+ }
+ DefaultLogger.Printf(" event received: %v\n", event)
+ el.events = append(el.events, event)
+ el.lock.Unlock()
+ }
+ }()
+ return el
+}
+
+func (el *EventLogger) NewWatcher(matcher func(Event) bool) *EventWatcher {
+ ew := &EventWatcher{matcher: matcher, matchCh: make(chan Event, 1)}
+ el.lock.Lock()
+ el.watchers = append(el.watchers, ew)
+ el.lock.Unlock()
+ return ew
+}
+
+func (el *EventLogger) Events() []Event {
+ el.lock.Lock()
+ transitions := make([]Event, len(el.events))
+ copy(transitions, el.events)
+ el.lock.Unlock()
+ return transitions
+}
+
+func (el *EventLogger) Wait4Stop() {
+ el.wg.Wait()
+}
+
+type EventWatcher struct {
+ matcher func(Event) bool
+ matchCh chan Event
+ triggered bool
+}
+
+func (ew *EventWatcher) Wait(timeout time.Duration) *Event {
+ select {
+ case event := <-ew.matchCh:
+ return &event
+ case <-time.After(timeout):
+ return nil
+ }
+}
+
+func sessionStateMatcher(s State) func(Event) bool {
+ return func(e Event) bool {
+ return e.Type == EventSession && e.State == s
+ }
+}
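For orientation, the cluster tests above all hinge on the same pattern: Connect returns a channel of session events, and callers watch it for EventSession/StateHasSession transitions (the EventWatcher helper just wraps that). A minimal sketch of the pattern from outside the package, assuming only the exported API these tests already use (zk.Connect, zk.EventSession, zk.StateHasSession); the ensemble address is a placeholder.

    package main

    import (
        "log"
        "time"

        "github.com/samuel/go-zookeeper/zk"
    )

    func main() {
        // Connect returns the connection plus a channel of session events.
        conn, events, err := zk.Connect([]string{"127.0.0.1:2181"}, 15*time.Second)
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        // Block until the session is established, the same condition the
        // EventWatcher above waits for with sessionStateMatcher(StateHasSession).
        for ev := range events {
            if ev.Type == zk.EventSession && ev.State == zk.StateHasSession {
                log.Printf("session established via server %s", ev.Server)
                break
            }
        }
    }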
diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/constants_test.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/constants_test.go
new file mode 100644
index 000000000..9fe6b04ce
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/constants_test.go
@@ -0,0 +1,24 @@
+package zk
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestModeString(t *testing.T) {
+ if fmt.Sprintf("%v", ModeUnknown) != "unknown" {
+ t.Errorf("unknown value should be 'unknown'")
+ }
+
+ if fmt.Sprintf("%v", ModeLeader) != "leader" {
+ t.Errorf("leader value should be 'leader'")
+ }
+
+ if fmt.Sprintf("%v", ModeFollower) != "follower" {
+ t.Errorf("follower value should be 'follower'")
+ }
+
+ if fmt.Sprintf("%v", ModeStandalone) != "standalone" {
+ t.Errorf("standlone value should be 'standalone'")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/flw_test.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/flw_test.go
new file mode 100644
index 000000000..63907268d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/flw_test.go
@@ -0,0 +1,367 @@
+package zk
+
+import (
+ "net"
+ "testing"
+ "time"
+)
+
+var (
+ zkSrvrOut = `Zookeeper version: 3.4.6-1569965, built on 02/20/2014 09:09 GMT
+Latency min/avg/max: 0/1/10
+Received: 4207
+Sent: 4220
+Connections: 81
+Outstanding: 1
+Zxid: 0x110a7a8f37
+Mode: leader
+Node count: 306
+`
+ zkConsOut = ` /10.42.45.231:45361[1](queued=0,recved=9435,sent=9457,sid=0x94c2989e04716b5,lop=PING,est=1427238717217,to=20001,lcxid=0x55120915,lzxid=0xffffffffffffffff,lresp=1427259255908,llat=0,minlat=0,avglat=1,maxlat=17)
+ /10.55.33.98:34342[1](queued=0,recved=9338,sent=9350,sid=0x94c2989e0471731,lop=PING,est=1427238849319,to=20001,lcxid=0x55120944,lzxid=0xffffffffffffffff,lresp=1427259252294,llat=0,minlat=0,avglat=1,maxlat=18)
+ /10.44.145.114:46556[1](queued=0,recved=109253,sent=109617,sid=0x94c2989e0471709,lop=DELE,est=1427238791305,to=20001,lcxid=0x55139618,lzxid=0x110a7b187d,lresp=1427259257423,llat=2,minlat=0,avglat=1,maxlat=23)
+
+`
+)
+
+func TestFLWRuok(t *testing.T) {
+ l, err := net.Listen("tcp", "127.0.0.1:2181")
+
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+
+ go tcpServer(l, "")
+
+ var oks []bool
+ var ok bool
+
+ oks = FLWRuok([]string{"127.0.0.1"}, time.Second*10)
+
+ // close the connection, and pause briefly
+ // to work around a race condition
+ l.Close()
+ time.Sleep(time.Millisecond * 1)
+
+ if len(oks) == 0 {
+ t.Errorf("no values returned")
+ }
+
+ ok = oks[0]
+
+ if !ok {
+ t.Errorf("instance should be marked as OK")
+ }
+
+ //
+ // Confirm that it also returns false for dead instances
+ //
+ l, err = net.Listen("tcp", "127.0.0.1:2181")
+
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+
+ defer l.Close()
+
+ go tcpServer(l, "dead")
+
+ oks = FLWRuok([]string{"127.0.0.1"}, time.Second*10)
+
+ if len(oks) == 0 {
+ t.Errorf("no values returned")
+ }
+
+ ok = oks[0]
+
+ if ok {
+ t.Errorf("instance should be marked as not OK")
+ }
+}
+
+func TestFLWSrvr(t *testing.T) {
+ l, err := net.Listen("tcp", "127.0.0.1:2181")
+
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+
+ defer l.Close()
+
+ go tcpServer(l, "")
+
+ var statsSlice []*ServerStats
+ var stats *ServerStats
+ var ok bool
+
+ statsSlice, ok = FLWSrvr([]string{"127.0.0.1:2181"}, time.Second*10)
+
+ if !ok {
+ t.Errorf("failure indicated on 'srvr' parsing")
+ }
+
+ if len(statsSlice) == 0 {
+ t.Errorf("no *ServerStats instances returned")
+ }
+
+ stats = statsSlice[0]
+
+ if stats.Error != nil {
+ t.Fatalf("error seen in stats: %v", err.Error())
+ }
+
+ if stats.Sent != 4220 {
+ t.Errorf("Sent != 4220")
+ }
+
+ if stats.Received != 4207 {
+ t.Errorf("Received != 4207")
+ }
+
+ if stats.NodeCount != 306 {
+ t.Errorf("NodeCount != 306")
+ }
+
+ if stats.MinLatency != 0 {
+ t.Errorf("MinLatency != 0")
+ }
+
+ if stats.AvgLatency != 1 {
+ t.Errorf("AvgLatency != 1")
+ }
+
+ if stats.MaxLatency != 10 {
+ t.Errorf("MaxLatency != 10")
+ }
+
+ if stats.Connections != 81 {
+ t.Errorf("Connection != 81")
+ }
+
+ if stats.Outstanding != 1 {
+ t.Errorf("Outstanding != 1")
+ }
+
+ if stats.Epoch != 17 {
+ t.Errorf("Epoch != 17")
+ }
+
+ if stats.Counter != 175804215 {
+ t.Errorf("Counter != 175804215")
+ }
+
+ if stats.Mode != ModeLeader {
+ t.Errorf("Mode != ModeLeader")
+ }
+
+ if stats.Version != "3.4.6-1569965" {
+ t.Errorf("Version expected: 3.4.6-1569965")
+ }
+
+ buildTime, err := time.Parse("01/02/2006 15:04 MST", "02/20/2014 09:09 GMT")
+
+ if err != nil {
+ t.Errorf("invalid reference build time: %v", err)
+ } else if !stats.BuildTime.Equal(buildTime) {
+ t.Errorf("BuildTime expected %v, got %v", buildTime, stats.BuildTime)
+ }
+}
+
+func TestFLWCons(t *testing.T) {
+ l, err := net.Listen("tcp", "127.0.0.1:2181")
+
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+
+ defer l.Close()
+
+ go tcpServer(l, "")
+
+ var clients []*ServerClients
+ var ok bool
+
+ clients, ok = FLWCons([]string{"127.0.0.1"}, time.Second*10)
+
+ if !ok {
+ t.Errorf("failure indicated on 'cons' parsing")
+ }
+
+ if len(clients) == 0 {
+ t.Errorf("no *ServerClients instances returned")
+ }
+
+ results := []*ServerClient{
+ &ServerClient{
+ Queued: 0,
+ Received: 9435,
+ Sent: 9457,
+ SessionID: 669956116721374901,
+ LastOperation: "PING",
+ Established: time.Unix(1427238717217, 0),
+ Timeout: 20001,
+ Lcxid: 1427245333,
+ Lzxid: -1,
+ LastResponse: time.Unix(1427259255908, 0),
+ LastLatency: 0,
+ MinLatency: 0,
+ AvgLatency: 1,
+ MaxLatency: 17,
+ Addr: "10.42.45.231:45361",
+ },
+ &ServerClient{
+ Queued: 0,
+ Received: 9338,
+ Sent: 9350,
+ SessionID: 669956116721375025,
+ LastOperation: "PING",
+ Established: time.Unix(1427238849319, 0),
+ Timeout: 20001,
+ Lcxid: 1427245380,
+ Lzxid: -1,
+ LastResponse: time.Unix(1427259252294, 0),
+ LastLatency: 0,
+ MinLatency: 0,
+ AvgLatency: 1,
+ MaxLatency: 18,
+ Addr: "10.55.33.98:34342",
+ },
+ &ServerClient{
+ Queued: 0,
+ Received: 109253,
+ Sent: 109617,
+ SessionID: 669956116721374985,
+ LastOperation: "DELE",
+ Established: time.Unix(1427238791305, 0),
+ Timeout: 20001,
+ Lcxid: 1427346968,
+ Lzxid: 73190283389,
+ LastResponse: time.Unix(1427259257423, 0),
+ LastLatency: 2,
+ MinLatency: 0,
+ AvgLatency: 1,
+ MaxLatency: 23,
+ Addr: "10.44.145.114:46556",
+ },
+ }
+
+ for _, z := range clients {
+ if z.Error != nil {
+ t.Errorf("error seen: %v", err.Error())
+ }
+
+ for i, v := range z.Clients {
+ c := results[i]
+
+ if v.Error != nil {
+ t.Errorf("client error seen: %v", err.Error())
+ }
+
+ if v.Queued != c.Queued {
+ t.Errorf("Queued value mismatch (%d/%d)", v.Queued, c.Queued)
+ }
+
+ if v.Received != c.Received {
+ t.Errorf("Received value mismatch (%d/%d)", v.Received, c.Received)
+ }
+
+ if v.Sent != c.Sent {
+ t.Errorf("Sent value mismatch (%d/%d)", v.Sent, c.Sent)
+ }
+
+ if v.SessionID != c.SessionID {
+ t.Errorf("SessionID value mismatch (%d/%d)", v.SessionID, c.SessionID)
+ }
+
+ if v.LastOperation != c.LastOperation {
+ t.Errorf("LastOperation value mismatch ('%v'/'%v')", v.LastOperation, c.LastOperation)
+ }
+
+ if v.Timeout != c.Timeout {
+ t.Errorf("Timeout value mismatch (%d/%d)", v.Timeout, c.Timeout)
+ }
+
+ if v.Lcxid != c.Lcxid {
+ t.Errorf("Lcxid value mismatch (%d/%d)", v.Lcxid, c.Lcxid)
+ }
+
+ if v.Lzxid != c.Lzxid {
+ t.Errorf("Lzxid value mismatch (%d/%d)", v.Lzxid, c.Lzxid)
+ }
+
+ if v.LastLatency != c.LastLatency {
+ t.Errorf("LastLatency value mismatch (%d/%d)", v.LastLatency, c.LastLatency)
+ }
+
+ if v.MinLatency != c.MinLatency {
+ t.Errorf("MinLatency value mismatch (%d/%d)", v.MinLatency, c.MinLatency)
+ }
+
+ if v.AvgLatency != c.AvgLatency {
+ t.Errorf("AvgLatency value mismatch (%d/%d)", v.AvgLatency, c.AvgLatency)
+ }
+
+ if v.MaxLatency != c.MaxLatency {
+ t.Errorf("MaxLatency value mismatch (%d/%d)", v.MaxLatency, c.MaxLatency)
+ }
+
+ if v.Addr != c.Addr {
+ t.Errorf("Addr value mismatch ('%v'/'%v')", v.Addr, c.Addr)
+ }
+
+ if !c.Established.Equal(v.Established) {
+ t.Errorf("Established value mismatch (%v/%v)", c.Established, v.Established)
+ }
+
+ if !c.LastResponse.Equal(v.LastResponse) {
+ t.Errorf("Established value mismatch (%v/%v)", c.LastResponse, v.LastResponse)
+ }
+ }
+ }
+}
+
+func tcpServer(listener net.Listener, thing string) {
+ for {
+ conn, err := listener.Accept()
+ if err != nil {
+ return
+ }
+ go connHandler(conn, thing)
+ }
+}
+
+func connHandler(conn net.Conn, thing string) {
+ defer conn.Close()
+
+ data := make([]byte, 4)
+
+ _, err := conn.Read(data)
+
+ if err != nil {
+ return
+ }
+
+ switch string(data) {
+ case "ruok":
+ switch thing {
+ case "dead":
+ return
+ default:
+ conn.Write([]byte("imok"))
+ }
+ case "srvr":
+ switch thing {
+ case "dead":
+ return
+ default:
+ conn.Write([]byte(zkSrvrOut))
+ }
+ case "cons":
+ switch thing {
+ case "dead":
+ return
+ default:
+ conn.Write([]byte(zkConsOut))
+ }
+ default:
+ conn.Write([]byte("This ZooKeeper instance is not currently serving requests."))
+ }
+}
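The tests above exercise the four-letter-word helpers (ruok, srvr, cons) against a fake TCP server. As a usage sketch against a real ensemble, using only the exported functions and return shapes those tests rely on; the host list and timeout are placeholders.

    package main

    import (
        "fmt"
        "time"

        "github.com/samuel/go-zookeeper/zk"
    )

    func main() {
        servers := []string{"127.0.0.1:2181"} // placeholder ensemble

        // "ruok": one bool per queried server.
        oks := zk.FLWRuok(servers, 10*time.Second)
        fmt.Println("ruok:", oks)

        // "srvr": parsed ServerStats per server; ok is false if any response failed to parse.
        stats, ok := zk.FLWSrvr(servers, 10*time.Second)
        if ok && len(stats) > 0 && stats[0].Error == nil {
            fmt.Printf("mode=%v latency min/avg/max=%d/%d/%d\n",
                stats[0].Mode, stats[0].MinLatency, stats[0].AvgLatency, stats[0].MaxLatency)
        }

        // "cons": per-server list of connected clients.
        clients, _ := zk.FLWCons(servers, 10*time.Second)
        for _, srv := range clients {
            for _, c := range srv.Clients {
                fmt.Println(c.Addr, c.LastOperation)
            }
        }
    }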
diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/lock_test.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/lock_test.go
new file mode 100644
index 000000000..8a3478a33
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/lock_test.go
@@ -0,0 +1,94 @@
+package zk
+
+import (
+ "testing"
+ "time"
+)
+
+func TestLock(t *testing.T) {
+ ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ts.Stop()
+ zk, _, err := ts.ConnectAll()
+ if err != nil {
+ t.Fatalf("Connect returned error: %+v", err)
+ }
+ defer zk.Close()
+
+ acls := WorldACL(PermAll)
+
+ l := NewLock(zk, "/test", acls)
+ if err := l.Lock(); err != nil {
+ t.Fatal(err)
+ }
+ if err := l.Unlock(); err != nil {
+ t.Fatal(err)
+ }
+
+ val := make(chan int, 3)
+
+ if err := l.Lock(); err != nil {
+ t.Fatal(err)
+ }
+
+ l2 := NewLock(zk, "/test", acls)
+ go func() {
+ if err := l2.Lock(); err != nil {
+ t.Fatal(err)
+ }
+ val <- 2
+ if err := l2.Unlock(); err != nil {
+ t.Fatal(err)
+ }
+ val <- 3
+ }()
+ time.Sleep(time.Millisecond * 100)
+
+ val <- 1
+ if err := l.Unlock(); err != nil {
+ t.Fatal(err)
+ }
+ if x := <-val; x != 1 {
+ t.Fatalf("Expected 1 instead of %d", x)
+ }
+ if x := <-val; x != 2 {
+ t.Fatalf("Expected 2 instead of %d", x)
+ }
+ if x := <-val; x != 3 {
+ t.Fatalf("Expected 3 instead of %d", x)
+ }
+}
+
+// This tests creating a lock with a path that's more than 1 node deep (e.g. "/test-multi-level/lock"),
+// when a part of that path already exists (i.e. "/test-multi-level" node already exists).
+func TestMultiLevelLock(t *testing.T) {
+ ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ts.Stop()
+ zk, _, err := ts.ConnectAll()
+ if err != nil {
+ t.Fatalf("Connect returned error: %+v", err)
+ }
+ defer zk.Close()
+
+ acls := WorldACL(PermAll)
+ path := "/test-multi-level"
+ if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
+ t.Fatalf("Create returned error: %+v", err)
+ } else if p != path {
+ t.Fatalf("Create returned different path '%s' != '%s'", p, path)
+ }
+ l := NewLock(zk, "/test-multi-level/lock", acls)
+ defer zk.Delete("/test-multi-level", -1) // Clean up what we've created for this test
+ defer zk.Delete("/test-multi-level/lock", -1)
+ if err := l.Lock(); err != nil {
+ t.Fatal(err)
+ }
+ if err := l.Unlock(); err != nil {
+ t.Fatal(err)
+ }
+}
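For context, the lock tests above boil down to the NewLock / Lock / Unlock calls they exercise. A minimal sketch, assuming an already-connected *zk.Conn; the lock path is a placeholder and error handling is abbreviated.

    package lockexample

    import "github.com/samuel/go-zookeeper/zk"

    // withLock runs fn while holding a distributed lock at a placeholder path.
    func withLock(conn *zk.Conn, fn func() error) error {
        l := zk.NewLock(conn, "/my-app/leader", zk.WorldACL(zk.PermAll))
        if err := l.Lock(); err != nil { // blocks until the lock is acquired
            return err
        }
        defer l.Unlock()
        return fn()
    }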
diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/structs_test.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/structs_test.go
new file mode 100644
index 000000000..cafbbd95c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/structs_test.go
@@ -0,0 +1,71 @@
+package zk
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestEncodeDecodePacket(t *testing.T) {
+ encodeDecodeTest(t, &requestHeader{-2, 5})
+ encodeDecodeTest(t, &connectResponse{1, 2, 3, nil})
+ encodeDecodeTest(t, &connectResponse{1, 2, 3, []byte{4, 5, 6}})
+ encodeDecodeTest(t, &getAclResponse{[]ACL{{12, "s", "anyone"}}, Stat{}})
+ encodeDecodeTest(t, &getChildrenResponse{[]string{"foo", "bar"}})
+ encodeDecodeTest(t, &pathWatchRequest{"path", true})
+ encodeDecodeTest(t, &pathWatchRequest{"path", false})
+ encodeDecodeTest(t, &CheckVersionRequest{"/", -1})
+ encodeDecodeTest(t, &multiRequest{Ops: []multiRequestOp{{multiHeader{opCheck, false, -1}, &CheckVersionRequest{"/", -1}}}})
+}
+
+func encodeDecodeTest(t *testing.T, r interface{}) {
+ buf := make([]byte, 1024)
+ n, err := encodePacket(buf, r)
+ if err != nil {
+ t.Errorf("encodePacket returned non-nil error %+v\n", err)
+ return
+ }
+ t.Logf("%+v %x", r, buf[:n])
+ r2 := reflect.New(reflect.ValueOf(r).Elem().Type()).Interface()
+ n2, err := decodePacket(buf[:n], r2)
+ if err != nil {
+ t.Errorf("decodePacket returned non-nil error %+v\n", err)
+ return
+ }
+ if n != n2 {
+ t.Errorf("sizes don't match: %d != %d", n, n2)
+ return
+ }
+ if !reflect.DeepEqual(r, r2) {
+ t.Errorf("results don't match: %+v != %+v", r, r2)
+ return
+ }
+}
+
+func TestEncodeShortBuffer(t *testing.T) {
+ buf := make([]byte, 0)
+ _, err := encodePacket(buf, &requestHeader{1, 2})
+ if err != ErrShortBuffer {
+ t.Errorf("encodePacket should return ErrShortBuffer on a short buffer instead of '%+v'", err)
+ return
+ }
+}
+
+func TestDecodeShortBuffer(t *testing.T) {
+ buf := make([]byte, 0)
+ _, err := decodePacket(buf, &responseHeader{})
+ if err != ErrShortBuffer {
+ t.Errorf("decodePacket should return ErrShortBuffer on a short buffer instead of '%+v'", err)
+ return
+ }
+}
+
+func BenchmarkEncode(b *testing.B) {
+ buf := make([]byte, 4096)
+ st := &connectRequest{Passwd: []byte("1234567890")}
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ if _, err := encodePacket(buf, st); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/util_test.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/util_test.go
new file mode 100644
index 000000000..b56f77550
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/util_test.go
@@ -0,0 +1,17 @@
+package zk
+
+import "testing"
+
+func TestFormatServers(t *testing.T) {
+ servers := []string{"127.0.0.1:2181", "127.0.0.42", "127.0.42.1:8811"}
+ r := []string{"127.0.0.1:2181", "127.0.0.42:2181", "127.0.42.1:8811"}
+
+ var s []string
+ s = FormatServers(servers)
+
+ for i := range s {
+ if s[i] != r[i] {
+ t.Errorf("%v should equal %v", s[i], r[i])
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/zk_test.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/zk_test.go
new file mode 100644
index 000000000..10e0b5862
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/zk_test.go
@@ -0,0 +1,520 @@
+package zk
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "strings"
+ "testing"
+ "time"
+
+ "camlistore.org/pkg/throttle"
+)
+
+func TestCreate(t *testing.T) {
+ ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ts.Stop()
+ zk, _, err := ts.ConnectAll()
+ if err != nil {
+ t.Fatalf("Connect returned error: %+v", err)
+ }
+ defer zk.Close()
+
+ path := "/gozk-test"
+
+ if err := zk.Delete(path, -1); err != nil && err != ErrNoNode {
+ t.Fatalf("Delete returned error: %+v", err)
+ }
+ if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
+ t.Fatalf("Create returned error: %+v", err)
+ } else if p != path {
+ t.Fatalf("Create returned different path '%s' != '%s'", p, path)
+ }
+ if data, stat, err := zk.Get(path); err != nil {
+ t.Fatalf("Get returned error: %+v", err)
+ } else if stat == nil {
+ t.Fatal("Get returned nil stat")
+ } else if len(data) < 4 {
+ t.Fatal("Get returned wrong size data")
+ }
+}
+
+func TestMulti(t *testing.T) {
+ ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ts.Stop()
+ zk, _, err := ts.ConnectAll()
+ if err != nil {
+ t.Fatalf("Connect returned error: %+v", err)
+ }
+ defer zk.Close()
+
+ path := "/gozk-test"
+
+ if err := zk.Delete(path, -1); err != nil && err != ErrNoNode {
+ t.Fatalf("Delete returned error: %+v", err)
+ }
+ ops := []interface{}{
+ &CreateRequest{Path: path, Data: []byte{1, 2, 3, 4}, Acl: WorldACL(PermAll)},
+ &SetDataRequest{Path: path, Data: []byte{1, 2, 3, 4}, Version: -1},
+ }
+ if res, err := zk.Multi(ops...); err != nil {
+ t.Fatalf("Multi returned error: %+v", err)
+ } else if len(res) != 2 {
+ t.Fatalf("Expected 2 responses got %d", len(res))
+ } else {
+ t.Logf("%+v", res)
+ }
+ if data, stat, err := zk.Get(path); err != nil {
+ t.Fatalf("Get returned error: %+v", err)
+ } else if stat == nil {
+ t.Fatal("Get returned nil stat")
+ } else if len(data) < 4 {
+ t.Fatal("Get returned wrong size data")
+ }
+}
+
+func TestGetSetACL(t *testing.T) {
+ ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ts.Stop()
+ zk, _, err := ts.ConnectAll()
+ if err != nil {
+ t.Fatalf("Connect returned error: %+v", err)
+ }
+ defer zk.Close()
+
+ if err := zk.AddAuth("digest", []byte("blah")); err != nil {
+ t.Fatalf("AddAuth returned error %+v", err)
+ }
+
+ path := "/gozk-test"
+
+ if err := zk.Delete(path, -1); err != nil && err != ErrNoNode {
+ t.Fatalf("Delete returned error: %+v", err)
+ }
+ if path, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
+ t.Fatalf("Create returned error: %+v", err)
+ } else if path != "/gozk-test" {
+ t.Fatalf("Create returned different path '%s' != '/gozk-test'", path)
+ }
+
+ expected := WorldACL(PermAll)
+
+ if acl, stat, err := zk.GetACL(path); err != nil {
+ t.Fatalf("GetACL returned error %+v", err)
+ } else if stat == nil {
+ t.Fatalf("GetACL returned nil Stat")
+ } else if len(acl) != 1 || expected[0] != acl[0] {
+ t.Fatalf("GetACL mismatch expected %+v instead of %+v", expected, acl)
+ }
+
+ expected = []ACL{{PermAll, "ip", "127.0.0.1"}}
+
+ if stat, err := zk.SetACL(path, expected, -1); err != nil {
+ t.Fatalf("SetACL returned error %+v", err)
+ } else if stat == nil {
+ t.Fatalf("SetACL returned nil Stat")
+ }
+
+ if acl, stat, err := zk.GetACL(path); err != nil {
+ t.Fatalf("GetACL returned error %+v", err)
+ } else if stat == nil {
+ t.Fatalf("GetACL returned nil Stat")
+ } else if len(acl) != 1 || expected[0] != acl[0] {
+ t.Fatalf("GetACL mismatch expected %+v instead of %+v", expected, acl)
+ }
+}
+
+func TestAuth(t *testing.T) {
+ ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ts.Stop()
+ zk, _, err := ts.ConnectAll()
+ if err != nil {
+ t.Fatalf("Connect returned error: %+v", err)
+ }
+ defer zk.Close()
+
+ path := "/gozk-digest-test"
+ if err := zk.Delete(path, -1); err != nil && err != ErrNoNode {
+ t.Fatalf("Delete returned error: %+v", err)
+ }
+
+ acl := DigestACL(PermAll, "user", "password")
+
+ if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, acl); err != nil {
+ t.Fatalf("Create returned error: %+v", err)
+ } else if p != path {
+ t.Fatalf("Create returned different path '%s' != '%s'", p, path)
+ }
+
+ if a, stat, err := zk.GetACL(path); err != nil {
+ t.Fatalf("GetACL returned error %+v", err)
+ } else if stat == nil {
+ t.Fatalf("GetACL returned nil Stat")
+ } else if len(a) != 1 || acl[0] != a[0] {
+ t.Fatalf("GetACL mismatch expected %+v instead of %+v", acl, a)
+ }
+
+ if _, _, err := zk.Get(path); err != ErrNoAuth {
+ t.Fatalf("Get returned error %+v instead of ErrNoAuth", err)
+ }
+
+ if err := zk.AddAuth("digest", []byte("user:password")); err != nil {
+ t.Fatalf("AddAuth returned error %+v", err)
+ }
+
+ if data, stat, err := zk.Get(path); err != nil {
+ t.Fatalf("Get returned error %+v", err)
+ } else if stat == nil {
+ t.Fatalf("Get returned nil Stat")
+ } else if len(data) != 4 {
+ t.Fatalf("Get returned wrong data length")
+ }
+}
+
+func TestChildWatch(t *testing.T) {
+ ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ts.Stop()
+ zk, _, err := ts.ConnectAll()
+ if err != nil {
+ t.Fatalf("Connect returned error: %+v", err)
+ }
+ defer zk.Close()
+
+ if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode {
+ t.Fatalf("Delete returned error: %+v", err)
+ }
+
+ children, stat, childCh, err := zk.ChildrenW("/")
+ if err != nil {
+ t.Fatalf("Children returned error: %+v", err)
+ } else if stat == nil {
+ t.Fatal("Children returned nil stat")
+ } else if len(children) < 1 {
+ t.Fatal("Children should return at least 1 child")
+ }
+
+ if path, err := zk.Create("/gozk-test", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
+ t.Fatalf("Create returned error: %+v", err)
+ } else if path != "/gozk-test" {
+ t.Fatalf("Create returned different path '%s' != '/gozk-test'", path)
+ }
+
+ select {
+ case ev := <-childCh:
+ if ev.Err != nil {
+ t.Fatalf("Child watcher error %+v", ev.Err)
+ }
+ if ev.Path != "/" {
+ t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/")
+ }
+ case _ = <-time.After(time.Second * 2):
+ t.Fatal("Child watcher timed out")
+ }
+
+ // Delete of the watched node should trigger the watch
+
+ children, stat, childCh, err = zk.ChildrenW("/gozk-test")
+ if err != nil {
+ t.Fatalf("Children returned error: %+v", err)
+ } else if stat == nil {
+ t.Fatal("Children returned nil stat")
+ } else if len(children) != 0 {
+ t.Fatal("Children should return 0 children")
+ }
+
+ if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode {
+ t.Fatalf("Delete returned error: %+v", err)
+ }
+
+ select {
+ case ev := <-childCh:
+ if ev.Err != nil {
+ t.Fatalf("Child watcher error %+v", ev.Err)
+ }
+ if ev.Path != "/gozk-test" {
+ t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/")
+ }
+ case _ = <-time.After(time.Second * 2):
+ t.Fatal("Child watcher timed out")
+ }
+}
+
+func TestSetWatchers(t *testing.T) {
+ ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ts.Stop()
+ zk, _, err := ts.ConnectAll()
+ if err != nil {
+ t.Fatalf("Connect returned error: %+v", err)
+ }
+ defer zk.Close()
+
+ zk.reconnectDelay = time.Second
+
+ zk2, _, err := ts.ConnectAll()
+ if err != nil {
+ t.Fatalf("Connect returned error: %+v", err)
+ }
+ defer zk2.Close()
+
+ if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode {
+ t.Fatalf("Delete returned error: %+v", err)
+ }
+
+ testPath, err := zk.Create("/gozk-test-2", []byte{}, 0, WorldACL(PermAll))
+ if err != nil {
+ t.Fatalf("Create returned: %+v", err)
+ }
+
+ _, _, testEvCh, err := zk.GetW(testPath)
+ if err != nil {
+ t.Fatalf("GetW returned: %+v", err)
+ }
+
+ children, stat, childCh, err := zk.ChildrenW("/")
+ if err != nil {
+ t.Fatalf("Children returned error: %+v", err)
+ } else if stat == nil {
+ t.Fatal("Children returned nil stat")
+ } else if len(children) < 1 {
+ t.Fatal("Children should return at least 1 child")
+ }
+
+ // Simulate network error by brutally closing the network connection.
+ zk.conn.Close()
+ if err := zk2.Delete(testPath, -1); err != nil && err != ErrNoNode {
+ t.Fatalf("Delete returned error: %+v", err)
+ }
+ // Allow some time for the `zk` session to reconnect and set watches.
+ time.Sleep(time.Millisecond * 100)
+
+ if path, err := zk2.Create("/gozk-test", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
+ t.Fatalf("Create returned error: %+v", err)
+ } else if path != "/gozk-test" {
+ t.Fatalf("Create returned different path '%s' != '/gozk-test'", path)
+ }
+
+ select {
+ case ev := <-testEvCh:
+ if ev.Err != nil {
+ t.Fatalf("GetW watcher error %+v", ev.Err)
+ }
+ if ev.Path != testPath {
+ t.Fatalf("GetW watcher wrong path %s instead of %s", ev.Path, testPath)
+ }
+ case <-time.After(2 * time.Second):
+ t.Fatal("GetW watcher timed out")
+ }
+
+ select {
+ case ev := <-childCh:
+ if ev.Err != nil {
+ t.Fatalf("Child watcher error %+v", ev.Err)
+ }
+ if ev.Path != "/" {
+ t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/")
+ }
+ case <-time.After(2 * time.Second):
+ t.Fatal("Child watcher timed out")
+ }
+}
+
+func TestExpiringWatch(t *testing.T) {
+ ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ts.Stop()
+ zk, _, err := ts.ConnectAll()
+ if err != nil {
+ t.Fatalf("Connect returned error: %+v", err)
+ }
+ defer zk.Close()
+
+ if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode {
+ t.Fatalf("Delete returned error: %+v", err)
+ }
+
+ children, stat, childCh, err := zk.ChildrenW("/")
+ if err != nil {
+ t.Fatalf("Children returned error: %+v", err)
+ } else if stat == nil {
+ t.Fatal("Children returned nil stat")
+ } else if len(children) < 1 {
+ t.Fatal("Children should return at least 1 child")
+ }
+
+ zk.sessionID = 99999
+ zk.conn.Close()
+
+ select {
+ case ev := <-childCh:
+ if ev.Err != ErrSessionExpired {
+ t.Fatalf("Child watcher error %+v instead of expected ErrSessionExpired", ev.Err)
+ }
+ if ev.Path != "/" {
+ t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/")
+ }
+ case <-time.After(2 * time.Second):
+ t.Fatal("Child watcher timed out")
+ }
+}
+
+func TestRequestFail(t *testing.T) {
+ // If connecting to all servers in the list fails, then pending requests
+ // should be errored out so they don't hang forever.
+
+ zk, _, err := Connect([]string{"127.0.0.1:32444"}, time.Second*15)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer zk.Close()
+
+ ch := make(chan error)
+ go func() {
+ _, _, err := zk.Get("/blah")
+ ch <- err
+ }()
+ select {
+ case err := <-ch:
+ if err == nil {
+ t.Fatal("Expected non-nil error on failed request due to connection failure")
+ }
+ case <-time.After(time.Second * 2):
+ t.Fatal("Get hung when connection could not be made")
+ }
+}
+
+func TestSlowServer(t *testing.T) {
+ ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ts.Stop()
+
+ realAddr := fmt.Sprintf("127.0.0.1:%d", ts.Servers[0].Port)
+ proxyAddr, stopCh, err := startSlowProxy(t,
+ throttle.Rate{}, throttle.Rate{},
+ realAddr, func(ln *throttle.Listener) {
+ if ln.Up.Latency == 0 {
+ ln.Up.Latency = time.Millisecond * 2000
+ ln.Down.Latency = time.Millisecond * 2000
+ } else {
+ ln.Up.Latency = 0
+ ln.Down.Latency = 0
+ }
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer close(stopCh)
+
+ zk, _, err := Connect([]string{proxyAddr}, time.Millisecond*500)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer zk.Close()
+
+ _, _, wch, err := zk.ChildrenW("/")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Force a reconnect to get a throttled connection
+ zk.conn.Close()
+
+ time.Sleep(time.Millisecond * 100)
+
+ if err := zk.Delete("/gozk-test", -1); err == nil {
+ t.Fatal("Delete should have failed")
+ }
+
+ // The previous request should have timed out causing the server to be disconnected and reconnected
+
+ if _, err := zk.Create("/gozk-test", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
+ t.Fatal(err)
+ }
+
+ // Make sure event is still returned because the session should not have been affected
+ select {
+ case ev := <-wch:
+ t.Logf("Received event: %+v", ev)
+ case <-time.After(time.Second):
+ t.Fatal("Expected to receive a watch event")
+ }
+}
+
+func startSlowProxy(t *testing.T, up, down throttle.Rate, upstream string, adj func(ln *throttle.Listener)) (string, chan bool, error) {
+ ln, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return "", nil, err
+ }
+ tln := &throttle.Listener{
+ Listener: ln,
+ Up: up,
+ Down: down,
+ }
+ stopCh := make(chan bool)
+ go func() {
+ <-stopCh
+ tln.Close()
+ }()
+ go func() {
+ for {
+ cn, err := tln.Accept()
+ if err != nil {
+ if !strings.Contains(err.Error(), "use of closed network connection") {
+ t.Fatalf("Accept failed: %s", err.Error())
+ }
+ return
+ }
+ if adj != nil {
+ adj(tln)
+ }
+ go func(cn net.Conn) {
+ defer cn.Close()
+ upcn, err := net.Dial("tcp", upstream)
+ if err != nil {
+ t.Log(err)
+ return
+ }
+ // This will leave hanging goroutines until stopCh is closed
+ // but it doesn't matter in the context of running tests.
+ go func() {
+ <-stopCh
+ upcn.Close()
+ }()
+ go func() {
+ if _, err := io.Copy(upcn, cn); err != nil {
+ if !strings.Contains(err.Error(), "use of closed network connection") {
+ // log.Printf("Upstream write failed: %s", err.Error())
+ }
+ }
+ }()
+ if _, err := io.Copy(cn, upcn); err != nil {
+ if !strings.Contains(err.Error(), "use of closed network connection") {
+ // log.Printf("Upstream read failed: %s", err.Error())
+ }
+ }
+ }(cn)
+ }
+ }()
+ return ln.Addr().String(), stopCh, nil
+}
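The watch tests above (ChildrenW, GetW) all follow the same one-shot watch contract: the returned channel fires once for the next change or session error, after which a new watch must be set. A minimal sketch of that pattern, assuming an established *zk.Conn; the path is a placeholder.

    package watchexample

    import (
        "log"

        "github.com/samuel/go-zookeeper/zk"
    )

    // watchChildrenOnce sets a single child watch and reports the next event.
    func watchChildrenOnce(conn *zk.Conn, path string) {
        children, _, ch, err := conn.ChildrenW(path)
        if err != nil {
            log.Printf("ChildrenW failed: %v", err)
            return
        }
        log.Printf("current children of %s: %v", path, children)

        // The channel delivers exactly one event: a child change, or a
        // session-level error such as ErrSessionExpired (see TestExpiringWatch).
        ev := <-ch
        log.Printf("watch event: type=%v state=%v path=%s err=%v", ev.Type, ev.State, ev.Path, ev.Err)
    }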
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/0doc.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/0doc.go
index dd8b589de..caa7e0a3b 100644
--- a/Godeps/_workspace/src/github.com/ugorji/go/codec/0doc.go
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/0doc.go
@@ -98,7 +98,21 @@ with the standard net/rpc package.
Usage
-Typical usage model:
+The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification.
+
+The Encoder and Decoder are NOT safe for concurrent use.
+
+Consequently, the usage model is basically:
+
+ - Create and initialize the Handle before any use.
+ Once created, DO NOT modify it.
+ - Multiple Encoders or Decoders can now use the Handle concurrently.
+ They only read information off the Handle (never write).
+ - However, each Encoder or Decoder MUST NOT be used concurrently.
+ - To re-use an Encoder/Decoder, call Reset(...) on it first.
+ This allows you to use state maintained on the Encoder/Decoder.
+
+Sample usage model:
// create and configure Handle
var (
@@ -148,3 +162,32 @@ Typical usage model:
*/
package codec
+// Benefits of go-codec:
+//
+// - encoding/json always reads whole file into memory first.
+// This makes it unsuitable for parsing very large files.
+// - encoding/xml cannot parse into a map[string]interface{}
+// I found this out on reading https://github.com/clbanning/mxj
+
+// TODO:
+//
+// - (En|De)coder should store an error when it occurs.
+// Until reset, subsequent calls return that error that was stored.
+// This means that free panics must go away.
+// All errors must be raised through errorf method.
+// - Decoding using a chan is good, but incurs concurrency costs.
+// This is because there's no fast way to use a channel without it
+// having to switch goroutines constantly.
+// Callback pattern is still the best. Maybe consider supporting something like:
+// type X struct {
+// Name string
+// Ys []Y
+// Ys chan <- Y
+// Ys func(interface{}) -> call this interface for each entry in there.
+// }
+// - Consider adding a isZeroer interface { isZero() bool }
+// It is used within isEmpty, for omitEmpty support.
+// - Consider making Handle used AS-IS within the encoding/decoding session.
+// This means that we don't cache Handle information within the (En|De)coder,
+// except we really need it at Reset(...)
+// - Handle recursive types during encoding/decoding?
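The usage model added to 0doc.go above (one read-only Handle, per-goroutine Encoders/Decoders, Reset before reuse) can be summarized with the public codec API already used elsewhere in this diff. A hedged sketch; JsonHandle and the point type are stand-ins, and the ResetBytes call assumes the Reset(...) API the new doc text refers to.

    package codecexample

    import "github.com/ugorji/go/codec"

    // One Handle, configured once up front and then only read.
    var jh = new(codec.JsonHandle)

    type point struct{ X, Y int } // placeholder type

    // roundTrip gives the calling goroutine its own Encoder/Decoder over the shared Handle.
    func roundTrip(p point) (point, error) {
        var b []byte
        enc := codec.NewEncoderBytes(&b, jh)
        if err := enc.Encode(p); err != nil {
            return point{}, err
        }

        var out point
        dec := codec.NewDecoderBytes(b, jh)
        if err := dec.Decode(&out); err != nil {
            return point{}, err
        }

        // To reuse enc for another value, reset it first, as the doc comment advises.
        b = b[:0]
        enc.ResetBytes(&b)
        return out, nil
    }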
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/binc.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/binc.go
index 645376479..c884d14dc 100644
--- a/Godeps/_workspace/src/github.com/ugorji/go/codec/binc.go
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/binc.go
@@ -59,8 +59,8 @@ type bincEncDriver struct {
e *Encoder
w encWriter
m map[string]uint16 // symbols
- s uint16 // symbols sequencer
b [scratchByteArrayLen]byte
+ s uint16 // symbols sequencer
encNoSeparator
}
@@ -318,9 +318,9 @@ func (e *bincEncDriver) encLenNumber(bd byte, v uint64) {
//------------------------------------
type bincDecSymbol struct {
- i uint16
s string
b []byte
+ i uint16
}
type bincDecDriver struct {
@@ -329,7 +329,6 @@ type bincDecDriver struct {
r decReader
br bool // bytes reader
bdRead bool
- bdType valueType
bd byte
vd byte
vs byte
@@ -347,24 +346,23 @@ func (d *bincDecDriver) readNextBd() {
d.vd = d.bd >> 4
d.vs = d.bd & 0x0f
d.bdRead = true
- d.bdType = valueTypeUnset
}
-func (d *bincDecDriver) IsContainerType(vt valueType) (b bool) {
- switch vt {
- case valueTypeNil:
- return d.vd == bincVdSpecial && d.vs == bincSpNil
- case valueTypeBytes:
- return d.vd == bincVdByteArray
- case valueTypeString:
- return d.vd == bincVdString
- case valueTypeArray:
- return d.vd == bincVdArray
- case valueTypeMap:
- return d.vd == bincVdMap
+func (d *bincDecDriver) ContainerType() (vt valueType) {
+ if d.vd == bincVdSpecial && d.vs == bincSpNil {
+ return valueTypeNil
+ } else if d.vd == bincVdByteArray {
+ return valueTypeBytes
+ } else if d.vd == bincVdString {
+ return valueTypeString
+ } else if d.vd == bincVdArray {
+ return valueTypeArray
+ } else if d.vd == bincVdMap {
+ return valueTypeMap
+ } else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
}
- d.d.errorf("isContainerType: unsupported parameter: %v", vt)
- return // "unreachable"
+ return valueTypeUnset
}
func (d *bincDecDriver) TryDecodeAsNil() bool {
@@ -695,7 +693,7 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool)
if withString {
s = string(bs2)
}
- d.s = append(d.s, bincDecSymbol{symbol, s, bs2})
+ d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2})
}
default:
d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x",
@@ -784,97 +782,95 @@ func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []b
return
}
-func (d *bincDecDriver) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
+func (d *bincDecDriver) DecodeNaked() {
if !d.bdRead {
d.readNextBd()
}
+ n := &d.d.n
+ var decodeFurther bool
+
switch d.vd {
case bincVdSpecial:
switch d.vs {
case bincSpNil:
- vt = valueTypeNil
+ n.v = valueTypeNil
case bincSpFalse:
- vt = valueTypeBool
- v = false
+ n.v = valueTypeBool
+ n.b = false
case bincSpTrue:
- vt = valueTypeBool
- v = true
+ n.v = valueTypeBool
+ n.b = true
case bincSpNan:
- vt = valueTypeFloat
- v = math.NaN()
+ n.v = valueTypeFloat
+ n.f = math.NaN()
case bincSpPosInf:
- vt = valueTypeFloat
- v = math.Inf(1)
+ n.v = valueTypeFloat
+ n.f = math.Inf(1)
case bincSpNegInf:
- vt = valueTypeFloat
- v = math.Inf(-1)
+ n.v = valueTypeFloat
+ n.f = math.Inf(-1)
case bincSpZeroFloat:
- vt = valueTypeFloat
- v = float64(0)
+ n.v = valueTypeFloat
+ n.f = float64(0)
case bincSpZero:
- vt = valueTypeUint
- v = uint64(0) // int8(0)
+ n.v = valueTypeUint
+ n.u = uint64(0) // int8(0)
case bincSpNegOne:
- vt = valueTypeInt
- v = int64(-1) // int8(-1)
+ n.v = valueTypeInt
+ n.i = int64(-1) // int8(-1)
default:
d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs)
- return
}
case bincVdSmallInt:
- vt = valueTypeUint
- v = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1
+ n.v = valueTypeUint
+ n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1
case bincVdPosInt:
- vt = valueTypeUint
- v = d.decUint()
+ n.v = valueTypeUint
+ n.u = d.decUint()
case bincVdNegInt:
- vt = valueTypeInt
- v = -(int64(d.decUint()))
+ n.v = valueTypeInt
+ n.i = -(int64(d.decUint()))
case bincVdFloat:
- vt = valueTypeFloat
- v = d.decFloat()
+ n.v = valueTypeFloat
+ n.f = d.decFloat()
case bincVdSymbol:
- vt = valueTypeSymbol
- v = d.DecodeString()
+ n.v = valueTypeSymbol
+ n.s = d.DecodeString()
case bincVdString:
- vt = valueTypeString
- v = d.DecodeString()
+ n.v = valueTypeString
+ n.s = d.DecodeString()
case bincVdByteArray:
- vt = valueTypeBytes
- v = d.DecodeBytes(nil, false, false)
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
case bincVdTimestamp:
- vt = valueTypeTimestamp
+ n.v = valueTypeTimestamp
tt, err := decodeTime(d.r.readx(int(d.vs)))
if err != nil {
panic(err)
}
- v = tt
+ n.t = tt
case bincVdCustomExt:
- vt = valueTypeExt
+ n.v = valueTypeExt
l := d.decLen()
- var re RawExt
- re.Tag = uint64(d.r.readn1())
- re.Data = d.r.readx(l)
- v = &re
- vt = valueTypeExt
+ n.u = uint64(d.r.readn1())
+ n.l = d.r.readx(l)
case bincVdArray:
- vt = valueTypeArray
+ n.v = valueTypeArray
decodeFurther = true
case bincVdMap:
- vt = valueTypeMap
+ n.v = valueTypeMap
decodeFurther = true
default:
d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd)
- return
}
if !decodeFurther {
d.bdRead = false
}
- if vt == valueTypeUint && d.h.SignedInteger {
- d.bdType = valueTypeInt
- v = int64(v.(uint64))
+ if n.v == valueTypeUint && d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = int64(n.u)
}
return
}
@@ -898,6 +894,10 @@ type BincHandle struct {
binaryEncodingType
}
+func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+ return h.SetExt(rt, tag, &setExtWrapper{b: ext})
+}
+
func (h *BincHandle) newEncDriver(e *Encoder) encDriver {
return &bincEncDriver{e: e, w: e.w}
}
@@ -906,8 +906,12 @@ func (h *BincHandle) newDecDriver(d *Decoder) decDriver {
return &bincDecDriver{d: d, r: d.r, h: h, br: d.bytes}
}
-func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
- return h.SetExt(rt, tag, &setExtWrapper{b: ext})
+func (e *bincEncDriver) reset() {
+ e.w = e.e.w
+}
+
+func (d *bincDecDriver) reset() {
+ d.r = d.d.r
}
var _ decDriver = (*bincDecDriver)(nil)
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor.go
index 8b6e13a89..0e5d32b2e 100644
--- a/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor.go
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor.go
@@ -60,11 +60,11 @@ const (
// -------------------
type cborEncDriver struct {
+ noBuiltInTypes
+ encNoSeparator
e *Encoder
w encWriter
h *CborHandle
- noBuiltInTypes
- encNoSeparator
x [8]byte
}
@@ -175,11 +175,10 @@ type cborDecDriver struct {
d *Decoder
h *CborHandle
r decReader
+ b [scratchByteArrayLen]byte
br bool // bytes reader
bdRead bool
- bdType valueType
bd byte
- b [scratchByteArrayLen]byte
noBuiltInTypes
decNoSeparator
}
@@ -187,24 +186,23 @@ type cborDecDriver struct {
func (d *cborDecDriver) readNextBd() {
d.bd = d.r.readn1()
d.bdRead = true
- d.bdType = valueTypeUnset
}
-func (d *cborDecDriver) IsContainerType(vt valueType) (bv bool) {
- switch vt {
- case valueTypeNil:
- return d.bd == cborBdNil
- case valueTypeBytes:
- return d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString)
- case valueTypeString:
- return d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray)
- case valueTypeArray:
- return d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap)
- case valueTypeMap:
- return d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag)
+func (d *cborDecDriver) ContainerType() (vt valueType) {
+ if d.bd == cborBdNil {
+ return valueTypeNil
+ } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) {
+ return valueTypeBytes
+ } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) {
+ return valueTypeString
+ } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) {
+ return valueTypeArray
+ } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) {
+ return valueTypeMap
+ } else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
}
- d.d.errorf("isContainerType: unsupported parameter: %v", vt)
- return // "unreachable"
+ return valueTypeUnset
}
func (d *cborDecDriver) TryDecodeAsNil() bool {
@@ -446,71 +444,72 @@ func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxta
return
}
-func (d *cborDecDriver) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
+func (d *cborDecDriver) DecodeNaked() {
if !d.bdRead {
d.readNextBd()
}
+ n := &d.d.n
+ var decodeFurther bool
+
switch d.bd {
case cborBdNil:
- vt = valueTypeNil
+ n.v = valueTypeNil
case cborBdFalse:
- vt = valueTypeBool
- v = false
+ n.v = valueTypeBool
+ n.b = false
case cborBdTrue:
- vt = valueTypeBool
- v = true
+ n.v = valueTypeBool
+ n.b = true
case cborBdFloat16, cborBdFloat32:
- vt = valueTypeFloat
- v = d.DecodeFloat(true)
+ n.v = valueTypeFloat
+ n.f = d.DecodeFloat(true)
case cborBdFloat64:
- vt = valueTypeFloat
- v = d.DecodeFloat(false)
+ n.v = valueTypeFloat
+ n.f = d.DecodeFloat(false)
case cborBdIndefiniteBytes:
- vt = valueTypeBytes
- v = d.DecodeBytes(nil, false, false)
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
case cborBdIndefiniteString:
- vt = valueTypeString
- v = d.DecodeString()
+ n.v = valueTypeString
+ n.s = d.DecodeString()
case cborBdIndefiniteArray:
- vt = valueTypeArray
+ n.v = valueTypeArray
decodeFurther = true
case cborBdIndefiniteMap:
- vt = valueTypeMap
+ n.v = valueTypeMap
decodeFurther = true
default:
switch {
case d.bd >= cborBaseUint && d.bd < cborBaseNegInt:
if d.h.SignedInteger {
- vt = valueTypeInt
- v = d.DecodeInt(64)
+ n.v = valueTypeInt
+ n.i = d.DecodeInt(64)
} else {
- vt = valueTypeUint
- v = d.DecodeUint(64)
+ n.v = valueTypeUint
+ n.u = d.DecodeUint(64)
}
case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
- vt = valueTypeInt
- v = d.DecodeInt(64)
+ n.v = valueTypeInt
+ n.i = d.DecodeInt(64)
case d.bd >= cborBaseBytes && d.bd < cborBaseString:
- vt = valueTypeBytes
- v = d.DecodeBytes(nil, false, false)
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
case d.bd >= cborBaseString && d.bd < cborBaseArray:
- vt = valueTypeString
- v = d.DecodeString()
+ n.v = valueTypeString
+ n.s = d.DecodeString()
case d.bd >= cborBaseArray && d.bd < cborBaseMap:
- vt = valueTypeArray
+ n.v = valueTypeArray
decodeFurther = true
case d.bd >= cborBaseMap && d.bd < cborBaseTag:
- vt = valueTypeMap
+ n.v = valueTypeMap
decodeFurther = true
case d.bd >= cborBaseTag && d.bd < cborBaseSimple:
- vt = valueTypeExt
- var re RawExt
- ui := d.decUint()
+ n.v = valueTypeExt
+ n.u = d.decUint()
+ n.l = nil
d.bdRead = false
- re.Tag = ui
- d.d.decode(&re.Value)
- v = &re
+ // d.d.decode(&re.Value) // handled by decode itself.
// decodeFurther = true
default:
d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
@@ -557,8 +556,12 @@ func (d *cborDecDriver) DecodeNaked() (v interface{}, vt valueType, decodeFurthe
// // Now, vv contains the same string "one-byte"
//
type CborHandle struct {
- BasicHandle
binaryEncodingType
+ BasicHandle
+}
+
+func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
+ return h.SetExt(rt, tag, &setExtWrapper{i: ext})
}
func (h *CborHandle) newEncDriver(e *Encoder) encDriver {
@@ -569,8 +572,12 @@ func (h *CborHandle) newDecDriver(d *Decoder) decDriver {
return &cborDecDriver{d: d, r: d.r, h: h, br: d.bytes}
}
-func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
- return h.SetExt(rt, tag, &setExtWrapper{i: ext})
+func (e *cborEncDriver) reset() {
+ e.w = e.e.w
+}
+
+func (d *cborDecDriver) reset() {
+ d.r = d.d.r
}
var _ decDriver = (*cborDecDriver)(nil)
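The new CborHandle.SetInterfaceExt hook above is what the testUnixNanoTimeExt helper later in this diff plugs into. A hedged sketch of registering such an extension; the tag number and type names are placeholders, and the int64/uint64 handling mirrors that test helper.

    package cborextexample

    import (
        "fmt"
        "reflect"
        "time"

        "github.com/ugorji/go/codec"
    )

    // unixNanoExt converts time.Time to/from int64 nanoseconds, like testUnixNanoTimeExt.
    type unixNanoExt struct{}

    func (unixNanoExt) ConvertExt(v interface{}) interface{} {
        switch t := v.(type) {
        case time.Time:
            return t.UTC().UnixNano()
        case *time.Time:
            return t.UTC().UnixNano()
        default:
            panic(fmt.Sprintf("expecting time.Time; got %T", v))
        }
    }

    func (unixNanoExt) UpdateExt(dst interface{}, src interface{}) {
        tt := dst.(*time.Time)
        switch v := src.(type) {
        case int64:
            *tt = time.Unix(0, v).UTC()
        case uint64:
            *tt = time.Unix(0, int64(v)).UTC()
        default:
            panic(fmt.Sprintf("expecting int64/uint64; got %T", src))
        }
    }

    // newCborHandle registers the extension under a placeholder tag before any use.
    func newCborHandle() (*codec.CborHandle, error) {
        h := new(codec.CborHandle)
        err := h.SetInterfaceExt(reflect.TypeOf(time.Time{}), 1, unixNanoExt{})
        return h, err
    }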
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor_test.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor_test.go
new file mode 100644
index 000000000..205dffa7d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor_test.go
@@ -0,0 +1,205 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/hex"
+ "math"
+ "os"
+ "regexp"
+ "strings"
+ "testing"
+)
+
+func TestCborIndefiniteLength(t *testing.T) {
+ oldMapType := testCborH.MapType
+ defer func() {
+ testCborH.MapType = oldMapType
+ }()
+ testCborH.MapType = testMapStrIntfTyp
+ // var (
+ // M1 map[string][]byte
+ // M2 map[uint64]bool
+ // L1 []interface{}
+ // S1 []string
+ // B1 []byte
+ // )
+ var v, vv interface{}
+ // define it (v), encode it using indefinite lengths, decode it (vv), compare v to vv
+ v = map[string]interface{}{
+ "one-byte-key": []byte{1, 2, 3, 4, 5, 6},
+ "two-string-key": "two-value",
+ "three-list-key": []interface{}{true, false, uint64(1), int64(-1)},
+ }
+ var buf bytes.Buffer
+ // buf.Reset()
+ e := NewEncoder(&buf, testCborH)
+ buf.WriteByte(cborBdIndefiniteMap)
+ //----
+ buf.WriteByte(cborBdIndefiniteString)
+ e.MustEncode("one-")
+ e.MustEncode("byte-")
+ e.MustEncode("key")
+ buf.WriteByte(cborBdBreak)
+
+ buf.WriteByte(cborBdIndefiniteBytes)
+ e.MustEncode([]byte{1, 2, 3})
+ e.MustEncode([]byte{4, 5, 6})
+ buf.WriteByte(cborBdBreak)
+
+ //----
+ buf.WriteByte(cborBdIndefiniteString)
+ e.MustEncode("two-")
+ e.MustEncode("string-")
+ e.MustEncode("key")
+ buf.WriteByte(cborBdBreak)
+
+ buf.WriteByte(cborBdIndefiniteString)
+ e.MustEncode([]byte("two-")) // encode as bytes, to check robustness of code
+ e.MustEncode([]byte("value"))
+ buf.WriteByte(cborBdBreak)
+
+ //----
+ buf.WriteByte(cborBdIndefiniteString)
+ e.MustEncode("three-")
+ e.MustEncode("list-")
+ e.MustEncode("key")
+ buf.WriteByte(cborBdBreak)
+
+ buf.WriteByte(cborBdIndefiniteArray)
+ e.MustEncode(true)
+ e.MustEncode(false)
+ e.MustEncode(uint64(1))
+ e.MustEncode(int64(-1))
+ buf.WriteByte(cborBdBreak)
+
+ buf.WriteByte(cborBdBreak) // close map
+
+ NewDecoderBytes(buf.Bytes(), testCborH).MustDecode(&vv)
+ if err := deepEqual(v, vv); err != nil {
+ logT(t, "-------- Before and After marshal do not match: Error: %v", err)
+ logT(t, " ....... GOLDEN: (%T) %#v", v, v)
+ logT(t, " ....... DECODED: (%T) %#v", vv, vv)
+ failT(t)
+ }
+}
+
+type testCborGolden struct {
+ Base64 string `codec:"cbor"`
+ Hex string `codec:"hex"`
+ Roundtrip bool `codec:"roundtrip"`
+ Decoded interface{} `codec:"decoded"`
+ Diagnostic string `codec:"diagnostic"`
+ Skip bool `codec:"skip"`
+}
+
+// Some tests are skipped because they include numbers outside the range of int64/uint64
+func doTestCborGoldens(t *testing.T) {
+ oldMapType := testCborH.MapType
+ defer func() {
+ testCborH.MapType = oldMapType
+ }()
+ testCborH.MapType = testMapStrIntfTyp
+ // decode test-cbor-goldens.json into a list of []*testCborGolden
+ // for each one,
+ // - decode hex into []byte bs
+ // - decode bs into interface{} v
+ // - compare both using deepequal
+ // - for any miss, record it
+ var gs []*testCborGolden
+ f, err := os.Open("test-cbor-goldens.json")
+ if err != nil {
+ logT(t, "error opening test-cbor-goldens.json: %v", err)
+ failT(t)
+ }
+ defer f.Close()
+ jh := new(JsonHandle)
+ jh.MapType = testMapStrIntfTyp
+ // d := NewDecoder(f, jh)
+ d := NewDecoder(bufio.NewReader(f), jh)
+ // err = d.Decode(&gs)
+ d.MustDecode(&gs)
+ if err != nil {
+ logT(t, "error json decoding test-cbor-goldens.json: %v", err)
+ failT(t)
+ }
+
+ tagregex := regexp.MustCompile(`[\d]+\(.+?\)`)
+ hexregex := regexp.MustCompile(`h'([0-9a-fA-F]*)'`)
+ for i, g := range gs {
+ // fmt.Printf("%v, skip: %v, isTag: %v, %s\n", i, g.Skip, tagregex.MatchString(g.Diagnostic), g.Diagnostic)
+ // skip tags or simple or those with prefix, as we can't verify them.
+ if g.Skip || strings.HasPrefix(g.Diagnostic, "simple(") || tagregex.MatchString(g.Diagnostic) {
+ // fmt.Printf("%v: skipped\n", i)
+ logT(t, "[%v] skipping because skip=true OR unsupported simple value or Tag Value", i)
+ continue
+ }
+ // println("++++++++++++", i, "g.Diagnostic", g.Diagnostic)
+ if hexregex.MatchString(g.Diagnostic) {
+ // println(i, "g.Diagnostic matched hex")
+ if s2 := g.Diagnostic[2 : len(g.Diagnostic)-1]; s2 == "" {
+ g.Decoded = zeroByteSlice
+ } else if bs2, err2 := hex.DecodeString(s2); err2 == nil {
+ g.Decoded = bs2
+ }
+ // fmt.Printf("%v: hex: %v\n", i, g.Decoded)
+ }
+ bs, err := hex.DecodeString(g.Hex)
+ if err != nil {
+ logT(t, "[%v] error hex decoding %s [%v]: %v", i, g.Hex, err)
+ failT(t)
+ }
+ var v interface{}
+ NewDecoderBytes(bs, testCborH).MustDecode(&v)
+ if _, ok := v.(RawExt); ok {
+ continue
+ }
+ // check the diagnostics to compare
+ switch g.Diagnostic {
+ case "Infinity":
+ b := math.IsInf(v.(float64), 1)
+ testCborError(t, i, math.Inf(1), v, nil, &b)
+ case "-Infinity":
+ b := math.IsInf(v.(float64), -1)
+ testCborError(t, i, math.Inf(-1), v, nil, &b)
+ case "NaN":
+ // println(i, "checking NaN")
+ b := math.IsNaN(v.(float64))
+ testCborError(t, i, math.NaN(), v, nil, &b)
+ case "undefined":
+ b := v == nil
+ testCborError(t, i, nil, v, nil, &b)
+ default:
+ v0 := g.Decoded
+ // testCborCoerceJsonNumber(reflect.ValueOf(&v0))
+ testCborError(t, i, v0, v, deepEqual(v0, v), nil)
+ }
+ }
+}
+
+func testCborError(t *testing.T, i int, v0, v1 interface{}, err error, equal *bool) {
+ if err == nil && equal == nil {
+ // fmt.Printf("%v testCborError passed (err and equal nil)\n", i)
+ return
+ }
+ if err != nil {
+ logT(t, "[%v] deepEqual error: %v", i, err)
+ logT(t, " ....... GOLDEN: (%T) %#v", v0, v0)
+ logT(t, " ....... DECODED: (%T) %#v", v1, v1)
+ failT(t)
+ }
+ if equal != nil && !*equal {
+ logT(t, "[%v] values not equal", i)
+ logT(t, " ....... GOLDEN: (%T) %#v", v0, v0)
+ logT(t, " ....... DECODED: (%T) %#v", v1, v1)
+ failT(t)
+ }
+ // fmt.Printf("%v testCborError passed (checks passed)\n", i)
+}
+
+func TestCborGoldens(t *testing.T) {
+ doTestCborGoldens(t)
+}
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/codec_test.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/codec_test.go
new file mode 100644
index 000000000..ab14e2d01
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/codec_test.go
@@ -0,0 +1,1185 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// Test works by using a slice of interfaces.
+// It can test for encoding/decoding into/from a nil interface{}
+// or passing the object to encode/decode into.
+//
+// There are basically 2 main tests here.
+// First test internally encodes and decodes things and verifies that
+// the artifact was as expected.
+// Second test will use python msgpack to create a bunch of golden files,
+// read those files, and compare them to what it should be. It then
+// writes those files back out and compares the byte streams.
+//
+// Taken together, the tests are pretty extensive.
+//
+// The following manual tests must be done:
+// - TestCodecUnderlyingType
+// - Set fastpathEnabled to false and run tests (to ensure that regular reflection works).
+// We don't want to use a variable there so that the code is elided.
+
+import (
+ "bytes"
+ "encoding/gob"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "math"
+ "math/rand"
+ "net"
+ "net/rpc"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strconv"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+func init() {
+ testInitFlags()
+ testPreInitFns = append(testPreInitFns, testInit)
+}
+
+type testVerifyArg int
+
+const (
+ testVerifyMapTypeSame testVerifyArg = iota
+ testVerifyMapTypeStrIntf
+ testVerifyMapTypeIntfIntf
+ // testVerifySliceIntf
+ testVerifyForPython
+)
+
+const testSkipRPCTests = false
+
+var (
+ testVerbose bool
+ testInitDebug bool
+ testUseIoEncDec bool
+ testStructToArray bool
+ testCanonical bool
+ testUseReset bool
+ testWriteNoSymbols bool
+ testSkipIntf bool
+ testInternStr bool
+ testUseMust bool
+
+ skipVerifyVal interface{} = &(struct{}{})
+
+ testMapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil))
+
+ // For Go Time, do not use a descriptive timezone.
+ // It's unnecessary, and makes it harder to do a reflect.DeepEqual.
+ // The Offset already tells what the offset should be, if not on UTC and unknown zone name.
+ timeLoc = time.FixedZone("", -8*60*60) // UTC-08:00 //time.UTC-8
+ timeToCompare1 = time.Date(2012, 2, 2, 2, 2, 2, 2000, timeLoc).UTC()
+ timeToCompare2 = time.Date(1900, 2, 2, 2, 2, 2, 2000, timeLoc).UTC()
+ timeToCompare3 = time.Unix(0, 270).UTC() // use value that must be encoded as uint64 for nanoseconds (for cbor/msgpack comparison)
+ //timeToCompare4 = time.Time{}.UTC() // does not work well with simple cbor time encoding (overflow)
+ timeToCompare4 = time.Unix(-2013855848, 4223).UTC()
+
+ table []interface{} // main items we encode
+ tableVerify []interface{} // we verify encoded things against this after decode
+ tableTestNilVerify []interface{} // for nil interface, use this to verify (rules are different)
+ tablePythonVerify []interface{} // for verifying for python, since Python sometimes
+ // will encode a float32 as float64, or large int as uint
+ testRpcInt = new(TestRpcInt)
+)
+
+func testInitFlags() {
+ // delete(testDecOpts.ExtFuncs, timeTyp)
+ flag.BoolVar(&testVerbose, "tv", false, "Test Verbose")
+ flag.BoolVar(&testInitDebug, "tg", false, "Test Init Debug")
+ flag.BoolVar(&testUseIoEncDec, "ti", false, "Use IO Reader/Writer for Marshal/Unmarshal")
+ flag.BoolVar(&testStructToArray, "ts", false, "Set StructToArray option")
+ flag.BoolVar(&testWriteNoSymbols, "tn", false, "Set NoSymbols option")
+ flag.BoolVar(&testCanonical, "tc", false, "Set Canonical option")
+ flag.BoolVar(&testInternStr, "te", false, "Set InternStr option")
+ flag.BoolVar(&testSkipIntf, "tf", false, "Skip Interfaces")
+ flag.BoolVar(&testUseReset, "tr", false, "Use Reset")
+ flag.BoolVar(&testUseMust, "tm", true, "Use Must(En|De)code")
+}
+
+func testByteBuf(in []byte) *bytes.Buffer {
+ return bytes.NewBuffer(in)
+}
+
+type TestABC struct {
+ A, B, C string
+}
+
+type TestRpcInt struct {
+ i int
+}
+
+func (r *TestRpcInt) Update(n int, res *int) error { r.i = n; *res = r.i; return nil }
+func (r *TestRpcInt) Square(ignore int, res *int) error { *res = r.i * r.i; return nil }
+func (r *TestRpcInt) Mult(n int, res *int) error { *res = r.i * n; return nil }
+func (r *TestRpcInt) EchoStruct(arg TestABC, res *string) error {
+ *res = fmt.Sprintf("%#v", arg)
+ return nil
+}
+func (r *TestRpcInt) Echo123(args []string, res *string) error {
+ *res = fmt.Sprintf("%#v", args)
+ return nil
+}
+
+type testUnixNanoTimeExt struct {
+ // keep the timestamp here, so that we do not incur interface-conversion costs
+ ts int64
+}
+
+// func (x *testUnixNanoTimeExt) WriteExt(interface{}) []byte { panic("unsupported") }
+// func (x *testUnixNanoTimeExt) ReadExt(interface{}, []byte) { panic("unsupported") }
+func (x *testUnixNanoTimeExt) ConvertExt(v interface{}) interface{} {
+ switch v2 := v.(type) {
+ case time.Time:
+ x.ts = v2.UTC().UnixNano()
+ case *time.Time:
+ x.ts = v2.UTC().UnixNano()
+ default:
+ panic(fmt.Sprintf("unsupported format for time conversion: expecting time.Time; got %T", v))
+ }
+ return &x.ts
+}
+func (x *testUnixNanoTimeExt) UpdateExt(dest interface{}, v interface{}) {
+ // fmt.Printf("testUnixNanoTimeExt.UpdateExt: v: %v\n", v)
+ tt := dest.(*time.Time)
+ switch v2 := v.(type) {
+ case int64:
+ *tt = time.Unix(0, v2).UTC()
+ case *int64:
+ *tt = time.Unix(0, *v2).UTC()
+ case uint64:
+ *tt = time.Unix(0, int64(v2)).UTC()
+ case *uint64:
+ *tt = time.Unix(0, int64(*v2)).UTC()
+ //case float64:
+ //case string:
+ default:
+ panic(fmt.Sprintf("unsupported format for time conversion: expecting int64/uint64; got %T", v))
+ }
+ // fmt.Printf("testUnixNanoTimeExt.UpdateExt: v: %v, tt: %#v\n", v, tt)
+}
+
+func testVerifyVal(v interface{}, arg testVerifyArg) (v2 interface{}) {
+ //for python msgpack,
+ // - all positive integers are unsigned 64-bit ints
+ // - all floats are float64
+ switch iv := v.(type) {
+ case int8:
+ if iv >= 0 {
+ v2 = uint64(iv)
+ } else {
+ v2 = int64(iv)
+ }
+ case int16:
+ if iv >= 0 {
+ v2 = uint64(iv)
+ } else {
+ v2 = int64(iv)
+ }
+ case int32:
+ if iv >= 0 {
+ v2 = uint64(iv)
+ } else {
+ v2 = int64(iv)
+ }
+ case int64:
+ if iv >= 0 {
+ v2 = uint64(iv)
+ } else {
+ v2 = int64(iv)
+ }
+ case uint8:
+ v2 = uint64(iv)
+ case uint16:
+ v2 = uint64(iv)
+ case uint32:
+ v2 = uint64(iv)
+ case uint64:
+ v2 = uint64(iv)
+ case float32:
+ v2 = float64(iv)
+ case float64:
+ v2 = float64(iv)
+ case []interface{}:
+ m2 := make([]interface{}, len(iv))
+ for j, vj := range iv {
+ m2[j] = testVerifyVal(vj, arg)
+ }
+ v2 = m2
+ case map[string]bool:
+ switch arg {
+ case testVerifyMapTypeSame:
+ m2 := make(map[string]bool)
+ for kj, kv := range iv {
+ m2[kj] = kv
+ }
+ v2 = m2
+ case testVerifyMapTypeStrIntf, testVerifyForPython:
+ m2 := make(map[string]interface{})
+ for kj, kv := range iv {
+ m2[kj] = kv
+ }
+ v2 = m2
+ case testVerifyMapTypeIntfIntf:
+ m2 := make(map[interface{}]interface{})
+ for kj, kv := range iv {
+ m2[kj] = kv
+ }
+ v2 = m2
+ }
+ case map[string]interface{}:
+ switch arg {
+ case testVerifyMapTypeSame:
+ m2 := make(map[string]interface{})
+ for kj, kv := range iv {
+ m2[kj] = testVerifyVal(kv, arg)
+ }
+ v2 = m2
+ case testVerifyMapTypeStrIntf, testVerifyForPython:
+ m2 := make(map[string]interface{})
+ for kj, kv := range iv {
+ m2[kj] = testVerifyVal(kv, arg)
+ }
+ v2 = m2
+ case testVerifyMapTypeIntfIntf:
+ m2 := make(map[interface{}]interface{})
+ for kj, kv := range iv {
+ m2[kj] = testVerifyVal(kv, arg)
+ }
+ v2 = m2
+ }
+ case map[interface{}]interface{}:
+ m2 := make(map[interface{}]interface{})
+ for kj, kv := range iv {
+ m2[testVerifyVal(kj, arg)] = testVerifyVal(kv, arg)
+ }
+ v2 = m2
+ case time.Time:
+ switch arg {
+ case testVerifyForPython:
+ if iv2 := iv.UnixNano(); iv2 >= 0 {
+ v2 = uint64(iv2)
+ } else {
+ v2 = int64(iv2)
+ }
+ default:
+ v2 = v
+ }
+ default:
+ v2 = v
+ }
+ return
+}
+
+func testInit() {
+ gob.Register(new(TestStruc))
+ if testInitDebug {
+ ts0 := newTestStruc(2, false, !testSkipIntf, false)
+ fmt.Printf("====> depth: %v, ts: %#v\n", 2, ts0)
+ }
+
+ for _, v := range testHandles {
+ bh := v.getBasicHandle()
+ bh.InternString = testInternStr
+ bh.Canonical = testCanonical
+ bh.StructToArray = testStructToArray
+ // mostly doing this for binc
+ if testWriteNoSymbols {
+ bh.AsSymbols = AsSymbolNone
+ } else {
+ bh.AsSymbols = AsSymbolAll
+ }
+ }
+
+ testMsgpackH.RawToString = true
+
+ // testMsgpackH.AddExt(byteSliceTyp, 0, testMsgpackH.BinaryEncodeExt, testMsgpackH.BinaryDecodeExt)
+ // testMsgpackH.AddExt(timeTyp, 1, testMsgpackH.TimeEncodeExt, testMsgpackH.TimeDecodeExt)
+ timeEncExt := func(rv reflect.Value) (bs []byte, err error) {
+ defer panicToErr(&err)
+ bs = timeExt{}.WriteExt(rv.Interface())
+ return
+ }
+ timeDecExt := func(rv reflect.Value, bs []byte) (err error) {
+ defer panicToErr(&err)
+ timeExt{}.ReadExt(rv.Interface(), bs)
+ return
+ }
+
+ // add extensions for msgpack, simple for time.Time, so we can encode/decode same way.
+ // use different flavors of XXXExt calls, including deprecated ones.
+ testSimpleH.AddExt(timeTyp, 1, timeEncExt, timeDecExt)
+ testMsgpackH.SetBytesExt(timeTyp, 1, timeExt{})
+ testCborH.SetInterfaceExt(timeTyp, 1, &testUnixNanoTimeExt{})
+ testJsonH.SetInterfaceExt(timeTyp, 1, &testUnixNanoTimeExt{})
+
+ primitives := []interface{}{
+ int8(-8),
+ int16(-1616),
+ int32(-32323232),
+ int64(-6464646464646464),
+ uint8(192),
+ uint16(1616),
+ uint32(32323232),
+ uint64(6464646464646464),
+ byte(192),
+ float32(-3232.0),
+ float64(-6464646464.0),
+ float32(3232.0),
+ float64(6464646464.0),
+ false,
+ true,
+ nil,
+ "someday",
+ "",
+ "bytestring",
+ timeToCompare1,
+ timeToCompare2,
+ timeToCompare3,
+ timeToCompare4,
+ }
+ mapsAndStrucs := []interface{}{
+ map[string]bool{
+ "true": true,
+ "false": false,
+ },
+ map[string]interface{}{
+ "true": "True",
+ "false": false,
+ "uint16(1616)": uint16(1616),
+ },
+ //add a complex combo map in here (a map holding a list, which holds a map).
+ //note that after the first element, everything else should be generic.
+ map[string]interface{}{
+ "list": []interface{}{
+ int16(1616),
+ int32(32323232),
+ true,
+ float32(-3232.0),
+ map[string]interface{}{
+ "TRUE": true,
+ "FALSE": false,
+ },
+ []interface{}{true, false},
+ },
+ "int32": int32(32323232),
+ "bool": true,
+ "LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
+ "SHORT STRING": "1234567890",
+ },
+ map[interface{}]interface{}{
+ true: "true",
+ uint8(138): false,
+ "false": uint8(200),
+ },
+ newTestStruc(0, false, !testSkipIntf, false),
+ }
+
+ table = []interface{}{}
+ table = append(table, primitives...) //0-22 are primitives
+ table = append(table, primitives) //23 is a list of primitives
+ table = append(table, mapsAndStrucs...) //24-27 are maps. 28 is a *struct
+
+ tableVerify = make([]interface{}, len(table))
+ tableTestNilVerify = make([]interface{}, len(table))
+ tablePythonVerify = make([]interface{}, len(table))
+
+ lp := len(primitives)
+ av := tableVerify
+ for i, v := range table {
+ if i == lp+3 {
+ av[i] = skipVerifyVal
+ continue
+ }
+ //av[i] = testVerifyVal(v, testVerifyMapTypeSame)
+ switch v.(type) {
+ case []interface{}:
+ av[i] = testVerifyVal(v, testVerifyMapTypeSame)
+ case map[string]interface{}:
+ av[i] = testVerifyVal(v, testVerifyMapTypeSame)
+ case map[interface{}]interface{}:
+ av[i] = testVerifyVal(v, testVerifyMapTypeSame)
+ default:
+ av[i] = v
+ }
+ }
+
+ av = tableTestNilVerify
+ for i, v := range table {
+ if i > lp+3 {
+ av[i] = skipVerifyVal
+ continue
+ }
+ av[i] = testVerifyVal(v, testVerifyMapTypeStrIntf)
+ }
+
+ av = tablePythonVerify
+ for i, v := range table {
+ if i > lp+3 {
+ av[i] = skipVerifyVal
+ continue
+ }
+ av[i] = testVerifyVal(v, testVerifyForPython)
+ }
+
+ tablePythonVerify = tablePythonVerify[:24]
+}
+
+func testUnmarshal(v interface{}, data []byte, h Handle) (err error) {
+ return testCodecDecode(data, v, h)
+}
+
+func testMarshal(v interface{}, h Handle) (bs []byte, err error) {
+ return testCodecEncode(v, nil, testByteBuf, h)
+}
+
+func testMarshalErr(v interface{}, h Handle, t *testing.T, name string) (bs []byte, err error) {
+ if bs, err = testMarshal(v, h); err != nil {
+ logT(t, "Error encoding %s: %v, Err: %v", name, v, err)
+ t.FailNow()
+ }
+ return
+}
+
+func testUnmarshalErr(v interface{}, data []byte, h Handle, t *testing.T, name string) (err error) {
+ if err = testUnmarshal(v, data, h); err != nil {
+ logT(t, "Error Decoding into %s: %v, Err: %v", name, v, err)
+ t.FailNow()
+ }
+ return
+}
+
+// doTestCodecTableOne allows us to test different variations based on the arguments passed.
+func doTestCodecTableOne(t *testing.T, testNil bool, h Handle,
+ vs []interface{}, vsVerify []interface{}) {
+ //if testNil, then just test for when a pointer to a nil interface{} is passed. It should work.
+ //The current setup allows us to test (at least manually) the nil interface or typed interface.
+ logT(t, "================ TestNil: %v ================\n", testNil)
+ for i, v0 := range vs {
+ logT(t, "..............................................")
+ logT(t, " Testing: #%d:, %T, %#v\n", i, v0, v0)
+ b0, err := testMarshalErr(v0, h, t, "v0")
+ if err != nil {
+ continue
+ }
+ if h.isBinary() {
+ logT(t, " Encoded bytes: len: %v, %v\n", len(b0), b0)
+ } else {
+ logT(t, " Encoded string: len: %v, %v\n", len(string(b0)), string(b0))
+ // println("########### encoded string: " + string(b0))
+ }
+ var v1 interface{}
+
+ if testNil {
+ err = testUnmarshal(&v1, b0, h)
+ } else {
+ if v0 != nil {
+ v0rt := reflect.TypeOf(v0) // ptr
+ rv1 := reflect.New(v0rt)
+ err = testUnmarshal(rv1.Interface(), b0, h)
+ v1 = rv1.Elem().Interface()
+ // v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface()
+ }
+ }
+
+ logT(t, " v1 returned: %T, %#v", v1, v1)
+ // if v1 != nil {
+ // logT(t, " v1 returned: %T, %#v", v1, v1)
+ // //we always indirect, because ptr to typed value may be passed (if not testNil)
+ // v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface()
+ // }
+ if err != nil {
+ logT(t, "-------- Error: %v. Partial return: %v", err, v1)
+ failT(t)
+ continue
+ }
+ v0check := vsVerify[i]
+ if v0check == skipVerifyVal {
+ logT(t, " Nil Check skipped: Decoded: %T, %#v\n", v1, v1)
+ continue
+ }
+
+ if err = deepEqual(v0check, v1); err == nil {
+ logT(t, "++++++++ Before and After marshal matched\n")
+ } else {
+ // logT(t, "-------- Before and After marshal do not match: Error: %v"+
+ // " ====> GOLDEN: (%T) %#v, DECODED: (%T) %#v\n", err, v0check, v0check, v1, v1)
+ logT(t, "-------- Before and After marshal do not match: Error: %v", err)
+ logT(t, " ....... GOLDEN: (%T) %#v", v0check, v0check)
+ logT(t, " ....... DECODED: (%T) %#v", v1, v1)
+ failT(t)
+ }
+ }
+}
+
+func testCodecTableOne(t *testing.T, h Handle) {
+ testOnce.Do(testInitAll)
+ // func TestMsgpackAllExperimental(t *testing.T) {
+ // dopts := testDecOpts(nil, nil, false, true, true),
+
+ idxTime, numPrim, numMap := 19, 23, 4
+ //println("#################")
+ switch v := h.(type) {
+ case *MsgpackHandle:
+ var oldWriteExt, oldRawToString bool
+ oldWriteExt, v.WriteExt = v.WriteExt, true
+ oldRawToString, v.RawToString = v.RawToString, true
+ doTestCodecTableOne(t, false, h, table, tableVerify)
+ v.WriteExt, v.RawToString = oldWriteExt, oldRawToString
+ case *JsonHandle:
+ //skip []interface{} containing time.Time, as it encodes as a number, but cannot decode back to time.Time.
+ //As there is no real support for extension tags in json, this must be skipped.
+ doTestCodecTableOne(t, false, h, table[:numPrim], tableVerify[:numPrim])
+ doTestCodecTableOne(t, false, h, table[numPrim+1:], tableVerify[numPrim+1:])
+ default:
+ doTestCodecTableOne(t, false, h, table, tableVerify)
+ }
+ // func TestMsgpackAll(t *testing.T) {
+
+ // //skip []interface{} containing time.Time
+ // doTestCodecTableOne(t, false, h, table[:numPrim], tableVerify[:numPrim])
+ // doTestCodecTableOne(t, false, h, table[numPrim+1:], tableVerify[numPrim+1:])
+ // func TestMsgpackNilStringMap(t *testing.T) {
+ var oldMapType reflect.Type
+ v := h.getBasicHandle()
+
+ oldMapType, v.MapType = v.MapType, testMapStrIntfTyp
+
+ //skip time.Time, []interface{} containing time.Time, last map, and newStruc
+ doTestCodecTableOne(t, true, h, table[:idxTime], tableTestNilVerify[:idxTime])
+ doTestCodecTableOne(t, true, h, table[numPrim+1:numPrim+numMap], tableTestNilVerify[numPrim+1:numPrim+numMap])
+
+ v.MapType = oldMapType
+
+ // func TestMsgpackNilIntf(t *testing.T) {
+
+ //do newTestStruc and last element of map
+ doTestCodecTableOne(t, true, h, table[numPrim+numMap:], tableTestNilVerify[numPrim+numMap:])
+ //TODO? What is this one?
+ //doTestCodecTableOne(t, true, h, table[17:18], tableTestNilVerify[17:18])
+}
+
+func testCodecMiscOne(t *testing.T, h Handle) {
+ testOnce.Do(testInitAll)
+ b, err := testMarshalErr(32, h, t, "32")
+ // Cannot do this nil one, because faster type assertion decoding will panic
+ // var i *int32
+ // if err = testUnmarshal(b, i, nil); err == nil {
+ // logT(t, "------- Expecting error because we cannot unmarshal to int32 nil ptr")
+ // t.FailNow()
+ // }
+ var i2 int32 = 0
+ err = testUnmarshalErr(&i2, b, h, t, "int32-ptr")
+ if i2 != int32(32) {
+ logT(t, "------- didn't unmarshal to 32: Received: %d", i2)
+ t.FailNow()
+ }
+
+ // func TestMsgpackDecodePtr(t *testing.T) {
+ ts := newTestStruc(0, false, !testSkipIntf, false)
+ b, err = testMarshalErr(ts, h, t, "pointer-to-struct")
+ if len(b) < 40 {
+ logT(t, "------- Size must be > 40. Size: %d", len(b))
+ t.FailNow()
+ }
+ if h.isBinary() {
+ logT(t, "------- b: %v", b)
+ } else {
+ logT(t, "------- b: %s", b)
+ }
+ ts2 := new(TestStruc)
+ err = testUnmarshalErr(ts2, b, h, t, "pointer-to-struct")
+ if ts2.I64 != math.MaxInt64*2/3 {
+ logT(t, "------- Unmarshal wrong. Expect I64 = math.MaxInt64*2/3. Got: %v", ts2.I64)
+ t.FailNow()
+ }
+
+ // func TestMsgpackIntfDecode(t *testing.T) {
+ m := map[string]int{"A": 2, "B": 3}
+ p := []interface{}{m}
+ bs, err := testMarshalErr(p, h, t, "p")
+
+ m2 := map[string]int{}
+ p2 := []interface{}{m2}
+ err = testUnmarshalErr(&p2, bs, h, t, "&p2")
+
+ if m2["A"] != 2 || m2["B"] != 3 {
+ logT(t, "m2 not as expected: expecting: %v, got: %v", m, m2)
+ t.FailNow()
+ }
+ // log("m: %v, m2: %v, p: %v, p2: %v", m, m2, p, p2)
+ checkEqualT(t, p, p2, "p=p2")
+ checkEqualT(t, m, m2, "m=m2")
+ if err = deepEqual(p, p2); err == nil {
+ logT(t, "p and p2 match")
+ } else {
+ logT(t, "Not Equal: %v. p: %v, p2: %v", err, p, p2)
+ t.FailNow()
+ }
+ if err = deepEqual(m, m2); err == nil {
+ logT(t, "m and m2 match")
+ } else {
+ logT(t, "Not Equal: %v. m: %v, m2: %v", err, m, m2)
+ t.FailNow()
+ }
+
+ // func TestMsgpackDecodeStructSubset(t *testing.T) {
+ // test that we can decode a subset of the stream
+ mm := map[string]interface{}{"A": 5, "B": 99, "C": 333}
+ bs, err = testMarshalErr(mm, h, t, "mm")
+ type ttt struct {
+ A uint8
+ C int32
+ }
+ var t2 ttt
+ testUnmarshalErr(&t2, bs, h, t, "t2")
+ t3 := ttt{5, 333}
+ checkEqualT(t, t2, t3, "t2=t3")
+
+ // println(">>>>>")
+ // test simple arrays, non-addressable arrays, slices
+ type tarr struct {
+ A int64
+ B [3]int64
+ C []byte
+ D [3]byte
+ }
+ var tarr0 = tarr{1, [3]int64{2, 3, 4}, []byte{4, 5, 6}, [3]byte{7, 8, 9}}
+ // test both pointer and non-pointer (value)
+ for _, tarr1 := range []interface{}{tarr0, &tarr0} {
+ bs, err = testMarshalErr(tarr1, h, t, "tarr1")
+ if err != nil {
+ logT(t, "Error marshalling: %v", err)
+ t.FailNow()
+ }
+ if _, ok := h.(*JsonHandle); ok {
+ logT(t, "Marshal as: %s", bs)
+ }
+ var tarr2 tarr
+ testUnmarshalErr(&tarr2, bs, h, t, "tarr2")
+ checkEqualT(t, tarr0, tarr2, "tarr0=tarr2")
+ // fmt.Printf(">>>> err: %v. tarr1: %v, tarr2: %v\n", err, tarr0, tarr2)
+ }
+
+ // test byte array, even if empty (msgpack only)
+ if h == testMsgpackH {
+ type ystruct struct {
+ Anarray []byte
+ }
+ var ya = ystruct{}
+ testUnmarshalErr(&ya, []byte{0x91, 0x90}, h, t, "ya")
+ }
+}
+
+func testCodecEmbeddedPointer(t *testing.T, h Handle) {
+ testOnce.Do(testInitAll)
+ type Z int
+ type A struct {
+ AnInt int
+ }
+ type B struct {
+ *Z
+ *A
+ MoreInt int
+ }
+ var z Z = 4
+ x1 := &B{&z, &A{5}, 6}
+ bs, err := testMarshalErr(x1, h, t, "x1")
+ // fmt.Printf("buf: len(%v): %x\n", buf.Len(), buf.Bytes())
+ var x2 = new(B)
+ err = testUnmarshalErr(x2, bs, h, t, "x2")
+ err = checkEqualT(t, x1, x2, "x1=x2")
+ _ = err
+}
+
+func testCodecUnderlyingType(t *testing.T, h Handle) {
+ testOnce.Do(testInitAll)
+ // Manual Test.
+ // Run by hand, with accompanying print statements in fast-path.go
+ // to ensure that the fast functions are called.
+ type T1 map[string]string
+ v := T1{"1": "1s", "2": "2s"}
+ var bs []byte
+ var err error
+ NewEncoderBytes(&bs, h).MustEncode(v)
+ if err != nil {
+ logT(t, "Error during encode: %v", err)
+ failT(t)
+ }
+ var v2 T1
+ NewDecoderBytes(bs, h).MustDecode(&v2)
+ if err != nil {
+ logT(t, "Error during decode: %v", err)
+ failT(t)
+ }
+}
+
+func testCodecChan(t *testing.T, h Handle) {
+ // - send a slice []*int64 (sl1) into a chan (ch1) with cap > len(sl1)
+ // - encode ch1 as a stream array
+ // - decode a chan (ch2), with cap > len(sl1), from the stream array
+ // - receive from ch2 into slice sl2
+ // - compare sl1 and sl2
+ // - do this for codecs: json, cbor (covers all types)
+ sl1 := make([]*int64, 4)
+ for i := range sl1 {
+ var j int64 = int64(i)
+ sl1[i] = &j
+ }
+ ch1 := make(chan *int64, 4)
+ for _, j := range sl1 {
+ ch1 <- j
+ }
+ var bs []byte
+ NewEncoderBytes(&bs, h).MustEncode(ch1)
+ // if !h.isBinary() {
+ // fmt.Printf("before: len(ch1): %v, bs: %s\n", len(ch1), bs)
+ // }
+ // var ch2 chan *int64 // this will block if json, etc.
+ ch2 := make(chan *int64, 8)
+ NewDecoderBytes(bs, h).MustDecode(&ch2)
+ // logT(t, "Len(ch2): %v", len(ch2))
+ // fmt.Printf("after: len(ch2): %v, ch2: %v\n", len(ch2), ch2)
+ close(ch2)
+ var sl2 []*int64
+ for j := range ch2 {
+ sl2 = append(sl2, j)
+ }
+ if err := deepEqual(sl1, sl2); err != nil {
+ logT(t, "Not Match: %v; len: %v, %v", err, len(sl1), len(sl2))
+ failT(t)
+ }
+}
+
+func testCodecRpcOne(t *testing.T, rr Rpc, h Handle, doRequest bool, exitSleepMs time.Duration,
+) (port int) {
+ testOnce.Do(testInitAll)
+ if testSkipRPCTests {
+ return
+ }
+ // rpc needs EOF, which is sent via a panic, and so must be recovered.
+ if !recoverPanicToErr {
+ logT(t, "EXPECTED. set recoverPanicToErr=true, since rpc needs EOF")
+ t.FailNow()
+ }
+ srv := rpc.NewServer()
+ srv.Register(testRpcInt)
+ ln, err := net.Listen("tcp", "127.0.0.1:0")
+ // log("listener: %v", ln.Addr())
+ checkErrT(t, err)
+ port = (ln.Addr().(*net.TCPAddr)).Port
+ // var opts *DecoderOptions
+ // opts := testDecOpts
+ // opts.MapType = mapStrIntfTyp
+ // opts.RawToString = false
+ serverExitChan := make(chan bool, 1)
+ var serverExitFlag uint64 = 0
+ serverFn := func() {
+ for {
+ conn1, err1 := ln.Accept()
+ // if err1 != nil {
+ // //fmt.Printf("accept err1: %v\n", err1)
+ // continue
+ // }
+ if atomic.LoadUint64(&serverExitFlag) == 1 {
+ serverExitChan <- true
+ conn1.Close()
+ return // exit serverFn goroutine
+ }
+ if err1 == nil {
+ var sc rpc.ServerCodec = rr.ServerCodec(conn1, h)
+ srv.ServeCodec(sc)
+ }
+ }
+ }
+
+ clientFn := func(cc rpc.ClientCodec) {
+ cl := rpc.NewClientWithCodec(cc)
+ defer cl.Close()
+ // defer func() { println("##### client closing"); cl.Close() }()
+ var up, sq, mult int
+ var rstr string
+ // log("Calling client")
+ checkErrT(t, cl.Call("TestRpcInt.Update", 5, &up))
+ // log("Called TestRpcInt.Update")
+ checkEqualT(t, testRpcInt.i, 5, "testRpcInt.i=5")
+ checkEqualT(t, up, 5, "up=5")
+ checkErrT(t, cl.Call("TestRpcInt.Square", 1, &sq))
+ checkEqualT(t, sq, 25, "sq=25")
+ checkErrT(t, cl.Call("TestRpcInt.Mult", 20, &mult))
+ checkEqualT(t, mult, 100, "mult=100")
+ checkErrT(t, cl.Call("TestRpcInt.EchoStruct", TestABC{"Aa", "Bb", "Cc"}, &rstr))
+ checkEqualT(t, rstr, fmt.Sprintf("%#v", TestABC{"Aa", "Bb", "Cc"}), "rstr=")
+ checkErrT(t, cl.Call("TestRpcInt.Echo123", []string{"A1", "B2", "C3"}, &rstr))
+ checkEqualT(t, rstr, fmt.Sprintf("%#v", []string{"A1", "B2", "C3"}), "rstr=")
+ }
+
+ connFn := func() (bs net.Conn) {
+ // log("calling f1")
+ bs, err2 := net.Dial(ln.Addr().Network(), ln.Addr().String())
+ //fmt.Printf("f1. bs: %v, err2: %v\n", bs, err2)
+ checkErrT(t, err2)
+ return
+ }
+
+ exitFn := func() {
+ atomic.StoreUint64(&serverExitFlag, 1)
+ bs := connFn()
+ <-serverExitChan
+ bs.Close()
+ // serverExitChan <- true
+ }
+
+ go serverFn()
+ runtime.Gosched()
+ //time.Sleep(100 * time.Millisecond)
+ if exitSleepMs == 0 {
+ defer ln.Close()
+ defer exitFn()
+ }
+ if doRequest {
+ bs := connFn()
+ cc := rr.ClientCodec(bs, h)
+ clientFn(cc)
+ }
+ if exitSleepMs != 0 {
+ go func() {
+ defer ln.Close()
+ time.Sleep(exitSleepMs)
+ exitFn()
+ }()
+ }
+ return
+}
+
+func doTestMapEncodeForCanonical(t *testing.T, name string, h Handle) {
+ v1 := map[string]interface{}{
+ "a": 1,
+ "b": "hello",
+ "c": map[string]interface{}{
+ "c/a": 1,
+ "c/b": "world",
+ "c/c": []int{1, 2, 3, 4},
+ "c/d": map[string]interface{}{
+ "c/d/a": "fdisajfoidsajfopdjsaopfjdsapofda",
+ "c/d/b": "fdsafjdposakfodpsakfopdsakfpodsakfpodksaopfkdsopafkdopsa",
+ "c/d/c": "poir02 ir30qif4p03qir0pogjfpoaerfgjp ofke[padfk[ewapf kdp[afep[aw",
+ "c/d/d": "fdsopafkd[sa f-32qor-=4qeof -afo-erfo r-eafo 4e- o r4-qwo ag",
+ "c/d/e": "kfep[a sfkr0[paf[a foe-[wq ewpfao-q ro3-q ro-4qof4-qor 3-e orfkropzjbvoisdb",
+ "c/d/f": "",
+ },
+ "c/e": map[int]string{
+ 1: "1",
+ 22: "22",
+ 333: "333",
+ 4444: "4444",
+ 55555: "55555",
+ },
+ "c/f": map[string]int{
+ "1": 1,
+ "22": 22,
+ "333": 333,
+ "4444": 4444,
+ "55555": 55555,
+ },
+ },
+ }
+ var v2 map[string]interface{}
+ var b1, b2 []byte
+
+ // encode v1 into b1, decode b1 into v2, encode v2 into b2, compare b1 and b2
+
+ bh := h.getBasicHandle()
+ if !bh.Canonical {
+ bh.Canonical = true
+ defer func() { bh.Canonical = false }()
+ }
+
+ e1 := NewEncoderBytes(&b1, h)
+ e1.MustEncode(v1)
+ d1 := NewDecoderBytes(b1, h)
+ d1.MustDecode(&v2)
+ e2 := NewEncoderBytes(&b2, h)
+ e2.MustEncode(v2)
+ if !bytes.Equal(b1, b2) {
+ logT(t, "Unequal bytes: %v VS %v", b1, b2)
+ t.FailNow()
+ }
+}
+
+// Comprehensive testing that generates data encoded by the python handle (cbor, msgpack),
+// and validates that our code can read and write it back out accordingly.
+// We keep this unexported here, and put the actual test in ext_dep_test.go.
+// This way, it can be excluded by excluding that file completely.
+func doTestPythonGenStreams(t *testing.T, name string, h Handle) {
+ logT(t, "TestPythonGenStreams-%v", name)
+ tmpdir, err := ioutil.TempDir("", "golang-"+name+"-test")
+ if err != nil {
+ logT(t, "-------- Unable to create temp directory\n")
+ t.FailNow()
+ }
+ defer os.RemoveAll(tmpdir)
+ logT(t, "tmpdir: %v", tmpdir)
+ cmd := exec.Command("python", "test.py", "testdata", tmpdir)
+ //cmd.Stdin = strings.NewReader("some input")
+ //cmd.Stdout = &out
+ var cmdout []byte
+ if cmdout, err = cmd.CombinedOutput(); err != nil {
+ logT(t, "-------- Error running test.py testdata. Err: %v", err)
+ logT(t, " %v", string(cmdout))
+ t.FailNow()
+ }
+
+ bh := h.getBasicHandle()
+
+ oldMapType := bh.MapType
+ for i, v := range tablePythonVerify {
+ // if v == uint64(0) && h == testMsgpackH {
+ // v = int64(0)
+ // }
+ bh.MapType = oldMapType
+ //load up the golden file based on number
+ //decode it
+ //compare to in-mem object
+ //encode it again
+ //compare to output stream
+ logT(t, "..............................................")
+ logT(t, " Testing: #%d: %T, %#v\n", i, v, v)
+ var bss []byte
+ bss, err = ioutil.ReadFile(filepath.Join(tmpdir, strconv.Itoa(i)+"."+name+".golden"))
+ if err != nil {
+ logT(t, "-------- Error reading golden file: %d. Err: %v", i, err)
+ failT(t)
+ continue
+ }
+ bh.MapType = testMapStrIntfTyp
+
+ var v1 interface{}
+ if err = testUnmarshal(&v1, bss, h); err != nil {
+ logT(t, "-------- Error decoding stream: %d: Err: %v", i, err)
+ failT(t)
+ continue
+ }
+ if v == skipVerifyVal {
+ continue
+ }
+ //no need to indirect, because we pass a nil ptr, so we already have the value
+ //if v1 != nil { v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() }
+ if err = deepEqual(v, v1); err == nil {
+ logT(t, "++++++++ Objects match: %T, %v", v, v)
+ } else {
+ logT(t, "-------- Objects do not match: %v. Source: %T. Decoded: %T", err, v, v1)
+ logT(t, "-------- GOLDEN: %#v", v)
+ // logT(t, "-------- DECODED: %#v <====> %#v", v1, reflect.Indirect(reflect.ValueOf(v1)).Interface())
+ logT(t, "-------- DECODED: %#v <====> %#v", v1, reflect.Indirect(reflect.ValueOf(v1)).Interface())
+ failT(t)
+ }
+ bsb, err := testMarshal(v1, h)
+ if err != nil {
+ logT(t, "Error encoding to stream: %d: Err: %v", i, err)
+ failT(t)
+ continue
+ }
+ if err = deepEqual(bsb, bss); err == nil {
+ logT(t, "++++++++ Bytes match")
+ } else {
+ logT(t, "???????? Bytes do not match. %v.", err)
+ xs := "--------"
+ if reflect.ValueOf(v).Kind() == reflect.Map {
+ xs = " "
+ logT(t, "%s It's a map. Ok that they don't match (dependent on ordering).", xs)
+ } else {
+ logT(t, "%s It's not a map. They should match.", xs)
+ failT(t)
+ }
+ logT(t, "%s FROM_FILE: %4d] %v", xs, len(bss), bss)
+ logT(t, "%s ENCODED: %4d] %v", xs, len(bsb), bsb)
+ }
+ }
+ bh.MapType = oldMapType
+}
+
+// To test MsgpackSpecRpc, we test 3 scenarios:
+// - Go Client to Go RPC Service (contained within TestMsgpackRpcSpec)
+// - Go client to Python RPC Service (contained within doTestMsgpackRpcSpecGoClientToPythonSvc)
+// - Python Client to Go RPC Service (contained within doTestMsgpackRpcSpecPythonClientToGoSvc)
+//
+// This allows us to test the different calling conventions
+// - Go Service requires only one argument
+// - Python Service allows multiple arguments
+
+func doTestMsgpackRpcSpecGoClientToPythonSvc(t *testing.T) {
+ if testSkipRPCTests {
+ return
+ }
+ // openPorts are between 6700 and 6800
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+ openPort := strconv.FormatInt(6700+r.Int63n(99), 10)
+ // openPort := "6792"
+ cmd := exec.Command("python", "test.py", "rpc-server", openPort, "4")
+ checkErrT(t, cmd.Start())
+ bs, err2 := net.Dial("tcp", ":"+openPort)
+ for i := 0; i < 10 && err2 != nil; i++ {
+ time.Sleep(50 * time.Millisecond) // time for python rpc server to start
+ bs, err2 = net.Dial("tcp", ":"+openPort)
+ }
+ checkErrT(t, err2)
+ cc := MsgpackSpecRpc.ClientCodec(bs, testMsgpackH)
+ cl := rpc.NewClientWithCodec(cc)
+ defer cl.Close()
+ var rstr string
+ checkErrT(t, cl.Call("EchoStruct", TestABC{"Aa", "Bb", "Cc"}, &rstr))
+ //checkEqualT(t, rstr, "{'A': 'Aa', 'B': 'Bb', 'C': 'Cc'}")
+ var mArgs MsgpackSpecRpcMultiArgs = []interface{}{"A1", "B2", "C3"}
+ checkErrT(t, cl.Call("Echo123", mArgs, &rstr))
+ checkEqualT(t, rstr, "1:A1 2:B2 3:C3", "rstr=")
+ cmd.Process.Kill()
+}
+
+func doTestMsgpackRpcSpecPythonClientToGoSvc(t *testing.T) {
+ if testSkipRPCTests {
+ return
+ }
+ port := testCodecRpcOne(t, MsgpackSpecRpc, testMsgpackH, false, 1*time.Second)
+ //time.Sleep(1000 * time.Millisecond)
+ cmd := exec.Command("python", "test.py", "rpc-client-go-service", strconv.Itoa(port))
+ var cmdout []byte
+ var err error
+ if cmdout, err = cmd.CombinedOutput(); err != nil {
+ logT(t, "-------- Error running test.py rpc-client-go-service. Err: %v", err)
+ logT(t, " %v", string(cmdout))
+ t.FailNow()
+ }
+ checkEqualT(t, string(cmdout),
+ fmt.Sprintf("%#v\n%#v\n", []string{"A1", "B2", "C3"}, TestABC{"Aa", "Bb", "Cc"}), "cmdout=")
+}
+
+func TestBincCodecsTable(t *testing.T) {
+ testCodecTableOne(t, testBincH)
+}
+
+func TestBincCodecsMisc(t *testing.T) {
+ testCodecMiscOne(t, testBincH)
+}
+
+func TestBincCodecsEmbeddedPointer(t *testing.T) {
+ testCodecEmbeddedPointer(t, testBincH)
+}
+
+func TestSimpleCodecsTable(t *testing.T) {
+ testCodecTableOne(t, testSimpleH)
+}
+
+func TestSimpleCodecsMisc(t *testing.T) {
+ testCodecMiscOne(t, testSimpleH)
+}
+
+func TestSimpleCodecsEmbeddedPointer(t *testing.T) {
+ testCodecEmbeddedPointer(t, testSimpleH)
+}
+
+func TestMsgpackCodecsTable(t *testing.T) {
+ testCodecTableOne(t, testMsgpackH)
+}
+
+func TestMsgpackCodecsMisc(t *testing.T) {
+ testCodecMiscOne(t, testMsgpackH)
+}
+
+func TestMsgpackCodecsEmbeddedPointer(t *testing.T) {
+ testCodecEmbeddedPointer(t, testMsgpackH)
+}
+
+func TestCborCodecsTable(t *testing.T) {
+ testCodecTableOne(t, testCborH)
+}
+
+func TestCborCodecsMisc(t *testing.T) {
+ testCodecMiscOne(t, testCborH)
+}
+
+func TestCborCodecsEmbeddedPointer(t *testing.T) {
+ testCodecEmbeddedPointer(t, testCborH)
+}
+
+func TestCborMapEncodeForCanonical(t *testing.T) {
+ doTestMapEncodeForCanonical(t, "cbor", testCborH)
+}
+
+func TestJsonCodecsTable(t *testing.T) {
+ testCodecTableOne(t, testJsonH)
+}
+
+func TestJsonCodecsMisc(t *testing.T) {
+ testCodecMiscOne(t, testJsonH)
+}
+
+func TestJsonCodecsEmbeddedPointer(t *testing.T) {
+ testCodecEmbeddedPointer(t, testJsonH)
+}
+
+func TestJsonCodecChan(t *testing.T) {
+ testCodecChan(t, testJsonH)
+}
+
+func TestCborCodecChan(t *testing.T) {
+ testCodecChan(t, testCborH)
+}
+
+// ----- RPC -----
+
+func TestBincRpcGo(t *testing.T) {
+ testCodecRpcOne(t, GoRpc, testBincH, true, 0)
+}
+
+func TestSimpleRpcGo(t *testing.T) {
+ testCodecRpcOne(t, GoRpc, testSimpleH, true, 0)
+}
+
+func TestMsgpackRpcGo(t *testing.T) {
+ testCodecRpcOne(t, GoRpc, testMsgpackH, true, 0)
+}
+
+func TestCborRpcGo(t *testing.T) {
+ testCodecRpcOne(t, GoRpc, testCborH, true, 0)
+}
+
+func TestJsonRpcGo(t *testing.T) {
+ testCodecRpcOne(t, GoRpc, testJsonH, true, 0)
+}
+
+func TestMsgpackRpcSpec(t *testing.T) {
+ testCodecRpcOne(t, MsgpackSpecRpc, testMsgpackH, true, 0)
+}
+
+func TestBincUnderlyingType(t *testing.T) {
+ testCodecUnderlyingType(t, testBincH)
+}
+
+// TODO:
+// Add Tests for:
+// - decoding empty list/map in stream into a nil slice/map
+// - binary(M|Unm)arsher support for time.Time (e.g. cbor encoding)
+// - text(M|Unm)arshaler support for time.Time (e.g. json encoding)
+// - non fast-path scenarios e.g. map[string]uint16, []customStruct.
+// Expand cbor to include indefinite length stuff for this non-fast-path types.
+// This may not be necessary, since we have the manual tests (fastpathEnabled=false) to test/validate with.
+// - CodecSelfer
+// Ensure it is called when (en|de)coding interface{} or reflect.Value (2 different codepaths).
+// - interfaces: textMarshaler, binaryMarshaler, codecSelfer
+// - struct tags:
+// on anonymous fields, _struct (all fields), etc
+// - codecgen of struct containing channels.
+//
+// Cleanup tests:
+// - They are brittle in their handling of validation and skipping
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen_test.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen_test.go
new file mode 100644
index 000000000..a73497e91
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen_test.go
@@ -0,0 +1,24 @@
+//+build x,codecgen
+
+package codec
+
+import (
+ "fmt"
+ "testing"
+)
+
+func _TestCodecgenJson1(t *testing.T) {
+ // This is just a simplistic test for codecgen.
+ // It is typically disabled. We only enable it for debugging purposes.
+ const callCodecgenDirect bool = true
+ v := newTestStruc(2, false, !testSkipIntf, false)
+ var bs []byte
+ e := NewEncoderBytes(&bs, testJsonH)
+ if callCodecgenDirect {
+ v.CodecEncodeSelf(e)
+ e.w.atEndOfEncode()
+ } else {
+ e.MustEncode(v)
+ }
+ fmt.Printf("%s\n", bs)
+}
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/decode.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/decode.go
index 71dd71c89..b3b99f036 100644
--- a/Godeps/_workspace/src/github.com/ugorji/go/codec/decode.go
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/decode.go
@@ -9,6 +9,7 @@ import (
"fmt"
"io"
"reflect"
+ "time"
)
// Some tagging information for error messages.
@@ -48,16 +49,23 @@ type decDriver interface {
// this will check if the next token is a break.
CheckBreak() bool
TryDecodeAsNil() bool
- // check if a container type: vt is one of: Bytes, String, Nil, Slice or Map.
- // if vt param == valueTypeNil, and nil is seen in stream, consume the nil.
- IsContainerType(vt valueType) bool
+ // vt is one of: Bytes, String, Nil, Slice or Map. Return unSet if not known.
+ ContainerType() (vt valueType)
IsBuiltinType(rt uintptr) bool
DecodeBuiltin(rt uintptr, v interface{})
- //decodeNaked: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types).
- //for extensions, decodeNaked must completely decode them as a *RawExt.
- //extensions should also use readx to decode them, for efficiency.
- //kInterface will extract the detached byte slice if it has to pass it outside its realm.
- DecodeNaked() (v interface{}, vt valueType, decodeFurther bool)
+
+ // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt.
+ // For maps and arrays, it will not do the decoding in-band, but will signal
+ // the decoder, so that is done later, by setting the decNaked.valueType field.
+ //
+ // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types).
+ // for extensions, DecodeNaked must read the tag and the []byte if it exists.
+ // if the []byte is not read, then kInterfaceNaked will treat it as a Handle
+ // that stores the subsequent value in-band, and complete reading the RawExt.
+ //
+ // extensions should also use readx to decode them, for efficiency.
+ // kInterface will extract the detached byte slice if it has to pass it outside its realm.
+ DecodeNaked()
DecodeInt(bitsize uint8) (i int64)
DecodeUint(bitsize uint8) (ui uint64)
DecodeFloat(chkOverflow32 bool) (f float64)
@@ -78,13 +86,15 @@ type decDriver interface {
// decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte)
ReadMapStart() int
ReadArrayStart() int
- // ReadEnd registers the end of a map or array.
- ReadEnd()
+
+ reset()
+ uncacheRead()
}
type decNoSeparator struct{}
-func (_ decNoSeparator) ReadEnd() {}
+func (_ decNoSeparator) ReadEnd() {}
+func (_ decNoSeparator) uncacheRead() {}
type DecodeOptions struct {
// MapType specifies type to use during schema-less decoding of a map in the stream.
@@ -326,6 +336,13 @@ type bytesDecReader struct {
t int // track start
}
+func (z *bytesDecReader) reset(in []byte) {
+ z.b = in
+ z.a = len(in)
+ z.c = 0
+ z.t = 0
+}
+
func (z *bytesDecReader) numread() int {
return z.c
}
@@ -460,12 +477,8 @@ func (f *decFnInfo) textUnmarshal(rv reflect.Value) {
func (f *decFnInfo) jsonUnmarshal(rv reflect.Value) {
tm := f.getValueForUnmarshalInterface(rv, f.ti.junmIndir).(jsonUnmarshaler)
// bs := f.d.d.DecodeBytes(f.d.b[:], true, true)
- // grab the bytes to be read, as UnmarshalJSON wants the full JSON to unmarshal it itself.
- f.d.r.track()
- f.d.swallow()
- bs := f.d.r.stopTrack()
- // fmt.Printf(">>>>>> REFLECTION JSON: %s\n", bs)
- fnerr := tm.UnmarshalJSON(bs)
+ // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
+ fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
if fnerr != nil {
panic(fnerr)
}
@@ -549,63 +562,96 @@ func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) {
// nil interface:
// use some heuristics to decode it appropriately
// based on the detected next value in the stream.
- v, vt, decodeFurther := f.d.d.DecodeNaked()
- if vt == valueTypeNil {
+ d := f.d
+ d.d.DecodeNaked()
+ n := &d.n
+ if n.v == valueTypeNil {
return
}
// We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader).
- if num := f.ti.rt.NumMethod(); num > 0 {
- f.d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, num)
+ // if num := f.ti.rt.NumMethod(); num > 0 {
+ if f.ti.numMeth > 0 {
+ d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth)
return
}
- var useRvn bool
- switch vt {
+ // var useRvn bool
+ switch n.v {
case valueTypeMap:
- if f.d.h.MapType == nil {
- var m2 map[interface{}]interface{}
- v = &m2
+ // if d.h.MapType == nil || d.h.MapType == mapIntfIntfTyp {
+ // } else if d.h.MapType == mapStrIntfTyp { // for json performance
+ // }
+ if d.mtid == 0 || d.mtid == mapIntfIntfTypId {
+ l := len(n.ms)
+ n.ms = append(n.ms, nil)
+ d.decode(&n.ms[l])
+ rvn = reflect.ValueOf(&n.ms[l]).Elem()
+ n.ms = n.ms[:l]
+ } else if d.mtid == mapStrIntfTypId { // for json performance
+ l := len(n.ns)
+ n.ns = append(n.ns, nil)
+ d.decode(&n.ns[l])
+ rvn = reflect.ValueOf(&n.ns[l]).Elem()
+ n.ns = n.ns[:l]
} else {
- rvn = reflect.New(f.d.h.MapType).Elem()
- useRvn = true
+ rvn = reflect.New(d.h.MapType).Elem()
+ d.decodeValue(rvn, nil)
}
case valueTypeArray:
- if f.d.h.SliceType == nil {
- var m2 []interface{}
- v = &m2
+ // if d.h.SliceType == nil || d.h.SliceType == intfSliceTyp {
+ if d.stid == 0 || d.stid == intfSliceTypId {
+ l := len(n.ss)
+ n.ss = append(n.ss, nil)
+ d.decode(&n.ss[l])
+ rvn = reflect.ValueOf(&n.ss[l]).Elem()
+ n.ss = n.ss[:l]
} else {
- rvn = reflect.New(f.d.h.SliceType).Elem()
- useRvn = true
+ rvn = reflect.New(d.h.SliceType).Elem()
+ d.decodeValue(rvn, nil)
}
case valueTypeExt:
- re := v.(*RawExt)
- bfn := f.d.h.getExtForTag(re.Tag)
+ var v interface{}
+ tag, bytes := n.u, n.l // calling decode below might taint the values
+ if bytes == nil {
+ l := len(n.is)
+ n.is = append(n.is, nil)
+ v2 := &n.is[l]
+ n.is = n.is[:l]
+ d.decode(v2)
+ v = *v2
+ }
+ bfn := d.h.getExtForTag(tag)
if bfn == nil {
- re.Data = detachZeroCopyBytes(f.d.bytes, nil, re.Data)
- rvn = reflect.ValueOf(*re)
+ var re RawExt
+ re.Tag = tag
+ re.Data = detachZeroCopyBytes(d.bytes, nil, bytes)
+ rvn = reflect.ValueOf(re)
} else {
rvnA := reflect.New(bfn.rt)
rvn = rvnA.Elem()
- if re.Data != nil {
- bfn.ext.ReadExt(rvnA.Interface(), re.Data)
+ if bytes != nil {
+ bfn.ext.ReadExt(rvnA.Interface(), bytes)
} else {
- bfn.ext.UpdateExt(rvnA.Interface(), re.Value)
+ bfn.ext.UpdateExt(rvnA.Interface(), v)
}
}
- return
- }
- if decodeFurther {
- if useRvn {
- f.d.decodeValue(rvn, nil)
- } else if v != nil {
- // this v is a pointer, so we need to dereference it when done
- f.d.decode(v)
- rvn = reflect.ValueOf(v).Elem()
- useRvn = true
- }
- }
-
- if !useRvn && v != nil {
- rvn = reflect.ValueOf(v)
+ case valueTypeNil:
+ // no-op
+ case valueTypeInt:
+ rvn = reflect.ValueOf(&n.i).Elem()
+ case valueTypeUint:
+ rvn = reflect.ValueOf(&n.u).Elem()
+ case valueTypeFloat:
+ rvn = reflect.ValueOf(&n.f).Elem()
+ case valueTypeBool:
+ rvn = reflect.ValueOf(&n.b).Elem()
+ case valueTypeString, valueTypeSymbol:
+ rvn = reflect.ValueOf(&n.s).Elem()
+ case valueTypeBytes:
+ rvn = reflect.ValueOf(&n.l).Elem()
+ case valueTypeTimestamp:
+ rvn = reflect.ValueOf(&n.t).Elem()
+ default:
+ panic(fmt.Errorf("kInterfaceNaked: unexpected valueType: %d", n.v))
}
return
}
@@ -655,10 +701,14 @@ func (f *decFnInfo) kStruct(rv reflect.Value) {
fti := f.ti
d := f.d
dd := d.d
- if dd.IsContainerType(valueTypeMap) {
+ cr := d.cr
+ ctyp := dd.ContainerType()
+ if ctyp == valueTypeMap {
containerLen := dd.ReadMapStart()
if containerLen == 0 {
- dd.ReadEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
return
}
tisfi := fti.sfi
@@ -666,8 +716,14 @@ func (f *decFnInfo) kStruct(rv reflect.Value) {
if hasLen {
for j := 0; j < containerLen; j++ {
// rvkencname := dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true))
// rvksi := ti.getForEncName(rvkencname)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if k := fti.indexForEncName(rvkencname); k > -1 {
si := tisfi[k]
if dd.TryDecodeAsNil() {
@@ -682,8 +738,14 @@ func (f *decFnInfo) kStruct(rv reflect.Value) {
} else {
for j := 0; !dd.CheckBreak(); j++ {
// rvkencname := dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true))
// rvksi := ti.getForEncName(rvkencname)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if k := fti.indexForEncName(rvkencname); k > -1 {
si := tisfi[k]
if dd.TryDecodeAsNil() {
@@ -695,12 +757,16 @@ func (f *decFnInfo) kStruct(rv reflect.Value) {
d.structFieldNotFound(-1, rvkencname)
}
}
- dd.ReadEnd()
}
- } else if dd.IsContainerType(valueTypeArray) {
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ } else if ctyp == valueTypeArray {
containerLen := dd.ReadArrayStart()
if containerLen == 0 {
- dd.ReadEnd()
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
return
}
// Not much gain from doing it two ways for array.
@@ -714,6 +780,9 @@ func (f *decFnInfo) kStruct(rv reflect.Value) {
} else if dd.CheckBreak() {
break
}
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
if dd.TryDecodeAsNil() {
si.setToZeroValue(rv)
} else {
@@ -723,10 +792,15 @@ func (f *decFnInfo) kStruct(rv reflect.Value) {
if containerLen > len(fti.sfip) {
// read remaining values and throw away
for j := len(fti.sfip); j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
d.structFieldNotFound(j, "")
}
}
- dd.ReadEnd()
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
} else {
f.d.error(onlyMapOrArrayCanDecodeIntoStructErr)
return
@@ -740,59 +814,50 @@ func (f *decFnInfo) kSlice(rv reflect.Value) {
d := f.d
dd := d.d
rtelem0 := ti.rt.Elem()
-
- if dd.IsContainerType(valueTypeBytes) || dd.IsContainerType(valueTypeString) {
- if ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8 {
- if f.seq == seqTypeChan {
- bs2 := dd.DecodeBytes(nil, false, true)
- ch := rv.Interface().(chan<- byte)
- for _, b := range bs2 {
- ch <- b
- }
- } else {
- rvbs := rv.Bytes()
- bs2 := dd.DecodeBytes(rvbs, false, false)
- if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) {
- if rv.CanSet() {
- rv.SetBytes(bs2)
- } else {
- copy(rvbs, bs2)
- }
+ ctyp := dd.ContainerType()
+ if ctyp == valueTypeBytes || ctyp == valueTypeString {
+ // you can only decode bytes or string in the stream into a slice or array of bytes
+ if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) {
+ f.d.errorf("bytes or string in the stream must be decoded into a slice or array of bytes, not %v", ti.rt)
+ }
+ if f.seq == seqTypeChan {
+ bs2 := dd.DecodeBytes(nil, false, true)
+ ch := rv.Interface().(chan<- byte)
+ for _, b := range bs2 {
+ ch <- b
+ }
+ } else {
+ rvbs := rv.Bytes()
+ bs2 := dd.DecodeBytes(rvbs, false, false)
+ if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) {
+ if rv.CanSet() {
+ rv.SetBytes(bs2)
+ } else {
+ copy(rvbs, bs2)
}
}
- return
}
+ return
}
// array := f.seq == seqTypeChan
- slh, containerLenS := d.decSliceHelperStart()
-
- var rvlen, numToRead int
- var truncated bool // says that the len of the sequence is not same as the expected number of elements.
-
- numToRead = containerLenS // if truncated, reset numToRead
-
- // an array can never return a nil slice. so no need to check f.array here.
- if rv.IsNil() {
- // either chan or slice
- if rvlen, truncated = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size())); truncated {
- numToRead = rvlen
- }
- if f.seq == seqTypeSlice {
- rv.Set(reflect.MakeSlice(ti.rt, rvlen, rvlen))
- } else if f.seq == seqTypeChan {
- rv.Set(reflect.MakeChan(ti.rt, rvlen))
- }
- } else {
- rvlen = rv.Len()
- }
+ slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map)
+ // // an array can never return a nil slice. so no need to check f.array here.
if containerLenS == 0 {
- if f.seq == seqTypeSlice && rvlen != 0 {
- rv.SetLen(0)
+ if f.seq == seqTypeSlice {
+ if rv.IsNil() {
+ rv.Set(reflect.MakeSlice(ti.rt, 0, 0))
+ } else {
+ rv.SetLen(0)
+ }
+ } else if f.seq == seqTypeChan {
+ if rv.IsNil() {
+ rv.Set(reflect.MakeChan(ti.rt, 0))
+ }
}
- // dd.ReadEnd()
+ slh.End()
return
}
@@ -806,30 +871,48 @@ func (f *decFnInfo) kSlice(rv reflect.Value) {
rv0 = rv
rvChanged := false
- rvcap := rv.Cap()
-
// for j := 0; j < containerLenS; j++ {
-
- if containerLenS >= 0 { // hasLen
+ var rvlen int
+ if containerLenS > 0 { // hasLen
if f.seq == seqTypeChan {
+ if rv.IsNil() {
+ rvlen, _ = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size()))
+ rv.Set(reflect.MakeChan(ti.rt, rvlen))
+ }
// handle chan specially:
for j := 0; j < containerLenS; j++ {
rv9 = reflect.New(rtelem0).Elem()
+ slh.ElemContainerState(j)
d.decodeValue(rv9, fn)
rv.Send(rv9)
}
} else { // slice or array
+ var truncated bool // says len of sequence is not same as expected number of elements
+ numToRead := containerLenS // if truncated, reset numToRead
+
+ rvcap := rv.Cap()
+ rvlen = rv.Len()
if containerLenS > rvcap {
if f.seq == seqTypeArray {
d.arrayCannotExpand(rvlen, containerLenS)
} else {
oldRvlenGtZero := rvlen > 0
rvlen, truncated = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size()))
- rv = reflect.MakeSlice(ti.rt, rvlen, rvlen)
- if oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) {
+ if truncated {
+ if rvlen <= rvcap {
+ rv.SetLen(rvlen)
+ } else {
+ rv = reflect.MakeSlice(ti.rt, rvlen, rvlen)
+ rvChanged = true
+ }
+ } else {
+ rv = reflect.MakeSlice(ti.rt, rvlen, rvlen)
+ rvChanged = true
+ }
+ if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) {
reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap)
}
- rvChanged = true
+ rvcap = rvlen
}
numToRead = rvlen
} else if containerLenS != rvlen {
@@ -841,6 +924,7 @@ func (f *decFnInfo) kSlice(rv reflect.Value) {
j := 0
// we read up to the numToRead
for ; j < numToRead; j++ {
+ slh.ElemContainerState(j)
d.decodeValue(rv.Index(j), fn)
}
@@ -849,6 +933,7 @@ func (f *decFnInfo) kSlice(rv reflect.Value) {
if f.seq == seqTypeArray {
for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
d.swallow()
}
} else if truncated { // slice was truncated, as chan NOT in this block
@@ -858,44 +943,59 @@ func (f *decFnInfo) kSlice(rv reflect.Value) {
if resetSliceElemToZeroValue {
rv9.Set(reflect.Zero(rtelem0))
}
+ slh.ElemContainerState(j)
d.decodeValue(rv9, fn)
}
}
}
} else {
- for j := 0; !dd.CheckBreak(); j++ {
- var decodeIntoBlank bool
- // if indefinite, etc, then expand the slice if necessary
- if j >= rvlen {
- if f.seq == seqTypeArray {
- d.arrayCannotExpand(rvlen, j+1)
- decodeIntoBlank = true
- } else if f.seq == seqTypeSlice {
- // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // uses append logic, plus varargs
- rv = expandSliceValue(rv, 1)
- rv9 = rv.Index(j)
- // rv.Index(rv.Len() - 1).Set(reflect.Zero(rtelem0))
- if resetSliceElemToZeroValue {
- rv9.Set(reflect.Zero(rtelem0))
- }
- rvlen++
- rvChanged = true
- }
- } else if f.seq != seqTypeChan { // slice or array
- rv9 = rv.Index(j)
- }
+ rvlen = rv.Len()
+ j := 0
+ for ; !dd.CheckBreak(); j++ {
if f.seq == seqTypeChan {
+ slh.ElemContainerState(j)
rv9 = reflect.New(rtelem0).Elem()
d.decodeValue(rv9, fn)
rv.Send(rv9)
- } else if decodeIntoBlank {
- d.swallow()
- } else { // seqTypeSlice
- d.decodeValue(rv9, fn)
+ } else {
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= rvlen {
+ if f.seq == seqTypeArray {
+ d.arrayCannotExpand(rvlen, j+1)
+ decodeIntoBlank = true
+ } else { // if f.seq == seqTypeSlice
+ // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // uses append logic, plus varargs
+ rv = expandSliceValue(rv, 1)
+ rv9 = rv.Index(j)
+ // rv.Index(rv.Len() - 1).Set(reflect.Zero(rtelem0))
+ if resetSliceElemToZeroValue {
+ rv9.Set(reflect.Zero(rtelem0))
+ }
+ rvlen++
+ rvChanged = true
+ }
+ } else { // slice or array
+ rv9 = rv.Index(j)
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else { // seqTypeSlice
+ d.decodeValue(rv9, fn)
+ }
+ }
+ }
+ if f.seq == seqTypeSlice {
+ if j < rvlen {
+ rv.SetLen(j)
+ } else if j == 0 && rv.IsNil() {
+ rv = reflect.MakeSlice(ti.rt, 0, 0)
+ rvChanged = true
}
}
- slh.End()
}
+ slh.End()
if rvChanged {
rv0.Set(rv)
@@ -911,20 +1011,22 @@ func (f *decFnInfo) kMap(rv reflect.Value) {
d := f.d
dd := d.d
containerLen := dd.ReadMapStart()
-
+ cr := d.cr
ti := f.ti
if rv.IsNil() {
rv.Set(reflect.MakeMap(ti.rt))
}
if containerLen == 0 {
- // It is not length-prefix style container. They have no End marker.
- // dd.ReadMapEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
return
}
ktype, vtype := ti.rt.Key(), ti.rt.Elem()
ktypeId := reflect.ValueOf(ktype).Pointer()
+ vtypeKind := vtype.Kind()
var keyFn, valFn *decFn
var xtyp reflect.Type
for xtyp = ktype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() {
@@ -933,13 +1035,12 @@ func (f *decFnInfo) kMap(rv reflect.Value) {
for xtyp = vtype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() {
}
valFn = d.getDecFn(xtyp, true, true)
- var mapGet bool
+ var mapGet, mapSet bool
if !f.d.h.MapValueReset {
// if pointer, mapGet = true
// if interface, mapGet = true if !DecodeNakedAlways (else false)
// if builtin, mapGet = false
// else mapGet = true
- vtypeKind := vtype.Kind()
if vtypeKind == reflect.Ptr {
mapGet = true
} else if vtypeKind == reflect.Interface {
@@ -951,12 +1052,15 @@ func (f *decFnInfo) kMap(rv reflect.Value) {
}
}
- var rvk, rvv reflect.Value
+ var rvk, rvv, rvz reflect.Value
// for j := 0; j < containerLen; j++ {
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
rvk = reflect.New(ktype).Elem()
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
d.decodeValue(rvk, keyFn)
// special case if a byte array.
@@ -966,20 +1070,43 @@ func (f *decFnInfo) kMap(rv reflect.Value) {
rvk = reflect.ValueOf(d.string(rvk.Bytes()))
}
}
+ mapSet = true // set to false if you do a get, and it's a pointer, and it exists
if mapGet {
rvv = rv.MapIndex(rvk)
- if !rvv.IsValid() {
- rvv = reflect.New(vtype).Elem()
+ if rvv.IsValid() {
+ if vtypeKind == reflect.Ptr {
+ mapSet = false
+ }
+ } else {
+ if rvz.IsValid() {
+ rvz.Set(reflect.Zero(vtype))
+ } else {
+ rvz = reflect.New(vtype).Elem()
+ }
+ rvv = rvz
}
} else {
- rvv = reflect.New(vtype).Elem()
+ if rvz.IsValid() {
+ rvz.Set(reflect.Zero(vtype))
+ } else {
+ rvz = reflect.New(vtype).Elem()
+ }
+ rvv = rvz
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
}
d.decodeValue(rvv, valFn)
- rv.SetMapIndex(rvk, rvv)
+ if mapSet {
+ rv.SetMapIndex(rvk, rvv)
+ }
}
} else {
for j := 0; !dd.CheckBreak(); j++ {
rvk = reflect.New(ktype).Elem()
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
d.decodeValue(rvk, keyFn)
// special case if a byte array.
@@ -989,18 +1116,40 @@ func (f *decFnInfo) kMap(rv reflect.Value) {
rvk = reflect.ValueOf(d.string(rvk.Bytes()))
}
}
+ mapSet = true // set to false if you do a get, and it's a pointer, and it exists
if mapGet {
rvv = rv.MapIndex(rvk)
- if !rvv.IsValid() {
- rvv = reflect.New(vtype).Elem()
+ if rvv.IsValid() {
+ if vtypeKind == reflect.Ptr {
+ mapSet = false
+ }
+ } else {
+ if rvz.IsValid() {
+ rvz.Set(reflect.Zero(vtype))
+ } else {
+ rvz = reflect.New(vtype).Elem()
+ }
+ rvv = rvz
}
} else {
- rvv = reflect.New(vtype).Elem()
+ if rvz.IsValid() {
+ rvz.Set(reflect.Zero(vtype))
+ } else {
+ rvz = reflect.New(vtype).Elem()
+ }
+ rvv = rvz
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
}
d.decodeValue(rvv, valFn)
- rv.SetMapIndex(rvk, rvv)
+ if mapSet {
+ rv.SetMapIndex(rvk, rvv)
+ }
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
}
@@ -1009,6 +1158,65 @@ type decRtidFn struct {
fn decFn
}
+// decNaked is used to keep track of the primitives decoded.
+// Without it, we would have to decode each primitive and wrap it
+// in an interface{}, causing an allocation.
+// In this model, the primitives are decoded in a "pseudo-atomic" fashion,
+// so we can rest assured that no other decoding happens while these
+// primitives are being decoded.
+//
+// maps and arrays are not handled by this mechanism.
+// However, RawExt is, and we accommodate extensions that decode
+// RawExt from DecodeNaked, but need to decode the value subsequently.
+// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat.
+//
+// However, decNaked also keeps some arrays of default maps and slices
+// used in DecodeNaked. This way, we can get a pointer to it
+// without causing a new heap allocation.
+//
+// kInterfaceNaked will ensure that there is no allocation for the common
+// uses.
+type decNaked struct {
+ // r RawExt // used for RawExt, uint, []byte.
+ u uint64
+ i int64
+ f float64
+ l []byte
+ s string
+ t time.Time
+ b bool
+ v valueType
+
+ // stacks for reducing allocation
+ is []interface{}
+ ms []map[interface{}]interface{}
+ ns []map[string]interface{}
+ ss [][]interface{}
+ // rs []RawExt
+
+ // keep arrays at the bottom? Chances are that they are not used much.
+ ia [4]interface{}
+ ma [4]map[interface{}]interface{}
+ na [4]map[string]interface{}
+ sa [4][]interface{}
+ // ra [2]RawExt
+}
+
+func (n *decNaked) reset() {
+ if n.ss != nil {
+ n.ss = n.ss[:0]
+ }
+ if n.is != nil {
+ n.is = n.is[:0]
+ }
+ if n.ms != nil {
+ n.ms = n.ms[:0]
+ }
+ if n.ns != nil {
+ n.ns = n.ns[:0]
+ }
+}
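+
+// decNakedScratchSketch is an editor's illustrative sketch, not part of the
+// upstream patch (the type name is hypothetical): it isolates the
+// backing-array pattern decNaked uses above. Re-slicing a fixed-size array
+// held inside the struct yields a small working slice that can grow up to the
+// array length without a separate heap allocation, and reset just truncates
+// it for reuse.
+type decNakedScratchSketch struct {
+ ia [4]interface{} // backing array, allocated once with the struct
+ is []interface{} // working slice, re-sliced from ia
+}
+
+func (x *decNakedScratchSketch) init() { x.is = x.ia[:0] }
+func (x *decNakedScratchSketch) reset() { x.is = x.is[:0] }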
+
// A Decoder reads and decodes an object from an input stream in the codec format.
type Decoder struct {
// hopefully, reduce dereferencing cost by laying the decReader inside the Decoder.
@@ -1019,32 +1227,84 @@ type Decoder struct {
// as the handler MAY need to do some coordination.
r decReader
// sa [initCollectionCap]decRtidFn
- s []decRtidFn
- h *BasicHandle
+ h *BasicHandle
+ hh Handle
- rb bytesDecReader
- hh Handle
be bool // is binary encoding
bytes bool // is bytes reader
js bool // is json handle
+ rb bytesDecReader
ri ioDecReader
- f map[uintptr]*decFn
- is map[string]string // used for interning strings
+ cr containerStateRecv
+
+ s []decRtidFn
+ f map[uintptr]*decFn
// _ uintptr // for alignment purposes, so next one starts from a cache line
- b [scratchByteArrayLen]byte
+ // cache the mapTypeId and sliceTypeId for faster comparisons
+ mtid uintptr
+ stid uintptr
+
+ n decNaked
+ b [scratchByteArrayLen]byte
+ is map[string]string // used for interning strings
}
// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
//
// For efficiency, Users are encouraged to pass in a memory buffered reader
// (eg bufio.Reader, bytes.Buffer).
-func NewDecoder(r io.Reader, h Handle) (d *Decoder) {
- d = &Decoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()}
- // d.s = d.sa[:0]
+func NewDecoder(r io.Reader, h Handle) *Decoder {
+ d := newDecoder(h)
+ d.Reset(r)
+ return d
+}
+
+// NewDecoderBytes returns a Decoder which efficiently decodes directly
+// from a byte slice with zero copying.
+func NewDecoderBytes(in []byte, h Handle) *Decoder {
+ d := newDecoder(h)
+ d.ResetBytes(in)
+ return d
+}
+
+func newDecoder(h Handle) *Decoder {
+ d := &Decoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()}
+ n := &d.n
+ // n.rs = n.ra[:0]
+ n.ms = n.ma[:0]
+ n.is = n.ia[:0]
+ n.ns = n.na[:0]
+ n.ss = n.sa[:0]
+ _, d.js = h.(*JsonHandle)
+ if d.h.InternString {
+ d.is = make(map[string]string, 32)
+ }
+ d.d = h.newDecDriver(d)
+ d.cr, _ = d.d.(containerStateRecv)
+ // d.d = h.newDecDriver(decReaderT{true, &d.rb, &d.ri})
+ return d
+}
+
+func (d *Decoder) resetCommon() {
+ d.n.reset()
+ d.d.reset()
+ // reset all things which were cached from the Handle,
+ // but could be changed.
+ d.mtid, d.stid = 0, 0
+ if d.h.MapType != nil {
+ d.mtid = reflect.ValueOf(d.h.MapType).Pointer()
+ }
+ if d.h.SliceType != nil {
+ d.stid = reflect.ValueOf(d.h.SliceType).Pointer()
+ }
+}
+
+func (d *Decoder) Reset(r io.Reader) {
d.ri.x = &d.b
+ // d.s = d.sa[:0]
d.ri.bs.r = r
var ok bool
d.ri.br, ok = r.(decReaderByteScanner)
@@ -1052,31 +1312,22 @@ func NewDecoder(r io.Reader, h Handle) (d *Decoder) {
d.ri.br = &d.ri.bs
}
d.r = &d.ri
- if d.h.InternString {
- d.is = make(map[string]string, 32)
- }
- _, d.js = h.(*JsonHandle)
- d.d = h.newDecDriver(d)
- return
+ d.resetCommon()
}
-// NewDecoderBytes returns a Decoder which efficiently decodes directly
-// from a byte slice with zero copying.
-func NewDecoderBytes(in []byte, h Handle) (d *Decoder) {
- d = &Decoder{hh: h, h: h.getBasicHandle(), be: h.isBinary(), bytes: true}
+func (d *Decoder) ResetBytes(in []byte) {
// d.s = d.sa[:0]
- d.rb.b = in
- d.rb.a = len(in)
+ d.rb.reset(in)
d.r = &d.rb
- if d.h.InternString {
- d.is = make(map[string]string, 32)
- }
- _, d.js = h.(*JsonHandle)
- d.d = h.newDecDriver(d)
- // d.d = h.newDecDriver(decReaderT{true, &d.rb, &d.ri})
- return
+ d.resetCommon()
}
+// func (d *Decoder) sendContainerState(c containerState) {
+// if d.cr != nil {
+// d.cr.sendContainerState(c)
+// }
+// }
+
// Decode decodes the stream from reader and stores the result in the
// value pointed to by v. v cannot be a nil pointer. v can also be
// a reflect.Value of a pointer.
@@ -1142,9 +1393,12 @@ func (d *Decoder) swallowViaHammer() {
func (d *Decoder) swallow() {
// smarter decode that just swallows the content
dd := d.d
- switch {
- case dd.TryDecodeAsNil():
- case dd.IsContainerType(valueTypeMap):
+ if dd.TryDecodeAsNil() {
+ return
+ }
+ cr := d.cr
+ switch dd.ContainerType() {
+ case valueTypeMap:
containerLen := dd.ReadMapStart()
clenGtEqualZero := containerLen >= 0
for j := 0; ; j++ {
@@ -1155,11 +1409,19 @@ func (d *Decoder) swallow() {
} else if dd.CheckBreak() {
break
}
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
d.swallow()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
d.swallow()
}
- dd.ReadEnd()
- case dd.IsContainerType(valueTypeArray):
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
+ case valueTypeArray:
containerLenS := dd.ReadArrayStart()
clenGtEqualZero := containerLenS >= 0
for j := 0; ; j++ {
@@ -1170,17 +1432,30 @@ func (d *Decoder) swallow() {
} else if dd.CheckBreak() {
break
}
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
d.swallow()
}
- dd.ReadEnd()
- case dd.IsContainerType(valueTypeBytes):
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
+ case valueTypeBytes:
dd.DecodeBytes(d.b[:], false, true)
- case dd.IsContainerType(valueTypeString):
+ case valueTypeString:
dd.DecodeBytes(d.b[:], true, true)
// dd.DecodeStringAsBytes(d.b[:])
default:
// these are all primitives, which we can get from decodeNaked
+ // if RawExt using Value, complete the processing.
dd.DecodeNaked()
+ if n := &d.n; n.v == valueTypeExt && n.l == nil {
+ l := len(n.is)
+ n.is = append(n.is, nil)
+ v2 := &n.is[l]
+ n.is = n.is[:l]
+ d.decode(v2)
+ }
}
}
@@ -1230,14 +1505,20 @@ func (d *Decoder) decode(iv interface{}) {
case *[]uint8:
*v = nil
case reflect.Value:
- d.chkPtrValue(v)
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ d.errNotValidPtrValue(v)
+ }
+ // d.chkPtrValue(v)
v = v.Elem()
if v.IsValid() {
v.Set(reflect.Zero(v.Type()))
}
default:
rv := reflect.ValueOf(iv)
- d.chkPtrValue(rv)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ d.errNotValidPtrValue(rv)
+ }
+ // d.chkPtrValue(rv)
rv = rv.Elem()
if rv.IsValid() {
rv.Set(reflect.Zero(rv.Type()))
@@ -1255,7 +1536,10 @@ func (d *Decoder) decode(iv interface{}) {
v.CodecDecodeSelf(d)
case reflect.Value:
- d.chkPtrValue(v)
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ d.errNotValidPtrValue(v)
+ }
+ // d.chkPtrValue(v)
d.decodeValueNotNil(v.Elem(), nil)
case *string:
@@ -1325,7 +1609,10 @@ func (d *Decoder) preDecodeValue(rv reflect.Value, tryNil bool) (rv2 reflect.Val
func (d *Decoder) decodeI(iv interface{}, checkPtr, tryNil, checkFastpath, checkCodecSelfer bool) {
rv := reflect.ValueOf(iv)
if checkPtr {
- d.chkPtrValue(rv)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ d.errNotValidPtrValue(rv)
+ }
+ // d.chkPtrValue(rv)
}
rv, proceed := d.preDecodeValue(rv, tryNil)
if proceed {
@@ -1529,6 +1816,10 @@ func (d *Decoder) chkPtrValue(rv reflect.Value) {
if rv.Kind() == reflect.Ptr && !rv.IsNil() {
return
}
+ d.errNotValidPtrValue(rv)
+}
+
+func (d *Decoder) errNotValidPtrValue(rv reflect.Value) {
if !rv.IsValid() {
d.error(cannotDecodeIntoNilErr)
return
@@ -1571,31 +1862,65 @@ func (d *Decoder) intern(s string) {
}
}
+func (d *Decoder) nextValueBytes() []byte {
+ d.d.uncacheRead()
+ d.r.track()
+ d.swallow()
+ return d.r.stopTrack()
+}
+
// --------------------------------------------------
// decSliceHelper assists when decoding into a slice, from a map or an array in the stream.
// A slice can be set from a map or array in stream. This supports the MapBySlice interface.
type decSliceHelper struct {
- dd decDriver
- ct valueType
+ d *Decoder
+ // ct valueType
+ array bool
}
func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) {
- x.dd = d.d
- if x.dd.IsContainerType(valueTypeArray) {
- x.ct = valueTypeArray
- clen = x.dd.ReadArrayStart()
- } else if x.dd.IsContainerType(valueTypeMap) {
- x.ct = valueTypeMap
- clen = x.dd.ReadMapStart() * 2
+ dd := d.d
+ ctyp := dd.ContainerType()
+ if ctyp == valueTypeArray {
+ x.array = true
+ clen = dd.ReadArrayStart()
+ } else if ctyp == valueTypeMap {
+ clen = dd.ReadMapStart() * 2
} else {
- d.errorf("only encoded map or array can be decoded into a slice")
+ d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp)
}
+ // x.ct = ctyp
+ x.d = d
return
}
func (x decSliceHelper) End() {
- x.dd.ReadEnd()
+ cr := x.d.cr
+ if cr == nil {
+ return
+ }
+ if x.array {
+ cr.sendContainerState(containerArrayEnd)
+ } else {
+ cr.sendContainerState(containerMapEnd)
+ }
+}
+
+func (x decSliceHelper) ElemContainerState(index int) {
+ cr := x.d.cr
+ if cr == nil {
+ return
+ }
+ if x.array {
+ cr.sendContainerState(containerArrayElem)
+ } else {
+ if index%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ }
}
func decByteSlice(r decReader, clen int, bs []byte) (bsOut []byte) {
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/encode.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/encode.go
index 49c3a4577..99af6fa55 100644
--- a/Godeps/_workspace/src/github.com/ugorji/go/codec/encode.go
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/encode.go
@@ -62,13 +62,14 @@ type encDriver interface {
EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder)
EncodeArrayStart(length int)
EncodeMapStart(length int)
- EncodeEnd()
EncodeString(c charEncoding, v string)
EncodeSymbol(v string)
EncodeStringBytes(c charEncoding, v []byte)
//TODO
//encBignum(f *big.Int)
//encStringRunes(c charEncoding, v []rune)
+
+ reset()
}
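EncodeEnd has been dropped from encDriver; separator-sensitive formats (JSON needs commas, colons and closing brackets) instead receive container-state callbacks, which is why the generic paths now guard every element with "if cr != nil { cr.sendContainerState(...) }". The sketch below mirrors that optional-callback pattern with hypothetical names (boundary, boundaryRecv), not the package's unexported types, and only encodes a flat string map:

package main

import "fmt"

// boundary identifies a position inside a container being written.
type boundary int

const (
	mapKey boundary = iota
	mapValue
	mapEnd
)

// writer is the core driver; boundaryRecv is the optional extension that
// separator-sensitive formats implement.
type writer interface{ writeToken(s string) }
type boundaryRecv interface{ onBoundary(b boundary) }

// jsonish needs separators, so it implements both interfaces.
type jsonish struct{ n int }

func (j *jsonish) writeToken(s string) { fmt.Print(s) }

func (j *jsonish) onBoundary(b boundary) {
	switch b {
	case mapKey:
		if j.n > 0 {
			fmt.Print(",")
		}
		j.n++
	case mapValue:
		fmt.Print(":")
	case mapEnd:
		fmt.Print("}")
	}
}

// encodeMap only calls the callback when the driver opted in, like the patch does.
func encodeMap(w writer, kv [][2]string) {
	br, _ := w.(boundaryRecv)
	fmt.Print("{")
	for _, p := range kv {
		if br != nil {
			br.onBoundary(mapKey)
		}
		w.writeToken(p[0])
		if br != nil {
			br.onBoundary(mapValue)
		}
		w.writeToken(p[1])
	}
	if br != nil {
		br.onBoundary(mapEnd)
	}
}

func main() {
	encodeMap(&jsonish{}, [][2]string{{`"a"`, "1"}, {`"b"`, "2"}})
	fmt.Println() // {"a":1,"b":2}
}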
type encDriverAsis interface {
@@ -158,6 +159,7 @@ func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) {
// ioEncWriter implements encWriter and can write to an io.Writer implementation
type ioEncWriter struct {
w ioEncWriterWriter
+ s simpleIoEncWriterWriter
// x [8]byte // temp byte array re-used internally for efficiency
}
@@ -382,30 +384,32 @@ func (f *encFnInfo) kSlice(rv reflect.Value) {
// (don't call rv.Bytes, rv.Slice, etc).
// E.g. type struct S{B [2]byte};
// Encode(S{}) will bomb on "panic: slice of unaddressable array".
+ e := f.e
if f.seq != seqTypeArray {
if rv.IsNil() {
- f.e.e.EncodeNil()
+ e.e.EncodeNil()
return
}
// If in this method, then there was no extension function defined.
// So it's okay to treat as []byte.
if ti.rtid == uint8SliceTypId {
- f.e.e.EncodeStringBytes(c_RAW, rv.Bytes())
+ e.e.EncodeStringBytes(c_RAW, rv.Bytes())
return
}
}
+ cr := e.cr
rtelem := ti.rt.Elem()
l := rv.Len()
- if rtelem.Kind() == reflect.Uint8 {
+ if ti.rtid == uint8SliceTypId || rtelem.Kind() == reflect.Uint8 {
switch f.seq {
case seqTypeArray:
- // if l == 0 { f.e.e.encodeStringBytes(c_RAW, nil) } else
+ // if l == 0 { e.e.encodeStringBytes(c_RAW, nil) } else
if rv.CanAddr() {
- f.e.e.EncodeStringBytes(c_RAW, rv.Slice(0, l).Bytes())
+ e.e.EncodeStringBytes(c_RAW, rv.Slice(0, l).Bytes())
} else {
var bs []byte
- if l <= cap(f.e.b) {
- bs = f.e.b[:l]
+ if l <= cap(e.b) {
+ bs = e.b[:l]
} else {
bs = make([]byte, l)
}
@@ -414,12 +418,12 @@ func (f *encFnInfo) kSlice(rv reflect.Value) {
// for i := 0; i < l; i++ {
// bs[i] = byte(rv.Index(i).Uint())
// }
- f.e.e.EncodeStringBytes(c_RAW, bs)
+ e.e.EncodeStringBytes(c_RAW, bs)
}
case seqTypeSlice:
- f.e.e.EncodeStringBytes(c_RAW, rv.Bytes())
+ e.e.EncodeStringBytes(c_RAW, rv.Bytes())
case seqTypeChan:
- bs := f.e.b[:0]
+ bs := e.b[:0]
// do not use range, so that the number of elements encoded
// does not change, and encoding does not hang waiting on someone to close chan.
// for b := range rv.Interface().(<-chan byte) {
@@ -429,22 +433,21 @@ func (f *encFnInfo) kSlice(rv reflect.Value) {
for i := 0; i < l; i++ {
bs = append(bs, <-ch)
}
- f.e.e.EncodeStringBytes(c_RAW, bs)
+ e.e.EncodeStringBytes(c_RAW, bs)
}
return
}
if ti.mbs {
if l%2 == 1 {
- f.e.errorf("mapBySlice requires even slice length, but got %v", l)
+ e.errorf("mapBySlice requires even slice length, but got %v", l)
return
}
- f.e.e.EncodeMapStart(l / 2)
+ e.e.EncodeMapStart(l / 2)
} else {
- f.e.e.EncodeArrayStart(l)
+ e.e.EncodeArrayStart(l)
}
- e := f.e
if l > 0 {
for rtelem.Kind() == reflect.Ptr {
rtelem = rtelem.Elem()
@@ -459,29 +462,48 @@ func (f *encFnInfo) kSlice(rv reflect.Value) {
}
// TODO: Consider perf implication of encoding odd index values as symbols if type is string
for j := 0; j < l; j++ {
+ if cr != nil {
+ if ti.mbs {
+ if j%2 == 0 {
+ cr.sendContainerState(containerMapKey)
+ } else {
+ cr.sendContainerState(containerMapValue)
+ }
+ } else {
+ cr.sendContainerState(containerArrayElem)
+ }
+ }
if f.seq == seqTypeChan {
if rv2, ok2 := rv.Recv(); ok2 {
e.encodeValue(rv2, fn)
+ } else {
+ e.encode(nil) // we must encode something, so encode nil if nothing was received
}
} else {
e.encodeValue(rv.Index(j), fn)
}
}
-
}
- f.e.e.EncodeEnd()
+ if cr != nil {
+ if ti.mbs {
+ cr.sendContainerState(containerMapEnd)
+ } else {
+ cr.sendContainerState(containerArrayEnd)
+ }
+ }
}
func (f *encFnInfo) kStruct(rv reflect.Value) {
fti := f.ti
e := f.e
+ cr := e.cr
tisfi := fti.sfip
toMap := !(fti.toArray || e.h.StructToArray)
newlen := len(fti.sfi)
// Use sync.Pool to reduce allocating slices unnecessarily.
- // The cost of the occasional locking is less than the cost of locking.
+ // The cost of the occasional locking is less than the cost of new allocation.
pool, poolv, fkvs := encStructPoolGet(newlen)
// if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct)
@@ -519,7 +541,7 @@ func (f *encFnInfo) kStruct(rv reflect.Value) {
// debugf(">>>> kStruct: newlen: %v", newlen)
// sep := !e.be
- ee := f.e.e //don't dereference everytime
+ ee := e.e // don't dereference every time
if toMap {
ee.EncodeMapStart(newlen)
@@ -527,21 +549,35 @@ func (f *encFnInfo) kStruct(rv reflect.Value) {
asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0
for j := 0; j < newlen; j++ {
kv = fkvs[j]
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(kv.v)
} else {
ee.EncodeString(c_UTF8, kv.v)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encodeValue(kv.r, nil)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
} else {
ee.EncodeArrayStart(newlen)
for j := 0; j < newlen; j++ {
kv = fkvs[j]
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
e.encodeValue(kv.r, nil)
}
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
}
- ee.EncodeEnd()
// do not use defer. Instead, use explicit pool return at end of function.
// defer has a cost we are trying to avoid.
@@ -578,8 +614,11 @@ func (f *encFnInfo) kMap(rv reflect.Value) {
l := rv.Len()
ee.EncodeMapStart(l)
e := f.e
+ cr := e.cr
if l == 0 {
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
return
}
var asSymbols bool
@@ -622,6 +661,9 @@ func (f *encFnInfo) kMap(rv reflect.Value) {
e.kMapCanonical(rtkeyid, rtkey, rv, mks, valFn, asSymbols)
} else {
for j := range mks {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if keyTypeIsString {
if asSymbols {
ee.EncodeSymbol(mks[j].String())
@@ -631,15 +673,20 @@ func (f *encFnInfo) kMap(rv reflect.Value) {
} else {
e.encodeValue(mks[j], keyFn)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encodeValue(rv.MapIndex(mks[j]), valFn)
}
}
-
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *encFn, asSymbols bool) {
ee := e.e
+ cr := e.cr
// we previously did out-of-band if an extension was registered.
// This is not necessary, as the natural kind is sufficient for ordering.
@@ -652,7 +699,13 @@ func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.
}
sort.Sort(bytesRvSlice(mksv))
for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeStringBytes(c_RAW, mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
}
} else {
@@ -666,7 +719,13 @@ func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.
}
sort.Sort(boolRvSlice(mksv))
for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
}
case reflect.String:
@@ -678,11 +737,17 @@ func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.
}
sort.Sort(stringRvSlice(mksv))
for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(mksv[i].v)
} else {
ee.EncodeString(c_UTF8, mksv[i].v)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
}
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr:
@@ -694,7 +759,13 @@ func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.
}
sort.Sort(uintRvSlice(mksv))
for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
}
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
@@ -706,7 +777,13 @@ func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.
}
sort.Sort(intRvSlice(mksv))
for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
}
case reflect.Float32:
@@ -718,7 +795,13 @@ func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.
}
sort.Sort(floatRvSlice(mksv))
for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(float32(mksv[i].v))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
}
case reflect.Float64:
@@ -730,7 +813,13 @@ func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.
}
sort.Sort(floatRvSlice(mksv))
for i := range mksv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(mksv[i].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
}
default:
@@ -749,7 +838,13 @@ func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.
}
sort.Sort(bytesRvSlice(mksbv))
for j := range mksbv {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.asis(mksbv[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encodeValue(rv.MapIndex(mksbv[j].r), valFn)
}
}
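kMapCanonical sorts map keys by their native kind before writing, so repeated encodes of the same map produce identical bytes when the handle asks for it. A usage sketch, not part of the patch, assuming the Canonical encode option that drives this path in upstream codec:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	h := new(codec.JsonHandle)
	h.Canonical = true // sort map keys so repeated encodes are byte-identical

	m := map[string]int{"b": 2, "a": 1, "c": 3}
	var out []byte
	if err := codec.NewEncoderBytes(&out, h).Encode(m); err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"a":1,"b":2,"c":3}
}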
@@ -787,12 +882,15 @@ type Encoder struct {
wi ioEncWriter
wb bytesEncWriter
- h *BasicHandle
- as encDriverAsis
+ h *BasicHandle
hh Handle
- f map[uintptr]*encFn
- b [scratchByteArrayLen]byte
+
+ cr containerStateRecv
+ as encDriverAsis
+
+ f map[uintptr]*encFn
+ b [scratchByteArrayLen]byte
}
// NewEncoder returns an Encoder for encoding into an io.Writer.
@@ -800,20 +898,8 @@ type Encoder struct {
// For efficiency, Users are encouraged to pass in a memory buffered writer
// (eg bufio.Writer, bytes.Buffer).
func NewEncoder(w io.Writer, h Handle) *Encoder {
- e := &Encoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()}
- ww, ok := w.(ioEncWriterWriter)
- if !ok {
- sww := simpleIoEncWriterWriter{w: w}
- sww.bw, _ = w.(io.ByteWriter)
- sww.sw, _ = w.(ioEncStringWriter)
- ww = &sww
- //ww = bufio.NewWriterSize(w, defEncByteBufSize)
- }
- e.wi.w = ww
- e.w = &e.wi
- _, e.js = h.(*JsonHandle)
- e.e = h.newEncDriver(e)
- e.as, _ = e.e.(encDriverAsis)
+ e := newEncoder(h)
+ e.Reset(w)
return e
}
@@ -823,19 +909,56 @@ func NewEncoder(w io.Writer, h Handle) *Encoder {
// It will potentially replace the output byte slice pointed to.
// After encoding, the out parameter contains the encoded contents.
func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
+ e := newEncoder(h)
+ e.ResetBytes(out)
+ return e
+}
+
+func newEncoder(h Handle) *Encoder {
e := &Encoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()}
+ _, e.js = h.(*JsonHandle)
+ e.e = h.newEncDriver(e)
+ e.as, _ = e.e.(encDriverAsis)
+ e.cr, _ = e.e.(containerStateRecv)
+ return e
+}
+
+// Reset the Encoder with a new output stream.
+//
+// This accommodates using the state of the Encoder,
+// where it has "cached" information about sub-engines.
+func (e *Encoder) Reset(w io.Writer) {
+ ww, ok := w.(ioEncWriterWriter)
+ if ok {
+ e.wi.w = ww
+ } else {
+ sww := &e.wi.s
+ sww.w = w
+ sww.bw, _ = w.(io.ByteWriter)
+ sww.sw, _ = w.(ioEncStringWriter)
+ e.wi.w = sww
+ //ww = bufio.NewWriterSize(w, defEncByteBufSize)
+ }
+ e.w = &e.wi
+ e.e.reset()
+}
+
+func (e *Encoder) ResetBytes(out *[]byte) {
in := *out
if in == nil {
in = make([]byte, defEncByteBufSize)
}
- e.wb.b, e.wb.out = in, out
+ e.wb.b, e.wb.out, e.wb.c = in, out, 0
e.w = &e.wb
- _, e.js = h.(*JsonHandle)
- e.e = h.newEncDriver(e)
- e.as, _ = e.e.(encDriverAsis)
- return e
+ e.e.reset()
}
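Symmetrically, the Encoder gains Reset and ResetBytes so one instance, with its cached driver and sub-engine state, can be pointed at successive outputs. A usage sketch, not part of the patch, using only the exported API shown above:

package main

import (
	"bytes"
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	h := new(codec.JsonHandle)

	// Allocate one Encoder and point it at successive outputs.
	var out []byte
	e := codec.NewEncoderBytes(&out, h)
	for _, v := range []interface{}{map[string]int{"a": 1}, []int{1, 2, 3}} {
		e.ResetBytes(&out) // rewind onto the same output slice
		if err := e.Encode(v); err != nil {
			panic(err)
		}
		fmt.Println(string(out))
	}

	// NewEncoder works the same way for io.Writer destinations;
	// e2.Reset(anotherWriter) would later rebind it without reallocating.
	var buf bytes.Buffer
	e2 := codec.NewEncoder(&buf, h)
	if err := e2.Encode("hello"); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
}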
+// func (e *Encoder) sendContainerState(c containerState) {
+// if e.cr != nil {
+// e.cr.sendContainerState(c)
+// }
+// }
+
// Encode writes an object into a stream.
//
// Encoding can be configured via the struct tag for the fields.
@@ -1020,26 +1143,24 @@ func (e *Encoder) encodeI(iv interface{}, checkFastpath, checkCodecSelfer bool)
}
func (e *Encoder) preEncodeValue(rv reflect.Value) (rv2 reflect.Value, proceed bool) {
-LOOP:
- for {
- switch rv.Kind() {
- case reflect.Ptr, reflect.Interface:
- if rv.IsNil() {
- e.e.EncodeNil()
- return
- }
- rv = rv.Elem()
- continue LOOP
- case reflect.Slice, reflect.Map:
- if rv.IsNil() {
- e.e.EncodeNil()
- return
- }
- case reflect.Invalid, reflect.Func:
+ // use a goto statement instead of a recursive function for ptr/interface.
+TOP:
+ switch rv.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ if rv.IsNil() {
e.e.EncodeNil()
return
}
- break
+ rv = rv.Elem()
+ goto TOP
+ case reflect.Slice, reflect.Map:
+ if rv.IsNil() {
+ e.e.EncodeNil()
+ return
+ }
+ case reflect.Invalid, reflect.Func:
+ e.e.EncodeNil()
+ return
}
return rv, true
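The rewritten preEncodeValue replaces the labelled for-loop with a goto so nested pointers and interfaces are unwrapped iteratively before dispatch, encoding nil at the first nil link. The same technique in a standalone sketch; unwrap is a hypothetical helper, not the package's function:

package main

import (
	"fmt"
	"reflect"
)

// unwrap walks through pointers and interfaces iteratively; it reports
// ok=false as soon as a nil link (or an unencodable kind) is found,
// mirroring the goto TOP loop in preEncodeValue.
func unwrap(rv reflect.Value) (reflect.Value, bool) {
top:
	switch rv.Kind() {
	case reflect.Ptr, reflect.Interface:
		if rv.IsNil() {
			return rv, false
		}
		rv = rv.Elem()
		goto top
	case reflect.Slice, reflect.Map:
		if rv.IsNil() {
			return rv, false
		}
	case reflect.Invalid, reflect.Func:
		return rv, false
	}
	return rv, true
}

func main() {
	x := 42
	p := &x
	pp := &p
	if rv, ok := unwrap(reflect.ValueOf(pp)); ok {
		fmt.Println(rv.Int()) // 42
	}
	var nilp *int
	_, ok := unwrap(reflect.ValueOf(nilp))
	fmt.Println(ok) // false
}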
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.generated.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.generated.go
index cb5d6a694..d968a500f 100644
--- a/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.generated.go
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.generated.go
@@ -1,4 +1,4 @@
-// //+build ignore
+// +build !notfastpath
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
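The generated fast paths are now compiled in by default and excluded only with the notfastpath build tag, and each generated type switch additionally bails out early when fastpathEnabled is false. A minimal sketch of that build-tag toggle; the package name and file layout here are assumptions, not the package's actual files:

// +build !notfastpath

package codecsketch

// fastpathEnabled is true in default builds; a sibling file guarded by
// "+build notfastpath" would declare it false (file names hypothetical).
const fastpathEnabled = true

// encodeFast shows the early-out guard the generated type switches now use.
func encodeFast(v interface{}) (handled bool) {
	if !fastpathEnabled {
		return false // fall straight back to the reflection-based path
	}
	switch v.(type) {
	case []int, map[string]string:
		return true // a generated fast path would handle these directly
	}
	return false
}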
@@ -373,6 +373,9 @@ func init() {
// -- -- fast path type switch
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
switch v := iv.(type) {
case []interface{}:
@@ -1731,12 +1734,16 @@ func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e)
default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
return false
}
return true
}
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
switch v := iv.(type) {
case []interface{}:
@@ -1815,12 +1822,16 @@ func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
fastpathTV.EncSliceBoolV(*v, fastpathCheckNilTrue, e)
default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
return false
}
return true
}
func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
switch v := iv.(type) {
case map[interface{}]interface{}:
@@ -3117,15 +3128,21 @@ func (f *encFnInfo) fastpathEncSliceIntfR(rv reflect.Value) {
}
func (_ fastpathT) EncSliceIntfV(v []interface{}, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
e.encode(v2)
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
}
func (f *encFnInfo) fastpathEncSliceStringR(rv reflect.Value) {
@@ -3133,15 +3150,21 @@ func (f *encFnInfo) fastpathEncSliceStringR(rv reflect.Value) {
}
func (_ fastpathT) EncSliceStringV(v []string, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
ee.EncodeString(c_UTF8, v2)
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
}
func (f *encFnInfo) fastpathEncSliceFloat32R(rv reflect.Value) {
@@ -3149,15 +3172,21 @@ func (f *encFnInfo) fastpathEncSliceFloat32R(rv reflect.Value) {
}
func (_ fastpathT) EncSliceFloat32V(v []float32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
ee.EncodeFloat32(v2)
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
}
func (f *encFnInfo) fastpathEncSliceFloat64R(rv reflect.Value) {
@@ -3165,15 +3194,21 @@ func (f *encFnInfo) fastpathEncSliceFloat64R(rv reflect.Value) {
}
func (_ fastpathT) EncSliceFloat64V(v []float64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
ee.EncodeFloat64(v2)
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
}
func (f *encFnInfo) fastpathEncSliceUintR(rv reflect.Value) {
@@ -3181,15 +3216,21 @@ func (f *encFnInfo) fastpathEncSliceUintR(rv reflect.Value) {
}
func (_ fastpathT) EncSliceUintV(v []uint, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
ee.EncodeUint(uint64(v2))
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
}
func (f *encFnInfo) fastpathEncSliceUint16R(rv reflect.Value) {
@@ -3197,15 +3238,21 @@ func (f *encFnInfo) fastpathEncSliceUint16R(rv reflect.Value) {
}
func (_ fastpathT) EncSliceUint16V(v []uint16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
ee.EncodeUint(uint64(v2))
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
}
func (f *encFnInfo) fastpathEncSliceUint32R(rv reflect.Value) {
@@ -3213,15 +3260,21 @@ func (f *encFnInfo) fastpathEncSliceUint32R(rv reflect.Value) {
}
func (_ fastpathT) EncSliceUint32V(v []uint32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
ee.EncodeUint(uint64(v2))
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
}
func (f *encFnInfo) fastpathEncSliceUint64R(rv reflect.Value) {
@@ -3229,15 +3282,21 @@ func (f *encFnInfo) fastpathEncSliceUint64R(rv reflect.Value) {
}
func (_ fastpathT) EncSliceUint64V(v []uint64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
ee.EncodeUint(uint64(v2))
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
}
func (f *encFnInfo) fastpathEncSliceUintptrR(rv reflect.Value) {
@@ -3245,15 +3304,21 @@ func (f *encFnInfo) fastpathEncSliceUintptrR(rv reflect.Value) {
}
func (_ fastpathT) EncSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
e.encode(v2)
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
}
func (f *encFnInfo) fastpathEncSliceIntR(rv reflect.Value) {
@@ -3261,15 +3326,21 @@ func (f *encFnInfo) fastpathEncSliceIntR(rv reflect.Value) {
}
func (_ fastpathT) EncSliceIntV(v []int, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
ee.EncodeInt(int64(v2))
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
}
func (f *encFnInfo) fastpathEncSliceInt8R(rv reflect.Value) {
@@ -3277,15 +3348,21 @@ func (f *encFnInfo) fastpathEncSliceInt8R(rv reflect.Value) {
}
func (_ fastpathT) EncSliceInt8V(v []int8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
ee.EncodeInt(int64(v2))
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
}
func (f *encFnInfo) fastpathEncSliceInt16R(rv reflect.Value) {
@@ -3293,15 +3370,21 @@ func (f *encFnInfo) fastpathEncSliceInt16R(rv reflect.Value) {
}
func (_ fastpathT) EncSliceInt16V(v []int16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
ee.EncodeInt(int64(v2))
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
}
func (f *encFnInfo) fastpathEncSliceInt32R(rv reflect.Value) {
@@ -3309,15 +3392,21 @@ func (f *encFnInfo) fastpathEncSliceInt32R(rv reflect.Value) {
}
func (_ fastpathT) EncSliceInt32V(v []int32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
ee.EncodeInt(int64(v2))
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
}
func (f *encFnInfo) fastpathEncSliceInt64R(rv reflect.Value) {
@@ -3325,15 +3414,21 @@ func (f *encFnInfo) fastpathEncSliceInt64R(rv reflect.Value) {
}
func (_ fastpathT) EncSliceInt64V(v []int64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
ee.EncodeInt(int64(v2))
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
}
func (f *encFnInfo) fastpathEncSliceBoolR(rv reflect.Value) {
@@ -3341,15 +3436,21 @@ func (f *encFnInfo) fastpathEncSliceBoolR(rv reflect.Value) {
}
func (_ fastpathT) EncSliceBoolV(v []bool, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
for _, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerArrayElem)
+ }
ee.EncodeBool(v2)
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerArrayEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntfIntfR(rv reflect.Value) {
@@ -3357,6 +3458,7 @@ func (f *encFnInfo) fastpathEncMapIntfIntfR(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -3378,16 +3480,30 @@ func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, checkNil bool,
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[v2[j].i])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntfStringR(rv reflect.Value) {
@@ -3395,6 +3511,7 @@ func (f *encFnInfo) fastpathEncMapIntfStringR(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -3416,16 +3533,30 @@ func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, checkNil bool, e
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[v2[j].i])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntfUintR(rv reflect.Value) {
@@ -3433,6 +3564,7 @@ func (f *encFnInfo) fastpathEncMapIntfUintR(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -3454,16 +3586,30 @@ func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, checkNil bool, e *Enc
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[v2[j].i])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntfUint8R(rv reflect.Value) {
@@ -3471,6 +3617,7 @@ func (f *encFnInfo) fastpathEncMapIntfUint8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -3492,16 +3639,30 @@ func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, checkNil bool, e *E
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[v2[j].i])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntfUint16R(rv reflect.Value) {
@@ -3509,6 +3670,7 @@ func (f *encFnInfo) fastpathEncMapIntfUint16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -3530,16 +3692,30 @@ func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, checkNil bool, e
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[v2[j].i])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntfUint32R(rv reflect.Value) {
@@ -3547,6 +3723,7 @@ func (f *encFnInfo) fastpathEncMapIntfUint32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -3568,16 +3745,30 @@ func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, checkNil bool, e
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[v2[j].i])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntfUint64R(rv reflect.Value) {
@@ -3585,6 +3776,7 @@ func (f *encFnInfo) fastpathEncMapIntfUint64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -3606,16 +3798,30 @@ func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, checkNil bool, e
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[v2[j].i])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntfUintptrR(rv reflect.Value) {
@@ -3623,6 +3829,7 @@ func (f *encFnInfo) fastpathEncMapIntfUintptrR(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -3644,16 +3851,30 @@ func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool,
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[v2[j].i])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntfIntR(rv reflect.Value) {
@@ -3661,6 +3882,7 @@ func (f *encFnInfo) fastpathEncMapIntfIntR(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -3682,16 +3904,30 @@ func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, checkNil bool, e *Encod
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[v2[j].i])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntfInt8R(rv reflect.Value) {
@@ -3699,6 +3935,7 @@ func (f *encFnInfo) fastpathEncMapIntfInt8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -3720,16 +3957,30 @@ func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, checkNil bool, e *Enc
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[v2[j].i])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntfInt16R(rv reflect.Value) {
@@ -3737,6 +3988,7 @@ func (f *encFnInfo) fastpathEncMapIntfInt16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -3758,16 +4010,30 @@ func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, checkNil bool, e *E
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[v2[j].i])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntfInt32R(rv reflect.Value) {
@@ -3775,6 +4041,7 @@ func (f *encFnInfo) fastpathEncMapIntfInt32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -3796,16 +4063,30 @@ func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, checkNil bool, e *E
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[v2[j].i])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntfInt64R(rv reflect.Value) {
@@ -3813,6 +4094,7 @@ func (f *encFnInfo) fastpathEncMapIntfInt64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -3834,16 +4116,30 @@ func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, checkNil bool, e *E
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[v2[j].i])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntfFloat32R(rv reflect.Value) {
@@ -3851,6 +4147,7 @@ func (f *encFnInfo) fastpathEncMapIntfFloat32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -3872,16 +4169,30 @@ func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, checkNil bool,
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[v2[j].i])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntfFloat64R(rv reflect.Value) {
@@ -3889,6 +4200,7 @@ func (f *encFnInfo) fastpathEncMapIntfFloat64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -3910,16 +4222,30 @@ func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, checkNil bool,
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[v2[j].i])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntfBoolR(rv reflect.Value) {
@@ -3927,6 +4253,7 @@ func (f *encFnInfo) fastpathEncMapIntfBoolR(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -3948,16 +4275,30 @@ func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, checkNil bool, e *Enc
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.asis(v2[j].v)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[v2[j].i])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapStringIntfR(rv reflect.Value) {
@@ -3965,6 +4306,7 @@ func (f *encFnInfo) fastpathEncMapStringIntfR(rv reflect.Value) {
}
func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -3980,24 +4322,38 @@ func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, checkNil bool, e
}
sort.Sort(stringSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[string(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapStringStringR(rv reflect.Value) {
@@ -4005,6 +4361,7 @@ func (f *encFnInfo) fastpathEncMapStringStringR(rv reflect.Value) {
}
func (_ fastpathT) EncMapStringStringV(v map[string]string, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4020,24 +4377,38 @@ func (_ fastpathT) EncMapStringStringV(v map[string]string, checkNil bool, e *En
}
sort.Sort(stringSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v[string(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapStringUintR(rv reflect.Value) {
@@ -4045,6 +4416,7 @@ func (f *encFnInfo) fastpathEncMapStringUintR(rv reflect.Value) {
}
func (_ fastpathT) EncMapStringUintV(v map[string]uint, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4060,24 +4432,38 @@ func (_ fastpathT) EncMapStringUintV(v map[string]uint, checkNil bool, e *Encode
}
sort.Sort(stringSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[string(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapStringUint8R(rv reflect.Value) {
@@ -4085,6 +4471,7 @@ func (f *encFnInfo) fastpathEncMapStringUint8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4100,24 +4487,38 @@ func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, checkNil bool, e *Enco
}
sort.Sort(stringSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[string(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapStringUint16R(rv reflect.Value) {
@@ -4125,6 +4526,7 @@ func (f *encFnInfo) fastpathEncMapStringUint16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4140,24 +4542,38 @@ func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, checkNil bool, e *En
}
sort.Sort(stringSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[string(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapStringUint32R(rv reflect.Value) {
@@ -4165,6 +4581,7 @@ func (f *encFnInfo) fastpathEncMapStringUint32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4180,24 +4597,38 @@ func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, checkNil bool, e *En
}
sort.Sort(stringSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[string(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapStringUint64R(rv reflect.Value) {
@@ -4205,6 +4636,7 @@ func (f *encFnInfo) fastpathEncMapStringUint64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4220,24 +4652,38 @@ func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, checkNil bool, e *En
}
sort.Sort(stringSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[string(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapStringUintptrR(rv reflect.Value) {
@@ -4245,6 +4691,7 @@ func (f *encFnInfo) fastpathEncMapStringUintptrR(rv reflect.Value) {
}
func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4260,24 +4707,38 @@ func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, checkNil bool, e *
}
sort.Sort(stringSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[string(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapStringIntR(rv reflect.Value) {
@@ -4285,6 +4746,7 @@ func (f *encFnInfo) fastpathEncMapStringIntR(rv reflect.Value) {
}
func (_ fastpathT) EncMapStringIntV(v map[string]int, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4300,24 +4762,38 @@ func (_ fastpathT) EncMapStringIntV(v map[string]int, checkNil bool, e *Encoder)
}
sort.Sort(stringSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[string(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapStringInt8R(rv reflect.Value) {
@@ -4325,6 +4801,7 @@ func (f *encFnInfo) fastpathEncMapStringInt8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapStringInt8V(v map[string]int8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4340,24 +4817,38 @@ func (_ fastpathT) EncMapStringInt8V(v map[string]int8, checkNil bool, e *Encode
}
sort.Sort(stringSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[string(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapStringInt16R(rv reflect.Value) {
@@ -4365,6 +4856,7 @@ func (f *encFnInfo) fastpathEncMapStringInt16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapStringInt16V(v map[string]int16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4380,24 +4872,38 @@ func (_ fastpathT) EncMapStringInt16V(v map[string]int16, checkNil bool, e *Enco
}
sort.Sort(stringSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[string(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapStringInt32R(rv reflect.Value) {
@@ -4405,6 +4911,7 @@ func (f *encFnInfo) fastpathEncMapStringInt32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapStringInt32V(v map[string]int32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4420,24 +4927,38 @@ func (_ fastpathT) EncMapStringInt32V(v map[string]int32, checkNil bool, e *Enco
}
sort.Sort(stringSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[string(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapStringInt64R(rv reflect.Value) {
@@ -4445,6 +4966,7 @@ func (f *encFnInfo) fastpathEncMapStringInt64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapStringInt64V(v map[string]int64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4460,24 +4982,38 @@ func (_ fastpathT) EncMapStringInt64V(v map[string]int64, checkNil bool, e *Enco
}
sort.Sort(stringSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[string(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapStringFloat32R(rv reflect.Value) {
@@ -4485,6 +5021,7 @@ func (f *encFnInfo) fastpathEncMapStringFloat32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4500,24 +5037,38 @@ func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, checkNil bool, e *
}
sort.Sort(stringSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v[string(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapStringFloat64R(rv reflect.Value) {
@@ -4525,6 +5076,7 @@ func (f *encFnInfo) fastpathEncMapStringFloat64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4540,24 +5092,38 @@ func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, checkNil bool, e *
}
sort.Sort(stringSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v[string(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapStringBoolR(rv reflect.Value) {
@@ -4565,6 +5131,7 @@ func (f *encFnInfo) fastpathEncMapStringBoolR(rv reflect.Value) {
}
func (_ fastpathT) EncMapStringBoolV(v map[string]bool, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4580,24 +5147,38 @@ func (_ fastpathT) EncMapStringBoolV(v map[string]bool, checkNil bool, e *Encode
}
sort.Sort(stringSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v[string(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat32IntfR(rv reflect.Value) {
@@ -4605,6 +5186,7 @@ func (f *encFnInfo) fastpathEncMapFloat32IntfR(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4619,16 +5201,30 @@ func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, checkNil bool,
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[float32(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat32StringR(rv reflect.Value) {
@@ -4636,6 +5232,7 @@ func (f *encFnInfo) fastpathEncMapFloat32StringR(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4650,16 +5247,30 @@ func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, checkNil bool, e *
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v[float32(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat32UintR(rv reflect.Value) {
@@ -4667,6 +5278,7 @@ func (f *encFnInfo) fastpathEncMapFloat32UintR(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4681,16 +5293,30 @@ func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, checkNil bool, e *Enco
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[float32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat32Uint8R(rv reflect.Value) {
@@ -4698,6 +5324,7 @@ func (f *encFnInfo) fastpathEncMapFloat32Uint8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4712,16 +5339,30 @@ func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, checkNil bool, e *En
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[float32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat32Uint16R(rv reflect.Value) {
@@ -4729,6 +5370,7 @@ func (f *encFnInfo) fastpathEncMapFloat32Uint16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4743,16 +5385,30 @@ func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, checkNil bool, e *
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[float32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat32Uint32R(rv reflect.Value) {
@@ -4760,6 +5416,7 @@ func (f *encFnInfo) fastpathEncMapFloat32Uint32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4774,16 +5431,30 @@ func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, checkNil bool, e *
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[float32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat32Uint64R(rv reflect.Value) {
@@ -4791,6 +5462,7 @@ func (f *encFnInfo) fastpathEncMapFloat32Uint64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4805,16 +5477,30 @@ func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, checkNil bool, e *
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[float32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat32UintptrR(rv reflect.Value) {
@@ -4822,6 +5508,7 @@ func (f *encFnInfo) fastpathEncMapFloat32UintptrR(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4836,16 +5523,30 @@ func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, e
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[float32(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat32IntR(rv reflect.Value) {
@@ -4853,6 +5554,7 @@ func (f *encFnInfo) fastpathEncMapFloat32IntR(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4867,16 +5569,30 @@ func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, checkNil bool, e *Encode
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[float32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat32Int8R(rv reflect.Value) {
@@ -4884,6 +5600,7 @@ func (f *encFnInfo) fastpathEncMapFloat32Int8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4898,16 +5615,30 @@ func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, checkNil bool, e *Enco
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[float32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat32Int16R(rv reflect.Value) {
@@ -4915,6 +5646,7 @@ func (f *encFnInfo) fastpathEncMapFloat32Int16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4929,16 +5661,30 @@ func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, checkNil bool, e *En
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[float32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat32Int32R(rv reflect.Value) {
@@ -4946,6 +5692,7 @@ func (f *encFnInfo) fastpathEncMapFloat32Int32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4960,16 +5707,30 @@ func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, checkNil bool, e *En
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[float32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat32Int64R(rv reflect.Value) {
@@ -4977,6 +5738,7 @@ func (f *encFnInfo) fastpathEncMapFloat32Int64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -4991,16 +5753,30 @@ func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, checkNil bool, e *En
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[float32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat32Float32R(rv reflect.Value) {
@@ -5008,6 +5784,7 @@ func (f *encFnInfo) fastpathEncMapFloat32Float32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5022,16 +5799,30 @@ func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, checkNil bool, e
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v[float32(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat32Float64R(rv reflect.Value) {
@@ -5039,6 +5830,7 @@ func (f *encFnInfo) fastpathEncMapFloat32Float64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5053,16 +5845,30 @@ func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, checkNil bool, e
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v[float32(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat32BoolR(rv reflect.Value) {
@@ -5070,6 +5876,7 @@ func (f *encFnInfo) fastpathEncMapFloat32BoolR(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5084,16 +5891,30 @@ func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, checkNil bool, e *Enco
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(float32(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v[float32(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat32(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat64IntfR(rv reflect.Value) {
@@ -5101,6 +5922,7 @@ func (f *encFnInfo) fastpathEncMapFloat64IntfR(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5115,16 +5937,30 @@ func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, checkNil bool,
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[float64(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat64StringR(rv reflect.Value) {
@@ -5132,6 +5968,7 @@ func (f *encFnInfo) fastpathEncMapFloat64StringR(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5146,16 +5983,30 @@ func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, checkNil bool, e *
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v[float64(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat64UintR(rv reflect.Value) {
@@ -5163,6 +6014,7 @@ func (f *encFnInfo) fastpathEncMapFloat64UintR(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5177,16 +6029,30 @@ func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, checkNil bool, e *Enco
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[float64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat64Uint8R(rv reflect.Value) {
@@ -5194,6 +6060,7 @@ func (f *encFnInfo) fastpathEncMapFloat64Uint8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5208,16 +6075,30 @@ func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, checkNil bool, e *En
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[float64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat64Uint16R(rv reflect.Value) {
@@ -5225,6 +6106,7 @@ func (f *encFnInfo) fastpathEncMapFloat64Uint16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5239,16 +6121,30 @@ func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, checkNil bool, e *
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[float64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat64Uint32R(rv reflect.Value) {
@@ -5256,6 +6152,7 @@ func (f *encFnInfo) fastpathEncMapFloat64Uint32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5270,16 +6167,30 @@ func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, checkNil bool, e *
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[float64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat64Uint64R(rv reflect.Value) {
@@ -5287,6 +6198,7 @@ func (f *encFnInfo) fastpathEncMapFloat64Uint64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5301,16 +6213,30 @@ func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, checkNil bool, e *
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[float64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat64UintptrR(rv reflect.Value) {
@@ -5318,6 +6244,7 @@ func (f *encFnInfo) fastpathEncMapFloat64UintptrR(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5332,16 +6259,30 @@ func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, e
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[float64(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat64IntR(rv reflect.Value) {
@@ -5349,6 +6290,7 @@ func (f *encFnInfo) fastpathEncMapFloat64IntR(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5363,16 +6305,30 @@ func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, checkNil bool, e *Encode
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[float64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat64Int8R(rv reflect.Value) {
@@ -5380,6 +6336,7 @@ func (f *encFnInfo) fastpathEncMapFloat64Int8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5394,16 +6351,30 @@ func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, checkNil bool, e *Enco
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[float64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat64Int16R(rv reflect.Value) {
@@ -5411,6 +6382,7 @@ func (f *encFnInfo) fastpathEncMapFloat64Int16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5425,16 +6397,30 @@ func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, checkNil bool, e *En
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[float64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat64Int32R(rv reflect.Value) {
@@ -5442,6 +6428,7 @@ func (f *encFnInfo) fastpathEncMapFloat64Int32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5456,16 +6443,30 @@ func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, checkNil bool, e *En
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[float64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat64Int64R(rv reflect.Value) {
@@ -5473,6 +6474,7 @@ func (f *encFnInfo) fastpathEncMapFloat64Int64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5487,16 +6489,30 @@ func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, checkNil bool, e *En
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[float64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat64Float32R(rv reflect.Value) {
@@ -5504,6 +6520,7 @@ func (f *encFnInfo) fastpathEncMapFloat64Float32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5518,16 +6535,30 @@ func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, checkNil bool, e
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v[float64(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat64Float64R(rv reflect.Value) {
@@ -5535,6 +6566,7 @@ func (f *encFnInfo) fastpathEncMapFloat64Float64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5549,16 +6581,30 @@ func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, checkNil bool, e
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v[float64(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapFloat64BoolR(rv reflect.Value) {
@@ -5566,6 +6612,7 @@ func (f *encFnInfo) fastpathEncMapFloat64BoolR(rv reflect.Value) {
}
func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5580,16 +6627,30 @@ func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, checkNil bool, e *Enco
}
sort.Sort(floatSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(float64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v[float64(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeFloat64(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintIntfR(rv reflect.Value) {
@@ -5597,6 +6658,7 @@ func (f *encFnInfo) fastpathEncMapUintIntfR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5611,16 +6673,30 @@ func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, checkNil bool, e *Enc
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[uint(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintStringR(rv reflect.Value) {
@@ -5628,6 +6704,7 @@ func (f *encFnInfo) fastpathEncMapUintStringR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintStringV(v map[uint]string, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5642,16 +6719,30 @@ func (_ fastpathT) EncMapUintStringV(v map[uint]string, checkNil bool, e *Encode
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v[uint(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintUintR(rv reflect.Value) {
@@ -5659,6 +6750,7 @@ func (f *encFnInfo) fastpathEncMapUintUintR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintUintV(v map[uint]uint, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5673,16 +6765,30 @@ func (_ fastpathT) EncMapUintUintV(v map[uint]uint, checkNil bool, e *Encoder) {
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintUint8R(rv reflect.Value) {
@@ -5690,6 +6796,7 @@ func (f *encFnInfo) fastpathEncMapUintUint8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5704,16 +6811,30 @@ func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, checkNil bool, e *Encoder)
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintUint16R(rv reflect.Value) {
@@ -5721,6 +6842,7 @@ func (f *encFnInfo) fastpathEncMapUintUint16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5735,16 +6857,30 @@ func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, checkNil bool, e *Encode
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintUint32R(rv reflect.Value) {
@@ -5752,6 +6888,7 @@ func (f *encFnInfo) fastpathEncMapUintUint32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5766,16 +6903,30 @@ func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, checkNil bool, e *Encode
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintUint64R(rv reflect.Value) {
@@ -5783,6 +6934,7 @@ func (f *encFnInfo) fastpathEncMapUintUint64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5797,16 +6949,30 @@ func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, checkNil bool, e *Encode
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintUintptrR(rv reflect.Value) {
@@ -5814,6 +6980,7 @@ func (f *encFnInfo) fastpathEncMapUintUintptrR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5828,16 +6995,30 @@ func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[uint(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintIntR(rv reflect.Value) {
@@ -5845,6 +7026,7 @@ func (f *encFnInfo) fastpathEncMapUintIntR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintIntV(v map[uint]int, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5859,16 +7041,30 @@ func (_ fastpathT) EncMapUintIntV(v map[uint]int, checkNil bool, e *Encoder) {
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintInt8R(rv reflect.Value) {
@@ -5876,6 +7072,7 @@ func (f *encFnInfo) fastpathEncMapUintInt8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5890,16 +7087,30 @@ func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, checkNil bool, e *Encoder) {
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintInt16R(rv reflect.Value) {
@@ -5907,6 +7118,7 @@ func (f *encFnInfo) fastpathEncMapUintInt16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5921,16 +7133,30 @@ func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, checkNil bool, e *Encoder)
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintInt32R(rv reflect.Value) {
@@ -5938,6 +7164,7 @@ func (f *encFnInfo) fastpathEncMapUintInt32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5952,16 +7179,30 @@ func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, checkNil bool, e *Encoder)
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintInt64R(rv reflect.Value) {
@@ -5969,6 +7210,7 @@ func (f *encFnInfo) fastpathEncMapUintInt64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -5983,16 +7225,30 @@ func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, checkNil bool, e *Encoder)
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintFloat32R(rv reflect.Value) {
@@ -6000,6 +7256,7 @@ func (f *encFnInfo) fastpathEncMapUintFloat32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6014,16 +7271,30 @@ func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v[uint(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintFloat64R(rv reflect.Value) {
@@ -6031,6 +7302,7 @@ func (f *encFnInfo) fastpathEncMapUintFloat64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6045,16 +7317,30 @@ func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v[uint(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintBoolR(rv reflect.Value) {
@@ -6062,6 +7348,7 @@ func (f *encFnInfo) fastpathEncMapUintBoolR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6076,16 +7363,30 @@ func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, checkNil bool, e *Encoder) {
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v[uint(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint8IntfR(rv reflect.Value) {
@@ -6093,6 +7394,7 @@ func (f *encFnInfo) fastpathEncMapUint8IntfR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6107,16 +7409,30 @@ func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, checkNil bool, e *E
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[uint8(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint8StringR(rv reflect.Value) {
@@ -6124,6 +7440,7 @@ func (f *encFnInfo) fastpathEncMapUint8StringR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6138,16 +7455,30 @@ func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v[uint8(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint8UintR(rv reflect.Value) {
@@ -6155,6 +7486,7 @@ func (f *encFnInfo) fastpathEncMapUint8UintR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6169,16 +7501,30 @@ func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, checkNil bool, e *Encoder)
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint8Uint8R(rv reflect.Value) {
@@ -6186,6 +7532,7 @@ func (f *encFnInfo) fastpathEncMapUint8Uint8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6200,16 +7547,30 @@ func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, checkNil bool, e *Encode
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint8Uint16R(rv reflect.Value) {
@@ -6217,6 +7578,7 @@ func (f *encFnInfo) fastpathEncMapUint8Uint16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6231,16 +7593,30 @@ func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint8Uint32R(rv reflect.Value) {
@@ -6248,6 +7624,7 @@ func (f *encFnInfo) fastpathEncMapUint8Uint32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6262,16 +7639,30 @@ func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint8Uint64R(rv reflect.Value) {
@@ -6279,6 +7670,7 @@ func (f *encFnInfo) fastpathEncMapUint8Uint64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6293,16 +7685,30 @@ func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint8UintptrR(rv reflect.Value) {
@@ -6310,6 +7716,7 @@ func (f *encFnInfo) fastpathEncMapUint8UintptrR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6324,16 +7731,30 @@ func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[uint8(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint8IntR(rv reflect.Value) {
@@ -6341,6 +7762,7 @@ func (f *encFnInfo) fastpathEncMapUint8IntR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6355,16 +7777,30 @@ func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, checkNil bool, e *Encoder) {
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint8Int8R(rv reflect.Value) {
@@ -6372,6 +7808,7 @@ func (f *encFnInfo) fastpathEncMapUint8Int8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6386,16 +7823,30 @@ func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, checkNil bool, e *Encoder)
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint8Int16R(rv reflect.Value) {
@@ -6403,6 +7854,7 @@ func (f *encFnInfo) fastpathEncMapUint8Int16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6417,16 +7869,30 @@ func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, checkNil bool, e *Encode
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint8Int32R(rv reflect.Value) {
@@ -6434,6 +7900,7 @@ func (f *encFnInfo) fastpathEncMapUint8Int32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6448,16 +7915,30 @@ func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, checkNil bool, e *Encode
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint8Int64R(rv reflect.Value) {
@@ -6465,6 +7946,7 @@ func (f *encFnInfo) fastpathEncMapUint8Int64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6479,16 +7961,30 @@ func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, checkNil bool, e *Encode
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint8Float32R(rv reflect.Value) {
@@ -6496,6 +7992,7 @@ func (f *encFnInfo) fastpathEncMapUint8Float32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6510,16 +8007,30 @@ func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v[uint8(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint8Float64R(rv reflect.Value) {
@@ -6527,6 +8038,7 @@ func (f *encFnInfo) fastpathEncMapUint8Float64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6541,16 +8053,30 @@ func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v[uint8(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint8BoolR(rv reflect.Value) {
@@ -6558,6 +8084,7 @@ func (f *encFnInfo) fastpathEncMapUint8BoolR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6572,16 +8099,30 @@ func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, checkNil bool, e *Encoder)
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v[uint8(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint16IntfR(rv reflect.Value) {
@@ -6589,6 +8130,7 @@ func (f *encFnInfo) fastpathEncMapUint16IntfR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6603,16 +8145,30 @@ func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, checkNil bool, e
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[uint16(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint16StringR(rv reflect.Value) {
@@ -6620,6 +8176,7 @@ func (f *encFnInfo) fastpathEncMapUint16StringR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6634,16 +8191,30 @@ func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v[uint16(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint16UintR(rv reflect.Value) {
@@ -6651,6 +8222,7 @@ func (f *encFnInfo) fastpathEncMapUint16UintR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6665,16 +8237,30 @@ func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, checkNil bool, e *Encode
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint16Uint8R(rv reflect.Value) {
@@ -6682,6 +8268,7 @@ func (f *encFnInfo) fastpathEncMapUint16Uint8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6696,16 +8283,30 @@ func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint16Uint16R(rv reflect.Value) {
@@ -6713,6 +8314,7 @@ func (f *encFnInfo) fastpathEncMapUint16Uint16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6727,16 +8329,30 @@ func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint16Uint32R(rv reflect.Value) {
@@ -6744,6 +8360,7 @@ func (f *encFnInfo) fastpathEncMapUint16Uint32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6758,16 +8375,30 @@ func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint16Uint64R(rv reflect.Value) {
@@ -6775,6 +8406,7 @@ func (f *encFnInfo) fastpathEncMapUint16Uint64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6789,16 +8421,30 @@ func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint16UintptrR(rv reflect.Value) {
@@ -6806,6 +8452,7 @@ func (f *encFnInfo) fastpathEncMapUint16UintptrR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6820,16 +8467,30 @@ func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, e *
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[uint16(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint16IntR(rv reflect.Value) {
@@ -6837,6 +8498,7 @@ func (f *encFnInfo) fastpathEncMapUint16IntR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6851,16 +8513,30 @@ func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, checkNil bool, e *Encoder)
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint16Int8R(rv reflect.Value) {
@@ -6868,6 +8544,7 @@ func (f *encFnInfo) fastpathEncMapUint16Int8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6882,16 +8559,30 @@ func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, checkNil bool, e *Encode
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint16Int16R(rv reflect.Value) {
@@ -6899,6 +8590,7 @@ func (f *encFnInfo) fastpathEncMapUint16Int16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6913,16 +8605,30 @@ func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint16Int32R(rv reflect.Value) {
@@ -6930,6 +8636,7 @@ func (f *encFnInfo) fastpathEncMapUint16Int32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6944,16 +8651,30 @@ func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint16Int64R(rv reflect.Value) {
@@ -6961,6 +8682,7 @@ func (f *encFnInfo) fastpathEncMapUint16Int64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -6975,16 +8697,30 @@ func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint16Float32R(rv reflect.Value) {
@@ -6992,6 +8728,7 @@ func (f *encFnInfo) fastpathEncMapUint16Float32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7006,16 +8743,30 @@ func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, checkNil bool, e *
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v[uint16(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint16Float64R(rv reflect.Value) {
@@ -7023,6 +8774,7 @@ func (f *encFnInfo) fastpathEncMapUint16Float64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7037,16 +8789,30 @@ func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, checkNil bool, e *
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v[uint16(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint16BoolR(rv reflect.Value) {
@@ -7054,6 +8820,7 @@ func (f *encFnInfo) fastpathEncMapUint16BoolR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7068,16 +8835,30 @@ func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, checkNil bool, e *Encode
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v[uint16(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint32IntfR(rv reflect.Value) {
@@ -7085,6 +8866,7 @@ func (f *encFnInfo) fastpathEncMapUint32IntfR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7099,16 +8881,30 @@ func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, checkNil bool, e
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[uint32(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint32StringR(rv reflect.Value) {
@@ -7116,6 +8912,7 @@ func (f *encFnInfo) fastpathEncMapUint32StringR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7130,16 +8927,30 @@ func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v[uint32(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint32UintR(rv reflect.Value) {
@@ -7147,6 +8958,7 @@ func (f *encFnInfo) fastpathEncMapUint32UintR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7161,16 +8973,30 @@ func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, checkNil bool, e *Encode
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint32Uint8R(rv reflect.Value) {
@@ -7178,6 +9004,7 @@ func (f *encFnInfo) fastpathEncMapUint32Uint8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7192,16 +9019,30 @@ func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint32Uint16R(rv reflect.Value) {
@@ -7209,6 +9050,7 @@ func (f *encFnInfo) fastpathEncMapUint32Uint16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7223,16 +9065,30 @@ func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint32Uint32R(rv reflect.Value) {
@@ -7240,6 +9096,7 @@ func (f *encFnInfo) fastpathEncMapUint32Uint32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7254,16 +9111,30 @@ func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint32Uint64R(rv reflect.Value) {
@@ -7271,6 +9142,7 @@ func (f *encFnInfo) fastpathEncMapUint32Uint64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7285,16 +9157,30 @@ func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint32UintptrR(rv reflect.Value) {
@@ -7302,6 +9188,7 @@ func (f *encFnInfo) fastpathEncMapUint32UintptrR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7316,16 +9203,30 @@ func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, e *
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[uint32(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint32IntR(rv reflect.Value) {
@@ -7333,6 +9234,7 @@ func (f *encFnInfo) fastpathEncMapUint32IntR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7347,16 +9249,30 @@ func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, checkNil bool, e *Encoder)
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint32Int8R(rv reflect.Value) {
@@ -7364,6 +9280,7 @@ func (f *encFnInfo) fastpathEncMapUint32Int8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7378,16 +9295,30 @@ func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, checkNil bool, e *Encode
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint32Int16R(rv reflect.Value) {
@@ -7395,6 +9326,7 @@ func (f *encFnInfo) fastpathEncMapUint32Int16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7409,16 +9341,30 @@ func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint32Int32R(rv reflect.Value) {
@@ -7426,6 +9372,7 @@ func (f *encFnInfo) fastpathEncMapUint32Int32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7440,16 +9387,30 @@ func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint32Int64R(rv reflect.Value) {
@@ -7457,6 +9418,7 @@ func (f *encFnInfo) fastpathEncMapUint32Int64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7471,16 +9433,30 @@ func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint32Float32R(rv reflect.Value) {
@@ -7488,6 +9464,7 @@ func (f *encFnInfo) fastpathEncMapUint32Float32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7502,16 +9479,30 @@ func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, checkNil bool, e *
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v[uint32(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint32Float64R(rv reflect.Value) {
@@ -7519,6 +9510,7 @@ func (f *encFnInfo) fastpathEncMapUint32Float64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7533,16 +9525,30 @@ func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, checkNil bool, e *
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v[uint32(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint32BoolR(rv reflect.Value) {
@@ -7550,6 +9556,7 @@ func (f *encFnInfo) fastpathEncMapUint32BoolR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7564,16 +9571,30 @@ func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, checkNil bool, e *Encode
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v[uint32(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint64IntfR(rv reflect.Value) {
@@ -7581,6 +9602,7 @@ func (f *encFnInfo) fastpathEncMapUint64IntfR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7595,16 +9617,30 @@ func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, checkNil bool, e
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[uint64(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint64StringR(rv reflect.Value) {
@@ -7612,6 +9648,7 @@ func (f *encFnInfo) fastpathEncMapUint64StringR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7626,16 +9663,30 @@ func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v[uint64(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint64UintR(rv reflect.Value) {
@@ -7643,6 +9694,7 @@ func (f *encFnInfo) fastpathEncMapUint64UintR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7657,16 +9709,30 @@ func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, checkNil bool, e *Encode
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint64Uint8R(rv reflect.Value) {
@@ -7674,6 +9740,7 @@ func (f *encFnInfo) fastpathEncMapUint64Uint8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7688,16 +9755,30 @@ func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint64Uint16R(rv reflect.Value) {
@@ -7705,6 +9786,7 @@ func (f *encFnInfo) fastpathEncMapUint64Uint16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7719,16 +9801,30 @@ func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint64Uint32R(rv reflect.Value) {
@@ -7736,6 +9832,7 @@ func (f *encFnInfo) fastpathEncMapUint64Uint32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7750,16 +9847,30 @@ func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint64Uint64R(rv reflect.Value) {
@@ -7767,6 +9878,7 @@ func (f *encFnInfo) fastpathEncMapUint64Uint64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7781,16 +9893,30 @@ func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uint64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint64UintptrR(rv reflect.Value) {
@@ -7798,6 +9924,7 @@ func (f *encFnInfo) fastpathEncMapUint64UintptrR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7812,16 +9939,30 @@ func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, e *
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[uint64(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint64IntR(rv reflect.Value) {
@@ -7829,6 +9970,7 @@ func (f *encFnInfo) fastpathEncMapUint64IntR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7843,16 +9985,30 @@ func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, checkNil bool, e *Encoder)
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint64Int8R(rv reflect.Value) {
@@ -7860,6 +10016,7 @@ func (f *encFnInfo) fastpathEncMapUint64Int8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7874,16 +10031,30 @@ func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, checkNil bool, e *Encode
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint64Int16R(rv reflect.Value) {
@@ -7891,6 +10062,7 @@ func (f *encFnInfo) fastpathEncMapUint64Int16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7905,16 +10077,30 @@ func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint64Int32R(rv reflect.Value) {
@@ -7922,6 +10108,7 @@ func (f *encFnInfo) fastpathEncMapUint64Int32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7936,16 +10123,30 @@ func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint64Int64R(rv reflect.Value) {
@@ -7953,6 +10154,7 @@ func (f *encFnInfo) fastpathEncMapUint64Int64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7967,16 +10169,30 @@ func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uint64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint64Float32R(rv reflect.Value) {
@@ -7984,6 +10200,7 @@ func (f *encFnInfo) fastpathEncMapUint64Float32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -7998,16 +10215,30 @@ func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, checkNil bool, e *
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v[uint64(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint64Float64R(rv reflect.Value) {
@@ -8015,6 +10246,7 @@ func (f *encFnInfo) fastpathEncMapUint64Float64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8029,16 +10261,30 @@ func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, checkNil bool, e *
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v[uint64(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUint64BoolR(rv reflect.Value) {
@@ -8046,6 +10292,7 @@ func (f *encFnInfo) fastpathEncMapUint64BoolR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8060,16 +10307,30 @@ func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, checkNil bool, e *Encode
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(uint64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v[uint64(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeUint(uint64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintptrIntfR(rv reflect.Value) {
@@ -8077,6 +10338,7 @@ func (f *encFnInfo) fastpathEncMapUintptrIntfR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8091,16 +10353,30 @@ func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool,
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[uintptr(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintptrStringR(rv reflect.Value) {
@@ -8108,6 +10384,7 @@ func (f *encFnInfo) fastpathEncMapUintptrStringR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8122,16 +10399,30 @@ func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, checkNil bool, e *
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v[uintptr(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintptrUintR(rv reflect.Value) {
@@ -8139,6 +10430,7 @@ func (f *encFnInfo) fastpathEncMapUintptrUintR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8153,16 +10445,30 @@ func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uintptr(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintptrUint8R(rv reflect.Value) {
@@ -8170,6 +10476,7 @@ func (f *encFnInfo) fastpathEncMapUintptrUint8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8184,16 +10491,30 @@ func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uintptr(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintptrUint16R(rv reflect.Value) {
@@ -8201,6 +10522,7 @@ func (f *encFnInfo) fastpathEncMapUintptrUint16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8215,16 +10537,30 @@ func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, e *
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uintptr(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintptrUint32R(rv reflect.Value) {
@@ -8232,6 +10568,7 @@ func (f *encFnInfo) fastpathEncMapUintptrUint32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8246,16 +10583,30 @@ func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, e *
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uintptr(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintptrUint64R(rv reflect.Value) {
@@ -8263,6 +10614,7 @@ func (f *encFnInfo) fastpathEncMapUintptrUint64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8277,16 +10629,30 @@ func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, e *
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[uintptr(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintptrUintptrR(rv reflect.Value) {
@@ -8294,6 +10660,7 @@ func (f *encFnInfo) fastpathEncMapUintptrUintptrR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8308,16 +10675,30 @@ func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, e
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[uintptr(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintptrIntR(rv reflect.Value) {
@@ -8325,6 +10706,7 @@ func (f *encFnInfo) fastpathEncMapUintptrIntR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8339,16 +10721,30 @@ func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, checkNil bool, e *Encode
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uintptr(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintptrInt8R(rv reflect.Value) {
@@ -8356,6 +10752,7 @@ func (f *encFnInfo) fastpathEncMapUintptrInt8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8370,16 +10767,30 @@ func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uintptr(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintptrInt16R(rv reflect.Value) {
@@ -8387,6 +10798,7 @@ func (f *encFnInfo) fastpathEncMapUintptrInt16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8401,16 +10813,30 @@ func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uintptr(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintptrInt32R(rv reflect.Value) {
@@ -8418,6 +10844,7 @@ func (f *encFnInfo) fastpathEncMapUintptrInt32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8432,16 +10859,30 @@ func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uintptr(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintptrInt64R(rv reflect.Value) {
@@ -8449,6 +10890,7 @@ func (f *encFnInfo) fastpathEncMapUintptrInt64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8463,16 +10905,30 @@ func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, checkNil bool, e *En
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[uintptr(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintptrFloat32R(rv reflect.Value) {
@@ -8480,6 +10936,7 @@ func (f *encFnInfo) fastpathEncMapUintptrFloat32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8494,16 +10951,30 @@ func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, e
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v[uintptr(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintptrFloat64R(rv reflect.Value) {
@@ -8511,6 +10982,7 @@ func (f *encFnInfo) fastpathEncMapUintptrFloat64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8525,16 +10997,30 @@ func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, e
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v[uintptr(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapUintptrBoolR(rv reflect.Value) {
@@ -8542,6 +11028,7 @@ func (f *encFnInfo) fastpathEncMapUintptrBoolR(rv reflect.Value) {
}
func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8556,16 +11043,30 @@ func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, checkNil bool, e *Enco
}
sort.Sort(uintSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(uintptr(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v[uintptr(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
e.encode(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntIntfR(rv reflect.Value) {
@@ -8573,6 +11074,7 @@ func (f *encFnInfo) fastpathEncMapIntIntfR(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8587,16 +11089,30 @@ func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, checkNil bool, e *Encod
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[int(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntStringR(rv reflect.Value) {
@@ -8604,6 +11120,7 @@ func (f *encFnInfo) fastpathEncMapIntStringR(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntStringV(v map[int]string, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8618,16 +11135,30 @@ func (_ fastpathT) EncMapIntStringV(v map[int]string, checkNil bool, e *Encoder)
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v[int(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntUintR(rv reflect.Value) {
@@ -8635,6 +11166,7 @@ func (f *encFnInfo) fastpathEncMapIntUintR(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntUintV(v map[int]uint, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8649,16 +11181,30 @@ func (_ fastpathT) EncMapIntUintV(v map[int]uint, checkNil bool, e *Encoder) {
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntUint8R(rv reflect.Value) {
@@ -8666,6 +11212,7 @@ func (f *encFnInfo) fastpathEncMapIntUint8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8680,16 +11227,30 @@ func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, checkNil bool, e *Encoder) {
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntUint16R(rv reflect.Value) {
@@ -8697,6 +11258,7 @@ func (f *encFnInfo) fastpathEncMapIntUint16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8711,16 +11273,30 @@ func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, checkNil bool, e *Encoder)
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntUint32R(rv reflect.Value) {
@@ -8728,6 +11304,7 @@ func (f *encFnInfo) fastpathEncMapIntUint32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8742,16 +11319,30 @@ func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, checkNil bool, e *Encoder)
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntUint64R(rv reflect.Value) {
@@ -8759,6 +11350,7 @@ func (f *encFnInfo) fastpathEncMapIntUint64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8773,16 +11365,30 @@ func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, checkNil bool, e *Encoder)
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntUintptrR(rv reflect.Value) {
@@ -8790,6 +11396,7 @@ func (f *encFnInfo) fastpathEncMapIntUintptrR(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8804,16 +11411,30 @@ func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[int(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntIntR(rv reflect.Value) {
@@ -8821,6 +11442,7 @@ func (f *encFnInfo) fastpathEncMapIntIntR(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntIntV(v map[int]int, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8835,16 +11457,30 @@ func (_ fastpathT) EncMapIntIntV(v map[int]int, checkNil bool, e *Encoder) {
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntInt8R(rv reflect.Value) {
@@ -8852,6 +11488,7 @@ func (f *encFnInfo) fastpathEncMapIntInt8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntInt8V(v map[int]int8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8866,16 +11503,30 @@ func (_ fastpathT) EncMapIntInt8V(v map[int]int8, checkNil bool, e *Encoder) {
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntInt16R(rv reflect.Value) {
@@ -8883,6 +11534,7 @@ func (f *encFnInfo) fastpathEncMapIntInt16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntInt16V(v map[int]int16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8897,16 +11549,30 @@ func (_ fastpathT) EncMapIntInt16V(v map[int]int16, checkNil bool, e *Encoder) {
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntInt32R(rv reflect.Value) {
@@ -8914,6 +11580,7 @@ func (f *encFnInfo) fastpathEncMapIntInt32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntInt32V(v map[int]int32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8928,16 +11595,30 @@ func (_ fastpathT) EncMapIntInt32V(v map[int]int32, checkNil bool, e *Encoder) {
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntInt64R(rv reflect.Value) {
@@ -8945,6 +11626,7 @@ func (f *encFnInfo) fastpathEncMapIntInt64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntInt64V(v map[int]int64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8959,16 +11641,30 @@ func (_ fastpathT) EncMapIntInt64V(v map[int]int64, checkNil bool, e *Encoder) {
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntFloat32R(rv reflect.Value) {
@@ -8976,6 +11672,7 @@ func (f *encFnInfo) fastpathEncMapIntFloat32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -8990,16 +11687,30 @@ func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v[int(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntFloat64R(rv reflect.Value) {
@@ -9007,6 +11718,7 @@ func (f *encFnInfo) fastpathEncMapIntFloat64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9021,16 +11733,30 @@ func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v[int(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapIntBoolR(rv reflect.Value) {
@@ -9038,6 +11764,7 @@ func (f *encFnInfo) fastpathEncMapIntBoolR(rv reflect.Value) {
}
func (_ fastpathT) EncMapIntBoolV(v map[int]bool, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9052,16 +11779,30 @@ func (_ fastpathT) EncMapIntBoolV(v map[int]bool, checkNil bool, e *Encoder) {
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v[int(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt8IntfR(rv reflect.Value) {
@@ -9069,6 +11810,7 @@ func (f *encFnInfo) fastpathEncMapInt8IntfR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9083,16 +11825,30 @@ func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, checkNil bool, e *Enc
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[int8(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt8StringR(rv reflect.Value) {
@@ -9100,6 +11856,7 @@ func (f *encFnInfo) fastpathEncMapInt8StringR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt8StringV(v map[int8]string, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9114,16 +11871,30 @@ func (_ fastpathT) EncMapInt8StringV(v map[int8]string, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v[int8(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt8UintR(rv reflect.Value) {
@@ -9131,6 +11902,7 @@ func (f *encFnInfo) fastpathEncMapInt8UintR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9145,16 +11917,30 @@ func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, checkNil bool, e *Encoder) {
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt8Uint8R(rv reflect.Value) {
@@ -9162,6 +11948,7 @@ func (f *encFnInfo) fastpathEncMapInt8Uint8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9176,16 +11963,30 @@ func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, checkNil bool, e *Encoder)
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt8Uint16R(rv reflect.Value) {
@@ -9193,6 +11994,7 @@ func (f *encFnInfo) fastpathEncMapInt8Uint16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9207,16 +12009,30 @@ func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt8Uint32R(rv reflect.Value) {
@@ -9224,6 +12040,7 @@ func (f *encFnInfo) fastpathEncMapInt8Uint32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9238,16 +12055,30 @@ func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt8Uint64R(rv reflect.Value) {
@@ -9255,6 +12086,7 @@ func (f *encFnInfo) fastpathEncMapInt8Uint64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9269,16 +12101,30 @@ func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt8UintptrR(rv reflect.Value) {
@@ -9286,6 +12132,7 @@ func (f *encFnInfo) fastpathEncMapInt8UintptrR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9300,16 +12147,30 @@ func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, checkNil bool, e *Enco
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[int8(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt8IntR(rv reflect.Value) {
@@ -9317,6 +12178,7 @@ func (f *encFnInfo) fastpathEncMapInt8IntR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt8IntV(v map[int8]int, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9331,16 +12193,30 @@ func (_ fastpathT) EncMapInt8IntV(v map[int8]int, checkNil bool, e *Encoder) {
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt8Int8R(rv reflect.Value) {
@@ -9348,6 +12224,7 @@ func (f *encFnInfo) fastpathEncMapInt8Int8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9362,16 +12239,30 @@ func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, checkNil bool, e *Encoder) {
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt8Int16R(rv reflect.Value) {
@@ -9379,6 +12270,7 @@ func (f *encFnInfo) fastpathEncMapInt8Int16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9393,16 +12285,30 @@ func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, checkNil bool, e *Encoder)
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt8Int32R(rv reflect.Value) {
@@ -9410,6 +12316,7 @@ func (f *encFnInfo) fastpathEncMapInt8Int32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9424,16 +12331,30 @@ func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, checkNil bool, e *Encoder)
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt8Int64R(rv reflect.Value) {
@@ -9441,6 +12362,7 @@ func (f *encFnInfo) fastpathEncMapInt8Int64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9455,16 +12377,30 @@ func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, checkNil bool, e *Encoder)
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int8(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt8Float32R(rv reflect.Value) {
@@ -9472,6 +12408,7 @@ func (f *encFnInfo) fastpathEncMapInt8Float32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9486,16 +12423,30 @@ func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, checkNil bool, e *Enco
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v[int8(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt8Float64R(rv reflect.Value) {
@@ -9503,6 +12454,7 @@ func (f *encFnInfo) fastpathEncMapInt8Float64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9517,16 +12469,30 @@ func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, checkNil bool, e *Enco
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v[int8(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt8BoolR(rv reflect.Value) {
@@ -9534,6 +12500,7 @@ func (f *encFnInfo) fastpathEncMapInt8BoolR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9548,16 +12515,30 @@ func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, checkNil bool, e *Encoder) {
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int8(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v[int8(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt16IntfR(rv reflect.Value) {
@@ -9565,6 +12546,7 @@ func (f *encFnInfo) fastpathEncMapInt16IntfR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9579,16 +12561,30 @@ func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, checkNil bool, e *E
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[int16(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt16StringR(rv reflect.Value) {
@@ -9596,6 +12592,7 @@ func (f *encFnInfo) fastpathEncMapInt16StringR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt16StringV(v map[int16]string, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9610,16 +12607,30 @@ func (_ fastpathT) EncMapInt16StringV(v map[int16]string, checkNil bool, e *Enco
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v[int16(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt16UintR(rv reflect.Value) {
@@ -9627,6 +12638,7 @@ func (f *encFnInfo) fastpathEncMapInt16UintR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9641,16 +12653,30 @@ func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, checkNil bool, e *Encoder)
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt16Uint8R(rv reflect.Value) {
@@ -9658,6 +12684,7 @@ func (f *encFnInfo) fastpathEncMapInt16Uint8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9672,16 +12699,30 @@ func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt16Uint16R(rv reflect.Value) {
@@ -9689,6 +12730,7 @@ func (f *encFnInfo) fastpathEncMapInt16Uint16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9703,16 +12745,30 @@ func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, checkNil bool, e *Enco
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt16Uint32R(rv reflect.Value) {
@@ -9720,6 +12776,7 @@ func (f *encFnInfo) fastpathEncMapInt16Uint32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9734,16 +12791,30 @@ func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, checkNil bool, e *Enco
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt16Uint64R(rv reflect.Value) {
@@ -9751,6 +12822,7 @@ func (f *encFnInfo) fastpathEncMapInt16Uint64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9765,16 +12837,30 @@ func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, checkNil bool, e *Enco
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt16UintptrR(rv reflect.Value) {
@@ -9782,6 +12868,7 @@ func (f *encFnInfo) fastpathEncMapInt16UintptrR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9796,16 +12883,30 @@ func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, checkNil bool, e *En
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[int16(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt16IntR(rv reflect.Value) {
@@ -9813,6 +12914,7 @@ func (f *encFnInfo) fastpathEncMapInt16IntR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt16IntV(v map[int16]int, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9827,16 +12929,30 @@ func (_ fastpathT) EncMapInt16IntV(v map[int16]int, checkNil bool, e *Encoder) {
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt16Int8R(rv reflect.Value) {
@@ -9844,6 +12960,7 @@ func (f *encFnInfo) fastpathEncMapInt16Int8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9858,16 +12975,30 @@ func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, checkNil bool, e *Encoder)
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt16Int16R(rv reflect.Value) {
@@ -9875,6 +13006,7 @@ func (f *encFnInfo) fastpathEncMapInt16Int16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9889,16 +13021,30 @@ func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt16Int32R(rv reflect.Value) {
@@ -9906,6 +13052,7 @@ func (f *encFnInfo) fastpathEncMapInt16Int32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9920,16 +13067,30 @@ func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt16Int64R(rv reflect.Value) {
@@ -9937,6 +13098,7 @@ func (f *encFnInfo) fastpathEncMapInt16Int64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9951,16 +13113,30 @@ func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int16(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt16Float32R(rv reflect.Value) {
@@ -9968,6 +13144,7 @@ func (f *encFnInfo) fastpathEncMapInt16Float32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -9982,16 +13159,30 @@ func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, checkNil bool, e *En
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v[int16(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt16Float64R(rv reflect.Value) {
@@ -9999,6 +13190,7 @@ func (f *encFnInfo) fastpathEncMapInt16Float64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10013,16 +13205,30 @@ func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, checkNil bool, e *En
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v[int16(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt16BoolR(rv reflect.Value) {
@@ -10030,6 +13236,7 @@ func (f *encFnInfo) fastpathEncMapInt16BoolR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10044,16 +13251,30 @@ func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, checkNil bool, e *Encoder)
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int16(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v[int16(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt32IntfR(rv reflect.Value) {
@@ -10061,6 +13282,7 @@ func (f *encFnInfo) fastpathEncMapInt32IntfR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10075,16 +13297,30 @@ func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, checkNil bool, e *E
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[int32(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt32StringR(rv reflect.Value) {
@@ -10092,6 +13328,7 @@ func (f *encFnInfo) fastpathEncMapInt32StringR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt32StringV(v map[int32]string, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10106,16 +13343,30 @@ func (_ fastpathT) EncMapInt32StringV(v map[int32]string, checkNil bool, e *Enco
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v[int32(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt32UintR(rv reflect.Value) {
@@ -10123,6 +13374,7 @@ func (f *encFnInfo) fastpathEncMapInt32UintR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10137,16 +13389,30 @@ func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, checkNil bool, e *Encoder)
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt32Uint8R(rv reflect.Value) {
@@ -10154,6 +13420,7 @@ func (f *encFnInfo) fastpathEncMapInt32Uint8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10168,16 +13435,30 @@ func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt32Uint16R(rv reflect.Value) {
@@ -10185,6 +13466,7 @@ func (f *encFnInfo) fastpathEncMapInt32Uint16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10199,16 +13481,30 @@ func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, checkNil bool, e *Enco
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt32Uint32R(rv reflect.Value) {
@@ -10216,6 +13512,7 @@ func (f *encFnInfo) fastpathEncMapInt32Uint32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10230,16 +13527,30 @@ func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, checkNil bool, e *Enco
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt32Uint64R(rv reflect.Value) {
@@ -10247,6 +13558,7 @@ func (f *encFnInfo) fastpathEncMapInt32Uint64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10261,16 +13573,30 @@ func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, checkNil bool, e *Enco
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt32UintptrR(rv reflect.Value) {
@@ -10278,6 +13604,7 @@ func (f *encFnInfo) fastpathEncMapInt32UintptrR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10292,16 +13619,30 @@ func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, checkNil bool, e *En
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[int32(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt32IntR(rv reflect.Value) {
@@ -10309,6 +13650,7 @@ func (f *encFnInfo) fastpathEncMapInt32IntR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt32IntV(v map[int32]int, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10323,16 +13665,30 @@ func (_ fastpathT) EncMapInt32IntV(v map[int32]int, checkNil bool, e *Encoder) {
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt32Int8R(rv reflect.Value) {
@@ -10340,6 +13696,7 @@ func (f *encFnInfo) fastpathEncMapInt32Int8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10354,16 +13711,30 @@ func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, checkNil bool, e *Encoder)
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt32Int16R(rv reflect.Value) {
@@ -10371,6 +13742,7 @@ func (f *encFnInfo) fastpathEncMapInt32Int16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10385,16 +13757,30 @@ func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt32Int32R(rv reflect.Value) {
@@ -10402,6 +13788,7 @@ func (f *encFnInfo) fastpathEncMapInt32Int32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10416,16 +13803,30 @@ func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt32Int64R(rv reflect.Value) {
@@ -10433,6 +13834,7 @@ func (f *encFnInfo) fastpathEncMapInt32Int64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10447,16 +13849,30 @@ func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int32(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt32Float32R(rv reflect.Value) {
@@ -10464,6 +13880,7 @@ func (f *encFnInfo) fastpathEncMapInt32Float32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10478,16 +13895,30 @@ func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, checkNil bool, e *En
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v[int32(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt32Float64R(rv reflect.Value) {
@@ -10495,6 +13926,7 @@ func (f *encFnInfo) fastpathEncMapInt32Float64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10509,16 +13941,30 @@ func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, checkNil bool, e *En
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v[int32(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt32BoolR(rv reflect.Value) {
@@ -10526,6 +13972,7 @@ func (f *encFnInfo) fastpathEncMapInt32BoolR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10540,16 +13987,30 @@ func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, checkNil bool, e *Encoder)
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int32(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v[int32(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt64IntfR(rv reflect.Value) {
@@ -10557,6 +14018,7 @@ func (f *encFnInfo) fastpathEncMapInt64IntfR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10571,16 +14033,30 @@ func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, checkNil bool, e *E
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[int64(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt64StringR(rv reflect.Value) {
@@ -10588,6 +14064,7 @@ func (f *encFnInfo) fastpathEncMapInt64StringR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt64StringV(v map[int64]string, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10602,16 +14079,30 @@ func (_ fastpathT) EncMapInt64StringV(v map[int64]string, checkNil bool, e *Enco
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v[int64(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt64UintR(rv reflect.Value) {
@@ -10619,6 +14110,7 @@ func (f *encFnInfo) fastpathEncMapInt64UintR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10633,16 +14125,30 @@ func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, checkNil bool, e *Encoder)
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt64Uint8R(rv reflect.Value) {
@@ -10650,6 +14156,7 @@ func (f *encFnInfo) fastpathEncMapInt64Uint8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10664,16 +14171,30 @@ func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt64Uint16R(rv reflect.Value) {
@@ -10681,6 +14202,7 @@ func (f *encFnInfo) fastpathEncMapInt64Uint16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10695,16 +14217,30 @@ func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, checkNil bool, e *Enco
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt64Uint32R(rv reflect.Value) {
@@ -10712,6 +14248,7 @@ func (f *encFnInfo) fastpathEncMapInt64Uint32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10726,16 +14263,30 @@ func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, checkNil bool, e *Enco
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt64Uint64R(rv reflect.Value) {
@@ -10743,6 +14294,7 @@ func (f *encFnInfo) fastpathEncMapInt64Uint64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10757,16 +14309,30 @@ func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, checkNil bool, e *Enco
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[int64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt64UintptrR(rv reflect.Value) {
@@ -10774,6 +14340,7 @@ func (f *encFnInfo) fastpathEncMapInt64UintptrR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10788,16 +14355,30 @@ func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, checkNil bool, e *En
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[int64(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt64IntR(rv reflect.Value) {
@@ -10805,6 +14386,7 @@ func (f *encFnInfo) fastpathEncMapInt64IntR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt64IntV(v map[int64]int, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10819,16 +14401,30 @@ func (_ fastpathT) EncMapInt64IntV(v map[int64]int, checkNil bool, e *Encoder) {
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt64Int8R(rv reflect.Value) {
@@ -10836,6 +14432,7 @@ func (f *encFnInfo) fastpathEncMapInt64Int8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10850,16 +14447,30 @@ func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, checkNil bool, e *Encoder)
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt64Int16R(rv reflect.Value) {
@@ -10867,6 +14478,7 @@ func (f *encFnInfo) fastpathEncMapInt64Int16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10881,16 +14493,30 @@ func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt64Int32R(rv reflect.Value) {
@@ -10898,6 +14524,7 @@ func (f *encFnInfo) fastpathEncMapInt64Int32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10912,16 +14539,30 @@ func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt64Int64R(rv reflect.Value) {
@@ -10929,6 +14570,7 @@ func (f *encFnInfo) fastpathEncMapInt64Int64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10943,16 +14585,30 @@ func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, checkNil bool, e *Encode
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[int64(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt64Float32R(rv reflect.Value) {
@@ -10960,6 +14616,7 @@ func (f *encFnInfo) fastpathEncMapInt64Float32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -10974,16 +14631,30 @@ func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, checkNil bool, e *En
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v[int64(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt64Float64R(rv reflect.Value) {
@@ -10991,6 +14662,7 @@ func (f *encFnInfo) fastpathEncMapInt64Float64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -11005,16 +14677,30 @@ func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, checkNil bool, e *En
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v[int64(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapInt64BoolR(rv reflect.Value) {
@@ -11022,6 +14708,7 @@ func (f *encFnInfo) fastpathEncMapInt64BoolR(rv reflect.Value) {
}
func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -11036,16 +14723,30 @@ func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, checkNil bool, e *Encoder)
}
sort.Sort(intSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(int64(k2)))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v[int64(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeInt(int64(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapBoolIntfR(rv reflect.Value) {
@@ -11053,6 +14754,7 @@ func (f *encFnInfo) fastpathEncMapBoolIntfR(rv reflect.Value) {
}
func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -11067,16 +14769,30 @@ func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, checkNil bool, e *Enc
}
sort.Sort(boolSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[bool(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapBoolStringR(rv reflect.Value) {
@@ -11084,6 +14800,7 @@ func (f *encFnInfo) fastpathEncMapBoolStringR(rv reflect.Value) {
}
func (_ fastpathT) EncMapBoolStringV(v map[bool]string, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -11098,16 +14815,30 @@ func (_ fastpathT) EncMapBoolStringV(v map[bool]string, checkNil bool, e *Encode
}
sort.Sort(boolSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v[bool(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeString(c_UTF8, v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapBoolUintR(rv reflect.Value) {
@@ -11115,6 +14846,7 @@ func (f *encFnInfo) fastpathEncMapBoolUintR(rv reflect.Value) {
}
func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -11129,16 +14861,30 @@ func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, checkNil bool, e *Encoder) {
}
sort.Sort(boolSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[bool(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapBoolUint8R(rv reflect.Value) {
@@ -11146,6 +14892,7 @@ func (f *encFnInfo) fastpathEncMapBoolUint8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -11160,16 +14907,30 @@ func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, checkNil bool, e *Encoder)
}
sort.Sort(boolSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[bool(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapBoolUint16R(rv reflect.Value) {
@@ -11177,6 +14938,7 @@ func (f *encFnInfo) fastpathEncMapBoolUint16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -11191,16 +14953,30 @@ func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, checkNil bool, e *Encode
}
sort.Sort(boolSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[bool(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapBoolUint32R(rv reflect.Value) {
@@ -11208,6 +14984,7 @@ func (f *encFnInfo) fastpathEncMapBoolUint32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -11222,16 +14999,30 @@ func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, checkNil bool, e *Encode
}
sort.Sort(boolSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[bool(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapBoolUint64R(rv reflect.Value) {
@@ -11239,6 +15030,7 @@ func (f *encFnInfo) fastpathEncMapBoolUint64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -11253,16 +15045,30 @@ func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, checkNil bool, e *Encode
}
sort.Sort(boolSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v[bool(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeUint(uint64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapBoolUintptrR(rv reflect.Value) {
@@ -11270,6 +15076,7 @@ func (f *encFnInfo) fastpathEncMapBoolUintptrR(rv reflect.Value) {
}
func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -11284,16 +15091,30 @@ func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, checkNil bool, e *Enco
}
sort.Sort(boolSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v[bool(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
e.encode(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapBoolIntR(rv reflect.Value) {
@@ -11301,6 +15122,7 @@ func (f *encFnInfo) fastpathEncMapBoolIntR(rv reflect.Value) {
}
func (_ fastpathT) EncMapBoolIntV(v map[bool]int, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -11315,16 +15137,30 @@ func (_ fastpathT) EncMapBoolIntV(v map[bool]int, checkNil bool, e *Encoder) {
}
sort.Sort(boolSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[bool(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapBoolInt8R(rv reflect.Value) {
@@ -11332,6 +15168,7 @@ func (f *encFnInfo) fastpathEncMapBoolInt8R(rv reflect.Value) {
}
func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -11346,16 +15183,30 @@ func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, checkNil bool, e *Encoder) {
}
sort.Sort(boolSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[bool(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapBoolInt16R(rv reflect.Value) {
@@ -11363,6 +15214,7 @@ func (f *encFnInfo) fastpathEncMapBoolInt16R(rv reflect.Value) {
}
func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -11377,16 +15229,30 @@ func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, checkNil bool, e *Encoder)
}
sort.Sort(boolSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[bool(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapBoolInt32R(rv reflect.Value) {
@@ -11394,6 +15260,7 @@ func (f *encFnInfo) fastpathEncMapBoolInt32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -11408,16 +15275,30 @@ func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, checkNil bool, e *Encoder)
}
sort.Sort(boolSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[bool(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapBoolInt64R(rv reflect.Value) {
@@ -11425,6 +15306,7 @@ func (f *encFnInfo) fastpathEncMapBoolInt64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -11439,16 +15321,30 @@ func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, checkNil bool, e *Encoder)
}
sort.Sort(boolSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v[bool(k2)]))
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeInt(int64(v2))
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapBoolFloat32R(rv reflect.Value) {
@@ -11456,6 +15352,7 @@ func (f *encFnInfo) fastpathEncMapBoolFloat32R(rv reflect.Value) {
}
func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -11470,16 +15367,30 @@ func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, checkNil bool, e *Enco
}
sort.Sort(boolSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v[bool(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat32(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapBoolFloat64R(rv reflect.Value) {
@@ -11487,6 +15398,7 @@ func (f *encFnInfo) fastpathEncMapBoolFloat64R(rv reflect.Value) {
}
func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -11501,16 +15413,30 @@ func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, checkNil bool, e *Enco
}
sort.Sort(boolSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v[bool(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeFloat64(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
func (f *encFnInfo) fastpathEncMapBoolBoolR(rv reflect.Value) {
@@ -11518,6 +15444,7 @@ func (f *encFnInfo) fastpathEncMapBoolBoolR(rv reflect.Value) {
}
func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -11532,22 +15459,39 @@ func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, checkNil bool, e *Encoder) {
}
sort.Sort(boolSlice(v2))
for _, k2 := range v2 {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(bool(k2))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v[bool(k2)])
}
} else {
for k2, v2 := range v {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
ee.EncodeBool(k2)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
ee.EncodeBool(v2)
}
}
- ee.EncodeEnd()
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
+ }
}
// -- decode
// -- -- fast path type switch
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
switch v := iv.(type) {
case []interface{}:
@@ -13719,6 +17663,7 @@ func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
}
default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
return false
}
return true
@@ -13746,8 +17691,7 @@ func (f fastpathT) DecSliceIntfX(vp *[]interface{}, checkNil bool, d *Decoder) {
*vp = v
}
}
-func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool,
- d *Decoder) (_ []interface{}, changed bool) {
+func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool, d *Decoder) (_ []interface{}, changed bool) {
dd := d.d
if checkNil && dd.TryDecodeAsNil() {
@@ -13758,59 +17702,83 @@ func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool,
}
slh, containerLenS := d.decSliceHelperStart()
- x2read := containerLenS
- var xtrunc bool
- if canChange && v == nil {
- var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16); xtrunc {
- x2read = xlen
- }
- v = make([]interface{}, xlen)
- changed = true
- }
if containerLenS == 0 {
- if canChange && len(v) != 0 {
- v = v[:0]
+ if canChange {
+ if v == nil {
+ v = []interface{}{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
changed = true
}
- return v, changed
+ slh.End()
+ return
}
if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
if containerLenS > cap(v) {
if canChange {
var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16); xtrunc {
- x2read = xlen
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]interface{}, xlen)
+ }
+ } else {
+ v = make([]interface{}, xlen)
}
- v = make([]interface{}, xlen)
changed = true
} else {
d.arrayCannotExpand(len(v), containerLenS)
- x2read = len(v)
}
+ x2read = len(v)
} else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
}
-
j := 0
for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
d.decode(&v[j])
}
if xtrunc {
for ; j < containerLenS; j++ {
v = append(v, nil)
+ slh.ElemContainerState(j)
d.decode(&v[j])
}
} else if !canChange {
for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
d.swallow()
}
}
} else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []interface{}{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return
+ }
+ if cap(v) == 0 {
+ v = make([]interface{}, 1, 4)
+ changed = true
+ }
j := 0
- for ; !dd.CheckBreak(); j++ {
+ for ; !breakFound; j++ {
if j >= len(v) {
if canChange {
v = append(v, nil)
@@ -13819,15 +17787,21 @@ func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool,
d.arrayCannotExpand(len(v), j+1)
}
}
+ slh.ElemContainerState(j)
if j < len(v) {
d.decode(&v[j])
} else {
d.swallow()
}
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
}
- slh.End()
}
+ slh.End()
return v, changed
}
@@ -13851,8 +17825,7 @@ func (f fastpathT) DecSliceStringX(vp *[]string, checkNil bool, d *Decoder) {
*vp = v
}
}
-func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool,
- d *Decoder) (_ []string, changed bool) {
+func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool, d *Decoder) (_ []string, changed bool) {
dd := d.d
if checkNil && dd.TryDecodeAsNil() {
@@ -13863,59 +17836,83 @@ func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool,
}
slh, containerLenS := d.decSliceHelperStart()
- x2read := containerLenS
- var xtrunc bool
- if canChange && v == nil {
- var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16); xtrunc {
- x2read = xlen
- }
- v = make([]string, xlen)
- changed = true
- }
if containerLenS == 0 {
- if canChange && len(v) != 0 {
- v = v[:0]
+ if canChange {
+ if v == nil {
+ v = []string{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
changed = true
}
- return v, changed
+ slh.End()
+ return
}
if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
if containerLenS > cap(v) {
if canChange {
var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16); xtrunc {
- x2read = xlen
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]string, xlen)
+ }
+ } else {
+ v = make([]string, xlen)
}
- v = make([]string, xlen)
changed = true
} else {
d.arrayCannotExpand(len(v), containerLenS)
- x2read = len(v)
}
+ x2read = len(v)
} else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
}
-
j := 0
for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
v[j] = dd.DecodeString()
}
if xtrunc {
for ; j < containerLenS; j++ {
v = append(v, "")
+ slh.ElemContainerState(j)
v[j] = dd.DecodeString()
}
} else if !canChange {
for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
d.swallow()
}
}
} else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []string{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return
+ }
+ if cap(v) == 0 {
+ v = make([]string, 1, 4)
+ changed = true
+ }
j := 0
- for ; !dd.CheckBreak(); j++ {
+ for ; !breakFound; j++ {
if j >= len(v) {
if canChange {
v = append(v, "")
@@ -13924,14 +17921,20 @@ func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool,
d.arrayCannotExpand(len(v), j+1)
}
}
+ slh.ElemContainerState(j)
if j < len(v) {
v[j] = dd.DecodeString()
} else {
d.swallow()
}
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
}
- slh.End()
}
+ slh.End()
return v, changed
}
@@ -13955,8 +17958,7 @@ func (f fastpathT) DecSliceFloat32X(vp *[]float32, checkNil bool, d *Decoder) {
*vp = v
}
}
-func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool,
- d *Decoder) (_ []float32, changed bool) {
+func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool, d *Decoder) (_ []float32, changed bool) {
dd := d.d
if checkNil && dd.TryDecodeAsNil() {
@@ -13967,59 +17969,83 @@ func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool,
}
slh, containerLenS := d.decSliceHelperStart()
- x2read := containerLenS
- var xtrunc bool
- if canChange && v == nil {
- var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4); xtrunc {
- x2read = xlen
- }
- v = make([]float32, xlen)
- changed = true
- }
if containerLenS == 0 {
- if canChange && len(v) != 0 {
- v = v[:0]
+ if canChange {
+ if v == nil {
+ v = []float32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
changed = true
}
- return v, changed
+ slh.End()
+ return
}
if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
if containerLenS > cap(v) {
if canChange {
var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4); xtrunc {
- x2read = xlen
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]float32, xlen)
+ }
+ } else {
+ v = make([]float32, xlen)
}
- v = make([]float32, xlen)
changed = true
} else {
d.arrayCannotExpand(len(v), containerLenS)
- x2read = len(v)
}
+ x2read = len(v)
} else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
}
-
j := 0
for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
v[j] = float32(dd.DecodeFloat(true))
}
if xtrunc {
for ; j < containerLenS; j++ {
v = append(v, 0)
+ slh.ElemContainerState(j)
v[j] = float32(dd.DecodeFloat(true))
}
} else if !canChange {
for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
d.swallow()
}
}
} else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []float32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return
+ }
+ if cap(v) == 0 {
+ v = make([]float32, 1, 4)
+ changed = true
+ }
j := 0
- for ; !dd.CheckBreak(); j++ {
+ for ; !breakFound; j++ {
if j >= len(v) {
if canChange {
v = append(v, 0)
@@ -14028,14 +18054,20 @@ func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool,
d.arrayCannotExpand(len(v), j+1)
}
}
+ slh.ElemContainerState(j)
if j < len(v) {
v[j] = float32(dd.DecodeFloat(true))
} else {
d.swallow()
}
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
}
- slh.End()
}
+ slh.End()
return v, changed
}
@@ -14059,8 +18091,7 @@ func (f fastpathT) DecSliceFloat64X(vp *[]float64, checkNil bool, d *Decoder) {
*vp = v
}
}
-func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool,
- d *Decoder) (_ []float64, changed bool) {
+func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool, d *Decoder) (_ []float64, changed bool) {
dd := d.d
if checkNil && dd.TryDecodeAsNil() {
@@ -14071,59 +18102,83 @@ func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool,
}
slh, containerLenS := d.decSliceHelperStart()
- x2read := containerLenS
- var xtrunc bool
- if canChange && v == nil {
- var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8); xtrunc {
- x2read = xlen
- }
- v = make([]float64, xlen)
- changed = true
- }
if containerLenS == 0 {
- if canChange && len(v) != 0 {
- v = v[:0]
+ if canChange {
+ if v == nil {
+ v = []float64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
changed = true
}
- return v, changed
+ slh.End()
+ return
}
if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
if containerLenS > cap(v) {
if canChange {
var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8); xtrunc {
- x2read = xlen
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]float64, xlen)
+ }
+ } else {
+ v = make([]float64, xlen)
}
- v = make([]float64, xlen)
changed = true
} else {
d.arrayCannotExpand(len(v), containerLenS)
- x2read = len(v)
}
+ x2read = len(v)
} else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
}
-
j := 0
for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
v[j] = dd.DecodeFloat(false)
}
if xtrunc {
for ; j < containerLenS; j++ {
v = append(v, 0)
+ slh.ElemContainerState(j)
v[j] = dd.DecodeFloat(false)
}
} else if !canChange {
for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
d.swallow()
}
}
} else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []float64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return
+ }
+ if cap(v) == 0 {
+ v = make([]float64, 1, 4)
+ changed = true
+ }
j := 0
- for ; !dd.CheckBreak(); j++ {
+ for ; !breakFound; j++ {
if j >= len(v) {
if canChange {
v = append(v, 0)
@@ -14132,14 +18187,20 @@ func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool,
d.arrayCannotExpand(len(v), j+1)
}
}
+ slh.ElemContainerState(j)
if j < len(v) {
v[j] = dd.DecodeFloat(false)
} else {
d.swallow()
}
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
}
- slh.End()
}
+ slh.End()
return v, changed
}
@@ -14163,8 +18224,7 @@ func (f fastpathT) DecSliceUintX(vp *[]uint, checkNil bool, d *Decoder) {
*vp = v
}
}
-func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool,
- d *Decoder) (_ []uint, changed bool) {
+func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool, d *Decoder) (_ []uint, changed bool) {
dd := d.d
if checkNil && dd.TryDecodeAsNil() {
@@ -14175,59 +18235,83 @@ func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool,
}
slh, containerLenS := d.decSliceHelperStart()
- x2read := containerLenS
- var xtrunc bool
- if canChange && v == nil {
- var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8); xtrunc {
- x2read = xlen
- }
- v = make([]uint, xlen)
- changed = true
- }
if containerLenS == 0 {
- if canChange && len(v) != 0 {
- v = v[:0]
+ if canChange {
+ if v == nil {
+ v = []uint{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
changed = true
}
- return v, changed
+ slh.End()
+ return
}
if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
if containerLenS > cap(v) {
if canChange {
var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8); xtrunc {
- x2read = xlen
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint, xlen)
+ }
+ } else {
+ v = make([]uint, xlen)
}
- v = make([]uint, xlen)
changed = true
} else {
d.arrayCannotExpand(len(v), containerLenS)
- x2read = len(v)
}
+ x2read = len(v)
} else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
}
-
j := 0
for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
v[j] = uint(dd.DecodeUint(uintBitsize))
}
if xtrunc {
for ; j < containerLenS; j++ {
v = append(v, 0)
+ slh.ElemContainerState(j)
v[j] = uint(dd.DecodeUint(uintBitsize))
}
} else if !canChange {
for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
d.swallow()
}
}
} else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uint{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return
+ }
+ if cap(v) == 0 {
+ v = make([]uint, 1, 4)
+ changed = true
+ }
j := 0
- for ; !dd.CheckBreak(); j++ {
+ for ; !breakFound; j++ {
if j >= len(v) {
if canChange {
v = append(v, 0)
@@ -14236,14 +18320,20 @@ func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool,
d.arrayCannotExpand(len(v), j+1)
}
}
+ slh.ElemContainerState(j)
if j < len(v) {
v[j] = uint(dd.DecodeUint(uintBitsize))
} else {
d.swallow()
}
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
}
- slh.End()
}
+ slh.End()
return v, changed
}
@@ -14267,8 +18357,7 @@ func (f fastpathT) DecSliceUint16X(vp *[]uint16, checkNil bool, d *Decoder) {
*vp = v
}
}
-func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool,
- d *Decoder) (_ []uint16, changed bool) {
+func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool, d *Decoder) (_ []uint16, changed bool) {
dd := d.d
if checkNil && dd.TryDecodeAsNil() {
@@ -14279,59 +18368,83 @@ func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool,
}
slh, containerLenS := d.decSliceHelperStart()
- x2read := containerLenS
- var xtrunc bool
- if canChange && v == nil {
- var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2); xtrunc {
- x2read = xlen
- }
- v = make([]uint16, xlen)
- changed = true
- }
if containerLenS == 0 {
- if canChange && len(v) != 0 {
- v = v[:0]
+ if canChange {
+ if v == nil {
+ v = []uint16{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
changed = true
}
- return v, changed
+ slh.End()
+ return
}
if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
if containerLenS > cap(v) {
if canChange {
var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2); xtrunc {
- x2read = xlen
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint16, xlen)
+ }
+ } else {
+ v = make([]uint16, xlen)
}
- v = make([]uint16, xlen)
changed = true
} else {
d.arrayCannotExpand(len(v), containerLenS)
- x2read = len(v)
}
+ x2read = len(v)
} else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
}
-
j := 0
for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
v[j] = uint16(dd.DecodeUint(16))
}
if xtrunc {
for ; j < containerLenS; j++ {
v = append(v, 0)
+ slh.ElemContainerState(j)
v[j] = uint16(dd.DecodeUint(16))
}
} else if !canChange {
for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
d.swallow()
}
}
} else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uint16{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return
+ }
+ if cap(v) == 0 {
+ v = make([]uint16, 1, 4)
+ changed = true
+ }
j := 0
- for ; !dd.CheckBreak(); j++ {
+ for ; !breakFound; j++ {
if j >= len(v) {
if canChange {
v = append(v, 0)
@@ -14340,14 +18453,20 @@ func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool,
d.arrayCannotExpand(len(v), j+1)
}
}
+ slh.ElemContainerState(j)
if j < len(v) {
v[j] = uint16(dd.DecodeUint(16))
} else {
d.swallow()
}
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
}
- slh.End()
}
+ slh.End()
return v, changed
}
@@ -14371,8 +18490,7 @@ func (f fastpathT) DecSliceUint32X(vp *[]uint32, checkNil bool, d *Decoder) {
*vp = v
}
}
-func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool,
- d *Decoder) (_ []uint32, changed bool) {
+func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool, d *Decoder) (_ []uint32, changed bool) {
dd := d.d
if checkNil && dd.TryDecodeAsNil() {
@@ -14383,59 +18501,83 @@ func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool,
}
slh, containerLenS := d.decSliceHelperStart()
- x2read := containerLenS
- var xtrunc bool
- if canChange && v == nil {
- var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4); xtrunc {
- x2read = xlen
- }
- v = make([]uint32, xlen)
- changed = true
- }
if containerLenS == 0 {
- if canChange && len(v) != 0 {
- v = v[:0]
+ if canChange {
+ if v == nil {
+ v = []uint32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
changed = true
}
- return v, changed
+ slh.End()
+ return
}
if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
if containerLenS > cap(v) {
if canChange {
var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4); xtrunc {
- x2read = xlen
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint32, xlen)
+ }
+ } else {
+ v = make([]uint32, xlen)
}
- v = make([]uint32, xlen)
changed = true
} else {
d.arrayCannotExpand(len(v), containerLenS)
- x2read = len(v)
}
+ x2read = len(v)
} else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
}
-
j := 0
for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
v[j] = uint32(dd.DecodeUint(32))
}
if xtrunc {
for ; j < containerLenS; j++ {
v = append(v, 0)
+ slh.ElemContainerState(j)
v[j] = uint32(dd.DecodeUint(32))
}
} else if !canChange {
for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
d.swallow()
}
}
} else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uint32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return
+ }
+ if cap(v) == 0 {
+ v = make([]uint32, 1, 4)
+ changed = true
+ }
j := 0
- for ; !dd.CheckBreak(); j++ {
+ for ; !breakFound; j++ {
if j >= len(v) {
if canChange {
v = append(v, 0)
@@ -14444,14 +18586,20 @@ func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool,
d.arrayCannotExpand(len(v), j+1)
}
}
+ slh.ElemContainerState(j)
if j < len(v) {
v[j] = uint32(dd.DecodeUint(32))
} else {
d.swallow()
}
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
}
- slh.End()
}
+ slh.End()
return v, changed
}
@@ -14475,8 +18623,7 @@ func (f fastpathT) DecSliceUint64X(vp *[]uint64, checkNil bool, d *Decoder) {
*vp = v
}
}
-func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool,
- d *Decoder) (_ []uint64, changed bool) {
+func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool, d *Decoder) (_ []uint64, changed bool) {
dd := d.d
if checkNil && dd.TryDecodeAsNil() {
@@ -14487,59 +18634,83 @@ func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool,
}
slh, containerLenS := d.decSliceHelperStart()
- x2read := containerLenS
- var xtrunc bool
- if canChange && v == nil {
- var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8); xtrunc {
- x2read = xlen
- }
- v = make([]uint64, xlen)
- changed = true
- }
if containerLenS == 0 {
- if canChange && len(v) != 0 {
- v = v[:0]
+ if canChange {
+ if v == nil {
+ v = []uint64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
changed = true
}
- return v, changed
+ slh.End()
+ return
}
if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
if containerLenS > cap(v) {
if canChange {
var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8); xtrunc {
- x2read = xlen
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint64, xlen)
+ }
+ } else {
+ v = make([]uint64, xlen)
}
- v = make([]uint64, xlen)
changed = true
} else {
d.arrayCannotExpand(len(v), containerLenS)
- x2read = len(v)
}
+ x2read = len(v)
} else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
}
-
j := 0
for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
v[j] = dd.DecodeUint(64)
}
if xtrunc {
for ; j < containerLenS; j++ {
v = append(v, 0)
+ slh.ElemContainerState(j)
v[j] = dd.DecodeUint(64)
}
} else if !canChange {
for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
d.swallow()
}
}
} else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uint64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return
+ }
+ if cap(v) == 0 {
+ v = make([]uint64, 1, 4)
+ changed = true
+ }
j := 0
- for ; !dd.CheckBreak(); j++ {
+ for ; !breakFound; j++ {
if j >= len(v) {
if canChange {
v = append(v, 0)
@@ -14548,14 +18719,20 @@ func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool,
d.arrayCannotExpand(len(v), j+1)
}
}
+ slh.ElemContainerState(j)
if j < len(v) {
v[j] = dd.DecodeUint(64)
} else {
d.swallow()
}
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
}
- slh.End()
}
+ slh.End()
return v, changed
}
@@ -14579,8 +18756,7 @@ func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, checkNil bool, d *Decoder) {
*vp = v
}
}
-func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool,
- d *Decoder) (_ []uintptr, changed bool) {
+func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool, d *Decoder) (_ []uintptr, changed bool) {
dd := d.d
if checkNil && dd.TryDecodeAsNil() {
@@ -14591,59 +18767,83 @@ func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool,
}
slh, containerLenS := d.decSliceHelperStart()
- x2read := containerLenS
- var xtrunc bool
- if canChange && v == nil {
- var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8); xtrunc {
- x2read = xlen
- }
- v = make([]uintptr, xlen)
- changed = true
- }
if containerLenS == 0 {
- if canChange && len(v) != 0 {
- v = v[:0]
+ if canChange {
+ if v == nil {
+ v = []uintptr{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
changed = true
}
- return v, changed
+ slh.End()
+ return
}
if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
if containerLenS > cap(v) {
if canChange {
var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8); xtrunc {
- x2read = xlen
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uintptr, xlen)
+ }
+ } else {
+ v = make([]uintptr, xlen)
}
- v = make([]uintptr, xlen)
changed = true
} else {
d.arrayCannotExpand(len(v), containerLenS)
- x2read = len(v)
}
+ x2read = len(v)
} else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
}
-
j := 0
for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
v[j] = uintptr(dd.DecodeUint(uintBitsize))
}
if xtrunc {
for ; j < containerLenS; j++ {
v = append(v, 0)
+ slh.ElemContainerState(j)
v[j] = uintptr(dd.DecodeUint(uintBitsize))
}
} else if !canChange {
for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
d.swallow()
}
}
} else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []uintptr{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return
+ }
+ if cap(v) == 0 {
+ v = make([]uintptr, 1, 4)
+ changed = true
+ }
j := 0
- for ; !dd.CheckBreak(); j++ {
+ for ; !breakFound; j++ {
if j >= len(v) {
if canChange {
v = append(v, 0)
@@ -14652,14 +18852,20 @@ func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool,
d.arrayCannotExpand(len(v), j+1)
}
}
+ slh.ElemContainerState(j)
if j < len(v) {
v[j] = uintptr(dd.DecodeUint(uintBitsize))
} else {
d.swallow()
}
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
}
- slh.End()
}
+ slh.End()
return v, changed
}
@@ -14683,8 +18889,7 @@ func (f fastpathT) DecSliceIntX(vp *[]int, checkNil bool, d *Decoder) {
*vp = v
}
}
-func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool,
- d *Decoder) (_ []int, changed bool) {
+func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool, d *Decoder) (_ []int, changed bool) {
dd := d.d
if checkNil && dd.TryDecodeAsNil() {
@@ -14695,59 +18900,83 @@ func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool,
}
slh, containerLenS := d.decSliceHelperStart()
- x2read := containerLenS
- var xtrunc bool
- if canChange && v == nil {
- var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8); xtrunc {
- x2read = xlen
- }
- v = make([]int, xlen)
- changed = true
- }
if containerLenS == 0 {
- if canChange && len(v) != 0 {
- v = v[:0]
+ if canChange {
+ if v == nil {
+ v = []int{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
changed = true
}
- return v, changed
+ slh.End()
+ return
}
if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
if containerLenS > cap(v) {
if canChange {
var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8); xtrunc {
- x2read = xlen
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int, xlen)
+ }
+ } else {
+ v = make([]int, xlen)
}
- v = make([]int, xlen)
changed = true
} else {
d.arrayCannotExpand(len(v), containerLenS)
- x2read = len(v)
}
+ x2read = len(v)
} else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
}
-
j := 0
for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
v[j] = int(dd.DecodeInt(intBitsize))
}
if xtrunc {
for ; j < containerLenS; j++ {
v = append(v, 0)
+ slh.ElemContainerState(j)
v[j] = int(dd.DecodeInt(intBitsize))
}
} else if !canChange {
for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
d.swallow()
}
}
} else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return
+ }
+ if cap(v) == 0 {
+ v = make([]int, 1, 4)
+ changed = true
+ }
j := 0
- for ; !dd.CheckBreak(); j++ {
+ for ; !breakFound; j++ {
if j >= len(v) {
if canChange {
v = append(v, 0)
@@ -14756,14 +18985,20 @@ func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool,
d.arrayCannotExpand(len(v), j+1)
}
}
+ slh.ElemContainerState(j)
if j < len(v) {
v[j] = int(dd.DecodeInt(intBitsize))
} else {
d.swallow()
}
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
}
- slh.End()
}
+ slh.End()
return v, changed
}
@@ -14787,8 +19022,7 @@ func (f fastpathT) DecSliceInt8X(vp *[]int8, checkNil bool, d *Decoder) {
*vp = v
}
}
-func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool,
- d *Decoder) (_ []int8, changed bool) {
+func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool, d *Decoder) (_ []int8, changed bool) {
dd := d.d
if checkNil && dd.TryDecodeAsNil() {
@@ -14799,59 +19033,83 @@ func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool,
}
slh, containerLenS := d.decSliceHelperStart()
- x2read := containerLenS
- var xtrunc bool
- if canChange && v == nil {
- var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1); xtrunc {
- x2read = xlen
- }
- v = make([]int8, xlen)
- changed = true
- }
if containerLenS == 0 {
- if canChange && len(v) != 0 {
- v = v[:0]
+ if canChange {
+ if v == nil {
+ v = []int8{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
changed = true
}
- return v, changed
+ slh.End()
+ return
}
if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
if containerLenS > cap(v) {
if canChange {
var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1); xtrunc {
- x2read = xlen
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int8, xlen)
+ }
+ } else {
+ v = make([]int8, xlen)
}
- v = make([]int8, xlen)
changed = true
} else {
d.arrayCannotExpand(len(v), containerLenS)
- x2read = len(v)
}
+ x2read = len(v)
} else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
}
-
j := 0
for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
v[j] = int8(dd.DecodeInt(8))
}
if xtrunc {
for ; j < containerLenS; j++ {
v = append(v, 0)
+ slh.ElemContainerState(j)
v[j] = int8(dd.DecodeInt(8))
}
} else if !canChange {
for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
d.swallow()
}
}
} else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int8{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return
+ }
+ if cap(v) == 0 {
+ v = make([]int8, 1, 4)
+ changed = true
+ }
j := 0
- for ; !dd.CheckBreak(); j++ {
+ for ; !breakFound; j++ {
if j >= len(v) {
if canChange {
v = append(v, 0)
@@ -14860,14 +19118,20 @@ func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool,
d.arrayCannotExpand(len(v), j+1)
}
}
+ slh.ElemContainerState(j)
if j < len(v) {
v[j] = int8(dd.DecodeInt(8))
} else {
d.swallow()
}
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
}
- slh.End()
}
+ slh.End()
return v, changed
}
@@ -14891,8 +19155,7 @@ func (f fastpathT) DecSliceInt16X(vp *[]int16, checkNil bool, d *Decoder) {
*vp = v
}
}
-func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool,
- d *Decoder) (_ []int16, changed bool) {
+func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool, d *Decoder) (_ []int16, changed bool) {
dd := d.d
if checkNil && dd.TryDecodeAsNil() {
@@ -14903,59 +19166,83 @@ func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool,
}
slh, containerLenS := d.decSliceHelperStart()
- x2read := containerLenS
- var xtrunc bool
- if canChange && v == nil {
- var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2); xtrunc {
- x2read = xlen
- }
- v = make([]int16, xlen)
- changed = true
- }
if containerLenS == 0 {
- if canChange && len(v) != 0 {
- v = v[:0]
+ if canChange {
+ if v == nil {
+ v = []int16{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
changed = true
}
- return v, changed
+ slh.End()
+ return
}
if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
if containerLenS > cap(v) {
if canChange {
var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2); xtrunc {
- x2read = xlen
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int16, xlen)
+ }
+ } else {
+ v = make([]int16, xlen)
}
- v = make([]int16, xlen)
changed = true
} else {
d.arrayCannotExpand(len(v), containerLenS)
- x2read = len(v)
}
+ x2read = len(v)
} else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
}
-
j := 0
for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
v[j] = int16(dd.DecodeInt(16))
}
if xtrunc {
for ; j < containerLenS; j++ {
v = append(v, 0)
+ slh.ElemContainerState(j)
v[j] = int16(dd.DecodeInt(16))
}
} else if !canChange {
for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
d.swallow()
}
}
} else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int16{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return
+ }
+ if cap(v) == 0 {
+ v = make([]int16, 1, 4)
+ changed = true
+ }
j := 0
- for ; !dd.CheckBreak(); j++ {
+ for ; !breakFound; j++ {
if j >= len(v) {
if canChange {
v = append(v, 0)
@@ -14964,14 +19251,20 @@ func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool,
d.arrayCannotExpand(len(v), j+1)
}
}
+ slh.ElemContainerState(j)
if j < len(v) {
v[j] = int16(dd.DecodeInt(16))
} else {
d.swallow()
}
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
}
- slh.End()
}
+ slh.End()
return v, changed
}
@@ -14995,8 +19288,7 @@ func (f fastpathT) DecSliceInt32X(vp *[]int32, checkNil bool, d *Decoder) {
*vp = v
}
}
-func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool,
- d *Decoder) (_ []int32, changed bool) {
+func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool, d *Decoder) (_ []int32, changed bool) {
dd := d.d
if checkNil && dd.TryDecodeAsNil() {
@@ -15007,59 +19299,83 @@ func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool,
}
slh, containerLenS := d.decSliceHelperStart()
- x2read := containerLenS
- var xtrunc bool
- if canChange && v == nil {
- var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4); xtrunc {
- x2read = xlen
- }
- v = make([]int32, xlen)
- changed = true
- }
if containerLenS == 0 {
- if canChange && len(v) != 0 {
- v = v[:0]
+ if canChange {
+ if v == nil {
+ v = []int32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
changed = true
}
- return v, changed
+ slh.End()
+ return
}
if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
if containerLenS > cap(v) {
if canChange {
var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4); xtrunc {
- x2read = xlen
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int32, xlen)
+ }
+ } else {
+ v = make([]int32, xlen)
}
- v = make([]int32, xlen)
changed = true
} else {
d.arrayCannotExpand(len(v), containerLenS)
- x2read = len(v)
}
+ x2read = len(v)
} else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
}
-
j := 0
for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
v[j] = int32(dd.DecodeInt(32))
}
if xtrunc {
for ; j < containerLenS; j++ {
v = append(v, 0)
+ slh.ElemContainerState(j)
v[j] = int32(dd.DecodeInt(32))
}
} else if !canChange {
for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
d.swallow()
}
}
} else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return
+ }
+ if cap(v) == 0 {
+ v = make([]int32, 1, 4)
+ changed = true
+ }
j := 0
- for ; !dd.CheckBreak(); j++ {
+ for ; !breakFound; j++ {
if j >= len(v) {
if canChange {
v = append(v, 0)
@@ -15068,14 +19384,20 @@ func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool,
d.arrayCannotExpand(len(v), j+1)
}
}
+ slh.ElemContainerState(j)
if j < len(v) {
v[j] = int32(dd.DecodeInt(32))
} else {
d.swallow()
}
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
}
- slh.End()
}
+ slh.End()
return v, changed
}
@@ -15099,8 +19421,7 @@ func (f fastpathT) DecSliceInt64X(vp *[]int64, checkNil bool, d *Decoder) {
*vp = v
}
}
-func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool,
- d *Decoder) (_ []int64, changed bool) {
+func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool, d *Decoder) (_ []int64, changed bool) {
dd := d.d
if checkNil && dd.TryDecodeAsNil() {
@@ -15111,59 +19432,83 @@ func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool,
}
slh, containerLenS := d.decSliceHelperStart()
- x2read := containerLenS
- var xtrunc bool
- if canChange && v == nil {
- var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8); xtrunc {
- x2read = xlen
- }
- v = make([]int64, xlen)
- changed = true
- }
if containerLenS == 0 {
- if canChange && len(v) != 0 {
- v = v[:0]
+ if canChange {
+ if v == nil {
+ v = []int64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
changed = true
}
- return v, changed
+ slh.End()
+ return
}
if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
if containerLenS > cap(v) {
if canChange {
var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8); xtrunc {
- x2read = xlen
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int64, xlen)
+ }
+ } else {
+ v = make([]int64, xlen)
}
- v = make([]int64, xlen)
changed = true
} else {
d.arrayCannotExpand(len(v), containerLenS)
- x2read = len(v)
}
+ x2read = len(v)
} else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
}
-
j := 0
for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
v[j] = dd.DecodeInt(64)
}
if xtrunc {
for ; j < containerLenS; j++ {
v = append(v, 0)
+ slh.ElemContainerState(j)
v[j] = dd.DecodeInt(64)
}
} else if !canChange {
for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
d.swallow()
}
}
} else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []int64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return
+ }
+ if cap(v) == 0 {
+ v = make([]int64, 1, 4)
+ changed = true
+ }
j := 0
- for ; !dd.CheckBreak(); j++ {
+ for ; !breakFound; j++ {
if j >= len(v) {
if canChange {
v = append(v, 0)
@@ -15172,14 +19517,20 @@ func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool,
d.arrayCannotExpand(len(v), j+1)
}
}
+ slh.ElemContainerState(j)
if j < len(v) {
v[j] = dd.DecodeInt(64)
} else {
d.swallow()
}
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
}
- slh.End()
}
+ slh.End()
return v, changed
}
@@ -15203,8 +19554,7 @@ func (f fastpathT) DecSliceBoolX(vp *[]bool, checkNil bool, d *Decoder) {
*vp = v
}
}
-func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool,
- d *Decoder) (_ []bool, changed bool) {
+func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool, d *Decoder) (_ []bool, changed bool) {
dd := d.d
if checkNil && dd.TryDecodeAsNil() {
@@ -15215,59 +19565,83 @@ func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool,
}
slh, containerLenS := d.decSliceHelperStart()
- x2read := containerLenS
- var xtrunc bool
- if canChange && v == nil {
- var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1); xtrunc {
- x2read = xlen
- }
- v = make([]bool, xlen)
- changed = true
- }
if containerLenS == 0 {
- if canChange && len(v) != 0 {
- v = v[:0]
+ if canChange {
+ if v == nil {
+ v = []bool{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
changed = true
}
- return v, changed
+ slh.End()
+ return
}
if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
if containerLenS > cap(v) {
if canChange {
var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1); xtrunc {
- x2read = xlen
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]bool, xlen)
+ }
+ } else {
+ v = make([]bool, xlen)
}
- v = make([]bool, xlen)
changed = true
} else {
d.arrayCannotExpand(len(v), containerLenS)
- x2read = len(v)
}
+ x2read = len(v)
} else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
}
-
j := 0
for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
v[j] = dd.DecodeBool()
}
if xtrunc {
for ; j < containerLenS; j++ {
v = append(v, false)
+ slh.ElemContainerState(j)
v[j] = dd.DecodeBool()
}
} else if !canChange {
for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
d.swallow()
}
}
} else {
+ breakFound := dd.CheckBreak()
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []bool{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return
+ }
+ if cap(v) == 0 {
+ v = make([]bool, 1, 4)
+ changed = true
+ }
j := 0
- for ; !dd.CheckBreak(); j++ {
+ for ; !breakFound; j++ {
if j >= len(v) {
if canChange {
v = append(v, false)
@@ -15276,14 +19650,20 @@ func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool,
d.arrayCannotExpand(len(v), j+1)
}
}
+ slh.ElemContainerState(j)
if j < len(v) {
v[j] = dd.DecodeBool()
} else {
d.swallow()
}
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
}
- slh.End()
}
+ slh.End()
return v, changed
}
@@ -15308,6 +19688,7 @@ func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, checkNil boo
func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, canChange bool,
d *Decoder) (_ map[interface{}]interface{}, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -15327,11 +19708,17 @@ func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, checkNil bool,
var mv interface{}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -15344,11 +19731,17 @@ func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, checkNil bool,
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -15359,7 +19752,9 @@ func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, checkNil bool,
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -15385,6 +19780,7 @@ func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, checkNil bool,
func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, checkNil bool, canChange bool,
d *Decoder) (_ map[interface{}]string, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -15404,11 +19800,17 @@ func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, checkNil bool, ca
var mv string
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
@@ -15416,17 +19818,25 @@ func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, checkNil bool, ca
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -15452,6 +19862,7 @@ func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, checkNil bool, d *D
func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, checkNil bool, canChange bool,
d *Decoder) (_ map[interface{}]uint, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -15471,11 +19882,17 @@ func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, checkNil bool, canCha
var mv uint
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -15483,17 +19900,25 @@ func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, checkNil bool, canCha
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -15519,6 +19944,7 @@ func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, checkNil bool, d
func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, checkNil bool, canChange bool,
d *Decoder) (_ map[interface{}]uint8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -15538,11 +19964,17 @@ func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, checkNil bool, canC
var mv uint8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
@@ -15550,17 +19982,25 @@ func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, checkNil bool, canC
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -15586,6 +20026,7 @@ func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, checkNil bool,
func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, checkNil bool, canChange bool,
d *Decoder) (_ map[interface{}]uint16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -15605,11 +20046,17 @@ func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, checkNil bool, ca
var mv uint16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
@@ -15617,17 +20064,25 @@ func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, checkNil bool, ca
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -15653,6 +20108,7 @@ func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, checkNil bool,
func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, checkNil bool, canChange bool,
d *Decoder) (_ map[interface{}]uint32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -15672,11 +20128,17 @@ func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, checkNil bool, ca
var mv uint32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
@@ -15684,17 +20146,25 @@ func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, checkNil bool, ca
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -15720,6 +20190,7 @@ func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, checkNil bool,
func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, checkNil bool, canChange bool,
d *Decoder) (_ map[interface{}]uint64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -15739,11 +20210,17 @@ func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, checkNil bool, ca
var mv uint64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
@@ -15751,17 +20228,25 @@ func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, checkNil bool, ca
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -15787,6 +20272,7 @@ func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, checkNil bool
func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, canChange bool,
d *Decoder) (_ map[interface{}]uintptr, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -15806,11 +20292,17 @@ func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool,
var mv uintptr
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -15818,17 +20310,25 @@ func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool,
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -15854,6 +20354,7 @@ func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, checkNil bool, d *Dec
func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, checkNil bool, canChange bool,
d *Decoder) (_ map[interface{}]int, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -15873,11 +20374,17 @@ func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, checkNil bool, canChang
var mv int
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
@@ -15885,17 +20392,25 @@ func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, checkNil bool, canChang
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -15921,6 +20436,7 @@ func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, checkNil bool, d *D
func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, checkNil bool, canChange bool,
d *Decoder) (_ map[interface{}]int8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -15940,11 +20456,17 @@ func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, checkNil bool, canCha
var mv int8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
@@ -15952,17 +20474,25 @@ func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, checkNil bool, canCha
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -15988,6 +20518,7 @@ func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, checkNil bool, d
func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, checkNil bool, canChange bool,
d *Decoder) (_ map[interface{}]int16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -16007,11 +20538,17 @@ func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, checkNil bool, canC
var mv int16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
@@ -16019,17 +20556,25 @@ func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, checkNil bool, canC
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -16055,6 +20600,7 @@ func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, checkNil bool, d
func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, checkNil bool, canChange bool,
d *Decoder) (_ map[interface{}]int32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -16074,11 +20620,17 @@ func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, checkNil bool, canC
var mv int32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
@@ -16086,17 +20638,25 @@ func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, checkNil bool, canC
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -16122,6 +20682,7 @@ func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, checkNil bool, d
func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, checkNil bool, canChange bool,
d *Decoder) (_ map[interface{}]int64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -16141,11 +20702,17 @@ func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, checkNil bool, canC
var mv int64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
@@ -16153,17 +20720,25 @@ func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, checkNil bool, canC
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -16189,6 +20764,7 @@ func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, checkNil bool
func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, checkNil bool, canChange bool,
d *Decoder) (_ map[interface{}]float32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -16208,11 +20784,17 @@ func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, checkNil bool,
var mv float32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
@@ -16220,17 +20802,25 @@ func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, checkNil bool,
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -16256,6 +20846,7 @@ func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, checkNil bool
func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, checkNil bool, canChange bool,
d *Decoder) (_ map[interface{}]float64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -16275,11 +20866,17 @@ func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, checkNil bool,
var mv float64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
@@ -16287,17 +20884,25 @@ func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, checkNil bool,
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -16323,6 +20928,7 @@ func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, checkNil bool, d *D
func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, checkNil bool, canChange bool,
d *Decoder) (_ map[interface{}]bool, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -16342,11 +20948,17 @@ func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, checkNil bool, canCha
var mv bool
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
@@ -16354,17 +20966,25 @@ func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, checkNil bool, canCha
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv)
}
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -16390,6 +21010,7 @@ func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, checkNil bool,
func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, checkNil bool, canChange bool,
d *Decoder) (_ map[string]interface{}, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -16409,7 +21030,13 @@ func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, checkNil bool, ca
var mv interface{}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -16422,7 +21049,13 @@ func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, checkNil bool, ca
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -16433,7 +21066,9 @@ func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, checkNil bool, ca
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -16459,6 +21094,7 @@ func (f fastpathT) DecMapStringStringX(vp *map[string]string, checkNil bool, d *
func (_ fastpathT) DecMapStringStringV(v map[string]string, checkNil bool, canChange bool,
d *Decoder) (_ map[string]string, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -16478,7 +21114,13 @@ func (_ fastpathT) DecMapStringStringV(v map[string]string, checkNil bool, canCh
var mv string
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
@@ -16486,13 +21128,21 @@ func (_ fastpathT) DecMapStringStringV(v map[string]string, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -16518,6 +21168,7 @@ func (f fastpathT) DecMapStringUintX(vp *map[string]uint, checkNil bool, d *Deco
func (_ fastpathT) DecMapStringUintV(v map[string]uint, checkNil bool, canChange bool,
d *Decoder) (_ map[string]uint, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -16537,7 +21188,13 @@ func (_ fastpathT) DecMapStringUintV(v map[string]uint, checkNil bool, canChange
var mv uint
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -16545,13 +21202,21 @@ func (_ fastpathT) DecMapStringUintV(v map[string]uint, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -16577,6 +21242,7 @@ func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, checkNil bool, d *De
func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, checkNil bool, canChange bool,
d *Decoder) (_ map[string]uint8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -16596,7 +21262,13 @@ func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, checkNil bool, canChan
var mv uint8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
@@ -16604,13 +21276,21 @@ func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -16636,6 +21316,7 @@ func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, checkNil bool, d *
func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, checkNil bool, canChange bool,
d *Decoder) (_ map[string]uint16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -16655,7 +21336,13 @@ func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, checkNil bool, canCh
var mv uint16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
@@ -16663,13 +21350,21 @@ func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -16695,6 +21390,7 @@ func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, checkNil bool, d *
func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, checkNil bool, canChange bool,
d *Decoder) (_ map[string]uint32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -16714,7 +21410,13 @@ func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, checkNil bool, canCh
var mv uint32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
@@ -16722,13 +21424,21 @@ func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -16754,6 +21464,7 @@ func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, checkNil bool, d *
func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, checkNil bool, canChange bool,
d *Decoder) (_ map[string]uint64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -16773,7 +21484,13 @@ func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, checkNil bool, canCh
var mv uint64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
@@ -16781,13 +21498,21 @@ func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -16813,6 +21538,7 @@ func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, checkNil bool, d
func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, checkNil bool, canChange bool,
d *Decoder) (_ map[string]uintptr, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -16832,7 +21558,13 @@ func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, checkNil bool, can
var mv uintptr
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -16840,13 +21572,21 @@ func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -16872,6 +21612,7 @@ func (f fastpathT) DecMapStringIntX(vp *map[string]int, checkNil bool, d *Decode
func (_ fastpathT) DecMapStringIntV(v map[string]int, checkNil bool, canChange bool,
d *Decoder) (_ map[string]int, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -16891,7 +21632,13 @@ func (_ fastpathT) DecMapStringIntV(v map[string]int, checkNil bool, canChange b
var mv int
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
@@ -16899,13 +21646,21 @@ func (_ fastpathT) DecMapStringIntV(v map[string]int, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -16931,6 +21686,7 @@ func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, checkNil bool, d *Deco
func (_ fastpathT) DecMapStringInt8V(v map[string]int8, checkNil bool, canChange bool,
d *Decoder) (_ map[string]int8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -16950,7 +21706,13 @@ func (_ fastpathT) DecMapStringInt8V(v map[string]int8, checkNil bool, canChange
var mv int8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
@@ -16958,13 +21720,21 @@ func (_ fastpathT) DecMapStringInt8V(v map[string]int8, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -16990,6 +21760,7 @@ func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, checkNil bool, d *De
func (_ fastpathT) DecMapStringInt16V(v map[string]int16, checkNil bool, canChange bool,
d *Decoder) (_ map[string]int16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -17009,7 +21780,13 @@ func (_ fastpathT) DecMapStringInt16V(v map[string]int16, checkNil bool, canChan
var mv int16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
@@ -17017,13 +21794,21 @@ func (_ fastpathT) DecMapStringInt16V(v map[string]int16, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -17049,6 +21834,7 @@ func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, checkNil bool, d *De
func (_ fastpathT) DecMapStringInt32V(v map[string]int32, checkNil bool, canChange bool,
d *Decoder) (_ map[string]int32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -17068,7 +21854,13 @@ func (_ fastpathT) DecMapStringInt32V(v map[string]int32, checkNil bool, canChan
var mv int32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
@@ -17076,13 +21868,21 @@ func (_ fastpathT) DecMapStringInt32V(v map[string]int32, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -17108,6 +21908,7 @@ func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, checkNil bool, d *De
func (_ fastpathT) DecMapStringInt64V(v map[string]int64, checkNil bool, canChange bool,
d *Decoder) (_ map[string]int64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -17127,7 +21928,13 @@ func (_ fastpathT) DecMapStringInt64V(v map[string]int64, checkNil bool, canChan
var mv int64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
@@ -17135,13 +21942,21 @@ func (_ fastpathT) DecMapStringInt64V(v map[string]int64, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -17167,6 +21982,7 @@ func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, checkNil bool, d
func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, checkNil bool, canChange bool,
d *Decoder) (_ map[string]float32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -17186,7 +22002,13 @@ func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, checkNil bool, can
var mv float32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
@@ -17194,13 +22016,21 @@ func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -17226,6 +22056,7 @@ func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, checkNil bool, d
func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, checkNil bool, canChange bool,
d *Decoder) (_ map[string]float64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -17245,7 +22076,13 @@ func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, checkNil bool, can
var mv float64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
@@ -17253,13 +22090,21 @@ func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -17285,6 +22130,7 @@ func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, checkNil bool, d *Deco
func (_ fastpathT) DecMapStringBoolV(v map[string]bool, checkNil bool, canChange bool,
d *Decoder) (_ map[string]bool, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -17304,7 +22150,13 @@ func (_ fastpathT) DecMapStringBoolV(v map[string]bool, checkNil bool, canChange
var mv bool
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
@@ -17312,13 +22164,21 @@ func (_ fastpathT) DecMapStringBoolV(v map[string]bool, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeString()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -17344,6 +22204,7 @@ func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, checkNil bool
func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, checkNil bool, canChange bool,
d *Decoder) (_ map[float32]interface{}, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -17363,7 +22224,13 @@ func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, checkNil bool,
var mv interface{}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -17376,7 +22243,13 @@ func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, checkNil bool,
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -17387,7 +22260,9 @@ func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, checkNil bool,
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -17413,6 +22288,7 @@ func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, checkNil bool, d
func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, checkNil bool, canChange bool,
d *Decoder) (_ map[float32]string, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -17432,7 +22308,13 @@ func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, checkNil bool, can
var mv string
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
@@ -17440,13 +22322,21 @@ func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -17472,6 +22362,7 @@ func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, checkNil bool, d *De
func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, checkNil bool, canChange bool,
d *Decoder) (_ map[float32]uint, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -17491,7 +22382,13 @@ func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, checkNil bool, canChan
var mv uint
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -17499,13 +22396,21 @@ func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -17531,6 +22436,7 @@ func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, checkNil bool, d *
func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, checkNil bool, canChange bool,
d *Decoder) (_ map[float32]uint8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -17550,7 +22456,13 @@ func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, checkNil bool, canCh
var mv uint8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
@@ -17558,13 +22470,21 @@ func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -17590,6 +22510,7 @@ func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, checkNil bool, d
func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, checkNil bool, canChange bool,
d *Decoder) (_ map[float32]uint16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -17609,7 +22530,13 @@ func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, checkNil bool, can
var mv uint16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
@@ -17617,13 +22544,21 @@ func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -17649,6 +22584,7 @@ func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, checkNil bool, d
func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, checkNil bool, canChange bool,
d *Decoder) (_ map[float32]uint32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -17668,7 +22604,13 @@ func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, checkNil bool, can
var mv uint32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
@@ -17676,13 +22618,21 @@ func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -17708,6 +22658,7 @@ func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, checkNil bool, d
func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, checkNil bool, canChange bool,
d *Decoder) (_ map[float32]uint64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -17727,7 +22678,13 @@ func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, checkNil bool, can
var mv uint64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
@@ -17735,13 +22692,21 @@ func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -17767,6 +22732,7 @@ func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, checkNil bool,
func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, canChange bool,
d *Decoder) (_ map[float32]uintptr, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -17786,7 +22752,13 @@ func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, c
var mv uintptr
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -17794,13 +22766,21 @@ func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, c
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -17826,6 +22806,7 @@ func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, checkNil bool, d *Deco
func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, checkNil bool, canChange bool,
d *Decoder) (_ map[float32]int, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -17845,7 +22826,13 @@ func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, checkNil bool, canChange
var mv int
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
@@ -17853,13 +22840,21 @@ func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -17885,6 +22880,7 @@ func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, checkNil bool, d *De
func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, checkNil bool, canChange bool,
d *Decoder) (_ map[float32]int8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -17904,7 +22900,13 @@ func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, checkNil bool, canChan
var mv int8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
@@ -17912,13 +22914,21 @@ func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -17944,6 +22954,7 @@ func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, checkNil bool, d *
func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, checkNil bool, canChange bool,
d *Decoder) (_ map[float32]int16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -17963,7 +22974,13 @@ func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, checkNil bool, canCh
var mv int16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
@@ -17971,13 +22988,21 @@ func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -18003,6 +23028,7 @@ func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, checkNil bool, d *
func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, checkNil bool, canChange bool,
d *Decoder) (_ map[float32]int32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -18022,7 +23048,13 @@ func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, checkNil bool, canCh
var mv int32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
@@ -18030,13 +23062,21 @@ func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -18062,6 +23102,7 @@ func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, checkNil bool, d *
func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, checkNil bool, canChange bool,
d *Decoder) (_ map[float32]int64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -18081,7 +23122,13 @@ func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, checkNil bool, canCh
var mv int64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
@@ -18089,13 +23136,21 @@ func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -18121,6 +23176,7 @@ func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, checkNil bool,
func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, checkNil bool, canChange bool,
d *Decoder) (_ map[float32]float32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -18140,7 +23196,13 @@ func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, checkNil bool, c
var mv float32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
@@ -18148,13 +23210,21 @@ func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, checkNil bool, c
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -18180,6 +23250,7 @@ func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, checkNil bool,
func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, checkNil bool, canChange bool,
d *Decoder) (_ map[float32]float64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -18199,7 +23270,13 @@ func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, checkNil bool, c
var mv float64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
@@ -18207,13 +23284,21 @@ func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, checkNil bool, c
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -18239,6 +23324,7 @@ func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, checkNil bool, d *De
func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, checkNil bool, canChange bool,
d *Decoder) (_ map[float32]bool, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -18258,7 +23344,13 @@ func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, checkNil bool, canChan
var mv bool
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
@@ -18266,13 +23358,21 @@ func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = float32(dd.DecodeFloat(true))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -18298,6 +23398,7 @@ func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, checkNil bool
func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, checkNil bool, canChange bool,
d *Decoder) (_ map[float64]interface{}, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -18317,7 +23418,13 @@ func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, checkNil bool,
var mv interface{}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -18330,7 +23437,13 @@ func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, checkNil bool,
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -18341,7 +23454,9 @@ func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, checkNil bool,
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -18367,6 +23482,7 @@ func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, checkNil bool, d
func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, checkNil bool, canChange bool,
d *Decoder) (_ map[float64]string, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -18386,7 +23502,13 @@ func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, checkNil bool, can
var mv string
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
@@ -18394,13 +23516,21 @@ func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -18426,6 +23556,7 @@ func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, checkNil bool, d *De
func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, checkNil bool, canChange bool,
d *Decoder) (_ map[float64]uint, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -18445,7 +23576,13 @@ func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, checkNil bool, canChan
var mv uint
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -18453,13 +23590,21 @@ func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -18485,6 +23630,7 @@ func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, checkNil bool, d *
func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, checkNil bool, canChange bool,
d *Decoder) (_ map[float64]uint8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -18504,7 +23650,13 @@ func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, checkNil bool, canCh
var mv uint8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
@@ -18512,13 +23664,21 @@ func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -18544,6 +23704,7 @@ func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, checkNil bool, d
func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, checkNil bool, canChange bool,
d *Decoder) (_ map[float64]uint16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -18563,7 +23724,13 @@ func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, checkNil bool, can
var mv uint16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
@@ -18571,13 +23738,21 @@ func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -18603,6 +23778,7 @@ func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, checkNil bool, d
func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, checkNil bool, canChange bool,
d *Decoder) (_ map[float64]uint32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -18622,7 +23798,13 @@ func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, checkNil bool, can
var mv uint32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
@@ -18630,13 +23812,21 @@ func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -18662,6 +23852,7 @@ func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, checkNil bool, d
func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, checkNil bool, canChange bool,
d *Decoder) (_ map[float64]uint64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -18681,7 +23872,13 @@ func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, checkNil bool, can
var mv uint64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
@@ -18689,13 +23886,21 @@ func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -18721,6 +23926,7 @@ func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, checkNil bool,
func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, canChange bool,
d *Decoder) (_ map[float64]uintptr, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -18740,7 +23946,13 @@ func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, c
var mv uintptr
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -18748,13 +23960,21 @@ func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, c
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -18780,6 +24000,7 @@ func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, checkNil bool, d *Deco
func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, checkNil bool, canChange bool,
d *Decoder) (_ map[float64]int, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -18799,7 +24020,13 @@ func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, checkNil bool, canChange
var mv int
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
@@ -18807,13 +24034,21 @@ func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -18839,6 +24074,7 @@ func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, checkNil bool, d *De
func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, checkNil bool, canChange bool,
d *Decoder) (_ map[float64]int8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -18858,7 +24094,13 @@ func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, checkNil bool, canChan
var mv int8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
@@ -18866,13 +24108,21 @@ func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -18898,6 +24148,7 @@ func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, checkNil bool, d *
func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, checkNil bool, canChange bool,
d *Decoder) (_ map[float64]int16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -18917,7 +24168,13 @@ func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, checkNil bool, canCh
var mv int16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
@@ -18925,13 +24182,21 @@ func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -18957,6 +24222,7 @@ func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, checkNil bool, d *
func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, checkNil bool, canChange bool,
d *Decoder) (_ map[float64]int32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -18976,7 +24242,13 @@ func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, checkNil bool, canCh
var mv int32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
@@ -18984,13 +24256,21 @@ func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -19016,6 +24296,7 @@ func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, checkNil bool, d *
func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, checkNil bool, canChange bool,
d *Decoder) (_ map[float64]int64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -19035,7 +24316,13 @@ func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, checkNil bool, canCh
var mv int64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
@@ -19043,13 +24330,21 @@ func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -19075,6 +24370,7 @@ func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, checkNil bool,
func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, checkNil bool, canChange bool,
d *Decoder) (_ map[float64]float32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -19094,7 +24390,13 @@ func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, checkNil bool, c
var mv float32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
@@ -19102,13 +24404,21 @@ func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, checkNil bool, c
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -19134,6 +24444,7 @@ func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, checkNil bool,
func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, checkNil bool, canChange bool,
d *Decoder) (_ map[float64]float64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -19153,7 +24464,13 @@ func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, checkNil bool, c
var mv float64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
@@ -19161,13 +24478,21 @@ func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, checkNil bool, c
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -19193,6 +24518,7 @@ func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, checkNil bool, d *De
func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, checkNil bool, canChange bool,
d *Decoder) (_ map[float64]bool, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -19212,7 +24538,13 @@ func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, checkNil bool, canChan
var mv bool
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
@@ -19220,13 +24552,21 @@ func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeFloat(false)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -19252,6 +24592,7 @@ func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, checkNil bool, d *D
func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, checkNil bool, canChange bool,
d *Decoder) (_ map[uint]interface{}, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -19271,7 +24612,13 @@ func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, checkNil bool, canCha
var mv interface{}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -19284,7 +24631,13 @@ func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, checkNil bool, canCha
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -19295,7 +24648,9 @@ func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, checkNil bool, canCha
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -19321,6 +24676,7 @@ func (f fastpathT) DecMapUintStringX(vp *map[uint]string, checkNil bool, d *Deco
func (_ fastpathT) DecMapUintStringV(v map[uint]string, checkNil bool, canChange bool,
d *Decoder) (_ map[uint]string, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -19340,7 +24696,13 @@ func (_ fastpathT) DecMapUintStringV(v map[uint]string, checkNil bool, canChange
var mv string
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
@@ -19348,13 +24710,21 @@ func (_ fastpathT) DecMapUintStringV(v map[uint]string, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -19380,6 +24750,7 @@ func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, checkNil bool, d *Decoder)
func (_ fastpathT) DecMapUintUintV(v map[uint]uint, checkNil bool, canChange bool,
d *Decoder) (_ map[uint]uint, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -19399,7 +24770,13 @@ func (_ fastpathT) DecMapUintUintV(v map[uint]uint, checkNil bool, canChange boo
var mv uint
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -19407,13 +24784,21 @@ func (_ fastpathT) DecMapUintUintV(v map[uint]uint, checkNil bool, canChange boo
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -19439,6 +24824,7 @@ func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, checkNil bool, d *Decode
func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, checkNil bool, canChange bool,
d *Decoder) (_ map[uint]uint8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -19458,7 +24844,13 @@ func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, checkNil bool, canChange b
var mv uint8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
@@ -19466,13 +24858,21 @@ func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -19498,6 +24898,7 @@ func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, checkNil bool, d *Deco
func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, checkNil bool, canChange bool,
d *Decoder) (_ map[uint]uint16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -19517,7 +24918,13 @@ func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, checkNil bool, canChange
var mv uint16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
@@ -19525,13 +24932,21 @@ func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -19557,6 +24972,7 @@ func (f fastpathT) DecMapUintUint32X(vp *map[uint]uint32, checkNil bool, d *Deco
func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, checkNil bool, canChange bool,
d *Decoder) (_ map[uint]uint32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -19576,7 +24992,13 @@ func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, checkNil bool, canChange
var mv uint32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
@@ -19584,13 +25006,21 @@ func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -19616,6 +25046,7 @@ func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, checkNil bool, d *Deco
func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, checkNil bool, canChange bool,
d *Decoder) (_ map[uint]uint64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -19635,7 +25066,13 @@ func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, checkNil bool, canChange
var mv uint64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
@@ -19643,13 +25080,21 @@ func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -19675,6 +25120,7 @@ func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, checkNil bool, d *De
func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, checkNil bool, canChange bool,
d *Decoder) (_ map[uint]uintptr, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -19694,7 +25140,13 @@ func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, checkNil bool, canChan
var mv uintptr
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -19702,13 +25154,21 @@ func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -19734,6 +25194,7 @@ func (f fastpathT) DecMapUintIntX(vp *map[uint]int, checkNil bool, d *Decoder) {
func (_ fastpathT) DecMapUintIntV(v map[uint]int, checkNil bool, canChange bool,
d *Decoder) (_ map[uint]int, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -19753,7 +25214,13 @@ func (_ fastpathT) DecMapUintIntV(v map[uint]int, checkNil bool, canChange bool,
var mv int
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
@@ -19761,13 +25228,21 @@ func (_ fastpathT) DecMapUintIntV(v map[uint]int, checkNil bool, canChange bool,
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -19793,6 +25268,7 @@ func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, checkNil bool, d *Decoder)
func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, checkNil bool, canChange bool,
d *Decoder) (_ map[uint]int8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -19812,7 +25288,13 @@ func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, checkNil bool, canChange boo
var mv int8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
@@ -19820,13 +25302,21 @@ func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, checkNil bool, canChange boo
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -19852,6 +25342,7 @@ func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, checkNil bool, d *Decode
func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, checkNil bool, canChange bool,
d *Decoder) (_ map[uint]int16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -19871,7 +25362,13 @@ func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, checkNil bool, canChange b
var mv int16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
@@ -19879,13 +25376,21 @@ func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -19911,6 +25416,7 @@ func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, checkNil bool, d *Decode
func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, checkNil bool, canChange bool,
d *Decoder) (_ map[uint]int32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -19930,7 +25436,13 @@ func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, checkNil bool, canChange b
var mv int32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
@@ -19938,13 +25450,21 @@ func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -19970,6 +25490,7 @@ func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, checkNil bool, d *Decode
func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, checkNil bool, canChange bool,
d *Decoder) (_ map[uint]int64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -19989,7 +25510,13 @@ func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, checkNil bool, canChange b
var mv int64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
@@ -19997,13 +25524,21 @@ func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -20029,6 +25564,7 @@ func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, checkNil bool, d *De
func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, checkNil bool, canChange bool,
d *Decoder) (_ map[uint]float32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -20048,7 +25584,13 @@ func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, checkNil bool, canChan
var mv float32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
@@ -20056,13 +25598,21 @@ func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -20088,6 +25638,7 @@ func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, checkNil bool, d *De
func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, checkNil bool, canChange bool,
d *Decoder) (_ map[uint]float64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -20107,7 +25658,13 @@ func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, checkNil bool, canChan
var mv float64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
@@ -20115,13 +25672,21 @@ func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -20147,6 +25712,7 @@ func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, checkNil bool, d *Decoder)
func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, checkNil bool, canChange bool,
d *Decoder) (_ map[uint]bool, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -20166,7 +25732,13 @@ func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, checkNil bool, canChange boo
var mv bool
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
@@ -20174,13 +25746,21 @@ func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, checkNil bool, canChange boo
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -20206,6 +25786,7 @@ func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, checkNil bool, d
func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, checkNil bool, canChange bool,
d *Decoder) (_ map[uint8]interface{}, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -20225,7 +25806,13 @@ func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, checkNil bool, canC
var mv interface{}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -20238,7 +25825,13 @@ func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, checkNil bool, canC
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -20249,7 +25842,9 @@ func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, checkNil bool, canC
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -20275,6 +25870,7 @@ func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, checkNil bool, d *De
func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, checkNil bool, canChange bool,
d *Decoder) (_ map[uint8]string, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -20294,7 +25890,13 @@ func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, checkNil bool, canChan
var mv string
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
@@ -20302,13 +25904,21 @@ func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -20334,6 +25944,7 @@ func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, checkNil bool, d *Decode
func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, checkNil bool, canChange bool,
d *Decoder) (_ map[uint8]uint, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -20353,7 +25964,13 @@ func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, checkNil bool, canChange b
var mv uint
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -20361,13 +25978,21 @@ func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -20393,6 +26018,7 @@ func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, checkNil bool, d *Deco
func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, checkNil bool, canChange bool,
d *Decoder) (_ map[uint8]uint8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -20412,7 +26038,13 @@ func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, checkNil bool, canChange
var mv uint8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
@@ -20420,13 +26052,21 @@ func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -20452,6 +26092,7 @@ func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, checkNil bool, d *De
func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, checkNil bool, canChange bool,
d *Decoder) (_ map[uint8]uint16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -20471,7 +26112,13 @@ func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, checkNil bool, canChan
var mv uint16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
@@ -20479,13 +26126,21 @@ func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -20511,6 +26166,7 @@ func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, checkNil bool, d *De
func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, checkNil bool, canChange bool,
d *Decoder) (_ map[uint8]uint32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -20530,7 +26186,13 @@ func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, checkNil bool, canChan
var mv uint32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
@@ -20538,13 +26200,21 @@ func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -20570,6 +26240,7 @@ func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, checkNil bool, d *De
func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, checkNil bool, canChange bool,
d *Decoder) (_ map[uint8]uint64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -20589,7 +26260,13 @@ func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, checkNil bool, canChan
var mv uint64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
@@ -20597,13 +26274,21 @@ func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -20629,6 +26314,7 @@ func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, checkNil bool, d *
func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, canChange bool,
d *Decoder) (_ map[uint8]uintptr, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -20648,7 +26334,13 @@ func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, canCh
var mv uintptr
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -20656,13 +26348,21 @@ func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -20688,6 +26388,7 @@ func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, checkNil bool, d *Decoder)
func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, checkNil bool, canChange bool,
d *Decoder) (_ map[uint8]int, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -20707,7 +26408,13 @@ func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, checkNil bool, canChange boo
var mv int
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
@@ -20715,13 +26422,21 @@ func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, checkNil bool, canChange boo
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -20747,6 +26462,7 @@ func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, checkNil bool, d *Decode
func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, checkNil bool, canChange bool,
d *Decoder) (_ map[uint8]int8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -20766,7 +26482,13 @@ func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, checkNil bool, canChange b
var mv int8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
@@ -20774,13 +26496,21 @@ func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -20806,6 +26536,7 @@ func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, checkNil bool, d *Deco
func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, checkNil bool, canChange bool,
d *Decoder) (_ map[uint8]int16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -20825,7 +26556,13 @@ func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, checkNil bool, canChange
var mv int16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
@@ -20833,13 +26570,21 @@ func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -20865,6 +26610,7 @@ func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, checkNil bool, d *Deco
func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, checkNil bool, canChange bool,
d *Decoder) (_ map[uint8]int32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -20884,7 +26630,13 @@ func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, checkNil bool, canChange
var mv int32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
@@ -20892,13 +26644,21 @@ func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -20924,6 +26684,7 @@ func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, checkNil bool, d *Deco
func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, checkNil bool, canChange bool,
d *Decoder) (_ map[uint8]int64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -20943,7 +26704,13 @@ func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, checkNil bool, canChange
var mv int64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
@@ -20951,13 +26718,21 @@ func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -20983,6 +26758,7 @@ func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, checkNil bool, d *
func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, checkNil bool, canChange bool,
d *Decoder) (_ map[uint8]float32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -21002,7 +26778,13 @@ func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, checkNil bool, canCh
var mv float32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
@@ -21010,13 +26792,21 @@ func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -21042,6 +26832,7 @@ func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, checkNil bool, d *
func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, checkNil bool, canChange bool,
d *Decoder) (_ map[uint8]float64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -21061,7 +26852,13 @@ func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, checkNil bool, canCh
var mv float64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
@@ -21069,13 +26866,21 @@ func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -21101,6 +26906,7 @@ func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, checkNil bool, d *Decode
func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, checkNil bool, canChange bool,
d *Decoder) (_ map[uint8]bool, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -21120,7 +26926,13 @@ func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, checkNil bool, canChange b
var mv bool
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
@@ -21128,13 +26940,21 @@ func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint8(dd.DecodeUint(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -21160,6 +26980,7 @@ func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, checkNil bool,
func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, checkNil bool, canChange bool,
d *Decoder) (_ map[uint16]interface{}, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -21179,7 +27000,13 @@ func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, checkNil bool, ca
var mv interface{}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -21192,7 +27019,13 @@ func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, checkNil bool, ca
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -21203,7 +27036,9 @@ func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, checkNil bool, ca
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -21229,6 +27064,7 @@ func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, checkNil bool, d *
func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, checkNil bool, canChange bool,
d *Decoder) (_ map[uint16]string, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -21248,7 +27084,13 @@ func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, checkNil bool, canCh
var mv string
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
@@ -21256,13 +27098,21 @@ func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -21288,6 +27138,7 @@ func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, checkNil bool, d *Deco
func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, checkNil bool, canChange bool,
d *Decoder) (_ map[uint16]uint, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -21307,7 +27158,13 @@ func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, checkNil bool, canChange
var mv uint
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -21315,13 +27172,21 @@ func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -21347,6 +27212,7 @@ func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, checkNil bool, d *De
func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, checkNil bool, canChange bool,
d *Decoder) (_ map[uint16]uint8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -21366,7 +27232,13 @@ func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, checkNil bool, canChan
var mv uint8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
@@ -21374,13 +27246,21 @@ func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -21406,6 +27286,7 @@ func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, checkNil bool, d *
func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, checkNil bool, canChange bool,
d *Decoder) (_ map[uint16]uint16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -21425,7 +27306,13 @@ func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, checkNil bool, canCh
var mv uint16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
@@ -21433,13 +27320,21 @@ func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -21465,6 +27360,7 @@ func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, checkNil bool, d *
func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, checkNil bool, canChange bool,
d *Decoder) (_ map[uint16]uint32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -21484,7 +27380,13 @@ func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, checkNil bool, canCh
var mv uint32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
@@ -21492,13 +27394,21 @@ func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -21524,6 +27434,7 @@ func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, checkNil bool, d *
func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, checkNil bool, canChange bool,
d *Decoder) (_ map[uint16]uint64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -21543,7 +27454,13 @@ func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, checkNil bool, canCh
var mv uint64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
@@ -21551,13 +27468,21 @@ func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -21583,6 +27508,7 @@ func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, checkNil bool, d
func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, canChange bool,
d *Decoder) (_ map[uint16]uintptr, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -21602,7 +27528,13 @@ func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, can
var mv uintptr
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -21610,13 +27542,21 @@ func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -21642,6 +27582,7 @@ func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, checkNil bool, d *Decode
func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, checkNil bool, canChange bool,
d *Decoder) (_ map[uint16]int, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -21661,7 +27602,13 @@ func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, checkNil bool, canChange b
var mv int
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
@@ -21669,13 +27616,21 @@ func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -21701,6 +27656,7 @@ func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, checkNil bool, d *Deco
func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, checkNil bool, canChange bool,
d *Decoder) (_ map[uint16]int8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -21720,7 +27676,13 @@ func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, checkNil bool, canChange
var mv int8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
@@ -21728,13 +27690,21 @@ func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -21760,6 +27730,7 @@ func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, checkNil bool, d *De
func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, checkNil bool, canChange bool,
d *Decoder) (_ map[uint16]int16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -21779,7 +27750,13 @@ func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, checkNil bool, canChan
var mv int16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
@@ -21787,13 +27764,21 @@ func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -21819,6 +27804,7 @@ func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, checkNil bool, d *De
func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, checkNil bool, canChange bool,
d *Decoder) (_ map[uint16]int32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -21838,7 +27824,13 @@ func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, checkNil bool, canChan
var mv int32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
@@ -21846,13 +27838,21 @@ func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -21878,6 +27878,7 @@ func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, checkNil bool, d *De
func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, checkNil bool, canChange bool,
d *Decoder) (_ map[uint16]int64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -21897,7 +27898,13 @@ func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, checkNil bool, canChan
var mv int64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
@@ -21905,13 +27912,21 @@ func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -21937,6 +27952,7 @@ func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, checkNil bool, d
func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, checkNil bool, canChange bool,
d *Decoder) (_ map[uint16]float32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -21956,7 +27972,13 @@ func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, checkNil bool, can
var mv float32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
@@ -21964,13 +27986,21 @@ func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -21996,6 +28026,7 @@ func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, checkNil bool, d
func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, checkNil bool, canChange bool,
d *Decoder) (_ map[uint16]float64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -22015,7 +28046,13 @@ func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, checkNil bool, can
var mv float64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
@@ -22023,13 +28060,21 @@ func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -22055,6 +28100,7 @@ func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, checkNil bool, d *Deco
func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, checkNil bool, canChange bool,
d *Decoder) (_ map[uint16]bool, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -22074,7 +28120,13 @@ func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, checkNil bool, canChange
var mv bool
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
@@ -22082,13 +28134,21 @@ func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint16(dd.DecodeUint(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -22114,6 +28174,7 @@ func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, checkNil bool,
func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, checkNil bool, canChange bool,
d *Decoder) (_ map[uint32]interface{}, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -22133,7 +28194,13 @@ func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, checkNil bool, ca
var mv interface{}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -22146,7 +28213,13 @@ func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, checkNil bool, ca
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -22157,7 +28230,9 @@ func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, checkNil bool, ca
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -22183,6 +28258,7 @@ func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, checkNil bool, d *
func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, checkNil bool, canChange bool,
d *Decoder) (_ map[uint32]string, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -22202,7 +28278,13 @@ func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, checkNil bool, canCh
var mv string
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
@@ -22210,13 +28292,21 @@ func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -22242,6 +28332,7 @@ func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, checkNil bool, d *Deco
func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, checkNil bool, canChange bool,
d *Decoder) (_ map[uint32]uint, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -22261,7 +28352,13 @@ func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, checkNil bool, canChange
var mv uint
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -22269,13 +28366,21 @@ func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -22301,6 +28406,7 @@ func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, checkNil bool, d *De
func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, checkNil bool, canChange bool,
d *Decoder) (_ map[uint32]uint8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -22320,7 +28426,13 @@ func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, checkNil bool, canChan
var mv uint8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
@@ -22328,13 +28440,21 @@ func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -22360,6 +28480,7 @@ func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, checkNil bool, d *
func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, checkNil bool, canChange bool,
d *Decoder) (_ map[uint32]uint16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -22379,7 +28500,13 @@ func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, checkNil bool, canCh
var mv uint16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
@@ -22387,13 +28514,21 @@ func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -22419,6 +28554,7 @@ func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, checkNil bool, d *
func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, checkNil bool, canChange bool,
d *Decoder) (_ map[uint32]uint32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -22438,7 +28574,13 @@ func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, checkNil bool, canCh
var mv uint32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
@@ -22446,13 +28588,21 @@ func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -22478,6 +28628,7 @@ func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, checkNil bool, d *
func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, checkNil bool, canChange bool,
d *Decoder) (_ map[uint32]uint64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -22497,7 +28648,13 @@ func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, checkNil bool, canCh
var mv uint64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
@@ -22505,13 +28662,21 @@ func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -22537,6 +28702,7 @@ func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, checkNil bool, d
func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, canChange bool,
d *Decoder) (_ map[uint32]uintptr, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -22556,7 +28722,13 @@ func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, can
var mv uintptr
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -22564,13 +28736,21 @@ func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -22596,6 +28776,7 @@ func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, checkNil bool, d *Decode
func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, checkNil bool, canChange bool,
d *Decoder) (_ map[uint32]int, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -22615,7 +28796,13 @@ func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, checkNil bool, canChange b
var mv int
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
@@ -22623,13 +28810,21 @@ func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -22655,6 +28850,7 @@ func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, checkNil bool, d *Deco
func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, checkNil bool, canChange bool,
d *Decoder) (_ map[uint32]int8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -22674,7 +28870,13 @@ func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, checkNil bool, canChange
var mv int8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
@@ -22682,13 +28884,21 @@ func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -22714,6 +28924,7 @@ func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, checkNil bool, d *De
func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, checkNil bool, canChange bool,
d *Decoder) (_ map[uint32]int16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -22733,7 +28944,13 @@ func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, checkNil bool, canChan
var mv int16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
@@ -22741,13 +28958,21 @@ func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -22773,6 +28998,7 @@ func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, checkNil bool, d *De
func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, checkNil bool, canChange bool,
d *Decoder) (_ map[uint32]int32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -22792,7 +29018,13 @@ func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, checkNil bool, canChan
var mv int32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
@@ -22800,13 +29032,21 @@ func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -22832,6 +29072,7 @@ func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, checkNil bool, d *De
func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, checkNil bool, canChange bool,
d *Decoder) (_ map[uint32]int64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -22851,7 +29092,13 @@ func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, checkNil bool, canChan
var mv int64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
@@ -22859,13 +29106,21 @@ func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -22891,6 +29146,7 @@ func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, checkNil bool, d
func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, checkNil bool, canChange bool,
d *Decoder) (_ map[uint32]float32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -22910,7 +29166,13 @@ func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, checkNil bool, can
var mv float32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
@@ -22918,13 +29180,21 @@ func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -22950,6 +29220,7 @@ func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, checkNil bool, d
func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, checkNil bool, canChange bool,
d *Decoder) (_ map[uint32]float64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -22969,7 +29240,13 @@ func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, checkNil bool, can
var mv float64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
@@ -22977,13 +29254,21 @@ func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -23009,6 +29294,7 @@ func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, checkNil bool, d *Deco
func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, checkNil bool, canChange bool,
d *Decoder) (_ map[uint32]bool, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -23028,7 +29314,13 @@ func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, checkNil bool, canChange
var mv bool
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
@@ -23036,13 +29328,21 @@ func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uint32(dd.DecodeUint(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -23068,6 +29368,7 @@ func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, checkNil bool,
func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, checkNil bool, canChange bool,
d *Decoder) (_ map[uint64]interface{}, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -23087,7 +29388,13 @@ func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, checkNil bool, ca
var mv interface{}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -23100,7 +29407,13 @@ func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, checkNil bool, ca
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -23111,7 +29424,9 @@ func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, checkNil bool, ca
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -23137,6 +29452,7 @@ func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, checkNil bool, d *
func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, checkNil bool, canChange bool,
d *Decoder) (_ map[uint64]string, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -23156,7 +29472,13 @@ func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, checkNil bool, canCh
var mv string
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
@@ -23164,13 +29486,21 @@ func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -23196,6 +29526,7 @@ func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, checkNil bool, d *Deco
func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, checkNil bool, canChange bool,
d *Decoder) (_ map[uint64]uint, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -23215,7 +29546,13 @@ func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, checkNil bool, canChange
var mv uint
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -23223,13 +29560,21 @@ func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -23255,6 +29600,7 @@ func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, checkNil bool, d *De
func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, checkNil bool, canChange bool,
d *Decoder) (_ map[uint64]uint8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -23274,7 +29620,13 @@ func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, checkNil bool, canChan
var mv uint8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
@@ -23282,13 +29634,21 @@ func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -23314,6 +29674,7 @@ func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, checkNil bool, d *
func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, checkNil bool, canChange bool,
d *Decoder) (_ map[uint64]uint16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -23333,7 +29694,13 @@ func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, checkNil bool, canCh
var mv uint16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
@@ -23341,13 +29708,21 @@ func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -23373,6 +29748,7 @@ func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, checkNil bool, d *
func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, checkNil bool, canChange bool,
d *Decoder) (_ map[uint64]uint32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -23392,7 +29768,13 @@ func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, checkNil bool, canCh
var mv uint32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
@@ -23400,13 +29782,21 @@ func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -23432,6 +29822,7 @@ func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, checkNil bool, d *
func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, checkNil bool, canChange bool,
d *Decoder) (_ map[uint64]uint64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -23451,7 +29842,13 @@ func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, checkNil bool, canCh
var mv uint64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
@@ -23459,13 +29856,21 @@ func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -23491,6 +29896,7 @@ func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, checkNil bool, d
func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, canChange bool,
d *Decoder) (_ map[uint64]uintptr, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -23510,7 +29916,13 @@ func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, can
var mv uintptr
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -23518,13 +29930,21 @@ func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -23550,6 +29970,7 @@ func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, checkNil bool, d *Decode
func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, checkNil bool, canChange bool,
d *Decoder) (_ map[uint64]int, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -23569,7 +29990,13 @@ func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, checkNil bool, canChange b
var mv int
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
@@ -23577,13 +30004,21 @@ func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -23609,6 +30044,7 @@ func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, checkNil bool, d *Deco
func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, checkNil bool, canChange bool,
d *Decoder) (_ map[uint64]int8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -23628,7 +30064,13 @@ func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, checkNil bool, canChange
var mv int8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
@@ -23636,13 +30078,21 @@ func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -23668,6 +30118,7 @@ func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, checkNil bool, d *De
func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, checkNil bool, canChange bool,
d *Decoder) (_ map[uint64]int16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -23687,7 +30138,13 @@ func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, checkNil bool, canChan
var mv int16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
@@ -23695,13 +30152,21 @@ func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -23727,6 +30192,7 @@ func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, checkNil bool, d *De
func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, checkNil bool, canChange bool,
d *Decoder) (_ map[uint64]int32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -23746,7 +30212,13 @@ func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, checkNil bool, canChan
var mv int32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
@@ -23754,13 +30226,21 @@ func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -23786,6 +30266,7 @@ func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, checkNil bool, d *De
func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, checkNil bool, canChange bool,
d *Decoder) (_ map[uint64]int64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -23805,7 +30286,13 @@ func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, checkNil bool, canChan
var mv int64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
@@ -23813,13 +30300,21 @@ func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -23845,6 +30340,7 @@ func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, checkNil bool, d
func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, checkNil bool, canChange bool,
d *Decoder) (_ map[uint64]float32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -23864,7 +30360,13 @@ func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, checkNil bool, can
var mv float32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
@@ -23872,13 +30374,21 @@ func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -23904,6 +30414,7 @@ func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, checkNil bool, d
func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, checkNil bool, canChange bool,
d *Decoder) (_ map[uint64]float64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -23923,7 +30434,13 @@ func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, checkNil bool, can
var mv float64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
@@ -23931,13 +30448,21 @@ func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -23963,6 +30488,7 @@ func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, checkNil bool, d *Deco
func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, checkNil bool, canChange bool,
d *Decoder) (_ map[uint64]bool, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -23982,7 +30508,13 @@ func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, checkNil bool, canChange
var mv bool
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
@@ -23990,13 +30522,21 @@ func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeUint(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -24022,6 +30562,7 @@ func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, checkNil bool
func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, canChange bool,
d *Decoder) (_ map[uintptr]interface{}, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -24041,7 +30582,13 @@ func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool,
var mv interface{}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -24054,7 +30601,13 @@ func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool,
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -24065,7 +30618,9 @@ func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool,
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -24091,6 +30646,7 @@ func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, checkNil bool, d
func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, checkNil bool, canChange bool,
d *Decoder) (_ map[uintptr]string, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -24110,7 +30666,13 @@ func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, checkNil bool, can
var mv string
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
@@ -24118,13 +30680,21 @@ func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -24150,6 +30720,7 @@ func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, checkNil bool, d *De
func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, checkNil bool, canChange bool,
d *Decoder) (_ map[uintptr]uint, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -24169,7 +30740,13 @@ func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, checkNil bool, canChan
var mv uint
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -24177,13 +30754,21 @@ func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -24209,6 +30794,7 @@ func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, checkNil bool, d *
func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, canChange bool,
d *Decoder) (_ map[uintptr]uint8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -24228,7 +30814,13 @@ func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, canCh
var mv uint8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
@@ -24236,13 +30828,21 @@ func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -24268,6 +30868,7 @@ func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, checkNil bool, d
func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, canChange bool,
d *Decoder) (_ map[uintptr]uint16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -24287,7 +30888,13 @@ func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, can
var mv uint16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
@@ -24295,13 +30902,21 @@ func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -24327,6 +30942,7 @@ func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, checkNil bool, d
func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, canChange bool,
d *Decoder) (_ map[uintptr]uint32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -24346,7 +30962,13 @@ func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, can
var mv uint32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
@@ -24354,13 +30976,21 @@ func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -24386,6 +31016,7 @@ func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, checkNil bool, d
func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, canChange bool,
d *Decoder) (_ map[uintptr]uint64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -24405,7 +31036,13 @@ func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, can
var mv uint64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
@@ -24413,13 +31050,21 @@ func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, can
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -24445,6 +31090,7 @@ func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, checkNil bool,
func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, canChange bool,
d *Decoder) (_ map[uintptr]uintptr, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -24464,7 +31110,13 @@ func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, c
var mv uintptr
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -24472,13 +31124,21 @@ func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, c
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -24504,6 +31164,7 @@ func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, checkNil bool, d *Deco
func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, checkNil bool, canChange bool,
d *Decoder) (_ map[uintptr]int, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -24523,7 +31184,13 @@ func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, checkNil bool, canChange
var mv int
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
@@ -24531,13 +31198,21 @@ func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -24563,6 +31238,7 @@ func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, checkNil bool, d *De
func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, checkNil bool, canChange bool,
d *Decoder) (_ map[uintptr]int8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -24582,7 +31258,13 @@ func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, checkNil bool, canChan
var mv int8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
@@ -24590,13 +31272,21 @@ func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -24622,6 +31312,7 @@ func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, checkNil bool, d *
func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, checkNil bool, canChange bool,
d *Decoder) (_ map[uintptr]int16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -24641,7 +31332,13 @@ func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, checkNil bool, canCh
var mv int16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
@@ -24649,13 +31346,21 @@ func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -24681,6 +31386,7 @@ func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, checkNil bool, d *
func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, checkNil bool, canChange bool,
d *Decoder) (_ map[uintptr]int32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -24700,7 +31406,13 @@ func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, checkNil bool, canCh
var mv int32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
@@ -24708,13 +31420,21 @@ func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -24740,6 +31460,7 @@ func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, checkNil bool, d *
func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, checkNil bool, canChange bool,
d *Decoder) (_ map[uintptr]int64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -24759,7 +31480,13 @@ func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, checkNil bool, canCh
var mv int64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
@@ -24767,13 +31494,21 @@ func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -24799,6 +31534,7 @@ func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, checkNil bool,
func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, canChange bool,
d *Decoder) (_ map[uintptr]float32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -24818,7 +31554,13 @@ func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, c
var mv float32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
@@ -24826,13 +31568,21 @@ func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, c
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -24858,6 +31608,7 @@ func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, checkNil bool,
func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, canChange bool,
d *Decoder) (_ map[uintptr]float64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -24877,7 +31628,13 @@ func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, c
var mv float64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
@@ -24885,13 +31642,21 @@ func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, c
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -24917,6 +31682,7 @@ func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, checkNil bool, d *De
func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, checkNil bool, canChange bool,
d *Decoder) (_ map[uintptr]bool, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -24936,7 +31702,13 @@ func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, checkNil bool, canChan
var mv bool
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
@@ -24944,13 +31716,21 @@ func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = uintptr(dd.DecodeUint(uintBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -24976,6 +31756,7 @@ func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, checkNil bool, d *Dec
func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, checkNil bool, canChange bool,
d *Decoder) (_ map[int]interface{}, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -24995,7 +31776,13 @@ func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, checkNil bool, canChang
var mv interface{}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -25008,7 +31795,13 @@ func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, checkNil bool, canChang
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -25019,7 +31812,9 @@ func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, checkNil bool, canChang
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -25045,6 +31840,7 @@ func (f fastpathT) DecMapIntStringX(vp *map[int]string, checkNil bool, d *Decode
func (_ fastpathT) DecMapIntStringV(v map[int]string, checkNil bool, canChange bool,
d *Decoder) (_ map[int]string, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -25064,7 +31860,13 @@ func (_ fastpathT) DecMapIntStringV(v map[int]string, checkNil bool, canChange b
var mv string
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
@@ -25072,13 +31874,21 @@ func (_ fastpathT) DecMapIntStringV(v map[int]string, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -25104,6 +31914,7 @@ func (f fastpathT) DecMapIntUintX(vp *map[int]uint, checkNil bool, d *Decoder) {
func (_ fastpathT) DecMapIntUintV(v map[int]uint, checkNil bool, canChange bool,
d *Decoder) (_ map[int]uint, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -25123,7 +31934,13 @@ func (_ fastpathT) DecMapIntUintV(v map[int]uint, checkNil bool, canChange bool,
var mv uint
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -25131,13 +31948,21 @@ func (_ fastpathT) DecMapIntUintV(v map[int]uint, checkNil bool, canChange bool,
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -25163,6 +31988,7 @@ func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, checkNil bool, d *Decoder)
func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, checkNil bool, canChange bool,
d *Decoder) (_ map[int]uint8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -25182,7 +32008,13 @@ func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, checkNil bool, canChange boo
var mv uint8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
@@ -25190,13 +32022,21 @@ func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, checkNil bool, canChange boo
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -25222,6 +32062,7 @@ func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, checkNil bool, d *Decode
func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, checkNil bool, canChange bool,
d *Decoder) (_ map[int]uint16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -25241,7 +32082,13 @@ func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, checkNil bool, canChange b
var mv uint16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
@@ -25249,13 +32096,21 @@ func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -25281,6 +32136,7 @@ func (f fastpathT) DecMapIntUint32X(vp *map[int]uint32, checkNil bool, d *Decode
func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, checkNil bool, canChange bool,
d *Decoder) (_ map[int]uint32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -25300,7 +32156,13 @@ func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, checkNil bool, canChange b
var mv uint32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
@@ -25308,13 +32170,21 @@ func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -25340,6 +32210,7 @@ func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, checkNil bool, d *Decode
func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, checkNil bool, canChange bool,
d *Decoder) (_ map[int]uint64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -25359,7 +32230,13 @@ func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, checkNil bool, canChange b
var mv uint64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
@@ -25367,13 +32244,21 @@ func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -25399,6 +32284,7 @@ func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, checkNil bool, d *Deco
func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, checkNil bool, canChange bool,
d *Decoder) (_ map[int]uintptr, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -25418,7 +32304,13 @@ func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, checkNil bool, canChange
var mv uintptr
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -25426,13 +32318,21 @@ func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -25458,6 +32358,7 @@ func (f fastpathT) DecMapIntIntX(vp *map[int]int, checkNil bool, d *Decoder) {
func (_ fastpathT) DecMapIntIntV(v map[int]int, checkNil bool, canChange bool,
d *Decoder) (_ map[int]int, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -25477,7 +32378,13 @@ func (_ fastpathT) DecMapIntIntV(v map[int]int, checkNil bool, canChange bool,
var mv int
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
@@ -25485,13 +32392,21 @@ func (_ fastpathT) DecMapIntIntV(v map[int]int, checkNil bool, canChange bool,
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -25517,6 +32432,7 @@ func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, checkNil bool, d *Decoder) {
func (_ fastpathT) DecMapIntInt8V(v map[int]int8, checkNil bool, canChange bool,
d *Decoder) (_ map[int]int8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -25536,7 +32452,13 @@ func (_ fastpathT) DecMapIntInt8V(v map[int]int8, checkNil bool, canChange bool,
var mv int8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
@@ -25544,13 +32466,21 @@ func (_ fastpathT) DecMapIntInt8V(v map[int]int8, checkNil bool, canChange bool,
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -25576,6 +32506,7 @@ func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, checkNil bool, d *Decoder)
func (_ fastpathT) DecMapIntInt16V(v map[int]int16, checkNil bool, canChange bool,
d *Decoder) (_ map[int]int16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -25595,7 +32526,13 @@ func (_ fastpathT) DecMapIntInt16V(v map[int]int16, checkNil bool, canChange boo
var mv int16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
@@ -25603,13 +32540,21 @@ func (_ fastpathT) DecMapIntInt16V(v map[int]int16, checkNil bool, canChange boo
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -25635,6 +32580,7 @@ func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, checkNil bool, d *Decoder)
func (_ fastpathT) DecMapIntInt32V(v map[int]int32, checkNil bool, canChange bool,
d *Decoder) (_ map[int]int32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -25654,7 +32600,13 @@ func (_ fastpathT) DecMapIntInt32V(v map[int]int32, checkNil bool, canChange boo
var mv int32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
@@ -25662,13 +32614,21 @@ func (_ fastpathT) DecMapIntInt32V(v map[int]int32, checkNil bool, canChange boo
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -25694,6 +32654,7 @@ func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, checkNil bool, d *Decoder)
func (_ fastpathT) DecMapIntInt64V(v map[int]int64, checkNil bool, canChange bool,
d *Decoder) (_ map[int]int64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -25713,7 +32674,13 @@ func (_ fastpathT) DecMapIntInt64V(v map[int]int64, checkNil bool, canChange boo
var mv int64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
@@ -25721,13 +32688,21 @@ func (_ fastpathT) DecMapIntInt64V(v map[int]int64, checkNil bool, canChange boo
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -25753,6 +32728,7 @@ func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, checkNil bool, d *Deco
func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, checkNil bool, canChange bool,
d *Decoder) (_ map[int]float32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -25772,7 +32748,13 @@ func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, checkNil bool, canChange
var mv float32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
@@ -25780,13 +32762,21 @@ func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -25812,6 +32802,7 @@ func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, checkNil bool, d *Deco
func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, checkNil bool, canChange bool,
d *Decoder) (_ map[int]float64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -25831,7 +32822,13 @@ func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, checkNil bool, canChange
var mv float64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
@@ -25839,13 +32836,21 @@ func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -25871,6 +32876,7 @@ func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, checkNil bool, d *Decoder) {
func (_ fastpathT) DecMapIntBoolV(v map[int]bool, checkNil bool, canChange bool,
d *Decoder) (_ map[int]bool, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -25890,7 +32896,13 @@ func (_ fastpathT) DecMapIntBoolV(v map[int]bool, checkNil bool, canChange bool,
var mv bool
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
@@ -25898,13 +32910,21 @@ func (_ fastpathT) DecMapIntBoolV(v map[int]bool, checkNil bool, canChange bool,
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int(dd.DecodeInt(intBitsize))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -25930,6 +32950,7 @@ func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, checkNil bool, d *D
func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, checkNil bool, canChange bool,
d *Decoder) (_ map[int8]interface{}, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -25949,7 +32970,13 @@ func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, checkNil bool, canCha
var mv interface{}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -25962,7 +32989,13 @@ func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, checkNil bool, canCha
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -25973,7 +33006,9 @@ func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, checkNil bool, canCha
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -25999,6 +33034,7 @@ func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, checkNil bool, d *Deco
func (_ fastpathT) DecMapInt8StringV(v map[int8]string, checkNil bool, canChange bool,
d *Decoder) (_ map[int8]string, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -26018,7 +33054,13 @@ func (_ fastpathT) DecMapInt8StringV(v map[int8]string, checkNil bool, canChange
var mv string
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
@@ -26026,13 +33068,21 @@ func (_ fastpathT) DecMapInt8StringV(v map[int8]string, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -26058,6 +33108,7 @@ func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, checkNil bool, d *Decoder)
func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, checkNil bool, canChange bool,
d *Decoder) (_ map[int8]uint, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -26077,7 +33128,13 @@ func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, checkNil bool, canChange boo
var mv uint
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -26085,13 +33142,21 @@ func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, checkNil bool, canChange boo
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -26117,6 +33182,7 @@ func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, checkNil bool, d *Decode
func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, checkNil bool, canChange bool,
d *Decoder) (_ map[int8]uint8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -26136,7 +33202,13 @@ func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, checkNil bool, canChange b
var mv uint8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
@@ -26144,13 +33216,21 @@ func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -26176,6 +33256,7 @@ func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, checkNil bool, d *Deco
func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, checkNil bool, canChange bool,
d *Decoder) (_ map[int8]uint16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -26195,7 +33276,13 @@ func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, checkNil bool, canChange
var mv uint16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
@@ -26203,13 +33290,21 @@ func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -26235,6 +33330,7 @@ func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, checkNil bool, d *Deco
func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, checkNil bool, canChange bool,
d *Decoder) (_ map[int8]uint32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -26254,7 +33350,13 @@ func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, checkNil bool, canChange
var mv uint32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
@@ -26262,13 +33364,21 @@ func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -26294,6 +33404,7 @@ func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, checkNil bool, d *Deco
func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, checkNil bool, canChange bool,
d *Decoder) (_ map[int8]uint64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -26313,7 +33424,13 @@ func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, checkNil bool, canChange
var mv uint64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
@@ -26321,13 +33438,21 @@ func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -26353,6 +33478,7 @@ func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, checkNil bool, d *De
func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, checkNil bool, canChange bool,
d *Decoder) (_ map[int8]uintptr, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -26372,7 +33498,13 @@ func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, checkNil bool, canChan
var mv uintptr
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -26380,13 +33512,21 @@ func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -26412,6 +33552,7 @@ func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, checkNil bool, d *Decoder) {
func (_ fastpathT) DecMapInt8IntV(v map[int8]int, checkNil bool, canChange bool,
d *Decoder) (_ map[int8]int, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -26431,7 +33572,13 @@ func (_ fastpathT) DecMapInt8IntV(v map[int8]int, checkNil bool, canChange bool,
var mv int
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
@@ -26439,13 +33586,21 @@ func (_ fastpathT) DecMapInt8IntV(v map[int8]int, checkNil bool, canChange bool,
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -26471,6 +33626,7 @@ func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, checkNil bool, d *Decoder)
func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, checkNil bool, canChange bool,
d *Decoder) (_ map[int8]int8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -26490,7 +33646,13 @@ func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, checkNil bool, canChange boo
var mv int8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
@@ -26498,13 +33660,21 @@ func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, checkNil bool, canChange boo
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -26530,6 +33700,7 @@ func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, checkNil bool, d *Decode
func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, checkNil bool, canChange bool,
d *Decoder) (_ map[int8]int16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -26549,7 +33720,13 @@ func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, checkNil bool, canChange b
var mv int16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
@@ -26557,13 +33734,21 @@ func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -26589,6 +33774,7 @@ func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, checkNil bool, d *Decode
func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, checkNil bool, canChange bool,
d *Decoder) (_ map[int8]int32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -26608,7 +33794,13 @@ func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, checkNil bool, canChange b
var mv int32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
@@ -26616,13 +33808,21 @@ func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -26648,6 +33848,7 @@ func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, checkNil bool, d *Decode
func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, checkNil bool, canChange bool,
d *Decoder) (_ map[int8]int64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -26667,7 +33868,13 @@ func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, checkNil bool, canChange b
var mv int64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
@@ -26675,13 +33882,21 @@ func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -26707,6 +33922,7 @@ func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, checkNil bool, d *De
func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, checkNil bool, canChange bool,
d *Decoder) (_ map[int8]float32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -26726,7 +33942,13 @@ func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, checkNil bool, canChan
var mv float32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
@@ -26734,13 +33956,21 @@ func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -26766,6 +33996,7 @@ func (f fastpathT) DecMapInt8Float64X(vp *map[int8]float64, checkNil bool, d *De
func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, checkNil bool, canChange bool,
d *Decoder) (_ map[int8]float64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -26785,7 +34016,13 @@ func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, checkNil bool, canChan
var mv float64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
@@ -26793,13 +34030,21 @@ func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -26825,6 +34070,7 @@ func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, checkNil bool, d *Decoder)
func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, checkNil bool, canChange bool,
d *Decoder) (_ map[int8]bool, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -26844,7 +34090,13 @@ func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, checkNil bool, canChange boo
var mv bool
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
@@ -26852,13 +34104,21 @@ func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, checkNil bool, canChange boo
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int8(dd.DecodeInt(8))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -26884,6 +34144,7 @@ func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, checkNil bool, d
func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, checkNil bool, canChange bool,
d *Decoder) (_ map[int16]interface{}, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -26903,7 +34164,13 @@ func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, checkNil bool, canC
var mv interface{}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -26916,7 +34183,13 @@ func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, checkNil bool, canC
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -26927,7 +34200,9 @@ func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, checkNil bool, canC
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -26953,6 +34228,7 @@ func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, checkNil bool, d *De
func (_ fastpathT) DecMapInt16StringV(v map[int16]string, checkNil bool, canChange bool,
d *Decoder) (_ map[int16]string, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -26972,7 +34248,13 @@ func (_ fastpathT) DecMapInt16StringV(v map[int16]string, checkNil bool, canChan
var mv string
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
@@ -26980,13 +34262,21 @@ func (_ fastpathT) DecMapInt16StringV(v map[int16]string, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -27012,6 +34302,7 @@ func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, checkNil bool, d *Decode
func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, checkNil bool, canChange bool,
d *Decoder) (_ map[int16]uint, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -27031,7 +34322,13 @@ func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, checkNil bool, canChange b
var mv uint
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -27039,13 +34336,21 @@ func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -27071,6 +34376,7 @@ func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, checkNil bool, d *Deco
func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, checkNil bool, canChange bool,
d *Decoder) (_ map[int16]uint8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -27090,7 +34396,13 @@ func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, checkNil bool, canChange
var mv uint8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
@@ -27098,13 +34410,21 @@ func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -27130,6 +34450,7 @@ func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, checkNil bool, d *De
func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, checkNil bool, canChange bool,
d *Decoder) (_ map[int16]uint16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -27149,7 +34470,13 @@ func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, checkNil bool, canChan
var mv uint16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
@@ -27157,13 +34484,21 @@ func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -27189,6 +34524,7 @@ func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, checkNil bool, d *De
func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, checkNil bool, canChange bool,
d *Decoder) (_ map[int16]uint32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -27208,7 +34544,13 @@ func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, checkNil bool, canChan
var mv uint32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
@@ -27216,13 +34558,21 @@ func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -27248,6 +34598,7 @@ func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, checkNil bool, d *De
func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, checkNil bool, canChange bool,
d *Decoder) (_ map[int16]uint64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -27267,7 +34618,13 @@ func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, checkNil bool, canChan
var mv uint64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
@@ -27275,13 +34632,21 @@ func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -27307,6 +34672,7 @@ func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, checkNil bool, d *
func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, checkNil bool, canChange bool,
d *Decoder) (_ map[int16]uintptr, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -27326,7 +34692,13 @@ func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, checkNil bool, canCh
var mv uintptr
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -27334,13 +34706,21 @@ func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -27366,6 +34746,7 @@ func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, checkNil bool, d *Decoder)
func (_ fastpathT) DecMapInt16IntV(v map[int16]int, checkNil bool, canChange bool,
d *Decoder) (_ map[int16]int, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -27385,7 +34766,13 @@ func (_ fastpathT) DecMapInt16IntV(v map[int16]int, checkNil bool, canChange boo
var mv int
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
@@ -27393,13 +34780,21 @@ func (_ fastpathT) DecMapInt16IntV(v map[int16]int, checkNil bool, canChange boo
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -27425,6 +34820,7 @@ func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, checkNil bool, d *Decode
func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, checkNil bool, canChange bool,
d *Decoder) (_ map[int16]int8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -27444,7 +34840,13 @@ func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, checkNil bool, canChange b
var mv int8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
@@ -27452,13 +34854,21 @@ func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -27484,6 +34894,7 @@ func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, checkNil bool, d *Deco
func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, checkNil bool, canChange bool,
d *Decoder) (_ map[int16]int16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -27503,7 +34914,13 @@ func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, checkNil bool, canChange
var mv int16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
@@ -27511,13 +34928,21 @@ func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -27543,6 +34968,7 @@ func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, checkNil bool, d *Deco
func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, checkNil bool, canChange bool,
d *Decoder) (_ map[int16]int32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -27562,7 +34988,13 @@ func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, checkNil bool, canChange
var mv int32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
@@ -27570,13 +35002,21 @@ func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -27602,6 +35042,7 @@ func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, checkNil bool, d *Deco
func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, checkNil bool, canChange bool,
d *Decoder) (_ map[int16]int64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -27621,7 +35062,13 @@ func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, checkNil bool, canChange
var mv int64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
@@ -27629,13 +35076,21 @@ func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -27661,6 +35116,7 @@ func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, checkNil bool, d *
func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, checkNil bool, canChange bool,
d *Decoder) (_ map[int16]float32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -27680,7 +35136,13 @@ func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, checkNil bool, canCh
var mv float32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
@@ -27688,13 +35150,21 @@ func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -27720,6 +35190,7 @@ func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, checkNil bool, d *
func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, checkNil bool, canChange bool,
d *Decoder) (_ map[int16]float64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -27739,7 +35210,13 @@ func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, checkNil bool, canCh
var mv float64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
@@ -27747,13 +35224,21 @@ func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -27779,6 +35264,7 @@ func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, checkNil bool, d *Decode
func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, checkNil bool, canChange bool,
d *Decoder) (_ map[int16]bool, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -27798,7 +35284,13 @@ func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, checkNil bool, canChange b
var mv bool
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
@@ -27806,13 +35298,21 @@ func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int16(dd.DecodeInt(16))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -27838,6 +35338,7 @@ func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, checkNil bool, d
func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, checkNil bool, canChange bool,
d *Decoder) (_ map[int32]interface{}, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -27857,7 +35358,13 @@ func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, checkNil bool, canC
var mv interface{}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -27870,7 +35377,13 @@ func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, checkNil bool, canC
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -27881,7 +35394,9 @@ func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, checkNil bool, canC
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -27907,6 +35422,7 @@ func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, checkNil bool, d *De
func (_ fastpathT) DecMapInt32StringV(v map[int32]string, checkNil bool, canChange bool,
d *Decoder) (_ map[int32]string, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -27926,7 +35442,13 @@ func (_ fastpathT) DecMapInt32StringV(v map[int32]string, checkNil bool, canChan
var mv string
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
@@ -27934,13 +35456,21 @@ func (_ fastpathT) DecMapInt32StringV(v map[int32]string, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -27966,6 +35496,7 @@ func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, checkNil bool, d *Decode
func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, checkNil bool, canChange bool,
d *Decoder) (_ map[int32]uint, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -27985,7 +35516,13 @@ func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, checkNil bool, canChange b
var mv uint
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -27993,13 +35530,21 @@ func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -28025,6 +35570,7 @@ func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, checkNil bool, d *Deco
func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, checkNil bool, canChange bool,
d *Decoder) (_ map[int32]uint8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -28044,7 +35590,13 @@ func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, checkNil bool, canChange
var mv uint8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
@@ -28052,13 +35604,21 @@ func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -28084,6 +35644,7 @@ func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, checkNil bool, d *De
func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, checkNil bool, canChange bool,
d *Decoder) (_ map[int32]uint16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -28103,7 +35664,13 @@ func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, checkNil bool, canChan
var mv uint16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
@@ -28111,13 +35678,21 @@ func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -28143,6 +35718,7 @@ func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, checkNil bool, d *De
func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, checkNil bool, canChange bool,
d *Decoder) (_ map[int32]uint32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -28162,7 +35738,13 @@ func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, checkNil bool, canChan
var mv uint32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
@@ -28170,13 +35752,21 @@ func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -28202,6 +35792,7 @@ func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, checkNil bool, d *De
func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, checkNil bool, canChange bool,
d *Decoder) (_ map[int32]uint64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -28221,7 +35812,13 @@ func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, checkNil bool, canChan
var mv uint64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
@@ -28229,13 +35826,21 @@ func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -28261,6 +35866,7 @@ func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, checkNil bool, d *
func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, checkNil bool, canChange bool,
d *Decoder) (_ map[int32]uintptr, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -28280,7 +35886,13 @@ func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, checkNil bool, canCh
var mv uintptr
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -28288,13 +35900,21 @@ func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -28320,6 +35940,7 @@ func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, checkNil bool, d *Decoder)
func (_ fastpathT) DecMapInt32IntV(v map[int32]int, checkNil bool, canChange bool,
d *Decoder) (_ map[int32]int, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -28339,7 +35960,13 @@ func (_ fastpathT) DecMapInt32IntV(v map[int32]int, checkNil bool, canChange boo
var mv int
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
@@ -28347,13 +35974,21 @@ func (_ fastpathT) DecMapInt32IntV(v map[int32]int, checkNil bool, canChange boo
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -28379,6 +36014,7 @@ func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, checkNil bool, d *Decode
func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, checkNil bool, canChange bool,
d *Decoder) (_ map[int32]int8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -28398,7 +36034,13 @@ func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, checkNil bool, canChange b
var mv int8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
@@ -28406,13 +36048,21 @@ func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -28438,6 +36088,7 @@ func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, checkNil bool, d *Deco
func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, checkNil bool, canChange bool,
d *Decoder) (_ map[int32]int16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -28457,7 +36108,13 @@ func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, checkNil bool, canChange
var mv int16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
@@ -28465,13 +36122,21 @@ func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -28497,6 +36162,7 @@ func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, checkNil bool, d *Deco
func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, checkNil bool, canChange bool,
d *Decoder) (_ map[int32]int32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -28516,7 +36182,13 @@ func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, checkNil bool, canChange
var mv int32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
@@ -28524,13 +36196,21 @@ func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -28556,6 +36236,7 @@ func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, checkNil bool, d *Deco
func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, checkNil bool, canChange bool,
d *Decoder) (_ map[int32]int64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -28575,7 +36256,13 @@ func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, checkNil bool, canChange
var mv int64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
@@ -28583,13 +36270,21 @@ func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -28615,6 +36310,7 @@ func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, checkNil bool, d *
func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, checkNil bool, canChange bool,
d *Decoder) (_ map[int32]float32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -28634,7 +36330,13 @@ func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, checkNil bool, canCh
var mv float32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
@@ -28642,13 +36344,21 @@ func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -28674,6 +36384,7 @@ func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, checkNil bool, d *
func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, checkNil bool, canChange bool,
d *Decoder) (_ map[int32]float64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -28693,7 +36404,13 @@ func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, checkNil bool, canCh
var mv float64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
@@ -28701,13 +36418,21 @@ func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -28733,6 +36458,7 @@ func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, checkNil bool, d *Decode
func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, checkNil bool, canChange bool,
d *Decoder) (_ map[int32]bool, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -28752,7 +36478,13 @@ func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, checkNil bool, canChange b
var mv bool
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
@@ -28760,13 +36492,21 @@ func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = int32(dd.DecodeInt(32))
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -28792,6 +36532,7 @@ func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, checkNil bool, d
func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, checkNil bool, canChange bool,
d *Decoder) (_ map[int64]interface{}, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -28811,7 +36552,13 @@ func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, checkNil bool, canC
var mv interface{}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -28824,7 +36571,13 @@ func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, checkNil bool, canC
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -28835,7 +36588,9 @@ func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, checkNil bool, canC
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -28861,6 +36616,7 @@ func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, checkNil bool, d *De
func (_ fastpathT) DecMapInt64StringV(v map[int64]string, checkNil bool, canChange bool,
d *Decoder) (_ map[int64]string, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -28880,7 +36636,13 @@ func (_ fastpathT) DecMapInt64StringV(v map[int64]string, checkNil bool, canChan
var mv string
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
@@ -28888,13 +36650,21 @@ func (_ fastpathT) DecMapInt64StringV(v map[int64]string, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -28920,6 +36690,7 @@ func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, checkNil bool, d *Decode
func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, checkNil bool, canChange bool,
d *Decoder) (_ map[int64]uint, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -28939,7 +36710,13 @@ func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, checkNil bool, canChange b
var mv uint
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -28947,13 +36724,21 @@ func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -28979,6 +36764,7 @@ func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, checkNil bool, d *Deco
func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, checkNil bool, canChange bool,
d *Decoder) (_ map[int64]uint8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -28998,7 +36784,13 @@ func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, checkNil bool, canChange
var mv uint8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
@@ -29006,13 +36798,21 @@ func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -29038,6 +36838,7 @@ func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, checkNil bool, d *De
func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, checkNil bool, canChange bool,
d *Decoder) (_ map[int64]uint16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -29057,7 +36858,13 @@ func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, checkNil bool, canChan
var mv uint16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
@@ -29065,13 +36872,21 @@ func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -29097,6 +36912,7 @@ func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, checkNil bool, d *De
func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, checkNil bool, canChange bool,
d *Decoder) (_ map[int64]uint32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -29116,7 +36932,13 @@ func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, checkNil bool, canChan
var mv uint32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
@@ -29124,13 +36946,21 @@ func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -29156,6 +36986,7 @@ func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, checkNil bool, d *De
func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, checkNil bool, canChange bool,
d *Decoder) (_ map[int64]uint64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -29175,7 +37006,13 @@ func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, checkNil bool, canChan
var mv uint64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
@@ -29183,13 +37020,21 @@ func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -29215,6 +37060,7 @@ func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, checkNil bool, d *
func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, checkNil bool, canChange bool,
d *Decoder) (_ map[int64]uintptr, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -29234,7 +37080,13 @@ func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, checkNil bool, canCh
var mv uintptr
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -29242,13 +37094,21 @@ func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -29274,6 +37134,7 @@ func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, checkNil bool, d *Decoder)
func (_ fastpathT) DecMapInt64IntV(v map[int64]int, checkNil bool, canChange bool,
d *Decoder) (_ map[int64]int, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -29293,7 +37154,13 @@ func (_ fastpathT) DecMapInt64IntV(v map[int64]int, checkNil bool, canChange boo
var mv int
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
@@ -29301,13 +37168,21 @@ func (_ fastpathT) DecMapInt64IntV(v map[int64]int, checkNil bool, canChange boo
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -29333,6 +37208,7 @@ func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, checkNil bool, d *Decode
func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, checkNil bool, canChange bool,
d *Decoder) (_ map[int64]int8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -29352,7 +37228,13 @@ func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, checkNil bool, canChange b
var mv int8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
@@ -29360,13 +37242,21 @@ func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -29392,6 +37282,7 @@ func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, checkNil bool, d *Deco
func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, checkNil bool, canChange bool,
d *Decoder) (_ map[int64]int16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -29411,7 +37302,13 @@ func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, checkNil bool, canChange
var mv int16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
@@ -29419,13 +37316,21 @@ func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -29451,6 +37356,7 @@ func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, checkNil bool, d *Deco
func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, checkNil bool, canChange bool,
d *Decoder) (_ map[int64]int32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -29470,7 +37376,13 @@ func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, checkNil bool, canChange
var mv int32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
@@ -29478,13 +37390,21 @@ func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -29510,6 +37430,7 @@ func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, checkNil bool, d *Deco
func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, checkNil bool, canChange bool,
d *Decoder) (_ map[int64]int64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -29529,7 +37450,13 @@ func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, checkNil bool, canChange
var mv int64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
@@ -29537,13 +37464,21 @@ func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -29569,6 +37504,7 @@ func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, checkNil bool, d *
func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, checkNil bool, canChange bool,
d *Decoder) (_ map[int64]float32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -29588,7 +37524,13 @@ func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, checkNil bool, canCh
var mv float32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
@@ -29596,13 +37538,21 @@ func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -29628,6 +37578,7 @@ func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, checkNil bool, d *
func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, checkNil bool, canChange bool,
d *Decoder) (_ map[int64]float64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -29647,7 +37598,13 @@ func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, checkNil bool, canCh
var mv float64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
@@ -29655,13 +37612,21 @@ func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, checkNil bool, canCh
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -29687,6 +37652,7 @@ func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, checkNil bool, d *Decode
func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, checkNil bool, canChange bool,
d *Decoder) (_ map[int64]bool, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -29706,7 +37672,13 @@ func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, checkNil bool, canChange b
var mv bool
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
@@ -29714,13 +37686,21 @@ func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeInt(64)
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -29746,6 +37726,7 @@ func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, checkNil bool, d *D
func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, checkNil bool, canChange bool,
d *Decoder) (_ map[bool]interface{}, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -29765,7 +37746,13 @@ func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, checkNil bool, canCha
var mv interface{}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -29778,7 +37765,13 @@ func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, checkNil bool, canCha
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
if mapGet {
mv = v[mk]
} else {
@@ -29789,7 +37782,9 @@ func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, checkNil bool, canCha
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -29815,6 +37810,7 @@ func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, checkNil bool, d *Deco
func (_ fastpathT) DecMapBoolStringV(v map[bool]string, checkNil bool, canChange bool,
d *Decoder) (_ map[bool]string, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -29834,7 +37830,13 @@ func (_ fastpathT) DecMapBoolStringV(v map[bool]string, checkNil bool, canChange
var mv string
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
@@ -29842,13 +37844,21 @@ func (_ fastpathT) DecMapBoolStringV(v map[bool]string, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeString()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -29874,6 +37884,7 @@ func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, checkNil bool, d *Decoder)
func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, checkNil bool, canChange bool,
d *Decoder) (_ map[bool]uint, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -29893,7 +37904,13 @@ func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, checkNil bool, canChange boo
var mv uint
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -29901,13 +37918,21 @@ func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, checkNil bool, canChange boo
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -29933,6 +37958,7 @@ func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, checkNil bool, d *Decode
func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, checkNil bool, canChange bool,
d *Decoder) (_ map[bool]uint8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -29952,7 +37978,13 @@ func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, checkNil bool, canChange b
var mv uint8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
@@ -29960,13 +37992,21 @@ func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint8(dd.DecodeUint(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -29992,6 +38032,7 @@ func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, checkNil bool, d *Deco
func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, checkNil bool, canChange bool,
d *Decoder) (_ map[bool]uint16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -30011,7 +38052,13 @@ func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, checkNil bool, canChange
var mv uint16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
@@ -30019,13 +38066,21 @@ func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint16(dd.DecodeUint(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -30051,6 +38106,7 @@ func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, checkNil bool, d *Deco
func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, checkNil bool, canChange bool,
d *Decoder) (_ map[bool]uint32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -30070,7 +38126,13 @@ func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, checkNil bool, canChange
var mv uint32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
@@ -30078,13 +38140,21 @@ func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uint32(dd.DecodeUint(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -30110,6 +38180,7 @@ func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, checkNil bool, d *Deco
func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, checkNil bool, canChange bool,
d *Decoder) (_ map[bool]uint64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -30129,7 +38200,13 @@ func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, checkNil bool, canChange
var mv uint64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
@@ -30137,13 +38214,21 @@ func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, checkNil bool, canChange
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeUint(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -30169,6 +38254,7 @@ func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, checkNil bool, d *De
func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, checkNil bool, canChange bool,
d *Decoder) (_ map[bool]uintptr, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -30188,7 +38274,13 @@ func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, checkNil bool, canChan
var mv uintptr
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
@@ -30196,13 +38288,21 @@ func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = uintptr(dd.DecodeUint(uintBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -30228,6 +38328,7 @@ func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, checkNil bool, d *Decoder) {
func (_ fastpathT) DecMapBoolIntV(v map[bool]int, checkNil bool, canChange bool,
d *Decoder) (_ map[bool]int, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -30247,7 +38348,13 @@ func (_ fastpathT) DecMapBoolIntV(v map[bool]int, checkNil bool, canChange bool,
var mv int
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
@@ -30255,13 +38362,21 @@ func (_ fastpathT) DecMapBoolIntV(v map[bool]int, checkNil bool, canChange bool,
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int(dd.DecodeInt(intBitsize))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -30287,6 +38402,7 @@ func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, checkNil bool, d *Decoder)
func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, checkNil bool, canChange bool,
d *Decoder) (_ map[bool]int8, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -30306,7 +38422,13 @@ func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, checkNil bool, canChange boo
var mv int8
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
@@ -30314,13 +38436,21 @@ func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, checkNil bool, canChange boo
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int8(dd.DecodeInt(8))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -30346,6 +38476,7 @@ func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, checkNil bool, d *Decode
func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, checkNil bool, canChange bool,
d *Decoder) (_ map[bool]int16, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -30365,7 +38496,13 @@ func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, checkNil bool, canChange b
var mv int16
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
@@ -30373,13 +38510,21 @@ func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int16(dd.DecodeInt(16))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -30405,6 +38550,7 @@ func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, checkNil bool, d *Decode
func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, checkNil bool, canChange bool,
d *Decoder) (_ map[bool]int32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -30424,7 +38570,13 @@ func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, checkNil bool, canChange b
var mv int32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
@@ -30432,13 +38584,21 @@ func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = int32(dd.DecodeInt(32))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -30464,6 +38624,7 @@ func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, checkNil bool, d *Decode
func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, checkNil bool, canChange bool,
d *Decoder) (_ map[bool]int64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -30483,7 +38644,13 @@ func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, checkNil bool, canChange b
var mv int64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
@@ -30491,13 +38658,21 @@ func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, checkNil bool, canChange b
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeInt(64)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -30523,6 +38698,7 @@ func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, checkNil bool, d *De
func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, checkNil bool, canChange bool,
d *Decoder) (_ map[bool]float32, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -30542,7 +38718,13 @@ func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, checkNil bool, canChan
var mv float32
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
@@ -30550,13 +38732,21 @@ func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = float32(dd.DecodeFloat(true))
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -30582,6 +38772,7 @@ func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, checkNil bool, d *De
func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, checkNil bool, canChange bool,
d *Decoder) (_ map[bool]float64, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -30601,7 +38792,13 @@ func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, checkNil bool, canChan
var mv float64
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
@@ -30609,13 +38806,21 @@ func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, checkNil bool, canChan
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeFloat(false)
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
@@ -30641,6 +38846,7 @@ func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, checkNil bool, d *Decoder)
func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, checkNil bool, canChange bool,
d *Decoder) (_ map[bool]bool, changed bool) {
dd := d.d
+ cr := d.cr
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -30660,7 +38866,13 @@ func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, checkNil bool, canChange boo
var mv bool
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
@@ -30668,13 +38880,21 @@ func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, checkNil bool, canChange boo
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil {
+ cr.sendContainerState(containerMapKey)
+ }
mk = dd.DecodeBool()
+ if cr != nil {
+ cr.sendContainerState(containerMapValue)
+ }
mv = dd.DecodeBool()
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
+ }
+ if cr != nil {
+ cr.sendContainerState(containerMapEnd)
}
return v, changed
}
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.go.tmpl b/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.go.tmpl
index 7bcbaf831..58cc6df4c 100644
--- a/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.go.tmpl
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.go.tmpl
@@ -1,4 +1,4 @@
-// //+build ignore
+// +build !notfastpath
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
@@ -106,6 +106,9 @@ func init() {
// -- -- fast path type switch
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
case []{{ .Elem }}:{{else}}
@@ -116,13 +119,16 @@ func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
{{end}}{{end}}
default:
- _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
return false
}
return true
}
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
case []{{ .Elem }}:
@@ -131,12 +137,16 @@ func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
{{end}}{{end}}{{end}}
default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
return false
}
return true
}
func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
case map[{{ .MapKey }}]{{ .Elem }}:
@@ -145,6 +155,7 @@ func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
{{end}}{{end}}{{end}}
default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
return false
}
return true
@@ -157,16 +168,18 @@ func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value)
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e)
}
func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) {
- ee := e.e
+ ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
for _, v2 := range v {
+ if cr != nil { cr.sendContainerState(containerArrayElem) }
{{ encmd .Elem "v2"}}
}
- ee.EncodeEnd()
+ if cr != nil { cr.sendContainerState(containerArrayEnd) }{{/* ee.EncodeEnd() */}}
}
{{end}}{{end}}{{end}}
@@ -178,6 +191,7 @@ func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value)
}
func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, e *Encoder) {
ee := e.e
+ cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
@@ -201,7 +215,9 @@ func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Ele
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
+ if cr != nil { cr.sendContainerState(containerMapKey) }
e.asis(v2[j].v)
+ if cr != nil { cr.sendContainerState(containerMapValue) }
e.encode(v[v2[j].i])
} {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v))
var i int
@@ -211,24 +227,28 @@ func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Ele
}
sort.Sort({{ sorttype .MapKey false}}(v2))
for _, k2 := range v2 {
+ if cr != nil { cr.sendContainerState(containerMapKey) }
{{if eq .MapKey "string"}}if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}{{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}}
+ if cr != nil { cr.sendContainerState(containerMapValue) }
{{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }}
} {{end}}
} else {
for k2, v2 := range v {
+ if cr != nil { cr.sendContainerState(containerMapKey) }
{{if eq .MapKey "string"}}if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}{{else}}{{ encmd .MapKey "k2"}}{{end}}
+ if cr != nil { cr.sendContainerState(containerMapValue) }
{{ encmd .Elem "v2"}}
}
}
- ee.EncodeEnd()
+ if cr != nil { cr.sendContainerState(containerMapEnd) }{{/* ee.EncodeEnd() */}}
}
{{end}}{{end}}{{end}}
@@ -237,6 +257,9 @@ func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Ele
// -- -- fast path type switch
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
+ if !fastpathEnabled {
+ return false
+ }
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
case []{{ .Elem }}:{{else}}
@@ -250,6 +273,7 @@ func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
}
{{end}}{{end}}
default:
+ _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
return false
}
return true
@@ -283,8 +307,7 @@ func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, checkNil
*vp = v
}
}
-func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil bool, canChange bool,
- d *Decoder) (_ []{{ .Elem }}, changed bool) {
+func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil bool, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) {
dd := d.d
{{/* // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() */}}
if checkNil && dd.TryDecodeAsNil() {
@@ -295,28 +318,22 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil b
}
slh, containerLenS := d.decSliceHelperStart()
- x2read := containerLenS
- var xtrunc bool
- if canChange && v == nil {
- var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}); xtrunc {
- x2read = xlen
- }
- v = make([]{{ .Elem }}, xlen)
- changed = true
- }
if containerLenS == 0 {
- if canChange && len(v) != 0 {
- v = v[:0]
- changed = true
- }{{/*
- // slh.End() // dd.ReadArrayEnd()
- */}}
- return v, changed
+ if canChange {
+ if v == nil {
+ v = []{{ .Elem }}{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return
}
- {{/* // for j := 0; j < containerLenS; j++ { */}}
if containerLenS > 0 {
+ x2read := containerLenS
+ var xtrunc bool
if containerLenS > cap(v) {
if canChange { {{/*
// fast-path is for "basic" immutable types, so no need to copy them over
@@ -324,37 +341,64 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil b
// copy(s, v[:cap(v)])
// v = s */}}
var xlen int
- if xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}); xtrunc {
- x2read = xlen
+ xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }})
+ if xtrunc {
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]{{ .Elem }}, xlen)
+ }
+ } else {
+ v = make([]{{ .Elem }}, xlen)
}
- v = make([]{{ .Elem }}, xlen)
changed = true
} else {
d.arrayCannotExpand(len(v), containerLenS)
- x2read = len(v)
}
+ x2read = len(v)
} else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
- }
- {{/* // all checks done. cannot go past len. */}}
+ if canChange {
+ v = v[:containerLenS]
+ changed = true
+ }
+ } {{/* // all checks done. cannot go past len. */}}
j := 0
- for ; j < x2read; j++ {
+ for ; j < x2read; j++ {
+ slh.ElemContainerState(j)
{{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }}
}
if xtrunc { {{/* // means canChange=true, changed=true already. */}}
for ; j < containerLenS; j++ {
v = append(v, {{ zerocmd .Elem }})
+ slh.ElemContainerState(j)
{{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }}
}
} else if !canChange {
- for ; j < containerLenS; j++ {
+ for ; j < containerLenS; j++ {
+ slh.ElemContainerState(j)
d.swallow()
}
}
} else {
- j := 0
- for ; !dd.CheckBreak(); j++ {
+ breakFound := dd.CheckBreak() {{/* check break first, so we can initialize v with a capacity of 4 if necessary */}}
+ if breakFound {
+ if canChange {
+ if v == nil {
+ v = []{{ .Elem }}{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return
+ }
+ if cap(v) == 0 {
+ v = make([]{{ .Elem }}, 1, 4)
+ changed = true
+ }
+ j := 0
+ for ; !breakFound; j++ {
if j >= len(v) {
if canChange {
v = append(v, {{ zerocmd .Elem }})
@@ -362,16 +406,22 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil b
} else {
d.arrayCannotExpand(len(v), j+1)
}
- }
+ }
+ slh.ElemContainerState(j)
if j < len(v) { {{/* // all checks done. cannot go past len. */}}
{{ if eq .Elem "interface{}" }}d.decode(&v[j])
{{ else }}v[j] = {{ decmd .Elem }}{{ end }}
} else {
d.swallow()
}
+ breakFound = dd.CheckBreak()
+ }
+ if canChange && j < len(v) {
+ v = v[:j]
+ changed = true
}
- slh.End()
}
+ slh.End()
return v, changed
}
@@ -405,6 +455,7 @@ func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .E
func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, canChange bool,
d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) {
dd := d.d
+ cr := d.cr
{{/* // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() */}}
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
@@ -424,11 +475,13 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Ele
var mv {{ .Elem }}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
+ if cr != nil { cr.sendContainerState(containerMapKey) }
{{ if eq .MapKey "interface{}" }}mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
}{{ else }}mk = {{ decmd .MapKey }}{{ end }}
+ if cr != nil { cr.sendContainerState(containerMapValue) }
{{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil }
d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }}
if v != nil {
@@ -437,19 +490,21 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Ele
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
+ if cr != nil { cr.sendContainerState(containerMapKey) }
{{ if eq .MapKey "interface{}" }}mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
}{{ else }}mk = {{ decmd .MapKey }}{{ end }}
+ if cr != nil { cr.sendContainerState(containerMapValue) }
{{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil }
d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }}
if v != nil {
v[mk] = mv
}
}
- dd.ReadEnd()
}
+ if cr != nil { cr.sendContainerState(containerMapEnd) }
return v, changed
}
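
Note: the template changes above gate every generated type switch behind fastpathEnabled and emit container-state callbacks per element. For readers unfamiliar with the fast-path scheme, the following standalone sketch (hypothetical names, not the generated code) shows the dispatch pattern the template expands for each supported slice/map type: handle known concrete types directly and report false so the caller can fall back to reflection.

package main

import (
	"fmt"
	"reflect"
)

// encStringSlice and encStringIntMap stand in for the per-type encoders
// that the fast-path template generates; they are illustrative only.
func encStringSlice(v []string)        { fmt.Println("fast path []string:", v) }
func encStringIntMap(v map[string]int) { fmt.Println("fast path map[string]int:", v) }

// tryFastPath mirrors the shape of fastpathEncodeTypeSwitch: true means the
// concrete type was handled, false tells the caller to use reflection.
func tryFastPath(iv interface{}) bool {
	switch v := iv.(type) {
	case []string:
		encStringSlice(v)
	case map[string]int:
		encStringIntMap(v)
	default:
		_ = v
		return false
	}
	return true
}

func main() {
	for _, x := range []interface{}{[]string{"a", "b"}, map[string]int{"n": 1}, 3.5} {
		if !tryFastPath(x) {
			fmt.Println("reflection fallback for", reflect.TypeOf(x))
		}
	}
}
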
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.not.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.not.go
new file mode 100644
index 000000000..d6f5f0c91
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.not.go
@@ -0,0 +1,32 @@
+// +build notfastpath
+
+package codec
+
+import "reflect"
+
+// The generated fast-path code is very large, and adds a few seconds to the build time.
+// This causes test execution, and execution of small tools which use codec,
+// to take a long time.
+//
+// To mitigate, we now support the notfastpath tag.
+// This tag disables fastpath during build, allowing for faster build, test execution,
+// short-program runs, etc.
+
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }
+func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
+func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
+
+type fastpathT struct{}
+type fastpathE struct {
+ rtid uintptr
+ rt reflect.Type
+ encfn func(*encFnInfo, reflect.Value)
+ decfn func(*decFnInfo, reflect.Value)
+}
+type fastpathA [0]fastpathE
+
+func (x fastpathA) index(rtid uintptr) int { return -1 }
+
+var fastpathAV fastpathA
+var fastpathTV fastpathT
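
Note on the new notfastpath tag: it only affects how the package is compiled (e.g. go test -tags notfastpath ./...); the exported API is identical either way. As a quick sanity sketch — the handle choice and values here are arbitrary, not part of this change — a round-trip through the public Encoder/Decoder behaves the same whether or not the fast paths are built in:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var (
		mh  codec.MsgpackHandle
		buf []byte
	)
	in := map[string]int{"a": 1, "b": 2} // a type that has a generated fast path when enabled

	if err := codec.NewEncoderBytes(&buf, &mh).Encode(in); err != nil {
		panic(err)
	}
	out := map[string]int{}
	if err := codec.NewDecoderBytes(buf, &mh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // map[a:1 b:2]
}
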
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-array.go.tmpl
index eb31a9a96..2caae5bfd 100644
--- a/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-array.go.tmpl
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-array.go.tmpl
@@ -1,86 +1,101 @@
-{{var "v"}} := {{ if not isArray}}*{{ end }}{{ .Varname }}
+{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}
-
-var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}}
-var {{var "c"}}, {{var "rt"}} bool {{/* // changed, truncated */}}
-_, _, _ = {{var "c"}}, {{var "rt"}}, {{var "rl"}}
-{{var "rr"}} = {{var "l"}}
-{{/* rl is NOT used. Only used for getting DecInferLen. len(r) used directly in code */}}
-
-{{ if not isArray }}if {{var "v"}} == nil {
- if {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}); {{var "rt"}} {
- {{var "rr"}} = {{var "rl"}}
- }
- {{var "v"}} = make({{ .CTyp }}, {{var "rl"}})
- {{var "c"}} = true
-}
-{{ end }}
-if {{var "l"}} == 0 { {{ if isSlice }}
- if len({{var "v"}}) != 0 {
- {{var "v"}} = {{var "v"}}[:0]
- {{var "c"}} = true
- } {{ end }}
+var {{var "c"}} bool {{/* // changed */}}
+if {{var "l"}} == 0 {
+ {{if isSlice }}if {{var "v"}} == nil {
+ {{var "v"}} = []{{ .Typ }}{}
+ {{var "c"}} = true
+ } else if len({{var "v"}}) != 0 {
+ {{var "v"}} = {{var "v"}}[:0]
+ {{var "c"}} = true
+ } {{end}} {{if isChan }}if {{var "v"}} == nil {
+ {{var "v"}} = make({{ .CTyp }}, 0)
+ {{var "c"}} = true
+ } {{end}}
} else if {{var "l"}} > 0 {
- {{ if isChan }}
+ {{if isChan }}if {{var "v"}} == nil {
+ {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ {{var "v"}} = make({{ .CTyp }}, {{var "rl"}})
+ {{var "c"}} = true
+ }
for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ {
+ {{var "h"}}.ElemContainerState({{var "r"}})
var {{var "t"}} {{ .Typ }}
{{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
- {{var "v"}} <- {{var "t"}}
- {{ else }}
- if {{var "l"}} > cap({{var "v"}}) {
- {{ if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}})
- {{ else }}{{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
- {{ if .Immutable }}
- {{var "v2"}} := {{var "v"}}
- {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
- if len({{var "v"}}) > 0 {
- copy({{var "v"}}, {{var "v2"}}[:cap({{var "v2"}})])
- }
- {{ else }}{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
- {{ end }}{{var "c"}} = true
- {{ end }}
- {{var "rr"}} = len({{var "v"}})
- } else if {{var "l"}} != len({{var "v"}}) {
- {{ if isSlice }}{{var "v"}} = {{var "v"}}[:{{var "l"}}]
- {{var "c"}} = true {{ end }}
+ {{var "v"}} <- {{var "t"}}
}
+ {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}}
+ var {{var "rt"}} bool {{/* truncated */}}
+ if {{var "l"}} > cap({{var "v"}}) {
+ {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}})
+ {{ else }}{{if not .Immutable }}
+ {{var "rg"}} := len({{var "v"}}) > 0
+ {{var "v2"}} := {{var "v"}} {{end}}
+ {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ if {{var "rt"}} {
+ if {{var "rl"}} <= cap({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "rl"}}]
+ } else {
+ {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+ }
+ } else {
+ {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+ }
+ {{var "c"}} = true
+ {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }}
+ if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}}
+ } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "l"}}]
+ {{var "c"}} = true
+ } {{end}} {{/* end isSlice:47 */}}
{{var "j"}} := 0
for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ {
+ {{var "h"}}.ElemContainerState({{var "j"}})
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
}
- {{ if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
+ {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
+ {{var "h"}}.ElemContainerState({{var "j"}})
z.DecSwallow()
}
- {{ else }}if {{var "rt"}} { {{/* means that it is mutable and slice */}}
+ {{ else }}if {{var "rt"}} {
for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
{{var "v"}} = append({{var "v"}}, {{ zero}})
+ {{var "h"}}.ElemContainerState({{var "j"}})
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
}
- }
- {{ end }}
- {{ end }}{{/* closing 'if not chan' */}}
-} else {
- for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
- if {{var "j"}} >= len({{var "v"}}) {
- {{ if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1)
- {{ else if isSlice}}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }}
- {{var "c"}} = true {{ end }}
- }
- {{ if isChan}}
+ } {{end}} {{/* end isArray:56 */}}
+ {{end}} {{/* end isChan:16 */}}
+} else { {{/* len < 0 */}}
+ {{var "j"}} := 0
+ for ; !r.CheckBreak(); {{var "j"}}++ {
+ {{if isChan }}
+ {{var "h"}}.ElemContainerState({{var "j"}})
var {{var "t"}} {{ .Typ }}
{{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
{{var "v"}} <- {{var "t"}}
{{ else }}
+ if {{var "j"}} >= len({{var "v"}}) {
+ {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1)
+ {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }}
+ {{var "c"}} = true {{end}}
+ }
+ {{var "h"}}.ElemContainerState({{var "j"}})
if {{var "j"}} < len({{var "v"}}) {
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
} else {
z.DecSwallow()
}
- {{ end }}
+ {{end}}
}
- {{var "h"}}.End()
+ {{if isSlice }}if {{var "j"}} < len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "j"}}]
+ {{var "c"}} = true
+ } else if {{var "j"}} == 0 && {{var "v"}} == nil {
+ {{var "v"}} = []{{ .Typ }}{}
+ {{var "c"}} = true
+ }{{end}}
}
-{{ if not isArray }}if {{var "c"}} {
+{{var "h"}}.End()
+{{if not isArray }}if {{var "c"}} {
*{{ .Varname }} = {{var "v"}}
-}{{ end }}
-
+}{{end}}
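
The reworked list template above leans on DecInferLen to bound the initial allocation when a stream claims a huge element count, then grows by append only as elements actually arrive. A standalone sketch of that idea (parameter names are illustrative, not the package's implementation):

package main

import "fmt"

// inferLen caps a stream-supplied length so a hostile or corrupt input
// cannot force a giant up-front allocation; the decode loop then appends
// past the cap only if elements truly arrive.
func inferLen(claimed, maxInitBytes, elemSize int) (n int, truncated bool) {
	if max := maxInitBytes / elemSize; claimed > max {
		return max, true
	}
	return claimed, false
}

func main() {
	n, truncated := inferLen(1<<30, 1<<20, 8) // stream claims ~1e9 int64s
	buf := make([]int64, 0, n)                // small, safe initial capacity
	fmt.Println(cap(buf), truncated)          // 131072 true
}
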
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-map.go.tmpl
index 836eb3b5d..77400e0a1 100644
--- a/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-map.go.tmpl
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-map.go.tmpl
@@ -8,7 +8,7 @@ if {{var "v"}} == nil {
}
var {{var "mk"}} {{ .KTyp }}
var {{var "mv"}} {{ .Typ }}
-var {{var "mg"}} bool
+var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
if {{var "bh"}}.MapValueReset {
{{if decElemKindPtr}}{{var "mg"}} = true
{{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
@@ -16,31 +16,43 @@ if {{var "bh"}}.MapValueReset {
{{end}} }
if {{var "l"}} > 0 {
for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = string({{var "bv"}})
- }{{ end }}
+ }{{ end }}{{if decElemKindPtr}}
+ {{var "ms"}} = true{{end}}
if {{var "mg"}} {
- {{var "mv"}} = {{var "v"}}[{{var "mk"}}]
+ {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
+ if {{var "mok"}} {
+ {{var "ms"}} = false
+ } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+ z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
- if {{var "v"}} != nil {
+ if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
}
}
} else if {{var "l"}} < 0 {
for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = string({{var "bv"}})
- }{{ end }}
+ }{{ end }}{{if decElemKindPtr}}
+ {{var "ms"}} = true {{ end }}
if {{var "mg"}} {
- {{var "mv"}} = {{var "v"}}[{{var "mk"}}]
+ {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
+ if {{var "mok"}} {
+ {{var "ms"}} = false
+ } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+ z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
- if {{var "v"}} != nil {
+ if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
}
}
-r.ReadEnd()
} // else len==0: TODO: Should we clear map entries?
+z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }})
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.generated.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.generated.go
index 9710eca50..22bce776b 100644
--- a/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.generated.go
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.generated.go
@@ -115,6 +115,15 @@ func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
return false
}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncSendContainerState(c containerState) {
+ if f.e.cr != nil {
+ f.e.cr.sendContainerState(c)
+ }
+}
+
+// ---------------- DECODER FOLLOWS -----------------
+
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
return f.d.h
@@ -167,11 +176,8 @@ func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
// bs := f.dd.DecodeBytes(f.d.b[:], true, true)
- f.d.r.track()
- f.d.swallow()
- bs := f.d.r.stopTrack()
- // fmt.Printf(">>>>>> CODECGEN JSON: %s\n", bs)
- fnerr := tm.UnmarshalJSON(bs)
+ // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
+ fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
if fnerr != nil {
panic(fnerr)
}
@@ -218,3 +224,10 @@ func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) {
return decInferLen(clen, maxlen, unit)
}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSendContainerState(c containerState) {
+ if f.d.cr != nil {
+ f.d.cr.sendContainerState(c)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.go.tmpl b/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.go.tmpl
index 8bc506112..31958574f 100644
--- a/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.go.tmpl
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.go.tmpl
@@ -106,6 +106,14 @@ func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
}
return false
}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncSendContainerState(c containerState) {
+ if f.e.cr != nil {
+ f.e.cr.sendContainerState(c)
+ }
+}
+
+// ---------------- DECODER FOLLOWS -----------------
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
@@ -150,11 +158,8 @@ func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
// bs := f.dd.DecodeBytes(f.d.b[:], true, true)
- f.d.r.track()
- f.d.swallow()
- bs := f.d.r.stopTrack()
- // fmt.Printf(">>>>>> CODECGEN JSON: %s\n", bs)
- fnerr := tm.UnmarshalJSON(bs)
+ // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
+ fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
if fnerr != nil {
panic(fnerr)
}
@@ -195,6 +200,12 @@ func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) {
return decInferLen(clen, maxlen, unit)
}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSendContainerState(c containerState) {
+ if f.d.cr != nil {
+ f.d.cr.sendContainerState(c)
+ }
+}
{{/*
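
Both gen-helper variants above now hand UnmarshalJSON the complete raw bytes of the next value (nextValueBytes) instead of replaying a tracked read. The contract being satisfied is the standard json.Unmarshaler one: the callee receives the whole value. json.RawMessage from the standard library captures the same unit, so it serves as a rough analogy (this is an illustration, not the codec internals):

package main

import (
	"encoding/json"
	"fmt"
)

// Wrapper.Raw holds the untouched bytes of one complete JSON value, the
// same unit an UnmarshalJSON implementation expects to receive in full.
type Wrapper struct {
	Name string
	Raw  json.RawMessage
}

func main() {
	in := []byte(`{"Name":"x","Raw":{"a":[1,2,3],"b":"y"}}`)
	var w Wrapper
	if err := json.Unmarshal(in, &w); err != nil {
		panic(err)
	}
	fmt.Printf("full value passed through: %s\n", w.Raw) // {"a":[1,2,3],"b":"y"}
}
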
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.generated.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.generated.go
index dab6d94bd..fb6f4b809 100644
--- a/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.generated.go
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.generated.go
@@ -16,7 +16,7 @@ if {{var "v"}} == nil {
}
var {{var "mk"}} {{ .KTyp }}
var {{var "mv"}} {{ .Typ }}
-var {{var "mg"}} bool
+var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
if {{var "bh"}}.MapValueReset {
{{if decElemKindPtr}}{{var "mg"}} = true
{{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
@@ -24,122 +24,149 @@ if {{var "bh"}}.MapValueReset {
{{end}} }
if {{var "l"}} > 0 {
for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = string({{var "bv"}})
- }{{ end }}
+ }{{ end }}{{if decElemKindPtr}}
+ {{var "ms"}} = true{{end}}
if {{var "mg"}} {
- {{var "mv"}} = {{var "v"}}[{{var "mk"}}]
+ {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
+ if {{var "mok"}} {
+ {{var "ms"}} = false
+ } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+ z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
- if {{var "v"}} != nil {
+ if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
}
}
} else if {{var "l"}} < 0 {
for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
+ z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = string({{var "bv"}})
- }{{ end }}
+ }{{ end }}{{if decElemKindPtr}}
+ {{var "ms"}} = true {{ end }}
if {{var "mg"}} {
- {{var "mv"}} = {{var "v"}}[{{var "mk"}}]
+ {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
+ if {{var "mok"}} {
+ {{var "ms"}} = false
+ } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+ z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
- if {{var "v"}} != nil {
+ if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
}
}
-r.ReadEnd()
} // else len==0: TODO: Should we clear map entries?
+z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }})
`
const genDecListTmpl = `
-{{var "v"}} := {{ if not isArray}}*{{ end }}{{ .Varname }}
+{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}
-
-var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}}
-var {{var "c"}}, {{var "rt"}} bool {{/* // changed, truncated */}}
-_, _, _ = {{var "c"}}, {{var "rt"}}, {{var "rl"}}
-{{var "rr"}} = {{var "l"}}
-{{/* rl is NOT used. Only used for getting DecInferLen. len(r) used directly in code */}}
-
-{{ if not isArray }}if {{var "v"}} == nil {
- if {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}); {{var "rt"}} {
- {{var "rr"}} = {{var "rl"}}
- }
- {{var "v"}} = make({{ .CTyp }}, {{var "rl"}})
- {{var "c"}} = true
-}
-{{ end }}
-if {{var "l"}} == 0 { {{ if isSlice }}
- if len({{var "v"}}) != 0 {
- {{var "v"}} = {{var "v"}}[:0]
- {{var "c"}} = true
- } {{ end }}
+var {{var "c"}} bool {{/* // changed */}}
+if {{var "l"}} == 0 {
+ {{if isSlice }}if {{var "v"}} == nil {
+ {{var "v"}} = []{{ .Typ }}{}
+ {{var "c"}} = true
+ } else if len({{var "v"}}) != 0 {
+ {{var "v"}} = {{var "v"}}[:0]
+ {{var "c"}} = true
+ } {{end}} {{if isChan }}if {{var "v"}} == nil {
+ {{var "v"}} = make({{ .CTyp }}, 0)
+ {{var "c"}} = true
+ } {{end}}
} else if {{var "l"}} > 0 {
- {{ if isChan }}
+ {{if isChan }}if {{var "v"}} == nil {
+ {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ {{var "v"}} = make({{ .CTyp }}, {{var "rl"}})
+ {{var "c"}} = true
+ }
for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ {
+ {{var "h"}}.ElemContainerState({{var "r"}})
var {{var "t"}} {{ .Typ }}
{{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
- {{var "v"}} <- {{var "t"}}
- {{ else }}
- if {{var "l"}} > cap({{var "v"}}) {
- {{ if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}})
- {{ else }}{{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
- {{ if .Immutable }}
- {{var "v2"}} := {{var "v"}}
- {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
- if len({{var "v"}}) > 0 {
- copy({{var "v"}}, {{var "v2"}}[:cap({{var "v2"}})])
- }
- {{ else }}{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
- {{ end }}{{var "c"}} = true
- {{ end }}
- {{var "rr"}} = len({{var "v"}})
- } else if {{var "l"}} != len({{var "v"}}) {
- {{ if isSlice }}{{var "v"}} = {{var "v"}}[:{{var "l"}}]
- {{var "c"}} = true {{ end }}
+ {{var "v"}} <- {{var "t"}}
}
+ {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}}
+ var {{var "rt"}} bool {{/* truncated */}}
+ if {{var "l"}} > cap({{var "v"}}) {
+ {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}})
+ {{ else }}{{if not .Immutable }}
+ {{var "rg"}} := len({{var "v"}}) > 0
+ {{var "v2"}} := {{var "v"}} {{end}}
+ {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ if {{var "rt"}} {
+ if {{var "rl"}} <= cap({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "rl"}}]
+ } else {
+ {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+ }
+ } else {
+ {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+ }
+ {{var "c"}} = true
+ {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }}
+ if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}}
+ } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "l"}}]
+ {{var "c"}} = true
+ } {{end}} {{/* end isSlice:47 */}}
{{var "j"}} := 0
for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ {
+ {{var "h"}}.ElemContainerState({{var "j"}})
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
}
- {{ if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
+ {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
+ {{var "h"}}.ElemContainerState({{var "j"}})
z.DecSwallow()
}
- {{ else }}if {{var "rt"}} { {{/* means that it is mutable and slice */}}
+ {{ else }}if {{var "rt"}} {
for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
{{var "v"}} = append({{var "v"}}, {{ zero}})
+ {{var "h"}}.ElemContainerState({{var "j"}})
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
}
- }
- {{ end }}
- {{ end }}{{/* closing 'if not chan' */}}
-} else {
- for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
- if {{var "j"}} >= len({{var "v"}}) {
- {{ if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1)
- {{ else if isSlice}}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }}
- {{var "c"}} = true {{ end }}
- }
- {{ if isChan}}
+ } {{end}} {{/* end isArray:56 */}}
+ {{end}} {{/* end isChan:16 */}}
+} else { {{/* len < 0 */}}
+ {{var "j"}} := 0
+ for ; !r.CheckBreak(); {{var "j"}}++ {
+ {{if isChan }}
+ {{var "h"}}.ElemContainerState({{var "j"}})
var {{var "t"}} {{ .Typ }}
{{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
{{var "v"}} <- {{var "t"}}
{{ else }}
+ if {{var "j"}} >= len({{var "v"}}) {
+ {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1)
+ {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }}
+ {{var "c"}} = true {{end}}
+ }
+ {{var "h"}}.ElemContainerState({{var "j"}})
if {{var "j"}} < len({{var "v"}}) {
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
} else {
z.DecSwallow()
}
- {{ end }}
+ {{end}}
}
- {{var "h"}}.End()
+ {{if isSlice }}if {{var "j"}} < len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "j"}}]
+ {{var "c"}} = true
+ } else if {{var "j"}} == 0 && {{var "v"}} == nil {
+ {{var "v"}} = []{{ .Typ }}{}
+ {{var "c"}} = true
+ }{{end}}
}
-{{ if not isArray }}if {{var "c"}} {
+{{var "h"}}.End()
+{{if not isArray }}if {{var "c"}} {
*{{ .Varname }} = {{var "v"}}
-}{{ end }}
-
+}{{end}}
`
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.go
index b158564ba..a075e7c0d 100644
--- a/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.go
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.go
@@ -91,7 +91,8 @@ import (
// v3: Changes for Kubernetes:
// changes in signature of some unpublished helper methods and codecgen cmdline arguments.
// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen)
-const GenVersion = 4
+// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections.
+const GenVersion = 5
const (
genCodecPkg = "codec1978"
@@ -110,6 +111,14 @@ const (
genUseOneFunctionForDecStructMap = true
)
+type genStructMapStyle uint8
+
+const (
+ genStructMapStyleConsolidated genStructMapStyle = iota
+ genStructMapStyleLenPrefix
+ genStructMapStyleCheckBreak
+)
+
var (
genAllTypesSamePkgErr = errors.New("All types must be in the same package")
genExpectArrayOrMapErr = errors.New("unexpected type. Expecting array/map/slice")
@@ -230,10 +239,18 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn
x.line("")
x.line("const (")
+ x.linef("// ----- content types ----")
x.linef("codecSelferC_UTF8%s = %v", x.xs, int64(c_UTF8))
x.linef("codecSelferC_RAW%s = %v", x.xs, int64(c_RAW))
+ x.linef("// ----- value types used ----")
x.linef("codecSelferValueTypeArray%s = %v", x.xs, int64(valueTypeArray))
x.linef("codecSelferValueTypeMap%s = %v", x.xs, int64(valueTypeMap))
+ x.linef("// ----- containerStateValues ----")
+ x.linef("codecSelfer_containerMapKey%s = %v", x.xs, int64(containerMapKey))
+ x.linef("codecSelfer_containerMapValue%s = %v", x.xs, int64(containerMapValue))
+ x.linef("codecSelfer_containerMapEnd%s = %v", x.xs, int64(containerMapEnd))
+ x.linef("codecSelfer_containerArrayElem%s = %v", x.xs, int64(containerArrayElem))
+ x.linef("codecSelfer_containerArrayEnd%s = %v", x.xs, int64(containerArrayEnd))
x.line(")")
x.line("var (")
x.line("codecSelferBitsize" + x.xs + " = uint8(reflect.TypeOf(uint(0)).Bits())")
@@ -255,8 +272,6 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn
x.line(`err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `)
x.linef(`%v, %sGenVersion, file)`, GenVersion, x.cpfx)
x.line("panic(err)")
- // x.linef(`panic(fmt.Errorf("Re-run codecgen due to version mismatch: `+
- // `current: %%v, need %%v, file: %%v", %v, %sGenVersion, file))`, GenVersion, x.cpfx)
x.linef("}")
x.line("if false { // reference the types, but skip this branch at build/run time")
var n int
@@ -515,21 +530,21 @@ func (x *genRunner) selfer(encode bool) {
x.out(fnSigPfx)
x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {")
x.genRequiredMethodVars(false)
- x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, 0)
+ x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleConsolidated)
x.line("}")
x.line("")
} else {
x.out(fnSigPfx)
x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {")
x.genRequiredMethodVars(false)
- x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, 1)
+ x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleLenPrefix)
x.line("}")
x.line("")
x.out(fnSigPfx)
x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {")
x.genRequiredMethodVars(false)
- x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, 2)
+ x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleCheckBreak)
x.line("}")
x.line("")
}
@@ -548,10 +563,8 @@ func (x *genRunner) selfer(encode bool) {
func (x *genRunner) xtraSM(varname string, encode bool, t reflect.Type) {
if encode {
x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), varname)
- // x.line("h.enc" + x.genMethodNameT(t) + "(" + x.genTypeName(t) + "(" + varname + "), e)")
} else {
x.linef("h.dec%s((*%s)(%s), d)", x.genMethodNameT(t), x.genTypeName(t), varname)
- // x.line("h.dec" + x.genMethodNameT(t) + "((*" + x.genTypeName(t) + ")(" + varname + "), d)")
}
if _, ok := x.tm[t]; !ok {
x.tm[t] = struct{}{}
@@ -815,12 +828,14 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
}
x.linef("%s[%v] = %s", numfieldsvar, j, omitline)
}
+ x.linef("var %snn%s int", genTempVarPfx, i)
x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray {
x.line("r.EncodeArrayStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")")
x.linef("} else {") // if not ti.toArray
- x.linef("var %snn%s int = %v", genTempVarPfx, i, nn)
+ x.linef("%snn%s = %v", genTempVarPfx, i, nn)
x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i)
x.linef("r.EncodeMapStart(%snn%s)", genTempVarPfx, i)
+ x.linef("%snn%s = %v", genTempVarPfx, i, 0)
// x.line("r.EncodeMapStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")")
x.line("}") // close if not StructToArray
@@ -864,11 +879,9 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
if labelUsed {
x.line("if " + isNilVarName + " { r.EncodeNil() } else { ")
}
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
if si.omitEmpty {
x.linef("if %s[%v] {", numfieldsvar, j)
- // omitEmptyVarNameX := genTempVarPfx + "ov" + i
- // x.line("var " + omitEmptyVarNameX + " " + x.genTypeName(t2.Type))
- // x.encVar(omitEmptyVarNameX, t2.Type)
}
x.encVar(varname+"."+t2.Name, t2.Type)
if si.omitEmpty {
@@ -879,21 +892,15 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
if labelUsed {
x.line("}")
}
+
x.linef("} else {") // if not ti.toArray
- // omitEmptyVar := genTempVarPfx + "x" + i + t2.Name
- // x.line("const " + omitEmptyVar + " bool = " + strconv.FormatBool(si.omitEmpty))
- // doOmitEmpty := si.omitEmpty && t2.Type.Kind() != reflect.Struct
+
if si.omitEmpty {
x.linef("if %s[%v] {", numfieldsvar, j)
- // x.linef(`println("Encoding field: %v")`, j)
- // x.out("if ")
- // if labelUsed {
- // x.out("!" + isNilVarName + " && ")
- // }
- // x.line(varname + "." + t2.Name + " != " + genZeroValueR(t2.Type, x.tc) + " {")
}
- // x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(\"" + t2.Name + "\"))")
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(\"" + si.encName + "\"))")
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
if labelUsed {
x.line("if " + isNilVarName + " { r.EncodeNil() } else { ")
x.encVar(varname+"."+t2.Name, t2.Type)
@@ -906,9 +913,12 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
}
x.linef("} ") // end if/else ti.toArray
}
- x.line("if " + sepVarname + " {")
- x.line("r.EncodeEnd()")
+ x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray {
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
+ x.line("} else {")
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
x.line("}")
+
}
func (x *genRunner) encListFallback(varname string, t reflect.Type) {
@@ -917,14 +927,16 @@ func (x *genRunner) encListFallback(varname string, t reflect.Type) {
x.line("r.EncodeArrayStart(len(" + varname + "))")
if t.Kind() == reflect.Chan {
x.linef("for %si%s, %si2%s := 0, len(%s); %si%s < %si2%s; %si%s++ {", g, i, g, i, varname, g, i, g, i, g, i)
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
x.linef("%sv%s := <-%s", g, i, varname)
} else {
// x.linef("for %si%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname)
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
}
x.encVar(genTempVarPfx+"v"+i, t.Elem())
x.line("}")
- x.line("r.EncodeEnd()")
+ x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
}
func (x *genRunner) encMapFallback(varname string, t reflect.Type) {
@@ -933,10 +945,12 @@ func (x *genRunner) encMapFallback(varname string, t reflect.Type) {
x.line("r.EncodeMapStart(len(" + varname + "))")
x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
// x.line("for " + genTempVarPfx + "k" + i + ", " + genTempVarPfx + "v" + i + " := range " + varname + " {")
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
x.encVar(genTempVarPfx+"k"+i, t.Key())
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
x.encVar(genTempVarPfx+"v"+i, t.Elem())
x.line("}")
- x.line("r.EncodeEnd()")
+ x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
}
func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) {
@@ -954,8 +968,6 @@ func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) {
x.line("if r.TryDecodeAsNil() {")
if t.Kind() == reflect.Ptr {
x.line("if " + varname + " != nil { ")
- // x.line("var " + genTempVarPfx + i + " " + x.genTypeName(t.Elem()))
- // x.line("*" + varname + " = " + genTempVarPfx + i)
// if varname is a field of a struct (has a dot in it),
// then just set it to nil
@@ -964,12 +976,8 @@ func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) {
} else {
x.line("*" + varname + " = " + x.genZeroValueR(t.Elem()))
}
- // x.line("*" + varname + " = nil")
x.line("}")
-
} else {
- // x.line("var " + genTempVarPfx + i + " " + x.genTypeName(t))
- // x.line(varname + " = " + genTempVarPfx + i)
x.line(varname + " = " + x.genZeroValueR(t))
}
x.line("} else {")
@@ -1149,8 +1157,6 @@ func (x *genRunner) dec(varname string, t reflect.Type) {
} else if fastpathAV.index(rtid) != -1 {
g := x.newGenV(t)
x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)")
- // x.line("z." + g.MethodNamePfx("Dec", false) + "(" + varname + ")")
- // x.line(g.FastpathName(false) + "(" + varname + ", d)")
} else {
x.xtraSM(varname, false, t)
// x.decListFallback(varname, rtid, false, t)
@@ -1163,8 +1169,6 @@ func (x *genRunner) dec(varname string, t reflect.Type) {
if fastpathAV.index(rtid) != -1 {
g := x.newGenV(t)
x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)")
- // x.line("z." + g.MethodNamePfx("Dec", false) + "(" + varname + ")")
- // x.line(g.FastpathName(false) + "(" + varname + ", d)")
} else {
x.xtraSM(varname, false, t)
// x.decMapFallback(varname, rtid, t)
@@ -1294,6 +1298,7 @@ func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type
func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) {
type tstruc struct {
TempVar string
+ Sfx string
Rand string
Varname string
KTyp string
@@ -1303,7 +1308,7 @@ func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type)
telem := t.Elem()
tkey := t.Key()
ts := tstruc{
- genTempVarPfx, x.varsfx(), varname, x.genTypeName(tkey),
+ genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey),
x.genTypeName(telem), int(telem.Size() + tkey.Size()),
}
@@ -1359,6 +1364,7 @@ func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintpt
if si.i != -1 {
t2 = t.Field(int(si.i))
} else {
+ // we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
// t2 = t.FieldByIndex(si.is)
t2typ := t
varname3 := varname
@@ -1370,8 +1376,7 @@ func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintpt
t2typ = t2.Type
varname3 = varname3 + "." + t2.Name
if t2typ.Kind() == reflect.Ptr {
- x.line("if " + varname3 + " == nil {" +
- varname3 + " = new(" + x.genTypeName(t2typ.Elem()) + ") }")
+ x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem()))
}
}
}
@@ -1380,11 +1385,10 @@ func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintpt
x.line("default:")
// pass the slice here, so that the string will not escape, and maybe save allocation
x.line("z.DecStructFieldNotFound(-1, " + kName + ")")
- // x.line("z.DecStructFieldNotFoundB(" + kName + "Slc)")
x.line("} // end switch " + kName)
}
-func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style uint8) {
+func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) {
tpfx := genTempVarPfx
i := x.varsfx()
kName := tpfx + "s" + i
@@ -1406,14 +1410,11 @@ func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t ref
x.line("var " + kName + "Slc = z.DecScratchBuffer() // default slice to decode into")
- // x.line("var " + kName + " string // default string to decode into")
- // x.line("_ = " + kName)
x.line("_ = " + kName + "Slc")
- // x.linef("var %sb%s bool", tpfx, i) // break
switch style {
- case 1:
+ case genStructMapStyleLenPrefix:
x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i)
- case 2:
+ case genStructMapStyleCheckBreak:
x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i)
default: // 0, otherwise.
x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
@@ -1421,11 +1422,9 @@ func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t ref
x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname)
x.line("} else { if r.CheckBreak() { break }; }")
}
- // x.line(kName + " = z.ReadStringAsBytes(" + kName + ")")
- // x.line(kName + " = z.ReadString()")
+ x.linef("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
x.line(kName + "Slc = r.DecodeBytes(" + kName + "Slc, true, true)")
// let string be scoped to this loop alone, so it doesn't escape.
- // x.line(kName + " := " + x.cpfx + "GenBytesToStringRO(" + kName + "Slc)")
if x.unsafe {
x.line(kName + "SlcHdr := codecSelferUnsafeString" + x.xs + "{uintptr(unsafe.Pointer(&" +
kName + "Slc[0])), len(" + kName + "Slc)}")
@@ -1433,16 +1432,11 @@ func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t ref
} else {
x.line(kName + " := string(" + kName + "Slc)")
}
+ x.linef("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
x.decStructMapSwitch(kName, varname, rtid, t)
x.line("} // end for " + tpfx + "j" + i)
- switch style {
- case 1:
- case 2:
- x.line("r.ReadEnd()")
- default:
- x.linef("if !%shl%s { r.ReadEnd() }", tpfx, i)
- }
+ x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
}
func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) {
@@ -1451,25 +1445,37 @@ func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid
ti := x.ti.get(rtid, t)
tisfi := ti.sfip // always use sequence from file. decStruct expects same thing.
x.linef("var %sj%s int", tpfx, i)
- x.linef("var %sb%s bool", tpfx, i) // break
- // x.linef("var %sl%s := r.ReadArrayStart()", tpfx, i)
+ x.linef("var %sb%s bool", tpfx, i) // break
x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
for _, si := range tisfi {
var t2 reflect.StructField
if si.i != -1 {
t2 = t.Field(int(si.i))
} else {
- t2 = t.FieldByIndex(si.is)
+ // we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
+ // t2 = t.FieldByIndex(si.is)
+ t2typ := t
+ varname3 := varname
+ for _, ix := range si.is {
+ for t2typ.Kind() == reflect.Ptr {
+ t2typ = t2typ.Elem()
+ }
+ t2 = t2typ.Field(ix)
+ t2typ = t2.Type
+ varname3 = varname3 + "." + t2.Name
+ if t2typ.Kind() == reflect.Ptr {
+ x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem()))
+ }
+ }
}
x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }",
tpfx, i, tpfx, i, tpfx, i,
tpfx, i, lenvarname, tpfx, i)
- // x.line("if " + tpfx + "j" + i + "++; " + tpfx + "j" +
- // i + " <= " + tpfx + "l" + i + " {")
- x.linef("if %sb%s { r.ReadEnd(); %s }", tpfx, i, breakString)
+ x.linef("if %sb%s { z.DecSendContainerState(codecSelfer_containerArrayEnd%s); %s }",
+ tpfx, i, x.xs, breakString)
+ x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
x.decVar(varname+"."+t2.Name, t2.Type, true)
- // x.line("} // end if " + tpfx + "j" + i + " <= " + tpfx + "l" + i)
}
// read remaining values and throw away.
x.line("for {")
@@ -1477,19 +1483,20 @@ func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid
tpfx, i, tpfx, i, tpfx, i,
tpfx, i, lenvarname, tpfx, i)
x.linef("if %sb%s { break }", tpfx, i)
+ x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i)
x.line("}")
- x.line("r.ReadEnd()")
+ x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
}
func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) {
// if container is map
- // x.line("if z.DecContainerIsMap() { ")
i := x.varsfx()
- x.line("if r.IsContainerType(codecSelferValueTypeMap" + x.xs + ") {")
+ x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i)
+ x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs)
x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()")
x.linef("if %sl%s == 0 {", genTempVarPfx, i)
- x.line("r.ReadEnd()")
+ x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
if genUseOneFunctionForDecStructMap {
x.line("} else { ")
x.linef("x.codecDecodeSelfFromMap(%sl%s, d)", genTempVarPfx, i)
@@ -1502,18 +1509,16 @@ func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) {
x.line("}")
// else if container is array
- // x.line("} else if z.DecContainerIsArray() { ")
- x.line("} else if r.IsContainerType(codecSelferValueTypeArray" + x.xs + ") {")
+ x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs)
x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()")
x.linef("if %sl%s == 0 {", genTempVarPfx, i)
- x.line("r.ReadEnd()")
+ x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
x.line("} else { ")
x.linef("x.codecDecodeSelfFromArray(%sl%s, d)", genTempVarPfx, i)
x.line("}")
// else panic
x.line("} else { ")
x.line("panic(codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + ")")
- // x.line("panic(`only encoded map or array can be decoded into a struct`)")
x.line("} ")
}
@@ -1849,10 +1854,6 @@ func genInternalInit() {
"float64": 8,
"bool": 1,
}
- // mapvaltypes2 := make(map[string]bool)
- // for _, s := range mapvaltypes {
- // mapvaltypes2[s] = true
- // }
var gt genInternal
// For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/helper.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/helper.go
index 96b5a1f22..560014ae3 100644
--- a/Godeps/_workspace/src/github.com/ugorji/go/codec/helper.go
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/helper.go
@@ -112,8 +112,6 @@ import (
"strings"
"sync"
"time"
- "unicode"
- "unicode/utf8"
)
const (
@@ -194,14 +192,6 @@ const (
type seqType uint8
-// mirror json.Marshaler and json.Unmarshaler here, so we don't import the encoding/json package
-type jsonMarshaler interface {
- MarshalJSON() ([]byte, error)
-}
-type jsonUnmarshaler interface {
- UnmarshalJSON([]byte) error
-}
-
const (
_ seqType = iota
seqTypeArray
@@ -209,13 +199,43 @@ const (
seqTypeChan
)
+// note that containerMapStart and containerArrayStart are not sent.
+// This is because the ReadXXXStart and EncodeXXXStart already do these.
+type containerState uint8
+
+const (
+ _ containerState = iota
+
+ containerMapStart // slot left open, since Driver method already covers it
+ containerMapKey
+ containerMapValue
+ containerMapEnd
+ containerArrayStart // slot left open, since Driver methods already cover it
+ containerArrayElem
+ containerArrayEnd
+)
+
+type containerStateRecv interface {
+ sendContainerState(containerState)
+}
+
+// mirror json.Marshaler and json.Unmarshaler here,
+// so we don't import the encoding/json package
+type jsonMarshaler interface {
+ MarshalJSON() ([]byte, error)
+}
+type jsonUnmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
var (
bigen = binary.BigEndian
structInfoFieldName = "_struct"
- // mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil))
- intfSliceTyp = reflect.TypeOf([]interface{}(nil))
- intfTyp = intfSliceTyp.Elem()
+ mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil))
+ mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
+ intfSliceTyp = reflect.TypeOf([]interface{}(nil))
+ intfTyp = intfSliceTyp.Elem()
stringTyp = reflect.TypeOf("")
timeTyp = reflect.TypeOf(time.Time{})
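
The containerState values and the containerStateRecv interface introduced in this hunk let a format driver hear about element boundaries (map key, map value, end of map/array), which text formats such as JSON need in order to place separators. A self-contained sketch of how a receiver might use those callbacks — the type below is hypothetical and redefines the constants locally so the snippet compiles on its own:

package main

import (
	"bytes"
	"fmt"
)

type containerState uint8

const (
	containerMapKey containerState = iota + 1
	containerMapValue
	containerMapEnd
	containerArrayElem
	containerArrayEnd
)

// jsonish is an illustrative text writer that turns container-state
// callbacks into the commas, colons and closing braces of a JSON-like form.
type jsonish struct {
	buf   bytes.Buffer
	first bool
}

func (j *jsonish) sendContainerState(c containerState) {
	switch c {
	case containerMapKey, containerArrayElem:
		if j.first {
			j.first = false
		} else {
			j.buf.WriteByte(',')
		}
	case containerMapValue:
		j.buf.WriteByte(':')
	case containerMapEnd:
		j.buf.WriteByte('}')
	case containerArrayEnd:
		j.buf.WriteByte(']')
	}
}

func main() {
	j := &jsonish{first: true}
	j.buf.WriteByte('{')
	for _, kv := range [][2]string{{`"a"`, "1"}, {`"b"`, "2"}} {
		j.sendContainerState(containerMapKey)
		j.buf.WriteString(kv[0])
		j.sendContainerState(containerMapValue)
		j.buf.WriteString(kv[1])
	}
	j.sendContainerState(containerMapEnd)
	fmt.Println(j.buf.String()) // {"a":1,"b":2}
}
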
@@ -241,6 +261,9 @@ var (
timeTypId = reflect.ValueOf(timeTyp).Pointer()
stringTypId = reflect.ValueOf(stringTyp).Pointer()
+ mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer()
+ mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer()
+ intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer()
// mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer()
intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits())
@@ -283,7 +306,7 @@ type MapBySlice interface {
type BasicHandle struct {
// TypeInfos is used to get the type info for any type.
//
- // If not configure, the default TypeInfos is used, which uses struct tag keys: codec, json
+ // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
TypeInfos *TypeInfos
extHandle
@@ -332,6 +355,8 @@ type RawExt struct {
// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
type BytesExt interface {
// WriteExt converts a value to a []byte.
+ //
+ // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array.
WriteExt(v interface{}) []byte
// ReadExt updates a value from a []byte.
@@ -344,6 +369,8 @@ type BytesExt interface {
// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types.
type InterfaceExt interface {
// ConvertExt converts a value into a simpler interface for easy encoding e.g. convert time.Time to int64.
+ //
+ // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array.
ConvertExt(v interface{}) interface{}
// UpdateExt updates a value from a simpler interface for easy decoding e.g. convert int64 to time.Time.
@@ -363,7 +390,6 @@ type addExtWrapper struct {
}
func (x addExtWrapper) WriteExt(v interface{}) []byte {
- // fmt.Printf(">>>>>>>>>> WriteExt: %T, %v\n", v, v)
bs, err := x.encFn(reflect.ValueOf(v))
if err != nil {
panic(err)
@@ -372,7 +398,6 @@ func (x addExtWrapper) WriteExt(v interface{}) []byte {
}
func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
- // fmt.Printf(">>>>>>>>>> ReadExt: %T, %v\n", v, v)
if err := x.decFn(reflect.ValueOf(v), bs); err != nil {
panic(err)
}
@@ -474,7 +499,7 @@ type extTypeTagFn struct {
ext Ext
}
-type extHandle []*extTypeTagFn
+type extHandle []extTypeTagFn
// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead.
//
@@ -513,12 +538,17 @@ func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
}
}
- *o = append(*o, &extTypeTagFn{rtid, rt, tag, ext})
+ if *o == nil {
+ *o = make([]extTypeTagFn, 0, 4)
+ }
+ *o = append(*o, extTypeTagFn{rtid, rt, tag, ext})
return
}
func (o extHandle) getExt(rtid uintptr) *extTypeTagFn {
- for _, v := range o {
+ var v *extTypeTagFn
+ for i := range o {
+ v = &o[i]
if v.rtid == rtid {
return v
}
@@ -527,7 +557,9 @@ func (o extHandle) getExt(rtid uintptr) *extTypeTagFn {
}
func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn {
- for _, v := range o {
+ var v *extTypeTagFn
+ for i := range o {
+ v = &o[i]
if v.tag == tag {
return v
}
@@ -650,6 +682,8 @@ type typeInfo struct {
rt reflect.Type
rtid uintptr
+ numMeth uint16 // number of methods
+
// baseId gives pointer to the base reflect.Type, after dereferencing
// the pointers. E.g. base type of ***time.Time is time.Time.
base reflect.Type
@@ -746,14 +780,10 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
return
}
- x.mu.Lock()
- defer x.mu.Unlock()
- if pti, ok = x.infos[rtid]; ok {
- return
- }
-
+ // do not hold lock while computing this.
+ // it may lead to duplication, but that's ok.
ti := typeInfo{rt: rt, rtid: rtid}
- pti = &ti
+ ti.numMeth = uint16(rt.NumMethod())
var indir int8
if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok {
@@ -813,7 +843,13 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
copy(ti.sfi, sfip)
}
// sfi = sfip
- x.infos[rtid] = pti
+
+ x.mu.Lock()
+ if pti, ok = x.infos[rtid]; !ok {
+ pti = &ti
+ x.infos[rtid] = pti
+ }
+ x.mu.Unlock()
return
}
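
The typeInfo lookup above now builds the descriptor outside the mutex and only re-checks and publishes under the lock, accepting an occasional duplicate computation instead of holding the lock during reflection work. A generic sketch of that pattern (the types and the expensive constructor are illustrative, not the package's own):

package main

import (
	"fmt"
	"reflect"
	"sync"
)

type typeInfo struct{ name string }

// typeInfoFor stands in for an expensive, reflection-heavy constructor.
func typeInfoFor(rt reflect.Type) *typeInfo { return &typeInfo{name: rt.String()} }

type cache struct {
	mu    sync.Mutex
	infos map[reflect.Type]*typeInfo
}

func (c *cache) get(rt reflect.Type) *typeInfo {
	c.mu.Lock()
	ti, ok := c.infos[rt]
	c.mu.Unlock()
	if ok {
		return ti
	}
	computed := typeInfoFor(rt) // may be duplicated across goroutines; that's ok
	c.mu.Lock()
	if ti, ok = c.infos[rt]; !ok {
		ti = computed
		c.infos[rt] = ti
	}
	c.mu.Unlock()
	return ti
}

func main() {
	c := &cache{infos: map[reflect.Type]*typeInfo{}}
	fmt.Println(c.get(reflect.TypeOf(0)).name, c.get(reflect.TypeOf("")).name)
}
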
@@ -822,45 +858,49 @@ func (x *TypeInfos) rget(rt reflect.Type, indexstack []int, fnameToHastag map[st
) {
for j := 0; j < rt.NumField(); j++ {
f := rt.Field(j)
- // func types are skipped.
- if tk := f.Type.Kind(); tk == reflect.Func {
+ fkind := f.Type.Kind()
+ // skip if a func type, or is unexported, or structTag value == "-"
+ if fkind == reflect.Func {
+ continue
+ }
+ // if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) {
+ if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded
continue
}
stag := x.structTag(f.Tag)
if stag == "-" {
continue
}
- if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) {
- continue
- }
var si *structFieldInfo
- // if anonymous and there is no struct tag (or it's blank)
- // and its a struct (or pointer to struct), inline it.
- var doInline bool
- if f.Anonymous && f.Type.Kind() != reflect.Interface {
- doInline = stag == ""
+ // if anonymous and no struct tag (or it's blank), and a struct (or pointer to struct), inline it.
+ if f.Anonymous && fkind != reflect.Interface {
+ doInline := stag == ""
if !doInline {
si = parseStructFieldInfo("", stag)
doInline = si.encName == ""
// doInline = si.isZero()
- // fmt.Printf(">>>> doInline for si.isZero: %s: %v\n", f.Name, doInline)
+ }
+ if doInline {
+ ft := f.Type
+ for ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+ if ft.Kind() == reflect.Struct {
+ indexstack2 := make([]int, len(indexstack)+1, len(indexstack)+4)
+ copy(indexstack2, indexstack)
+ indexstack2[len(indexstack)] = j
+ // indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
+ x.rget(ft, indexstack2, fnameToHastag, sfi, siInfo)
+ continue
+ }
}
}
- if doInline {
- ft := f.Type
- for ft.Kind() == reflect.Ptr {
- ft = ft.Elem()
- }
- if ft.Kind() == reflect.Struct {
- indexstack2 := make([]int, len(indexstack)+1, len(indexstack)+4)
- copy(indexstack2, indexstack)
- indexstack2[len(indexstack)] = j
- // indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
- x.rget(ft, indexstack2, fnameToHastag, sfi, siInfo)
- continue
- }
+ // after the anonymous dance: if an unexported field, skip
+ if f.PkgPath != "" { // unexported
+ continue
}
+
// do not let fields with the same name in embedded structs override a field at a higher level.
// this must be done after the anonymous check, so that anonymous fields
// still include their child fields.
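A minimal standalone sketch of the locking strategy TypeInfos.get adopts above (package, type and field names here are hypothetical, not part of the vendored code): read under the mutex, compute the typeInfo-like value without holding it, tolerate duplicate computation across goroutines, and let the first stored entry win.

package infocache

import "sync"

// infoLike stands in for codec's typeInfo; purely illustrative.
type infoLike struct{ name string }

type infoCache struct {
	mu sync.Mutex
	m  map[uintptr]*infoLike
}

// get mirrors TypeInfos.get: check under the lock, compute outside it
// (possibly duplicating work), then re-check and store under the lock.
func (c *infoCache) get(key uintptr, compute func() infoLike) *infoLike {
	c.mu.Lock()
	v, ok := c.m[key]
	c.mu.Unlock()
	if ok {
		return v
	}
	ti := compute() // done without the lock; may race with other goroutines
	c.mu.Lock()
	if v, ok = c.m[key]; !ok {
		if c.m == nil {
			c.m = make(map[uintptr]*infoLike)
		}
		v = &ti
		c.m[key] = v
	}
	c.mu.Unlock()
	return v
}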
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_test.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_test.go
new file mode 100644
index 000000000..e1dea52f4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_test.go
@@ -0,0 +1,242 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// All non-std package dependencies related to testing live in this file,
+// so porting to a different environment is easy (just update functions).
+//
+// This file sets up the variables used, including testInitFns.
+// Each file should add initialization that should be performed
+// after flags are parsed.
+//
+// init is a multi-step process:
+// - setup vars (handled by init functions in each file)
+// - parse flags
+// - setup derived vars (handled by pre-init registered functions - registered in init function)
+// - post init (handled by post-init registered functions - registered in init function)
+// This way, no one has to carefully control the initialization
+// using file names, etc.
+//
+// Tests which require external dependencies need the -tags=x parameter.
+// They should be run as:
+// go test -tags=x -run=.
+// Benchmarks should also take this parameter, to include the sereal, xdr, etc. benchmarks.
+// To run against codecgen, etc, make sure you pass extra parameters.
+// Example usage:
+// go test "-tags=x codecgen unsafe" -bench=.
+//
+// To fully test everything:
+// go test -tags=x -benchtime=100ms -tv -bg -bi -brw -bu -v -run=. -bench=.
+
+// Handling flags
+// codec_test.go will define a set of global flags for testing, including:
+// - Use Reset
+// - Use IO reader/writer (vs direct bytes)
+// - Set Canonical
+// - Set InternStrings
+// - Use Symbols
+//
+// This way, we can test them all by running the same set of tests with a different
+// set of flags.
+//
+// Following this, all the benchmarks will utilize the flags set by codec_test.go
+// and will not redefine these "global" flags.
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "reflect"
+ "sync"
+ "testing"
+)
+
+type testHED struct {
+ H Handle
+ E *Encoder
+ D *Decoder
+}
+
+var (
+ testNoopH = NoopHandle(8)
+ testMsgpackH = &MsgpackHandle{}
+ testBincH = &BincHandle{}
+ testSimpleH = &SimpleHandle{}
+ testCborH = &CborHandle{}
+ testJsonH = &JsonHandle{}
+
+ testHandles []Handle
+ testPreInitFns []func()
+ testPostInitFns []func()
+
+ testOnce sync.Once
+
+ testHEDs []testHED
+)
+
+func init() {
+ testHEDs = make([]testHED, 0, 32)
+ testHandles = append(testHandles,
+ testNoopH, testMsgpackH, testBincH, testSimpleH,
+ testCborH, testJsonH)
+}
+
+func testHEDGet(h Handle) *testHED {
+ for i := range testHEDs {
+ v := &testHEDs[i]
+ if v.H == h {
+ return v
+ }
+ }
+ testHEDs = append(testHEDs, testHED{h, NewEncoder(nil, h), NewDecoder(nil, h)})
+ return &testHEDs[len(testHEDs)-1]
+}
+
+func testInitAll() {
+ flag.Parse()
+ for _, f := range testPreInitFns {
+ f()
+ }
+ for _, f := range testPostInitFns {
+ f()
+ }
+}
+
+func testCodecEncode(ts interface{}, bsIn []byte,
+ fn func([]byte) *bytes.Buffer, h Handle) (bs []byte, err error) {
+ // bs = make([]byte, 0, approxSize)
+ var e *Encoder
+ var buf *bytes.Buffer
+ if testUseReset {
+ e = testHEDGet(h).E
+ } else {
+ e = NewEncoder(nil, h)
+ }
+ if testUseIoEncDec {
+ buf = fn(bsIn)
+ e.Reset(buf)
+ } else {
+ bs = bsIn
+ e.ResetBytes(&bs)
+ }
+ if testUseMust {
+ e.MustEncode(ts)
+ } else {
+ err = e.Encode(ts)
+ }
+ if testUseIoEncDec {
+ bs = buf.Bytes()
+ }
+ return
+}
+
+func testCodecDecode(bs []byte, ts interface{}, h Handle) (err error) {
+ var d *Decoder
+ var buf *bytes.Reader
+ if testUseReset {
+ d = testHEDGet(h).D
+ } else {
+ d = NewDecoder(nil, h)
+ }
+ if testUseIoEncDec {
+ buf = bytes.NewReader(bs)
+ d.Reset(buf)
+ } else {
+ d.ResetBytes(bs)
+ }
+ if testUseMust {
+ d.MustDecode(ts)
+ } else {
+ err = d.Decode(ts)
+ }
+ return
+}
+
+// ----- functions below are used only by tests (not benchmarks)
+
+const (
+ testLogToT = true
+ failNowOnFail = true
+)
+
+func checkErrT(t *testing.T, err error) {
+ if err != nil {
+ logT(t, err.Error())
+ failT(t)
+ }
+}
+
+func checkEqualT(t *testing.T, v1 interface{}, v2 interface{}, desc string) (err error) {
+ if err = deepEqual(v1, v2); err != nil {
+ logT(t, "Not Equal: %s: %v. v1: %v, v2: %v", desc, err, v1, v2)
+ failT(t)
+ }
+ return
+}
+
+func failT(t *testing.T) {
+ if failNowOnFail {
+ t.FailNow()
+ } else {
+ t.Fail()
+ }
+}
+
+// --- these functions are used by both benchmarks and tests
+
+func deepEqual(v1, v2 interface{}) (err error) {
+ if !reflect.DeepEqual(v1, v2) {
+ err = errors.New("Not Match")
+ }
+ return
+}
+
+func logT(x interface{}, format string, args ...interface{}) {
+ if t, ok := x.(*testing.T); ok && t != nil && testLogToT {
+ if testVerbose {
+ t.Logf(format, args...)
+ }
+ } else if b, ok := x.(*testing.B); ok && b != nil && testLogToT {
+ b.Logf(format, args...)
+ } else {
+ if len(format) == 0 || format[len(format)-1] != '\n' {
+ format = format + "\n"
+ }
+ fmt.Printf(format, args...)
+ }
+}
+
+func approxDataSize(rv reflect.Value) (sum int) {
+ switch rk := rv.Kind(); rk {
+ case reflect.Invalid:
+ case reflect.Ptr, reflect.Interface:
+ sum += int(rv.Type().Size())
+ sum += approxDataSize(rv.Elem())
+ case reflect.Slice:
+ sum += int(rv.Type().Size())
+ for j := 0; j < rv.Len(); j++ {
+ sum += approxDataSize(rv.Index(j))
+ }
+ case reflect.String:
+ sum += int(rv.Type().Size())
+ sum += rv.Len()
+ case reflect.Map:
+ sum += int(rv.Type().Size())
+ for _, mk := range rv.MapKeys() {
+ sum += approxDataSize(mk)
+ sum += approxDataSize(rv.MapIndex(mk))
+ }
+ case reflect.Struct:
+ //struct size already includes the full data size.
+ //sum += int(rv.Type().Size())
+ for j := 0; j < rv.NumField(); j++ {
+ sum += approxDataSize(rv.Field(j))
+ }
+ default:
+ //pure value types
+ sum += int(rv.Type().Size())
+ }
+ return
+}
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/json.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/json.go
index e70fc658d..a18a5f706 100644
--- a/Godeps/_workspace/src/github.com/ugorji/go/codec/json.go
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/json.go
@@ -30,8 +30,6 @@ package codec
// Top-level methods of json(Enc|Dec)Driver (which are implementations of (en|de)cDriver)
// MUST not call one-another.
-// They all must call sep(), and sep() MUST NOT be called more than once for each read.
-// If sep() is called and read is not done, you MUST call retryRead so separator wouldn't be read/written twice.
import (
"bytes"
@@ -39,7 +37,6 @@ import (
"fmt"
"reflect"
"strconv"
- "sync"
"unicode/utf16"
"unicode/utf8"
)
@@ -60,12 +57,13 @@ var jsonUint64Pow10 = [...]uint64{
}
const (
- // if jsonTrackSkipWhitespace, we track Whitespace and reduce the number of redundant checks.
- // Make it a const flag, so that it can be elided during linking if false.
+ // jsonUnreadAfterDecNum controls whether we unread after decoding a number.
//
- // It is not a clear win, because we continually set a flag behind a pointer
- // and then check it each time, as opposed to just 4 conditionals on a stack variable.
- jsonTrackSkipWhitespace = true
+ // Instead of unreading, we could just update d.tok (iff it's not a whitespace char).
+ // However, doing that means we may hold onto data which belongs to another stream.
+ // Thus, it is safest to unread the data when done.
+ // Keep this behind a constant flag for now.
+ jsonUnreadAfterDecNum = true
// If !jsonValidateSymbols, decoding will be faster, by skipping some checks:
// - If we see first character of null, false or true,
@@ -89,100 +87,6 @@ const (
// jsonNumDigitsUint64Largest = 19
)
-// A stack is used to keep track of where we are in the tree.
-// This is necessary, as the Handle must know whether to consume or emit a separator.
-
-type jsonStackElem struct {
- st byte // top of stack (either '}' or ']' or 0 for map, array or neither).
- sf bool // NOT first time in that container at top of stack
- so bool // stack ctr odd
- sr bool // value has NOT been read, so do not re-send separator
-}
-
-func (x *jsonStackElem) retryRead() {
- if x != nil && !x.sr {
- x.sr = true
- }
-}
-
-func (x *jsonStackElem) sep() (c byte) {
- // do not use switch, so it's a candidate for inlining.
- // to inline effectively, this must not be called from within another method.
- // v := j.st
- if x == nil || x.st == 0 {
- return
- }
- if x.sr {
- x.sr = false
- return
- }
- // v == '}' OR ']'
- if x.st == '}' {
- // put , or : depending on if even or odd respectively
- if x.so {
- c = ':'
- if !x.sf {
- x.sf = true
- }
- } else if x.sf {
- c = ','
- }
- } else {
- if x.sf {
- c = ','
- } else {
- x.sf = true
- }
- }
- x.so = !x.so
- // Note: Anything more, and this function doesn't inline. Keep it tight.
- // if x.sr {
- // x.sr = false
- // }
- return
-}
-
-const jsonStackPoolArrayLen = 32
-
-// pool used to prevent constant allocation of stacks.
-var jsonStackPool = sync.Pool{
- New: func() interface{} {
- return new([jsonStackPoolArrayLen]jsonStackElem)
- },
-}
-
-// jsonStack contains the stack for tracking the state of the container (branch).
-// The same data structure is used during encode and decode, as it is similar functionality.
-type jsonStack struct {
- s []jsonStackElem // stack for map or array end tag. map=}, array=]
- sc *jsonStackElem // pointer to current (top) element on the stack.
- sp *[jsonStackPoolArrayLen]jsonStackElem
-}
-
-func (j *jsonStack) start(c byte) {
- if j.s == nil {
- // j.s = make([]jsonStackElem, 0, 8)
- j.sp = jsonStackPool.Get().(*[jsonStackPoolArrayLen]jsonStackElem)
- j.s = j.sp[:0]
- }
- j.s = append(j.s, jsonStackElem{st: c})
- j.sc = &(j.s[len(j.s)-1])
-}
-
-func (j *jsonStack) end() {
- l := len(j.s) - 1 // length of new stack after pop'ing
- if l == 0 {
- jsonStackPool.Put(j.sp)
- j.s = nil
- j.sp = nil
- j.sc = nil
- } else {
- j.s = j.s[:l]
- j.sc = &(j.s[l-1])
- }
- //j.sc = &(j.s[len(j.s)-1])
-}
-
type jsonEncDriver struct {
e *Encoder
w encWriter
@@ -190,21 +94,35 @@ type jsonEncDriver struct {
b [64]byte // scratch
bs []byte // scratch
se setExtWrapper
- s jsonStack
+ c containerState
noBuiltInTypes
}
-func (e *jsonEncDriver) EncodeNil() {
- if c := e.s.sc.sep(); c != 0 {
- e.w.writen1(c)
+func (e *jsonEncDriver) sendContainerState(c containerState) {
+ // determine whether to output separators
+ if c == containerMapKey {
+ if e.c != containerMapStart {
+ e.w.writen1(',')
+ }
+ } else if c == containerMapValue {
+ e.w.writen1(':')
+ } else if c == containerMapEnd {
+ e.w.writen1('}')
+ } else if c == containerArrayElem {
+ if e.c != containerArrayStart {
+ e.w.writen1(',')
+ }
+ } else if c == containerArrayEnd {
+ e.w.writen1(']')
}
+ e.c = c
+}
+
+func (e *jsonEncDriver) EncodeNil() {
e.w.writeb(jsonLiterals[9:13]) // null
}
func (e *jsonEncDriver) EncodeBool(b bool) {
- if c := e.s.sc.sep(); c != 0 {
- e.w.writen1(c)
- }
if b {
e.w.writeb(jsonLiterals[0:4]) // true
} else {
@@ -213,94 +131,56 @@ func (e *jsonEncDriver) EncodeBool(b bool) {
}
func (e *jsonEncDriver) EncodeFloat32(f float32) {
- if c := e.s.sc.sep(); c != 0 {
- e.w.writen1(c)
- }
e.w.writeb(strconv.AppendFloat(e.b[:0], float64(f), 'E', -1, 32))
}
func (e *jsonEncDriver) EncodeFloat64(f float64) {
- if c := e.s.sc.sep(); c != 0 {
- e.w.writen1(c)
- }
// e.w.writestr(strconv.FormatFloat(f, 'E', -1, 64))
e.w.writeb(strconv.AppendFloat(e.b[:0], f, 'E', -1, 64))
}
func (e *jsonEncDriver) EncodeInt(v int64) {
- if c := e.s.sc.sep(); c != 0 {
- e.w.writen1(c)
- }
e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
}
func (e *jsonEncDriver) EncodeUint(v uint64) {
- if c := e.s.sc.sep(); c != 0 {
- e.w.writen1(c)
- }
e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
}
func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
- if c := e.s.sc.sep(); c != 0 {
- e.w.writen1(c)
- }
if v := ext.ConvertExt(rv); v == nil {
e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil()
} else {
- e.s.sc.retryRead()
en.encode(v)
}
}
func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
- if c := e.s.sc.sep(); c != 0 {
- e.w.writen1(c)
- }
// only encodes re.Value (never re.Data)
if re.Value == nil {
e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil()
} else {
- e.s.sc.retryRead()
en.encode(re.Value)
}
}
func (e *jsonEncDriver) EncodeArrayStart(length int) {
- if c := e.s.sc.sep(); c != 0 {
- e.w.writen1(c)
- }
- e.s.start(']')
e.w.writen1('[')
+ e.c = containerArrayStart
}
func (e *jsonEncDriver) EncodeMapStart(length int) {
- if c := e.s.sc.sep(); c != 0 {
- e.w.writen1(c)
- }
- e.s.start('}')
e.w.writen1('{')
-}
-
-func (e *jsonEncDriver) EncodeEnd() {
- b := e.s.sc.st
- e.s.end()
- e.w.writen1(b)
+ e.c = containerMapStart
}
func (e *jsonEncDriver) EncodeString(c charEncoding, v string) {
// e.w.writestr(strconv.Quote(v))
- if c := e.s.sc.sep(); c != 0 {
- e.w.writen1(c)
- }
e.quoteStr(v)
}
func (e *jsonEncDriver) EncodeSymbol(v string) {
// e.EncodeString(c_UTF8, v)
- if c := e.s.sc.sep(); c != 0 {
- e.w.writen1(c)
- }
e.quoteStr(v)
}
@@ -310,14 +190,8 @@ func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
e.EncodeExt(v, 0, &e.se, e.e)
return
}
- if c := e.s.sc.sep(); c != 0 {
- e.w.writen1(c)
- }
if c == c_RAW {
slen := base64.StdEncoding.EncodedLen(len(v))
- if e.bs == nil {
- e.bs = e.b[:]
- }
if cap(e.bs) >= slen {
e.bs = e.bs[:slen]
} else {
@@ -334,9 +208,6 @@ func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
}
func (e *jsonEncDriver) EncodeAsis(v []byte) {
- if c := e.s.sc.sep(); c != 0 {
- e.w.writen1(c)
- }
e.w.writeb(v)
}
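The encode methods above now leave all structural bytes to sendContainerState: '{' and '}' for maps, '[' and ']' for arrays, ',' between elements and ':' between a key and its value. A small usage sketch, assuming the canonical import path github.com/ugorji/go/codec that this vendored copy mirrors:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var out []byte
	jh := new(codec.JsonHandle)
	// Encoding a map walks containerMapStart -> MapKey -> MapValue -> ... -> MapEnd,
	// and sendContainerState emits '{', ',', ':' and '}' at those transitions.
	if err := codec.NewEncoderBytes(&out, jh).Encode(map[string]int{"a": 1}); err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"a":1}
}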
@@ -491,185 +362,219 @@ func (x *jsonNum) floatVal() (f float64, parseUsingStrConv bool) {
}
type jsonDecDriver struct {
- d *Decoder
- h *JsonHandle
- r decReader // *bytesDecReader decReader
- ct valueType // container type. one of unset, array or map.
- bstr [8]byte // scratch used for string \UXXX parsing
- b [64]byte // scratch, used for parsing strings or numbers
- b2 [64]byte // scratch, used only for decodeBytes (after base64)
- bs []byte // scratch. Initialized from b. Used for parsing strings or numbers.
+ noBuiltInTypes
+ d *Decoder
+ h *JsonHandle
+ r decReader
- wsSkipped bool // whitespace skipped
+ c containerState
+ // tok is used to store the token read right after skipWhiteSpace.
+ tok uint8
+
+ bstr [8]byte // scratch used for string \UXXX parsing
+ b [64]byte // scratch, used for parsing strings or numbers
+ b2 [64]byte // scratch, used only for decodeBytes (after base64)
+ bs []byte // scratch. Initialized from b. Used for parsing strings or numbers.
se setExtWrapper
- s jsonStack
-
n jsonNum
- noBuiltInTypes
}
-// This will skip whitespace characters and return the next byte to read.
-// The next byte determines what the value will be one of.
-func (d *jsonDecDriver) skipWhitespace(unread bool) (b byte) {
- // as initReadNext is not called all the time, we set ct to unSet whenever
- // we skipwhitespace, as this is the signal that something new is about to be read.
- d.ct = valueTypeUnset
- b = d.r.readn1()
- if !jsonTrackSkipWhitespace || !d.wsSkipped {
- for ; b == ' ' || b == '\t' || b == '\r' || b == '\n'; b = d.r.readn1() {
- }
- if jsonTrackSkipWhitespace {
- d.wsSkipped = true
- }
- }
- if unread {
+func jsonIsWS(b byte) bool {
+ return b == ' ' || b == '\t' || b == '\r' || b == '\n'
+}
+
+// // This will skip whitespace characters and return the next byte to read.
+// // The next byte determines what the value will be one of.
+// func (d *jsonDecDriver) skipWhitespace() {
+// // fast-path: do not enter loop. Just check first (in case no whitespace).
+// b := d.r.readn1()
+// if jsonIsWS(b) {
+// r := d.r
+// for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+// }
+// }
+// d.tok = b
+// }
+
+func (d *jsonDecDriver) uncacheRead() {
+ if d.tok != 0 {
d.r.unreadn1()
+ d.tok = 0
}
- return b
+}
+
+func (d *jsonDecDriver) sendContainerState(c containerState) {
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ var xc uint8 // char expected
+ if c == containerMapKey {
+ if d.c != containerMapStart {
+ xc = ','
+ }
+ } else if c == containerMapValue {
+ xc = ':'
+ } else if c == containerMapEnd {
+ xc = '}'
+ } else if c == containerArrayElem {
+ if d.c != containerArrayStart {
+ xc = ','
+ }
+ } else if c == containerArrayEnd {
+ xc = ']'
+ }
+ if xc != 0 {
+ if d.tok != xc {
+ d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok)
+ }
+ d.tok = 0
+ }
+ d.c = c
}
func (d *jsonDecDriver) CheckBreak() bool {
- b := d.skipWhitespace(true)
- return b == '}' || b == ']'
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ if d.tok == '}' || d.tok == ']' {
+ // d.tok = 0 // only checking, not consuming
+ return true
+ }
+ return false
}
func (d *jsonDecDriver) readStrIdx(fromIdx, toIdx uint8) {
bs := d.r.readx(int(toIdx - fromIdx))
+ d.tok = 0
if jsonValidateSymbols {
if !bytes.Equal(bs, jsonLiterals[fromIdx:toIdx]) {
d.d.errorf("json: expecting %s: got %s", jsonLiterals[fromIdx:toIdx], bs)
return
}
}
- if jsonTrackSkipWhitespace {
- d.wsSkipped = false
- }
}
func (d *jsonDecDriver) TryDecodeAsNil() bool {
- // we mustn't consume the state here, and end up trying to read separator twice.
- // Instead, we keep track of the state and restore it if we couldn't decode as nil.
-
- if c := d.s.sc.sep(); c != 0 {
- d.expectChar(c)
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
}
- b := d.skipWhitespace(false)
- if b == 'n' {
+ if d.tok == 'n' {
d.readStrIdx(10, 13) // ull
- d.ct = valueTypeNil
return true
}
- d.r.unreadn1()
- d.s.sc.retryRead()
return false
}
func (d *jsonDecDriver) DecodeBool() bool {
- if c := d.s.sc.sep(); c != 0 {
- d.expectChar(c)
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
}
- b := d.skipWhitespace(false)
- if b == 'f' {
+ if d.tok == 'f' {
d.readStrIdx(5, 9) // alse
return false
}
- if b == 't' {
+ if d.tok == 't' {
d.readStrIdx(1, 4) // rue
return true
}
- d.d.errorf("json: decode bool: got first char %c", b)
+ d.d.errorf("json: decode bool: got first char %c", d.tok)
return false // "unreachable"
}
func (d *jsonDecDriver) ReadMapStart() int {
- if c := d.s.sc.sep(); c != 0 {
- d.expectChar(c)
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
}
- d.s.start('}')
- d.expectChar('{')
- d.ct = valueTypeMap
+ if d.tok != '{' {
+ d.d.errorf("json: expect char '%c' but got char '%c'", '{', d.tok)
+ }
+ d.tok = 0
+ d.c = containerMapStart
return -1
}
func (d *jsonDecDriver) ReadArrayStart() int {
- if c := d.s.sc.sep(); c != 0 {
- d.expectChar(c)
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
}
- d.s.start(']')
- d.expectChar('[')
- d.ct = valueTypeArray
+ if d.tok != '[' {
+ d.d.errorf("json: expect char '%c' but got char '%c'", '[', d.tok)
+ }
+ d.tok = 0
+ d.c = containerArrayStart
return -1
}
-func (d *jsonDecDriver) ReadEnd() {
- b := d.s.sc.st
- d.s.end()
- d.expectChar(b)
-}
-
-func (d *jsonDecDriver) expectChar(c uint8) {
- b := d.skipWhitespace(false)
- if b != c {
- d.d.errorf("json: expect char '%c' but got char '%c'", c, b)
- return
- }
- if jsonTrackSkipWhitespace {
- d.wsSkipped = false
- }
-}
-
-// func (d *jsonDecDriver) maybeChar(c uint8) {
-// b := d.skipWhitespace(false)
-// if b != c {
-// d.r.unreadn1()
-// return
-// }
-// if jsonTrackSkipWhitespace {
-// d.wsSkipped = false
-// }
-// }
-
-func (d *jsonDecDriver) IsContainerType(vt valueType) bool {
+func (d *jsonDecDriver) ContainerType() (vt valueType) {
// check container type by checking the first char
- if d.ct == valueTypeUnset {
- b := d.skipWhitespace(true)
- if b == '{' {
- d.ct = valueTypeMap
- } else if b == '[' {
- d.ct = valueTypeArray
- } else if b == 'n' {
- d.ct = valueTypeNil
- } else if b == '"' {
- d.ct = valueTypeString
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
}
+ d.tok = b
}
- if vt == valueTypeNil || vt == valueTypeBytes || vt == valueTypeString ||
- vt == valueTypeArray || vt == valueTypeMap {
- return d.ct == vt
+ if b := d.tok; b == '{' {
+ return valueTypeMap
+ } else if b == '[' {
+ return valueTypeArray
+ } else if b == 'n' {
+ return valueTypeNil
+ } else if b == '"' {
+ return valueTypeString
}
- // ugorji: made switch into conditionals, so that IsContainerType can be inlined.
- // switch vt {
- // case valueTypeNil, valueTypeBytes, valueTypeString, valueTypeArray, valueTypeMap:
- // return d.ct == vt
- // }
- d.d.errorf("isContainerType: unsupported parameter: %v", vt)
- return false // "unreachable"
+ return valueTypeUnset
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ // return false // "unreachable"
}
func (d *jsonDecDriver) decNum(storeBytes bool) {
// If it has a . or an e|E, decode as a float; else decode as an int.
- b := d.skipWhitespace(false)
+ if d.tok == 0 {
+ var b byte
+ r := d.r
+ for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+ }
+ d.tok = b
+ }
+ b := d.tok
if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) {
d.d.errorf("json: decNum: got first char '%c'", b)
return
}
+ d.tok = 0
const cutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base)
const jsonNumUintMaxVal = 1<= mpPosFixNumMin && bd <= mpPosFixNumMax:
// positive fixnum (always signed)
- vt = valueTypeInt
- v = int64(int8(bd))
+ n.v = valueTypeInt
+ n.i = int64(int8(bd))
case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
// negative fixnum
- vt = valueTypeInt
- v = int64(int8(bd))
+ n.v = valueTypeInt
+ n.i = int64(int8(bd))
case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
if d.h.RawToString {
- var rvm string
- vt = valueTypeString
- v = &rvm
+ n.v = valueTypeString
+ n.s = d.DecodeString()
} else {
- var rvm = zeroByteSlice
- vt = valueTypeBytes
- v = &rvm
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
}
- decodeFurther = true
case bd == mpBin8, bd == mpBin16, bd == mpBin32:
- var rvm = zeroByteSlice
- vt = valueTypeBytes
- v = &rvm
- decodeFurther = true
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false, false)
case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
- vt = valueTypeArray
+ n.v = valueTypeArray
decodeFurther = true
case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
- vt = valueTypeMap
+ n.v = valueTypeMap
decodeFurther = true
case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
+ n.v = valueTypeExt
clen := d.readExtLen()
- var re RawExt
- re.Tag = uint64(d.r.readn1())
- re.Data = d.r.readx(clen)
- v = &re
- vt = valueTypeExt
+ n.u = uint64(d.r.readn1())
+ n.l = d.r.readx(clen)
default:
d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
- return
}
}
if !decodeFurther {
d.bdRead = false
}
- if vt == valueTypeUint && d.h.SignedInteger {
- d.bdType = valueTypeInt
- v = int64(v.(uint64))
+ if n.v == valueTypeUint && d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = int64(n.u) // convert the decoded uint value, not the valueType tag
}
return
}
@@ -566,28 +559,27 @@ func (d *msgpackDecDriver) DecodeString() (s string) {
func (d *msgpackDecDriver) readNextBd() {
d.bd = d.r.readn1()
d.bdRead = true
- d.bdType = valueTypeUnset
}
-func (d *msgpackDecDriver) IsContainerType(vt valueType) bool {
+func (d *msgpackDecDriver) ContainerType() (vt valueType) {
bd := d.bd
- switch vt {
- case valueTypeNil:
- return bd == mpNil
- case valueTypeBytes:
- return bd == mpBin8 || bd == mpBin16 || bd == mpBin32 ||
- (!d.h.RawToString &&
- (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)))
- case valueTypeString:
- return d.h.RawToString &&
- (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))
- case valueTypeArray:
- return bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax)
- case valueTypeMap:
- return bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax)
+ if bd == mpNil {
+ return valueTypeNil
+ } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 ||
+ (!d.h.RawToString &&
+ (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) {
+ return valueTypeBytes
+ } else if d.h.RawToString &&
+ (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) {
+ return valueTypeString
+ } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) {
+ return valueTypeArray
+ } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) {
+ return valueTypeMap
+ } else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
}
- d.d.errorf("isContainerType: unsupported parameter: %v", vt)
- return false // "unreachable"
+ return valueTypeUnset
}
func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) {
@@ -701,7 +693,6 @@ func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs
//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format.
type MsgpackHandle struct {
BasicHandle
- binaryEncodingType
// RawToString controls how raw bytes are decoded into a nil interface{}.
RawToString bool
@@ -717,6 +708,11 @@ type MsgpackHandle struct {
// type is provided (e.g. decoding into a nil interface{}), you get back
// a []byte or string based on the setting of RawToString.
WriteExt bool
+ binaryEncodingType
+}
+
+func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+ return h.SetExt(rt, tag, &setExtWrapper{b: ext})
}
func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver {
@@ -727,8 +723,12 @@ func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver {
return &msgpackDecDriver{d: d, r: d.r, h: h, br: d.bytes}
}
-func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
- return h.SetExt(rt, tag, &setExtWrapper{b: ext})
+func (e *msgpackEncDriver) reset() {
+ e.w = e.e.w
+}
+
+func (d *msgpackDecDriver) reset() {
+ d.r = d.d.r
}
//--------------------------------------------------
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/noop.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/noop.go
index ca02c6a7e..cfee3d084 100644
--- a/Godeps/_workspace/src/github.com/ugorji/go/codec/noop.go
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/noop.go
@@ -38,21 +38,26 @@ type noopHandle struct {
}
type noopDrv struct {
+ d *Decoder
+ e *Encoder
i int
S []string
B [][]byte
mks []bool // stack. if map (true), else if array (false)
mk bool // top of stack. what container are we on? map or array?
- ct valueType // last request for IsContainerType.
- cb bool // last response for IsContainerType.
+ ct valueType // last response from ContainerType.
+ cb int // counter for ContainerType
rand *rand.Rand
}
func (h *noopDrv) r(v int) int { return h.rand.Intn(v) }
func (h *noopDrv) m(v int) int { h.i++; return h.i % v }
-func (h *noopDrv) newEncDriver(_ *Encoder) encDriver { return h }
-func (h *noopDrv) newDecDriver(_ *Decoder) decDriver { return h }
+func (h *noopDrv) newEncDriver(e *Encoder) encDriver { h.e = e; return h }
+func (h *noopDrv) newDecDriver(d *Decoder) decDriver { h.d = d; return h }
+
+func (h *noopDrv) reset() {}
+func (h *noopDrv) uncacheRead() {}
// --- encDriver
@@ -111,18 +116,48 @@ func (h *noopDrv) ReadEnd() { h.end() }
func (h *noopDrv) ReadMapStart() int { h.start(true); return h.m(10) }
func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) }
-func (h *noopDrv) IsContainerType(vt valueType) bool {
+func (h *noopDrv) ContainerType() (vt valueType) {
// return h.m(2) == 0
- // handle kStruct
- if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap {
- h.cb = !h.cb
- h.ct = vt
- return h.cb
- }
- // go in a loop and check it.
- h.ct = vt
- h.cb = h.m(7) == 0
- return h.cb
+ // handle kStruct, which will bomb if it calls this and doesn't get back a map or array.
+ // consequently, if the return value is not map or array, reset it to one of them based on h.m(7) % 2
+ // for kstruct: at least one out of every 2 times, return one of valueTypeMap or Array (else kstruct bombs)
+ // however, every 10th time it is called, we just return something else.
+ var vals = [...]valueType{valueTypeArray, valueTypeMap}
+ // ------------ TAKE ------------
+ // if h.cb%2 == 0 {
+ // if h.ct == valueTypeMap || h.ct == valueTypeArray {
+ // } else {
+ // h.ct = vals[h.m(2)]
+ // }
+ // } else if h.cb%5 == 0 {
+ // h.ct = valueType(h.m(8))
+ // } else {
+ // h.ct = vals[h.m(2)]
+ // }
+ // ------------ TAKE ------------
+ // if h.cb%16 == 0 {
+ // h.ct = valueType(h.cb % 8)
+ // } else {
+ // h.ct = vals[h.cb%2]
+ // }
+ h.ct = vals[h.cb%2]
+ h.cb++
+ return h.ct
+
+ // if h.ct == valueTypeNil || h.ct == valueTypeString || h.ct == valueTypeBytes {
+ // return h.ct
+ // }
+ // return valueTypeUnset
+ // TODO: may need to tweak this so it works.
+ // if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap {
+ // h.cb = !h.cb
+ // h.ct = vt
+ // return h.cb
+ // }
+ // // go in a loop and check it.
+ // h.ct = vt
+ // h.cb = h.m(7) == 0
+ // return h.cb
}
func (h *noopDrv) TryDecodeAsNil() bool {
if h.mk {
@@ -135,7 +170,7 @@ func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 {
return 0
}
-func (h *noopDrv) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
+func (h *noopDrv) DecodeNaked() {
// use h.r (random) not h.m() because h.m() could cause the same value to be given.
var sk int
if h.mk {
@@ -144,32 +179,35 @@ func (h *noopDrv) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool
} else {
sk = h.r(12)
}
+ n := &h.d.n
switch sk {
case 0:
- vt = valueTypeNil
+ n.v = valueTypeNil
case 1:
- vt, v = valueTypeBool, false
+ n.v, n.b = valueTypeBool, false
case 2:
- vt, v = valueTypeBool, true
+ n.v, n.b = valueTypeBool, true
case 3:
- vt, v = valueTypeInt, h.DecodeInt(64)
+ n.v, n.i = valueTypeInt, h.DecodeInt(64)
case 4:
- vt, v = valueTypeUint, h.DecodeUint(64)
+ n.v, n.u = valueTypeUint, h.DecodeUint(64)
case 5:
- vt, v = valueTypeFloat, h.DecodeFloat(true)
+ n.v, n.f = valueTypeFloat, h.DecodeFloat(true)
case 6:
- vt, v = valueTypeFloat, h.DecodeFloat(false)
+ n.v, n.f = valueTypeFloat, h.DecodeFloat(false)
case 7:
- vt, v = valueTypeString, h.DecodeString()
+ n.v, n.s = valueTypeString, h.DecodeString()
case 8:
- vt, v = valueTypeBytes, h.B[h.m(len(h.B))]
+ n.v, n.l = valueTypeBytes, h.B[h.m(len(h.B))]
case 9:
- vt, decodeFurther = valueTypeArray, true
+ n.v = valueTypeArray
case 10:
- vt, decodeFurther = valueTypeMap, true
+ n.v = valueTypeMap
default:
- vt, v = valueTypeExt, &RawExt{Tag: h.DecodeUint(64), Data: h.B[h.m(len(h.B))]}
+ n.v = valueTypeExt
+ n.u = h.DecodeUint(64)
+ n.l = h.B[h.m(len(h.B))]
}
- h.ct = vt
+ h.ct = n.v
return
}
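Both drivers above now fill d.n instead of returning a (v, vt, decodeFurther) triple. The real struct behind d.n lives in decode.go, outside this diff; from the fields used here (n.v, n.b, n.i, n.u, n.f, n.s, n.l) its shape is roughly the tagged union sketched below (names and constant ordering are inferred, illustrative only):

package nakedsketch

// valueType tags which member of the union is populated.
// The ordering of these constants is illustrative only.
type valueType uint8

const (
	valueTypeUnset valueType = iota
	valueTypeNil
	valueTypeBool
	valueTypeInt
	valueTypeUint
	valueTypeFloat
	valueTypeString
	valueTypeBytes
	valueTypeArray
	valueTypeMap
	valueTypeExt
)

// decNakedLike mirrors how the drivers use d.n: one slot per scalar kind,
// with u doubling as the ext tag and l as raw bytes or the ext payload.
type decNakedLike struct {
	v valueType
	b bool
	i int64
	u uint64
	f float64
	s string
	l []byte
}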
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.sh b/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.sh
index 9c8119f36..98f442487 100644
--- a/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.sh
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.sh
@@ -49,7 +49,8 @@ _build() {
# [ -e "safe${_gg}" ] && mv safe${_gg} safe${_gg}__${_zts}.bak
# [ -e "unsafe${_gg}" ] && mv unsafe${_gg} unsafe${_gg}__${_zts}.bak
else
- rm -f fast-path.generated.go gen.generated.go gen-helper.generated.go *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go
+ rm -f fast-path.generated.go gen.generated.go gen-helper.generated.go \
+ *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go
fi
cat > gen.generated.go <> gen.generated.go < fast-path.generated.go < gen-from-tmpl.codec.generated.go <>>>>>> TAGS: $ztags"
OPTIND=1
- while getopts "xurtcinsvg" flag
+ while getopts "xurtcinsvgzmef" flag
do
case "x$flag" in
- 'xt') printf ">>>>>>> REGULAR : "; go test "-tags=$ztags" "$zverbose" ; sleep 2 ;;
- 'xc') printf ">>>>>>> CANONICAL : "; go test "-tags=$ztags" "$zverbose" -tc; sleep 2 ;;
- 'xi') printf ">>>>>>> I/O : "; go test "-tags=$ztags" "$zverbose" -ti; sleep 2 ;;
- 'xn') printf ">>>>>>> NO_SYMBOLS : "; go test "-tags=$ztags" "$zverbose" -tn; sleep 2 ;;
- 'xs') printf ">>>>>>> TO_ARRAY : "; go test "-tags=$ztags" "$zverbose" -ts; sleep 2 ;;
+ 'xt') printf ">>>>>>> REGULAR : "; go test "-tags=$ztags" $zargs ; sleep 2 ;;
+ 'xc') printf ">>>>>>> CANONICAL : "; go test "-tags=$ztags" $zargs -tc; sleep 2 ;;
+ 'xi') printf ">>>>>>> I/O : "; go test "-tags=$ztags" $zargs -ti; sleep 2 ;;
+ 'xn') printf ">>>>>>> NO_SYMBOLS : "; go test "-tags=$ztags" $zargs -tn; sleep 2 ;;
+ 'xs') printf ">>>>>>> TO_ARRAY : "; go test "-tags=$ztags" $zargs -ts; sleep 2 ;;
+ 'xe') printf ">>>>>>> INTERN : "; go test "-tags=$ztags" $zargs -te; sleep 2 ;;
*) ;;
esac
done
@@ -46,11 +54,21 @@ _run() {
# echo ">>>>>>> RUNNING VARIATIONS OF TESTS"
if [[ "x$@" = "x" ]]; then
- # r, x, g, gu
- _run "-rtcins"
- _run "-xtcins"
- _run "-gtcins"
- _run "-gutcins"
+ # All: r, x, g, gu
+ _run "-rtcinsm" # regular
+ _run "-rtcinsmz" # regular with reset
+ _run "-rtcinsmf" # regular with no fastpath (notfastpath)
+ _run "-xtcinsm" # external
+ _run "-gxtcinsm" # codecgen: requires external
+ _run "-gxutcinsm" # codecgen + unsafe
+elif [[ "x$@" = "x-Z" ]]; then
+ # Regular
+ _run "-rtcinsm" # regular
+ _run "-rtcinsmz" # regular with reset
+elif [[ "x$@" = "x-F" ]]; then
+ # regular with notfastpath
+ _run "-rtcinsmf" # regular
+ _run "-rtcinsmzf" # regular with reset
else
_run "$@"
fi
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/time.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/time.go
index 733fc3fb7..fc4c63e1d 100644
--- a/Godeps/_workspace/src/github.com/ugorji/go/codec/time.go
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/time.go
@@ -4,6 +4,7 @@
package codec
import (
+ "fmt"
"time"
)
@@ -11,6 +12,34 @@ var (
timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
)
+type timeExt struct{}
+
+func (x timeExt) WriteExt(v interface{}) (bs []byte) {
+ switch v2 := v.(type) {
+ case time.Time:
+ bs = encodeTime(v2)
+ case *time.Time:
+ bs = encodeTime(*v2)
+ default:
+ panic(fmt.Errorf("unsupported format for time conversion: expecting time.Time; got %T", v2))
+ }
+ return
+}
+func (x timeExt) ReadExt(v interface{}, bs []byte) {
+ tt, err := decodeTime(bs)
+ if err != nil {
+ panic(err)
+ }
+ *(v.(*time.Time)) = tt
+}
+
+func (x timeExt) ConvertExt(v interface{}) interface{} {
+ return x.WriteExt(v)
+}
+func (x timeExt) UpdateExt(v interface{}, src interface{}) {
+ x.ReadExt(v, src.([]byte))
+}
+
// EncodeTime encodes a time.Time as a []byte, including
// information on the instant in time and UTC offset.
//
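timeExt satisfies the BytesExt shape (WriteExt/ReadExt) as well as the ConvertExt/UpdateExt pair, so it can be registered on a handle through the SetBytesExt method shown earlier in this diff. A hypothetical in-package sketch (the helper name and the extension tag 1 are illustrative only):

package codec

import (
	"reflect"
	"time"
)

// exampleTimeExt registers timeExt for time.Time on a MsgpackHandle and
// round-trips one value through encode/decode.
func exampleTimeExt() (time.Time, error) {
	var mh MsgpackHandle
	if err := mh.SetBytesExt(reflect.TypeOf(time.Time{}), 1, timeExt{}); err != nil {
		return time.Time{}, err
	}
	t0 := time.Date(2012, 2, 2, 2, 2, 2, 0, time.UTC)
	var buf []byte
	if err := NewEncoderBytes(&buf, &mh).Encode(t0); err != nil {
		return time.Time{}, err
	}
	var t1 time.Time
	err := NewDecoderBytes(buf, &mh).Decode(&t1)
	return t1, err
}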
diff --git a/Godeps/_workspace/src/github.com/ugorji/go/codec/values_test.go b/Godeps/_workspace/src/github.com/ugorji/go/codec/values_test.go
new file mode 100644
index 000000000..4ec28e131
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/ugorji/go/codec/values_test.go
@@ -0,0 +1,203 @@
+// // +build testing
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// This file contains values used by tests and benchmarks.
+// JSON/BSON do not like maps with keys that are not strings,
+// so we only use maps with string keys here.
+
+import (
+ "math"
+ "time"
+)
+
+var testStrucTime = time.Date(2012, 2, 2, 2, 2, 2, 2000, time.UTC).UTC()
+
+type AnonInTestStruc struct {
+ AS string
+ AI64 int64
+ AI16 int16
+ AUi64 uint64
+ ASslice []string
+ AI64slice []int64
+ AF64slice []float64
+ // AMI32U32 map[int32]uint32
+ // AMU32F64 map[uint32]float64 // json/bson do not like it
+ AMSU16 map[string]uint16
+}
+
+type AnonInTestStrucIntf struct {
+ Islice []interface{}
+ Ms map[string]interface{}
+ Nintf interface{} //don't set this, so we can test for nil
+ T time.Time
+}
+
+type TestStruc struct {
+ _struct struct{} `codec:",omitempty"` //set omitempty for every field
+
+ S string
+ I64 int64
+ I16 int16
+ Ui64 uint64
+ Ui8 uint8
+ B bool
+ By uint8 // byte: msgp doesn't like byte
+
+ Sslice []string
+ I64slice []int64
+ I16slice []int16
+ Ui64slice []uint64
+ Ui8slice []uint8
+ Bslice []bool
+ Byslice []byte
+
+ Iptrslice []*int64
+
+ // TODO: test these separately, specifically for reflection and codecgen.
+ // Unfortunately, ffjson doesn't support these. Its compilation even fails.
+ // Ui64array [4]uint64
+ // Ui64slicearray [][4]uint64
+
+ AnonInTestStruc
+
+ //M map[interface{}]interface{} `json:"-",bson:"-"`
+ Msi64 map[string]int64
+
+ // make this a ptr, so that it could be set or not.
+ // for comparison (e.g. with msgp), give it a struct tag (so it is not inlined),
+ // make this one omitempty (so it is included if nil).
+ *AnonInTestStrucIntf `codec:",omitempty"`
+
+ Nmap map[string]bool //don't set this, so we can test for nil
+ Nslice []byte //don't set this, so we can test for nil
+ Nint64 *int64 //don't set this, so we can test for nil
+ Mtsptr map[string]*TestStruc
+ Mts map[string]TestStruc
+ Its []*TestStruc
+ Nteststruc *TestStruc
+}
+
+// small struct for testing that codecgen works for unexported types
+type tLowerFirstLetter struct {
+ I int
+ u uint64
+ S string
+ b []byte
+}
+
+func newTestStruc(depth int, bench bool, useInterface, useStringKeyOnly bool) (ts *TestStruc) {
+ var i64a, i64b, i64c, i64d int64 = 64, 6464, 646464, 64646464
+
+ ts = &TestStruc{
+ S: "some string",
+ I64: math.MaxInt64 * 2 / 3, // 64,
+ I16: 1616,
+ Ui64: uint64(int64(math.MaxInt64 * 2 / 3)), // 64, //don't use MaxUint64, as bson can't write it
+ Ui8: 160,
+ B: true,
+ By: 5,
+
+ Sslice: []string{"one", "two", "three"},
+ I64slice: []int64{1111, 2222, 3333},
+ I16slice: []int16{44, 55, 66},
+ Ui64slice: []uint64{12121212, 34343434, 56565656},
+ Ui8slice: []uint8{210, 211, 212},
+ Bslice: []bool{true, false, true, false},
+ Byslice: []byte{13, 14, 15},
+
+ Msi64: map[string]int64{
+ "one": 1,
+ "two": 2,
+ },
+ AnonInTestStruc: AnonInTestStruc{
+ // There's more leeway in altering this.
+ AS: "A-String",
+ AI64: -64646464,
+ AI16: 1616,
+ AUi64: 64646464,
+ // (U+1D11E)G-clef character may be represented in json as "\uD834\uDD1E".
+ // single reverse solidus character may be represented in json as "\u005C".
+ // include these in ASslice below.
+ ASslice: []string{"Aone", "Atwo", "Athree",
+ "Afour.reverse_solidus.\u005c", "Afive.Gclef.\U0001d11E"},
+ AI64slice: []int64{1, -22, 333, -4444, 55555, -666666},
+ AMSU16: map[string]uint16{"1": 1, "22": 2, "333": 3, "4444": 4},
+ AF64slice: []float64{11.11e-11, 22.22E+22, 33.33E-33, 44.44e+44, 555.55E-6, 666.66E6},
+ },
+ }
+ if useInterface {
+ ts.AnonInTestStrucIntf = &AnonInTestStrucIntf{
+ Islice: []interface{}{"true", true, "no", false, uint64(288), float64(0.4)},
+ Ms: map[string]interface{}{
+ "true": "true",
+ "int64(9)": false,
+ },
+ T: testStrucTime,
+ }
+ }
+
+ //For benchmarks, some things will not work.
+ if !bench {
+ //json and bson require string keys in maps
+ //ts.M = map[interface{}]interface{}{
+ // true: "true",
+ // int8(9): false,
+ //}
+ //gob cannot encode nil in element in array (encodeArray: nil element)
+ ts.Iptrslice = []*int64{nil, &i64a, nil, &i64b, nil, &i64c, nil, &i64d, nil}
+ // ts.Iptrslice = nil
+ }
+ if !useStringKeyOnly {
+ // ts.AnonInTestStruc.AMU32F64 = map[uint32]float64{1: 1, 2: 2, 3: 3} // Json/Bson barf
+ }
+ if depth > 0 {
+ depth--
+ if ts.Mtsptr == nil {
+ ts.Mtsptr = make(map[string]*TestStruc)
+ }
+ if ts.Mts == nil {
+ ts.Mts = make(map[string]TestStruc)
+ }
+ ts.Mtsptr["0"] = newTestStruc(depth, bench, useInterface, useStringKeyOnly)
+ ts.Mts["0"] = *(ts.Mtsptr["0"])
+ ts.Its = append(ts.Its, ts.Mtsptr["0"])
+ }
+ return
+}
+
+// Some other types
+
+type Sstring string
+type Bbool bool
+type Sstructsmall struct {
+ A int
+}
+
+type Sstructbig struct {
+ A int
+ B bool
+ c string
+ // Sval Sstruct
+ Ssmallptr *Sstructsmall
+ Ssmall *Sstructsmall
+ Sptr *Sstructbig
+}
+
+type SstructbigMapBySlice struct {
+ _struct struct{} `codec:",toarray"`
+ A int
+ B bool
+ c string
+ // Sval Sstruct
+ Ssmallptr *Sstructsmall
+ Ssmall *Sstructsmall
+ Sptr *Sstructbig
+}
+
+type Sinterface interface {
+ Noop()
+}
diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_linux_test.go b/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_linux_test.go
new file mode 100644
index 000000000..38a6f0004
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_linux_test.go
@@ -0,0 +1,43 @@
+package ini
+
+import (
+ "reflect"
+ "syscall"
+ "testing"
+)
+
+func TestLoadFile(t *testing.T) {
+ originalOpenFiles := numFilesOpen(t)
+
+ file, err := LoadFile("test.ini")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if originalOpenFiles != numFilesOpen(t) {
+ t.Error("test.ini not closed")
+ }
+
+ if !reflect.DeepEqual(file, File{"default": {"stuff": "things"}}) {
+ t.Error("file not read correctly")
+ }
+}
+
+func numFilesOpen(t *testing.T) (num uint64) {
+ var rlimit syscall.Rlimit
+ err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit)
+ if err != nil {
+ t.Fatal(err)
+ }
+ maxFds := int(rlimit.Cur)
+
+ var stat syscall.Stat_t
+ for i := 0; i < maxFds; i++ {
+ if syscall.Fstat(i, &stat) == nil {
+ num++
+ } else {
+ return
+ }
+ }
+ return
+}
diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_test.go b/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_test.go
new file mode 100644
index 000000000..06a4d05ea
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_test.go
@@ -0,0 +1,89 @@
+package ini
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func TestLoad(t *testing.T) {
+ src := `
+ # Comments are ignored
+
+ herp = derp
+
+ [foo]
+ hello=world
+ whitespace should = not matter
+ ; sneaky semicolon-style comment
+ multiple = equals = signs
+
+ [bar]
+ this = that`
+
+ file, err := Load(strings.NewReader(src))
+ if err != nil {
+ t.Fatal(err)
+ }
+ check := func(section, key, expect string) {
+ if value, _ := file.Get(section, key); value != expect {
+ t.Errorf("Get(%q, %q): expected %q, got %q", section, key, expect, value)
+ }
+ }
+
+ check("", "herp", "derp")
+ check("foo", "hello", "world")
+ check("foo", "whitespace should", "not matter")
+ check("foo", "multiple", "equals = signs")
+ check("bar", "this", "that")
+}
+
+func TestSyntaxError(t *testing.T) {
+ src := `
+ # Line 2
+ [foo]
+ bar = baz
+ # Here's an error on line 6:
+ wut?
+ herp = derp`
+ _, err := Load(strings.NewReader(src))
+ t.Logf("%T: %v", err, err)
+ if err == nil {
+ t.Fatal("expected an error, got nil")
+ }
+ syntaxErr, ok := err.(ErrSyntax)
+ if !ok {
+ t.Fatal("expected an error of type ErrSyntax")
+ }
+ if syntaxErr.Line != 6 {
+ t.Fatal("incorrect line number")
+ }
+ if syntaxErr.Source != "wut?" {
+ t.Fatal("incorrect source")
+ }
+}
+
+func TestDefinedSectionBehaviour(t *testing.T) {
+ check := func(src string, expect File) {
+ file, err := Load(strings.NewReader(src))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(file, expect) {
+ t.Errorf("expected %v, got %v", expect, file)
+ }
+ }
+ // No sections for an empty file
+ check("", File{})
+ // Default section only if there are actually values for it
+ check("foo=bar", File{"": {"foo": "bar"}})
+ // User-defined sections should always be present, even if empty
+ check("[a]\n[b]\nfoo=bar", File{
+ "a": {},
+ "b": {"foo": "bar"},
+ })
+ check("foo=bar\n[a]\nthis=that", File{
+ "": {"foo": "bar"},
+ "a": {"this": "that"},
+ })
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/bcrypt/bcrypt_test.go b/Godeps/_workspace/src/golang.org/x/crypto/bcrypt/bcrypt_test.go
new file mode 100644
index 000000000..f08a6f5b2
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/bcrypt/bcrypt_test.go
@@ -0,0 +1,226 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bcrypt
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+)
+
+func TestBcryptingIsEasy(t *testing.T) {
+ pass := []byte("mypassword")
+ hp, err := GenerateFromPassword(pass, 0)
+ if err != nil {
+ t.Fatalf("GenerateFromPassword error: %s", err)
+ }
+
+ if CompareHashAndPassword(hp, pass) != nil {
+ t.Errorf("%v should hash %s correctly", hp, pass)
+ }
+
+ notPass := "notthepass"
+ err = CompareHashAndPassword(hp, []byte(notPass))
+ if err != ErrMismatchedHashAndPassword {
+ t.Errorf("%v and %s should be mismatched", hp, notPass)
+ }
+}
+
+func TestBcryptingIsCorrect(t *testing.T) {
+ pass := []byte("allmine")
+ salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
+ expectedHash := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga")
+
+ hash, err := bcrypt(pass, 10, salt)
+ if err != nil {
+ t.Fatalf("bcrypt blew up: %v", err)
+ }
+ if !bytes.HasSuffix(expectedHash, hash) {
+ t.Errorf("%v should be the suffix of %v", hash, expectedHash)
+ }
+
+ h, err := newFromHash(expectedHash)
+ if err != nil {
+ t.Errorf("Unable to parse %s: %v", string(expectedHash), err)
+ }
+
+ // This is not the safe way to compare these hashes. We do this only for
+ // testing clarity. Use bcrypt.CompareHashAndPassword()
+ if err == nil && !bytes.Equal(expectedHash, h.Hash()) {
+ t.Errorf("Parsed hash %v should equal %v", h.Hash(), expectedHash)
+ }
+}
+
+func TestVeryShortPasswords(t *testing.T) {
+ key := []byte("k")
+ salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
+ _, err := bcrypt(key, 10, salt)
+ if err != nil {
+ t.Errorf("One byte key resulted in error: %s", err)
+ }
+}
+
+func TestTooLongPasswordsWork(t *testing.T) {
+ salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
+ // One byte over the usual 56 byte limit that blowfish has
+ tooLongPass := []byte("012345678901234567890123456789012345678901234567890123456")
+ tooLongExpected := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C")
+ hash, err := bcrypt(tooLongPass, 10, salt)
+ if err != nil {
+ t.Fatalf("bcrypt blew up on long password: %v", err)
+ }
+ if !bytes.HasSuffix(tooLongExpected, hash) {
+ t.Errorf("%v should be the suffix of %v", hash, tooLongExpected)
+ }
+}
+
+type InvalidHashTest struct {
+ err error
+ hash []byte
+}
+
+var invalidTests = []InvalidHashTest{
+ {ErrHashTooShort, []byte("$2a$10$fooo")},
+ {ErrHashTooShort, []byte("$2a")},
+ {HashVersionTooNewError('3'), []byte("$3a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
+ {InvalidHashPrefixError('%'), []byte("%2a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
+ {InvalidCostError(32), []byte("$2a$32$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
+}
+
+func TestInvalidHashErrors(t *testing.T) {
+ check := func(name string, expected, err error) {
+ if err == nil {
+ t.Errorf("%s: Should have returned an error", name)
+ }
+ if err != nil && err != expected {
+ t.Errorf("%s gave err %v but should have given %v", name, err, expected)
+ }
+ }
+ for _, iht := range invalidTests {
+ _, err := newFromHash(iht.hash)
+ check("newFromHash", iht.err, err)
+ err = CompareHashAndPassword(iht.hash, []byte("anything"))
+ check("CompareHashAndPassword", iht.err, err)
+ }
+}
+
+func TestUnpaddedBase64Encoding(t *testing.T) {
+ original := []byte{101, 201, 101, 75, 19, 227, 199, 20, 239, 236, 133, 32, 30, 109, 243, 30}
+ encodedOriginal := []byte("XajjQvNhvvRt5GSeFk1xFe")
+
+ encoded := base64Encode(original)
+
+ if !bytes.Equal(encodedOriginal, encoded) {
+ t.Errorf("Encoded %v should have equaled %v", encoded, encodedOriginal)
+ }
+
+ decoded, err := base64Decode(encodedOriginal)
+ if err != nil {
+ t.Fatalf("base64Decode blew up: %s", err)
+ }
+
+ if !bytes.Equal(decoded, original) {
+ t.Errorf("Decoded %v should have equaled %v", decoded, original)
+ }
+}
+
+func TestCost(t *testing.T) {
+ suffix := "XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C"
+ for _, vers := range []string{"2a", "2"} {
+ for _, cost := range []int{4, 10} {
+ s := fmt.Sprintf("$%s$%02d$%s", vers, cost, suffix)
+ h := []byte(s)
+ actual, err := Cost(h)
+ if err != nil {
+ t.Errorf("Cost, error: %s", err)
+ continue
+ }
+ if actual != cost {
+ t.Errorf("Cost, expected: %d, actual: %d", cost, actual)
+ }
+ }
+ }
+ _, err := Cost([]byte("$a$a$" + suffix))
+ if err == nil {
+ t.Errorf("Cost, malformed but no error returned")
+ }
+}
+
+func TestCostValidationInHash(t *testing.T) {
+ if testing.Short() {
+ return
+ }
+
+ pass := []byte("mypassword")
+
+ for c := 0; c < MinCost; c++ {
+ p, _ := newFromPassword(pass, c)
+ if p.cost != DefaultCost {
+ t.Errorf("newFromPassword should default costs below %d to %d, but was %d", MinCost, DefaultCost, p.cost)
+ }
+ }
+
+ p, _ := newFromPassword(pass, 14)
+ if p.cost != 14 {
+ t.Errorf("newFromPassword should default cost to 14, but was %d", p.cost)
+ }
+
+ hp, _ := newFromHash(p.Hash())
+ if p.cost != hp.cost {
+ t.Errorf("newFromHash should maintain the cost at %d, but was %d", p.cost, hp.cost)
+ }
+
+ _, err := newFromPassword(pass, 32)
+ if err == nil {
+ t.Fatalf("newFromPassword: should return a cost error")
+ }
+ if err != InvalidCostError(32) {
+ t.Errorf("newFromPassword: should return cost error, got %#v", err)
+ }
+}
+
+func TestCostReturnsWithLeadingZeroes(t *testing.T) {
+ hp, _ := newFromPassword([]byte("abcdefgh"), 7)
+ cost := hp.Hash()[4:7]
+ expected := []byte("07$")
+
+ if !bytes.Equal(expected, cost) {
+ t.Errorf("single digit costs in hash should have leading zeros: was %v instead of %v", cost, expected)
+ }
+}
+
+func TestMinorNotRequired(t *testing.T) {
+ noMinorHash := []byte("$2$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga")
+ h, err := newFromHash(noMinorHash)
+ if err != nil {
+ t.Fatalf("No minor hash blew up: %s", err)
+ }
+ if h.minor != 0 {
+ t.Errorf("Should leave minor version at 0, but was %d", h.minor)
+ }
+
+ if !bytes.Equal(noMinorHash, h.Hash()) {
+ t.Errorf("Should generate hash %v, but created %v", noMinorHash, h.Hash())
+ }
+}
+
+func BenchmarkEqual(b *testing.B) {
+ b.StopTimer()
+ passwd := []byte("somepasswordyoulike")
+ hash, _ := GenerateFromPassword(passwd, 10)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ CompareHashAndPassword(hash, passwd)
+ }
+}
+
+func BenchmarkGeneration(b *testing.B) {
+ b.StopTimer()
+ passwd := []byte("mylongpassword1234")
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ GenerateFromPassword(passwd, 10)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/blowfish/blowfish_test.go b/Godeps/_workspace/src/golang.org/x/crypto/blowfish/blowfish_test.go
new file mode 100644
index 000000000..7afa1fdf3
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/blowfish/blowfish_test.go
@@ -0,0 +1,274 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package blowfish
+
+import "testing"
+
+type CryptTest struct {
+ key []byte
+ in []byte
+ out []byte
+}
+
+// Test vector values are from http://www.schneier.com/code/vectors.txt.
+var encryptTests = []CryptTest{
+ {
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}},
+ {
+ []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
+ []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
+ []byte{0x51, 0x86, 0x6F, 0xD5, 0xB8, 0x5E, 0xCB, 0x8A}},
+ {
+ []byte{0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+ []byte{0x7D, 0x85, 0x6F, 0x9A, 0x61, 0x30, 0x63, 0xF2}},
+ {
+ []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
+ []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
+ []byte{0x24, 0x66, 0xDD, 0x87, 0x8B, 0x96, 0x3C, 0x9D}},
+
+ {
+ []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
+ []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
+ []byte{0x61, 0xF9, 0xC3, 0x80, 0x22, 0x81, 0xB0, 0x96}},
+ {
+ []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
+ []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
+ []byte{0x7D, 0x0C, 0xC6, 0x30, 0xAF, 0xDA, 0x1E, 0xC7}},
+ {
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}},
+ {
+ []byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10},
+ []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
+ []byte{0x0A, 0xCE, 0xAB, 0x0F, 0xC6, 0xA0, 0xA2, 0x8D}},
+ {
+ []byte{0x7C, 0xA1, 0x10, 0x45, 0x4A, 0x1A, 0x6E, 0x57},
+ []byte{0x01, 0xA1, 0xD6, 0xD0, 0x39, 0x77, 0x67, 0x42},
+ []byte{0x59, 0xC6, 0x82, 0x45, 0xEB, 0x05, 0x28, 0x2B}},
+ {
+ []byte{0x01, 0x31, 0xD9, 0x61, 0x9D, 0xC1, 0x37, 0x6E},
+ []byte{0x5C, 0xD5, 0x4C, 0xA8, 0x3D, 0xEF, 0x57, 0xDA},
+ []byte{0xB1, 0xB8, 0xCC, 0x0B, 0x25, 0x0F, 0x09, 0xA0}},
+ {
+ []byte{0x07, 0xA1, 0x13, 0x3E, 0x4A, 0x0B, 0x26, 0x86},
+ []byte{0x02, 0x48, 0xD4, 0x38, 0x06, 0xF6, 0x71, 0x72},
+ []byte{0x17, 0x30, 0xE5, 0x77, 0x8B, 0xEA, 0x1D, 0xA4}},
+ {
+ []byte{0x38, 0x49, 0x67, 0x4C, 0x26, 0x02, 0x31, 0x9E},
+ []byte{0x51, 0x45, 0x4B, 0x58, 0x2D, 0xDF, 0x44, 0x0A},
+ []byte{0xA2, 0x5E, 0x78, 0x56, 0xCF, 0x26, 0x51, 0xEB}},
+ {
+ []byte{0x04, 0xB9, 0x15, 0xBA, 0x43, 0xFE, 0xB5, 0xB6},
+ []byte{0x42, 0xFD, 0x44, 0x30, 0x59, 0x57, 0x7F, 0xA2},
+ []byte{0x35, 0x38, 0x82, 0xB1, 0x09, 0xCE, 0x8F, 0x1A}},
+ {
+ []byte{0x01, 0x13, 0xB9, 0x70, 0xFD, 0x34, 0xF2, 0xCE},
+ []byte{0x05, 0x9B, 0x5E, 0x08, 0x51, 0xCF, 0x14, 0x3A},
+ []byte{0x48, 0xF4, 0xD0, 0x88, 0x4C, 0x37, 0x99, 0x18}},
+ {
+ []byte{0x01, 0x70, 0xF1, 0x75, 0x46, 0x8F, 0xB5, 0xE6},
+ []byte{0x07, 0x56, 0xD8, 0xE0, 0x77, 0x47, 0x61, 0xD2},
+ []byte{0x43, 0x21, 0x93, 0xB7, 0x89, 0x51, 0xFC, 0x98}},
+ {
+ []byte{0x43, 0x29, 0x7F, 0xAD, 0x38, 0xE3, 0x73, 0xFE},
+ []byte{0x76, 0x25, 0x14, 0xB8, 0x29, 0xBF, 0x48, 0x6A},
+ []byte{0x13, 0xF0, 0x41, 0x54, 0xD6, 0x9D, 0x1A, 0xE5}},
+ {
+ []byte{0x07, 0xA7, 0x13, 0x70, 0x45, 0xDA, 0x2A, 0x16},
+ []byte{0x3B, 0xDD, 0x11, 0x90, 0x49, 0x37, 0x28, 0x02},
+ []byte{0x2E, 0xED, 0xDA, 0x93, 0xFF, 0xD3, 0x9C, 0x79}},
+ {
+ []byte{0x04, 0x68, 0x91, 0x04, 0xC2, 0xFD, 0x3B, 0x2F},
+ []byte{0x26, 0x95, 0x5F, 0x68, 0x35, 0xAF, 0x60, 0x9A},
+ []byte{0xD8, 0x87, 0xE0, 0x39, 0x3C, 0x2D, 0xA6, 0xE3}},
+ {
+ []byte{0x37, 0xD0, 0x6B, 0xB5, 0x16, 0xCB, 0x75, 0x46},
+ []byte{0x16, 0x4D, 0x5E, 0x40, 0x4F, 0x27, 0x52, 0x32},
+ []byte{0x5F, 0x99, 0xD0, 0x4F, 0x5B, 0x16, 0x39, 0x69}},
+ {
+ []byte{0x1F, 0x08, 0x26, 0x0D, 0x1A, 0xC2, 0x46, 0x5E},
+ []byte{0x6B, 0x05, 0x6E, 0x18, 0x75, 0x9F, 0x5C, 0xCA},
+ []byte{0x4A, 0x05, 0x7A, 0x3B, 0x24, 0xD3, 0x97, 0x7B}},
+ {
+ []byte{0x58, 0x40, 0x23, 0x64, 0x1A, 0xBA, 0x61, 0x76},
+ []byte{0x00, 0x4B, 0xD6, 0xEF, 0x09, 0x17, 0x60, 0x62},
+ []byte{0x45, 0x20, 0x31, 0xC1, 0xE4, 0xFA, 0xDA, 0x8E}},
+ {
+ []byte{0x02, 0x58, 0x16, 0x16, 0x46, 0x29, 0xB0, 0x07},
+ []byte{0x48, 0x0D, 0x39, 0x00, 0x6E, 0xE7, 0x62, 0xF2},
+ []byte{0x75, 0x55, 0xAE, 0x39, 0xF5, 0x9B, 0x87, 0xBD}},
+ {
+ []byte{0x49, 0x79, 0x3E, 0xBC, 0x79, 0xB3, 0x25, 0x8F},
+ []byte{0x43, 0x75, 0x40, 0xC8, 0x69, 0x8F, 0x3C, 0xFA},
+ []byte{0x53, 0xC5, 0x5F, 0x9C, 0xB4, 0x9F, 0xC0, 0x19}},
+ {
+ []byte{0x4F, 0xB0, 0x5E, 0x15, 0x15, 0xAB, 0x73, 0xA7},
+ []byte{0x07, 0x2D, 0x43, 0xA0, 0x77, 0x07, 0x52, 0x92},
+ []byte{0x7A, 0x8E, 0x7B, 0xFA, 0x93, 0x7E, 0x89, 0xA3}},
+ {
+ []byte{0x49, 0xE9, 0x5D, 0x6D, 0x4C, 0xA2, 0x29, 0xBF},
+ []byte{0x02, 0xFE, 0x55, 0x77, 0x81, 0x17, 0xF1, 0x2A},
+ []byte{0xCF, 0x9C, 0x5D, 0x7A, 0x49, 0x86, 0xAD, 0xB5}},
+ {
+ []byte{0x01, 0x83, 0x10, 0xDC, 0x40, 0x9B, 0x26, 0xD6},
+ []byte{0x1D, 0x9D, 0x5C, 0x50, 0x18, 0xF7, 0x28, 0xC2},
+ []byte{0xD1, 0xAB, 0xB2, 0x90, 0x65, 0x8B, 0xC7, 0x78}},
+ {
+ []byte{0x1C, 0x58, 0x7F, 0x1C, 0x13, 0x92, 0x4F, 0xEF},
+ []byte{0x30, 0x55, 0x32, 0x28, 0x6D, 0x6F, 0x29, 0x5A},
+ []byte{0x55, 0xCB, 0x37, 0x74, 0xD1, 0x3E, 0xF2, 0x01}},
+ {
+ []byte{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01},
+ []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
+ []byte{0xFA, 0x34, 0xEC, 0x48, 0x47, 0xB2, 0x68, 0xB2}},
+ {
+ []byte{0x1F, 0x1F, 0x1F, 0x1F, 0x0E, 0x0E, 0x0E, 0x0E},
+ []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
+ []byte{0xA7, 0x90, 0x79, 0x51, 0x08, 0xEA, 0x3C, 0xAE}},
+ {
+ []byte{0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1, 0xFE},
+ []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
+ []byte{0xC3, 0x9E, 0x07, 0x2D, 0x9F, 0xAC, 0x63, 0x1D}},
+ {
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
+ []byte{0x01, 0x49, 0x33, 0xE0, 0xCD, 0xAF, 0xF6, 0xE4}},
+ {
+ []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0xF2, 0x1E, 0x9A, 0x77, 0xB7, 0x1C, 0x49, 0xBC}},
+ {
+ []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x24, 0x59, 0x46, 0x88, 0x57, 0x54, 0x36, 0x9A}},
+ {
+ []byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10},
+ []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
+ []byte{0x6B, 0x5C, 0x5A, 0x9C, 0x5D, 0x9E, 0x0A, 0x5A}},
+}
+
+func TestCipherEncrypt(t *testing.T) {
+ for i, tt := range encryptTests {
+ c, err := NewCipher(tt.key)
+ if err != nil {
+ t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err)
+ continue
+ }
+ ct := make([]byte, len(tt.out))
+ c.Encrypt(ct, tt.in)
+ for j, v := range ct {
+ if v != tt.out[j] {
+ t.Errorf("Cipher.Encrypt, test vector #%d: cipher-text[%d] = %#x, expected %#x", i, j, v, tt.out[j])
+ break
+ }
+ }
+ }
+}
+
+func TestCipherDecrypt(t *testing.T) {
+ for i, tt := range encryptTests {
+ c, err := NewCipher(tt.key)
+ if err != nil {
+ t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err)
+ continue
+ }
+ pt := make([]byte, len(tt.in))
+ c.Decrypt(pt, tt.out)
+ for j, v := range pt {
+ if v != tt.in[j] {
+ t.Errorf("Cipher.Decrypt, test vector #%d: plain-text[%d] = %#x, expected %#x", i, j, v, tt.in[j])
+ break
+ }
+ }
+ }
+}
+
+func TestSaltedCipherKeyLength(t *testing.T) {
+ if _, err := NewSaltedCipher(nil, []byte{'a'}); err != KeySizeError(0) {
+ t.Errorf("NewSaltedCipher with short key, gave error %#v, expected %#v", err, KeySizeError(0))
+ }
+
+ // A 57-byte key. One over the typical blowfish restriction.
+ key := []byte("012345678901234567890123456789012345678901234567890123456")
+ if _, err := NewSaltedCipher(key, []byte{'a'}); err != nil {
+ t.Errorf("NewSaltedCipher with long key, gave error %#v", err)
+ }
+}
+
+// Test vectors generated with Blowfish from OpenSSH.
+var saltedVectors = [][8]byte{
+ {0x0c, 0x82, 0x3b, 0x7b, 0x8d, 0x01, 0x4b, 0x7e},
+ {0xd1, 0xe1, 0x93, 0xf0, 0x70, 0xa6, 0xdb, 0x12},
+ {0xfc, 0x5e, 0xba, 0xde, 0xcb, 0xf8, 0x59, 0xad},
+ {0x8a, 0x0c, 0x76, 0xe7, 0xdd, 0x2c, 0xd3, 0xa8},
+ {0x2c, 0xcb, 0x7b, 0xee, 0xac, 0x7b, 0x7f, 0xf8},
+ {0xbb, 0xf6, 0x30, 0x6f, 0xe1, 0x5d, 0x62, 0xbf},
+ {0x97, 0x1e, 0xc1, 0x3d, 0x3d, 0xe0, 0x11, 0xe9},
+ {0x06, 0xd7, 0x4d, 0xb1, 0x80, 0xa3, 0xb1, 0x38},
+ {0x67, 0xa1, 0xa9, 0x75, 0x0e, 0x5b, 0xc6, 0xb4},
+ {0x51, 0x0f, 0x33, 0x0e, 0x4f, 0x67, 0xd2, 0x0c},
+ {0xf1, 0x73, 0x7e, 0xd8, 0x44, 0xea, 0xdb, 0xe5},
+ {0x14, 0x0e, 0x16, 0xce, 0x7f, 0x4a, 0x9c, 0x7b},
+ {0x4b, 0xfe, 0x43, 0xfd, 0xbf, 0x36, 0x04, 0x47},
+ {0xb1, 0xeb, 0x3e, 0x15, 0x36, 0xa7, 0xbb, 0xe2},
+ {0x6d, 0x0b, 0x41, 0xdd, 0x00, 0x98, 0x0b, 0x19},
+ {0xd3, 0xce, 0x45, 0xce, 0x1d, 0x56, 0xb7, 0xfc},
+ {0xd9, 0xf0, 0xfd, 0xda, 0xc0, 0x23, 0xb7, 0x93},
+ {0x4c, 0x6f, 0xa1, 0xe4, 0x0c, 0xa8, 0xca, 0x57},
+ {0xe6, 0x2f, 0x28, 0xa7, 0x0c, 0x94, 0x0d, 0x08},
+ {0x8f, 0xe3, 0xf0, 0xb6, 0x29, 0xe3, 0x44, 0x03},
+ {0xff, 0x98, 0xdd, 0x04, 0x45, 0xb4, 0x6d, 0x1f},
+ {0x9e, 0x45, 0x4d, 0x18, 0x40, 0x53, 0xdb, 0xef},
+ {0xb7, 0x3b, 0xef, 0x29, 0xbe, 0xa8, 0x13, 0x71},
+ {0x02, 0x54, 0x55, 0x41, 0x8e, 0x04, 0xfc, 0xad},
+ {0x6a, 0x0a, 0xee, 0x7c, 0x10, 0xd9, 0x19, 0xfe},
+ {0x0a, 0x22, 0xd9, 0x41, 0xcc, 0x23, 0x87, 0x13},
+ {0x6e, 0xff, 0x1f, 0xff, 0x36, 0x17, 0x9c, 0xbe},
+ {0x79, 0xad, 0xb7, 0x40, 0xf4, 0x9f, 0x51, 0xa6},
+ {0x97, 0x81, 0x99, 0xa4, 0xde, 0x9e, 0x9f, 0xb6},
+ {0x12, 0x19, 0x7a, 0x28, 0xd0, 0xdc, 0xcc, 0x92},
+ {0x81, 0xda, 0x60, 0x1e, 0x0e, 0xdd, 0x65, 0x56},
+ {0x7d, 0x76, 0x20, 0xb2, 0x73, 0xc9, 0x9e, 0xee},
+}
+
+func TestSaltedCipher(t *testing.T) {
+ var key, salt [32]byte
+ for i := range key {
+ key[i] = byte(i)
+ salt[i] = byte(i + 32)
+ }
+ for i, v := range saltedVectors {
+ c, err := NewSaltedCipher(key[:], salt[:i])
+ if err != nil {
+ t.Fatal(err)
+ }
+ var buf [8]byte
+ c.Encrypt(buf[:], buf[:])
+ if v != buf {
+ t.Errorf("%d: expected %x, got %x", i, v, buf)
+ }
+ }
+}
+
+func BenchmarkExpandKeyWithSalt(b *testing.B) {
+ key := make([]byte, 32)
+ salt := make([]byte, 16)
+ c, _ := NewCipher(key)
+ for i := 0; i < b.N; i++ {
+ expandKeyWithSalt(key, salt, c)
+ }
+}
+
+func BenchmarkExpandKey(b *testing.B) {
+ key := make([]byte, 32)
+ c, _ := NewCipher(key)
+ for i := 0; i < b.N; i++ {
+ ExpandKey(key, c)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/cast5/cast5_test.go b/Godeps/_workspace/src/golang.org/x/crypto/cast5/cast5_test.go
new file mode 100644
index 000000000..778b272a6
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/cast5/cast5_test.go
@@ -0,0 +1,106 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cast5
+
+import (
+ "bytes"
+ "encoding/hex"
+ "testing"
+)
+
+// This test vector is taken from RFC 2144, App B.1.
+// Since the other two test vectors are for reduced-round variants, we can't
+// use them.
+var basicTests = []struct {
+ key, plainText, cipherText string
+}{
+ {
+ "0123456712345678234567893456789a",
+ "0123456789abcdef",
+ "238b4fe5847e44b2",
+ },
+}
+
+func TestBasic(t *testing.T) {
+ for i, test := range basicTests {
+ key, _ := hex.DecodeString(test.key)
+ plainText, _ := hex.DecodeString(test.plainText)
+ expected, _ := hex.DecodeString(test.cipherText)
+
+ c, err := NewCipher(key)
+ if err != nil {
+ t.Errorf("#%d: failed to create Cipher: %s", i, err)
+ continue
+ }
+ var cipherText [BlockSize]byte
+ c.Encrypt(cipherText[:], plainText)
+ if !bytes.Equal(cipherText[:], expected) {
+ t.Errorf("#%d: got:%x want:%x", i, cipherText, expected)
+ }
+
+ var plainTextAgain [BlockSize]byte
+ c.Decrypt(plainTextAgain[:], cipherText[:])
+ if !bytes.Equal(plainTextAgain[:], plainText) {
+ t.Errorf("#%d: got:%x want:%x", i, plainTextAgain, plainText)
+ }
+ }
+}
+
+// TestFull performs the test specified in RFC 2144, App B.2.
+// However, it takes a long time to run, so it is skipped when the -short
+// flag is given; a more limited version is included below.
+func TestFull(t *testing.T) {
+ if testing.Short() {
+ // This is too slow for normal testing
+ return
+ }
+
+ a, b := iterate(1000000)
+
+ const expectedA = "eea9d0a249fd3ba6b3436fb89d6dca92"
+ const expectedB = "b2c95eb00c31ad7180ac05b8e83d696e"
+
+ if hex.EncodeToString(a) != expectedA {
+ t.Errorf("a: got:%x want:%s", a, expectedA)
+ }
+ if hex.EncodeToString(b) != expectedB {
+ t.Errorf("b: got:%x want:%s", b, expectedB)
+ }
+}
+
+func iterate(iterations int) ([]byte, []byte) {
+ const initValueHex = "0123456712345678234567893456789a"
+
+ initValue, _ := hex.DecodeString(initValueHex)
+
+ var a, b [16]byte
+ copy(a[:], initValue)
+ copy(b[:], initValue)
+
+ for i := 0; i < iterations; i++ {
+ c, _ := NewCipher(b[:])
+ c.Encrypt(a[:8], a[:8])
+ c.Encrypt(a[8:], a[8:])
+ c, _ = NewCipher(a[:])
+ c.Encrypt(b[:8], b[:8])
+ c.Encrypt(b[8:], b[8:])
+ }
+
+ return a[:], b[:]
+}
+
+func TestLimited(t *testing.T) {
+ a, b := iterate(1000)
+
+ const expectedA = "23f73b14b02a2ad7dfb9f2c35644798d"
+ const expectedB = "e5bf37eff14c456a40b21ce369370a9f"
+
+ if hex.EncodeToString(a) != expectedA {
+ t.Errorf("a: got:%x want:%s", a, expectedA)
+ }
+ if hex.EncodeToString(b) != expectedB {
+ t.Errorf("b: got:%x want:%s", b, expectedB)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/curve25519_test.go b/Godeps/_workspace/src/golang.org/x/crypto/curve25519/curve25519_test.go
new file mode 100644
index 000000000..14b0ee87c
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/curve25519/curve25519_test.go
@@ -0,0 +1,29 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package curve25519
+
+import (
+ "fmt"
+ "testing"
+)
+
+const expectedHex = "89161fde887b2b53de549af483940106ecc114d6982daa98256de23bdf77661a"
+
+func TestBaseScalarMult(t *testing.T) {
+ var a, b [32]byte
+ in := &a
+ out := &b
+ a[0] = 1
+
+ for i := 0; i < 200; i++ {
+ ScalarBaseMult(out, in)
+ in, out = out, in
+ }
+
+ result := fmt.Sprintf("%x", in[:])
+ if result != expectedHex {
+ t.Errorf("incorrect result: got %s, want %s", result, expectedHex)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/armor/armor_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/armor/armor_test.go
new file mode 100644
index 000000000..9334e94e9
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/armor/armor_test.go
@@ -0,0 +1,95 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package armor
+
+import (
+ "bytes"
+ "hash/adler32"
+ "io/ioutil"
+ "testing"
+)
+
+func TestDecodeEncode(t *testing.T) {
+ buf := bytes.NewBuffer([]byte(armorExample1))
+ result, err := Decode(buf)
+ if err != nil {
+ t.Error(err)
+ }
+ expectedType := "PGP SIGNATURE"
+ if result.Type != expectedType {
+ t.Errorf("result.Type: got:%s want:%s", result.Type, expectedType)
+ }
+ if len(result.Header) != 1 {
+ t.Errorf("len(result.Header): got:%d want:1", len(result.Header))
+ }
+ v, ok := result.Header["Version"]
+ if !ok || v != "GnuPG v1.4.10 (GNU/Linux)" {
+ t.Errorf("result.Header: got:%#v", result.Header)
+ }
+
+ contents, err := ioutil.ReadAll(result.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if adler32.Checksum(contents) != 0x27b144be {
+ t.Errorf("contents: got: %x", contents)
+ }
+
+ buf = bytes.NewBuffer(nil)
+ w, err := Encode(buf, result.Type, result.Header)
+ if err != nil {
+ t.Error(err)
+ }
+ _, err = w.Write(contents)
+ if err != nil {
+ t.Error(err)
+ }
+ w.Close()
+
+ if !bytes.Equal(buf.Bytes(), []byte(armorExample1)) {
+ t.Errorf("got: %s\nwant: %s", string(buf.Bytes()), armorExample1)
+ }
+}
+
+func TestLongHeader(t *testing.T) {
+ buf := bytes.NewBuffer([]byte(armorLongLine))
+ result, err := Decode(buf)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ value, ok := result.Header["Version"]
+ if !ok {
+ t.Errorf("missing Version header")
+ }
+ if value != longValueExpected {
+ t.Errorf("got: %s want: %s", value, longValueExpected)
+ }
+}
+
+const armorExample1 = `-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.4.10 (GNU/Linux)
+
+iJwEAAECAAYFAk1Fv/0ACgkQo01+GMIMMbsYTwQAiAw+QAaNfY6WBdplZ/uMAccm
+4g+81QPmTSGHnetSb6WBiY13kVzK4HQiZH8JSkmmroMLuGeJwsRTEL4wbjRyUKEt
+p1xwUZDECs234F1xiG5enc5SGlRtP7foLBz9lOsjx+LEcA4sTl5/2eZR9zyFZqWW
+TxRjs+fJCIFuo71xb1g=
+=/teI
+-----END PGP SIGNATURE-----`
+
+const armorLongLine = `-----BEGIN PGP SIGNATURE-----
+Version: 0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz
+
+iQEcBAABAgAGBQJMtFESAAoJEKsQXJGvOPsVj40H/1WW6jaMXv4BW+1ueDSMDwM8
+kx1fLOXbVM5/Kn5LStZNt1jWWnpxdz7eq3uiqeCQjmqUoRde3YbB2EMnnwRbAhpp
+cacnAvy9ZQ78OTxUdNW1mhX5bS6q1MTEJnl+DcyigD70HG/yNNQD7sOPMdYQw0TA
+byQBwmLwmTsuZsrYqB68QyLHI+DUugn+kX6Hd2WDB62DKa2suoIUIHQQCd/ofwB3
+WfCYInXQKKOSxu2YOg2Eb4kLNhSMc1i9uKUWAH+sdgJh7NBgdoE4MaNtBFkHXRvv
+okWuf3+xA9ksp1npSY/mDvgHijmjvtpRDe6iUeqfCn8N9u9CBg8geANgaG8+QA4=
+=wfQG
+-----END PGP SIGNATURE-----`
+
+const longValueExpected = "0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz"
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/canonical_text_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/canonical_text_test.go
new file mode 100644
index 000000000..8f3ba2a88
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/canonical_text_test.go
@@ -0,0 +1,52 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package openpgp
+
+import (
+ "bytes"
+ "testing"
+)
+
+type recordingHash struct {
+ buf *bytes.Buffer
+}
+
+func (r recordingHash) Write(b []byte) (n int, err error) {
+ return r.buf.Write(b)
+}
+
+func (r recordingHash) Sum(in []byte) []byte {
+ return append(in, r.buf.Bytes()...)
+}
+
+func (r recordingHash) Reset() {
+ panic("shouldn't be called")
+}
+
+func (r recordingHash) Size() int {
+ panic("shouldn't be called")
+}
+
+func (r recordingHash) BlockSize() int {
+ panic("shouldn't be called")
+}
+
+func testCanonicalText(t *testing.T, input, expected string) {
+ r := recordingHash{bytes.NewBuffer(nil)}
+ c := NewCanonicalTextHash(r)
+ c.Write([]byte(input))
+ result := c.Sum(nil)
+ if expected != string(result) {
+ t.Errorf("input: %x got: %x want: %x", input, result, expected)
+ }
+}
+
+func TestCanonicalText(t *testing.T) {
+ testCanonicalText(t, "foo\n", "foo\r\n")
+ testCanonicalText(t, "foo", "foo")
+ testCanonicalText(t, "foo\r\n", "foo\r\n")
+ testCanonicalText(t, "foo\r\nbar", "foo\r\nbar")
+ testCanonicalText(t, "foo\r\nbar\n\n", "foo\r\nbar\r\n\r\n")
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go
new file mode 100644
index 000000000..406377c67
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go
@@ -0,0 +1,197 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package clearsign
+
+import (
+ "bytes"
+ "golang.org/x/crypto/openpgp"
+ "testing"
+)
+
+func testParse(t *testing.T, input []byte, expected, expectedPlaintext string) {
+ b, rest := Decode(input)
+ if b == nil {
+ t.Fatal("failed to decode clearsign message")
+ }
+ if !bytes.Equal(rest, []byte("trailing")) {
+ t.Errorf("unexpected remaining bytes returned: %s", string(rest))
+ }
+ if b.ArmoredSignature.Type != "PGP SIGNATURE" {
+ t.Errorf("bad armor type, got:%s, want:PGP SIGNATURE", b.ArmoredSignature.Type)
+ }
+ if !bytes.Equal(b.Bytes, []byte(expected)) {
+ t.Errorf("bad body, got:%x want:%x", b.Bytes, expected)
+ }
+
+ if !bytes.Equal(b.Plaintext, []byte(expectedPlaintext)) {
+ t.Errorf("bad plaintext, got:%x want:%x", b.Plaintext, expectedPlaintext)
+ }
+
+ keyring, err := openpgp.ReadArmoredKeyRing(bytes.NewBufferString(signingKey))
+ if err != nil {
+ t.Errorf("failed to parse public key: %s", err)
+ }
+
+ if _, err := openpgp.CheckDetachedSignature(keyring, bytes.NewBuffer(b.Bytes), b.ArmoredSignature.Body); err != nil {
+ t.Errorf("failed to check signature: %s", err)
+ }
+}
+
+func TestParse(t *testing.T) {
+ testParse(t, clearsignInput, "Hello world\r\nline 2", "Hello world\nline 2\n")
+ testParse(t, clearsignInput2, "\r\n\r\n(This message has a couple of blank lines at the start and end.)\r\n\r\n", "\n\n(This message has a couple of blank lines at the start and end.)\n\n\n")
+}
+
+func TestParseWithNoNewlineAtEnd(t *testing.T) {
+ input := clearsignInput
+ input = input[:len(input)-len("trailing")-1]
+ b, rest := Decode(input)
+ if b == nil {
+ t.Fatal("failed to decode clearsign message")
+ }
+ if len(rest) > 0 {
+ t.Errorf("unexpected remaining bytes returned: %s", string(rest))
+ }
+}
+
+var signingTests = []struct {
+ in, signed, plaintext string
+}{
+ {"", "", ""},
+ {"a", "a", "a\n"},
+ {"a\n", "a", "a\n"},
+ {"-a\n", "-a", "-a\n"},
+ {"--a\nb", "--a\r\nb", "--a\nb\n"},
+ // leading whitespace
+ {" a\n", " a", " a\n"},
+ {" a\n", " a", " a\n"},
+ // trailing whitespace (should be stripped)
+ {"a \n", "a", "a\n"},
+ {"a ", "a", "a\n"},
+ // whitespace-only lines (should be stripped)
+ {" \n", "", "\n"},
+ {" ", "", "\n"},
+ {"a\n \n \nb\n", "a\r\n\r\n\r\nb", "a\n\n\nb\n"},
+}
+
+func TestSigning(t *testing.T) {
+ keyring, err := openpgp.ReadArmoredKeyRing(bytes.NewBufferString(signingKey))
+ if err != nil {
+ t.Errorf("failed to parse public key: %s", err)
+ }
+
+ for i, test := range signingTests {
+ var buf bytes.Buffer
+
+ plaintext, err := Encode(&buf, keyring[0].PrivateKey, nil)
+ if err != nil {
+ t.Errorf("#%d: error from Encode: %s", i, err)
+ continue
+ }
+ if _, err := plaintext.Write([]byte(test.in)); err != nil {
+ t.Errorf("#%d: error from Write: %s", i, err)
+ continue
+ }
+ if err := plaintext.Close(); err != nil {
+ t.Fatalf("#%d: error from Close: %s", i, err)
+ continue
+ }
+
+ b, _ := Decode(buf.Bytes())
+ if b == nil {
+ t.Errorf("#%d: failed to decode clearsign message", i)
+ continue
+ }
+ if !bytes.Equal(b.Bytes, []byte(test.signed)) {
+ t.Errorf("#%d: bad result, got:%x, want:%x", i, b.Bytes, test.signed)
+ continue
+ }
+ if !bytes.Equal(b.Plaintext, []byte(test.plaintext)) {
+ t.Errorf("#%d: bad result, got:%x, want:%x", i, b.Plaintext, test.plaintext)
+ continue
+ }
+
+ if _, err := openpgp.CheckDetachedSignature(keyring, bytes.NewBuffer(b.Bytes), b.ArmoredSignature.Body); err != nil {
+ t.Errorf("#%d: failed to check signature: %s", i, err)
+ }
+ }
+}
+
+var clearsignInput = []byte(`
+;lasjlkfdsa
+
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+
+Hello world
+line 2
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.4.10 (GNU/Linux)
+
+iJwEAQECAAYFAk8kMuEACgkQO9o98PRieSpMsAQAhmY/vwmNpflrPgmfWsYhk5O8
+pjnBUzZwqTDoDeINjZEoPDSpQAHGhjFjgaDx/Gj4fAl0dM4D0wuUEBb6QOrwflog
+2A2k9kfSOMOtk0IH/H5VuFN1Mie9L/erYXjTQIptv9t9J7NoRBMU0QOOaFU0JaO9
+MyTpno24AjIAGb+mH1U=
+=hIJ6
+-----END PGP SIGNATURE-----
+trailing`)
+
+var clearsignInput2 = []byte(`
+asdlfkjasdlkfjsadf
+
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA256
+
+
+
+(This message has a couple of blank lines at the start and end.)
+
+
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+iJwEAQEIAAYFAlPpSREACgkQO9o98PRieSpZTAP+M8QUoCt/7Rf3YbXPcdzIL32v
+pt1I+cMNeopzfLy0u4ioEFi8s5VkwpL1AFmirvgViCwlf82inoRxzZRiW05JQ5LI
+ESEzeCoy2LIdRCQ2hcrG8pIUPzUO4TqO5D/dMbdHwNH4h5nNmGJUAEG6FpURlPm+
+qZg6BaTvOxepqOxnhVU=
+=e+C6
+-----END PGP SIGNATURE-----
+
+trailing`)
+
+var signingKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
+Version: GnuPG v1.4.10 (GNU/Linux)
+
+lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp
+idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn
+vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB
+AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X
+0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL
+IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk
+VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn
+gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9
+TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx
+q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz
+dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA
+CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1
+ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+
+eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid
+AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV
+bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK
+/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA
+A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX
+TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc
+lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6
+rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN
+oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8
+QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU
+nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC
+AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp
+BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad
+AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL
+VrM0m72/jnpKo04=
+=zNCn
+-----END PGP PRIVATE KEY BLOCK-----
+`
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/elgamal/elgamal_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/elgamal/elgamal_test.go
new file mode 100644
index 000000000..c4f99f5c4
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/elgamal/elgamal_test.go
@@ -0,0 +1,49 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package elgamal
+
+import (
+ "bytes"
+ "crypto/rand"
+ "math/big"
+ "testing"
+)
+
+// This is the 1024-bit MODP group from RFC 5114, section 2.1:
+const primeHex = "B10B8F96A080E01DDE92DE5EAE5D54EC52C99FBCFB06A3C69A6A9DCA52D23B616073E28675A23D189838EF1E2EE652C013ECB4AEA906112324975C3CD49B83BFACCBDD7D90C4BD7098488E9C219A73724EFFD6FAE5644738FAA31A4FF55BCCC0A151AF5F0DC8B4BD45BF37DF365C1A65E68CFDA76D4DA708DF1FB2BC2E4A4371"
+
+const generatorHex = "A4D1CBD5C3FD34126765A442EFB99905F8104DD258AC507FD6406CFF14266D31266FEA1E5C41564B777E690F5504F213160217B4B01B886A5E91547F9E2749F4D7FBD7D3B9A92EE1909D0D2263F80A76A6A24C087A091F531DBF0A0169B6A28AD662A4D18E73AFA32D779D5918D08BC8858F4DCEF97C2A24855E6EEB22B3B2E5"
+
+func fromHex(hex string) *big.Int {
+ n, ok := new(big.Int).SetString(hex, 16)
+ if !ok {
+ panic("failed to parse hex number")
+ }
+ return n
+}
+
+func TestEncryptDecrypt(t *testing.T) {
+ priv := &PrivateKey{
+ PublicKey: PublicKey{
+ G: fromHex(generatorHex),
+ P: fromHex(primeHex),
+ },
+ X: fromHex("42"),
+ }
+ priv.Y = new(big.Int).Exp(priv.G, priv.X, priv.P)
+
+ message := []byte("hello world")
+ c1, c2, err := Encrypt(rand.Reader, &priv.PublicKey, message)
+ if err != nil {
+ t.Errorf("error encrypting: %s", err)
+ }
+ message2, err := Decrypt(priv, c1, c2)
+ if err != nil {
+ t.Errorf("error decrypting: %s", err)
+ }
+ if !bytes.Equal(message2, message) {
+ t.Errorf("decryption failed, got: %x, want: %x", message2, message)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/keys_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/keys_test.go
new file mode 100644
index 000000000..d5e2056bb
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/keys_test.go
@@ -0,0 +1,370 @@
+package openpgp
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+ "time"
+
+ "golang.org/x/crypto/openpgp/errors"
+ "golang.org/x/crypto/openpgp/packet"
+)
+
+func TestKeyExpiry(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(expiringKeyHex))
+ entity := kring[0]
+
+ const timeFormat = "2006-01-02"
+ time1, _ := time.Parse(timeFormat, "2013-07-01")
+
+ // The expiringKeyHex key is structured as:
+ //
+ // pub 1024R/5E237D8C created: 2013-07-01 expires: 2013-07-31 usage: SC
+ // sub 1024R/1ABB25A0 created: 2013-07-01 23:11:07 +0200 CEST expires: 2013-07-08 usage: E
+ // sub 1024R/96A672F5 created: 2013-07-01 23:11:23 +0200 CEST expires: 2013-07-31 usage: E
+ //
+ // So this should select the newest, non-expired encryption key.
+ key, _ := entity.encryptionKey(time1)
+ if id := key.PublicKey.KeyIdShortString(); id != "96A672F5" {
+ t.Errorf("Expected key 1ABB25A0 at time %s, but got key %s", time1.Format(timeFormat), id)
+ }
+
+ // Once the first encryption subkey has expired, the second should be
+ // selected.
+ time2, _ := time.Parse(timeFormat, "2013-07-09")
+ key, _ = entity.encryptionKey(time2)
+ if id := key.PublicKey.KeyIdShortString(); id != "96A672F5" {
+ t.Errorf("Expected key 96A672F5 at time %s, but got key %s", time2.Format(timeFormat), id)
+ }
+
+ // Once all the keys have expired, nothing should be returned.
+ time3, _ := time.Parse(timeFormat, "2013-08-01")
+ if key, ok := entity.encryptionKey(time3); ok {
+ t.Errorf("Expected no key at time %s, but got key %s", time3.Format(timeFormat), key.PublicKey.KeyIdShortString())
+ }
+}
+
+func TestMissingCrossSignature(t *testing.T) {
+ // This public key has a signing subkey, but the subkey does not
+ // contain a cross-signature.
+ keys, err := ReadArmoredKeyRing(bytes.NewBufferString(missingCrossSignatureKey))
+ if len(keys) != 0 {
+ t.Errorf("Accepted key with missing cross signature")
+ }
+ if err == nil {
+ t.Fatal("Failed to detect error in keyring with missing cross signature")
+ }
+ structural, ok := err.(errors.StructuralError)
+ if !ok {
+ t.Fatalf("Unexpected class of error: %T. Wanted StructuralError", err)
+ }
+ const expectedMsg = "signing subkey is missing cross-signature"
+ if !strings.Contains(string(structural), expectedMsg) {
+ t.Fatalf("Unexpected error: %q. Expected it to contain %q", err, expectedMsg)
+ }
+}
+
+func TestInvalidCrossSignature(t *testing.T) {
+ // This public key has a signing subkey, and the subkey has an
+ // embedded cross-signature. However, the cross-signature does
+ // not correctly validate over the primary and subkey.
+ keys, err := ReadArmoredKeyRing(bytes.NewBufferString(invalidCrossSignatureKey))
+ if len(keys) != 0 {
+ t.Errorf("Accepted key with invalid cross signature")
+ }
+ if err == nil {
+ t.Fatal("Failed to detect error in keyring with an invalid cross signature")
+ }
+ structural, ok := err.(errors.StructuralError)
+ if !ok {
+ t.Fatalf("Unexpected class of error: %T. Wanted StructuralError", err)
+ }
+ const expectedMsg = "subkey signature invalid"
+ if !strings.Contains(string(structural), expectedMsg) {
+ t.Fatalf("Unexpected error: %q. Expected it to contain %q", err, expectedMsg)
+ }
+}
+
+func TestGoodCrossSignature(t *testing.T) {
+ // This public key has a signing subkey, and the subkey has an
+ // embedded cross-signature which correctly validates over the
+ // primary and subkey.
+ keys, err := ReadArmoredKeyRing(bytes.NewBufferString(goodCrossSignatureKey))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(keys) != 1 {
+ t.Errorf("Failed to accept key with good cross signature, %d", len(keys))
+ }
+ if len(keys[0].Subkeys) != 1 {
+ t.Errorf("Failed to accept good subkey, %d", len(keys[0].Subkeys))
+ }
+}
+
+// TestExternallyRevocableKey attempts to load and parse a key with a third-party revocation permission.
+func TestExternallyRevocableKey(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(subkeyUsageHex))
+
+ // The 0xA42704B92866382A key can be revoked by 0xBE3893CB843D0FE70C
+ // according to this signature that appears within the key:
+ // :signature packet: algo 1, keyid A42704B92866382A
+ // version 4, created 1396409682, md5len 0, sigclass 0x1f
+ // digest algo 2, begin of digest a9 84
+ // hashed subpkt 2 len 4 (sig created 2014-04-02)
+ // hashed subpkt 12 len 22 (revocation key: c=80 a=1 f=CE094AA433F7040BB2DDF0BE3893CB843D0FE70C)
+ // hashed subpkt 7 len 1 (not revocable)
+ // subpkt 16 len 8 (issuer key ID A42704B92866382A)
+ // data: [1024 bits]
+
+ id := uint64(0xA42704B92866382A)
+ keys := kring.KeysById(id)
+ if len(keys) != 1 {
+ t.Errorf("Expected to find key id %X, but got %d matches", id, len(keys))
+ }
+}
+
+func TestKeyRevocation(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(revokedKeyHex))
+
+ // revokedKeyHex contains these keys:
+ // pub 1024R/9A34F7C0 2014-03-25 [revoked: 2014-03-25]
+ // sub 1024R/1BA3CD60 2014-03-25 [revoked: 2014-03-25]
+ ids := []uint64{0xA401D9F09A34F7C0, 0x5CD3BE0A1BA3CD60}
+
+ for _, id := range ids {
+ keys := kring.KeysById(id)
+ if len(keys) != 1 {
+ t.Errorf("Expected KeysById to find revoked key %X, but got %d matches", id, len(keys))
+ }
+ keys = kring.KeysByIdUsage(id, 0)
+ if len(keys) != 0 {
+ t.Errorf("Expected KeysByIdUsage to filter out revoked key %X, but got %d matches", id, len(keys))
+ }
+ }
+}
+
+func TestSubkeyRevocation(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(revokedSubkeyHex))
+
+ // revokedSubkeyHex contains these keys:
+ // pub 1024R/4EF7E4BECCDE97F0 2014-03-25
+ // sub 1024R/D63636E2B96AE423 2014-03-25
+ // sub 1024D/DBCE4EE19529437F 2014-03-25
+ // sub 1024R/677815E371C2FD23 2014-03-25 [revoked: 2014-03-25]
+ validKeys := []uint64{0x4EF7E4BECCDE97F0, 0xD63636E2B96AE423, 0xDBCE4EE19529437F}
+ revokedKey := uint64(0x677815E371C2FD23)
+
+ for _, id := range validKeys {
+ keys := kring.KeysById(id)
+ if len(keys) != 1 {
+ t.Errorf("Expected KeysById to find key %X, but got %d matches", id, len(keys))
+ }
+ keys = kring.KeysByIdUsage(id, 0)
+ if len(keys) != 1 {
+ t.Errorf("Expected KeysByIdUsage to find key %X, but got %d matches", id, len(keys))
+ }
+ }
+
+ keys := kring.KeysById(revokedKey)
+ if len(keys) != 1 {
+ t.Errorf("Expected KeysById to find key %X, but got %d matches", revokedKey, len(keys))
+ }
+
+ keys = kring.KeysByIdUsage(revokedKey, 0)
+ if len(keys) != 0 {
+ t.Errorf("Expected KeysByIdUsage to filter out revoked key %X, but got %d matches", revokedKey, len(keys))
+ }
+}
+
+func TestKeyUsage(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(subkeyUsageHex))
+
+ // subkeyUsageHex contains these keys:
+ // pub 1024R/2866382A created: 2014-04-01 expires: never usage: SC
+ // sub 1024R/936C9153 created: 2014-04-01 expires: never usage: E
+ // sub 1024R/64D5F5BB created: 2014-04-02 expires: never usage: E
+ // sub 1024D/BC0BA992 created: 2014-04-02 expires: never usage: S
+ certifiers := []uint64{0xA42704B92866382A}
+ signers := []uint64{0xA42704B92866382A, 0x42CE2C64BC0BA992}
+ encrypters := []uint64{0x09C0C7D9936C9153, 0xC104E98664D5F5BB}
+
+ for _, id := range certifiers {
+ keys := kring.KeysByIdUsage(id, packet.KeyFlagCertify)
+ if len(keys) == 1 {
+ if keys[0].PublicKey.KeyId != id {
+ t.Errorf("Expected to find certifier key id %X, but got %X", id, keys[0].PublicKey.KeyId)
+ }
+ } else {
+ t.Errorf("Expected one match for certifier key id %X, but got %d matches", id, len(keys))
+ }
+ }
+
+ for _, id := range signers {
+ keys := kring.KeysByIdUsage(id, packet.KeyFlagSign)
+ if len(keys) == 1 {
+ if keys[0].PublicKey.KeyId != id {
+ t.Errorf("Expected to find signing key id %X, but got %X", id, keys[0].PublicKey.KeyId)
+ }
+ } else {
+ t.Errorf("Expected one match for signing key id %X, but got %d matches", id, len(keys))
+ }
+
+ // This keyring contains no encryption keys that are also good for signing.
+ keys = kring.KeysByIdUsage(id, packet.KeyFlagEncryptStorage|packet.KeyFlagEncryptCommunications)
+ if len(keys) != 0 {
+ t.Errorf("Unexpected match for encryption key id %X", id)
+ }
+ }
+
+ for _, id := range encrypters {
+ keys := kring.KeysByIdUsage(id, packet.KeyFlagEncryptStorage|packet.KeyFlagEncryptCommunications)
+ if len(keys) == 1 {
+ if keys[0].PublicKey.KeyId != id {
+ t.Errorf("Expected to find encryption key id %X, but got %X", id, keys[0].PublicKey.KeyId)
+ }
+ } else {
+ t.Errorf("Expected one match for encryption key id %X, but got %d matches", id, len(keys))
+ }
+
+ // This keyring contains no encryption keys that are also good for signing.
+ keys = kring.KeysByIdUsage(id, packet.KeyFlagSign)
+ if len(keys) != 0 {
+ t.Errorf("Unexpected match for signing key id %X", id)
+ }
+ }
+}
+
+func TestIdVerification(t *testing.T) {
+ kring, err := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := kring[1].PrivateKey.Decrypt([]byte("passphrase")); err != nil {
+ t.Fatal(err)
+ }
+
+ const identity = "Test Key 1 (RSA)"
+ if err := kring[0].SignIdentity(identity, kring[1], nil); err != nil {
+ t.Fatal(err)
+ }
+
+ ident, ok := kring[0].Identities[identity]
+ if !ok {
+ t.Fatal("identity missing from key after signing")
+ }
+
+ checked := false
+ for _, sig := range ident.Signatures {
+ if sig.IssuerKeyId == nil || *sig.IssuerKeyId != kring[1].PrimaryKey.KeyId {
+ continue
+ }
+
+ if err := kring[1].PrimaryKey.VerifyUserIdSignature(identity, kring[0].PrimaryKey, sig); err != nil {
+ t.Fatalf("error verifying new identity signature: %s", err)
+ }
+ checked = true
+ break
+ }
+
+ if !checked {
+ t.Fatal("didn't find identity signature in Entity")
+ }
+}
+
+const expiringKeyHex = "988d0451d1ec5d010400ba3385721f2dc3f4ab096b2ee867ab77213f0a27a8538441c35d2fa225b08798a1439a66a5150e6bdc3f40f5d28d588c712394c632b6299f77db8c0d48d37903fb72ebd794d61be6aa774688839e5fdecfe06b2684cc115d240c98c66cb1ef22ae84e3aa0c2b0c28665c1e7d4d044e7f270706193f5223c8d44e0d70b7b8da830011010001b40f4578706972792074657374206b657988be041301020028050251d1ec5d021b03050900278d00060b090807030206150802090a0b0416020301021e01021780000a091072589ad75e237d8c033503fd10506d72837834eb7f994117740723adc39227104b0d326a1161871c0b415d25b4aedef946ca77ea4c05af9c22b32cf98be86ab890111fced1ee3f75e87b7cc3c00dc63bbc85dfab91c0dc2ad9de2c4d13a34659333a85c6acc1a669c5e1d6cecb0cf1e56c10e72d855ae177ddc9e766f9b2dda57ccbb75f57156438bbdb4e42b88d0451d1ec5d0104009c64906559866c5cb61578f5846a94fcee142a489c9b41e67b12bb54cfe86eb9bc8566460f9a720cb00d6526fbccfd4f552071a8e3f7744b1882d01036d811ee5a3fb91a1c568055758f43ba5d2c6a9676b012f3a1a89e47bbf624f1ad571b208f3cc6224eb378f1645dd3d47584463f9eadeacfd1ce6f813064fbfdcc4b5a53001101000188a504180102000f021b0c050251d1f06b050900093e89000a091072589ad75e237d8c20e00400ab8310a41461425b37889c4da28129b5fae6084fafbc0a47dd1adc74a264c6e9c9cc125f40462ee1433072a58384daef88c961c390ed06426a81b464a53194c4e291ddd7e2e2ba3efced01537d713bd111f48437bde2363446200995e8e0d4e528dda377fd1e8f8ede9c8e2198b393bd86852ce7457a7e3daf74d510461a5b77b88d0451d1ece8010400b3a519f83ab0010307e83bca895170acce8964a044190a2b368892f7a244758d9fc193482648acb1fb9780d28cc22d171931f38bb40279389fc9bf2110876d4f3db4fcfb13f22f7083877fe56592b3b65251312c36f83ffcb6d313c6a17f197dd471f0712aad15a8537b435a92471ba2e5b0c72a6c72536c3b567c558d7b6051001101000188a504180102000f021b0c050251d1f07b050900279091000a091072589ad75e237d8ce69e03fe286026afacf7c97ee20673864d4459a2240b5655219950643c7dba0ac384b1d4359c67805b21d98211f7b09c2a0ccf6410c8c04d4ff4a51293725d8d6570d9d8bb0e10c07d22357caeb49626df99c180be02d77d1fe8ed25e7a54481237646083a9f89a11566cd20b9e995b1487c5f9e02aeb434f3a1897cd416dd0a87861838da3e9e"
+const subkeyUsageHex = "988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889f0418010200090502533b8283021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c63552ba64a2ded7badc05ce0ea2be6d53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb04986141568046202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b95784e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc
33730109b9338cd9f483e7cef3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98"
+const revokedKeyHex = "988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf6428e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f70f0474d5e23cb17e38b67dc6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f"
+const revokedSubkeyHex = "988d04533121f6010400aefc803a3e4bb1a61c86e8a86d2726c6a43e0079e9f2713f1fa017e9854c83877f4aced8e331d675c67ea83ddab80aacbfa0b9040bb12d96f5a3d6be09455e2a76546cbd21677537db941cab710216b6d24ec277ee0bd65b910f416737ed120f6b93a9d3b306245c8cfd8394606fdb462e5cf43c551438d2864506c63367fc890011010001b41d416c696365203c616c69636540626d626172697374612e636f2e61753e88bb041301020025021b03060b090807030206150802090a0b0416020301021e01021780050253312798021901000a09104ef7e4beccde97f015a803ff5448437780f63263b0df8442a995e7f76c221351a51edd06f2063d8166cf3157aada4923dfc44aa0f2a6a4da5cf83b7fe722ba8ab416c976e77c6b5682e7f1069026673bd0de56ba06fd5d7a9f177607f277d9b55ff940a638c3e68525c67517e2b3d976899b93ca267f705b3e5efad7d61220e96b618a4497eab8d04403d23f8846041011020006050253312910000a09107b15a67f0b3ddc03d96e009f50b6365d86c4be5d5e9d0ea42d5e56f5794c617700a0ab274e19c2827780016d23417ce89e0a2c0d987d889c04100102000605025331cf7a000a0910a401d9f09a34f7c0ee970400aca292f213041c9f3b3fc49148cbda9d84afee6183c8dd6c5ff2600b29482db5fecd4303797be1ee6d544a20a858080fec43412061c9a71fae4039fd58013b4ae341273e6c66ad4c7cdd9e68245bedb260562e7b166f2461a1032f2b38c0e0e5715fb3d1656979e052b55ca827a76f872b78a9fdae64bc298170bfcebedc1271b41a416c696365203c616c696365407379646973702e6f722e61753e88b804130102002205025331278b021b03060b090807030206150802090a0b0416020301021e01021780000a09104ef7e4beccde97f06a7003fa03c3af68d272ebc1fa08aa72a03b02189c26496a2833d90450801c4e42c5b5f51ad96ce2d2c9cef4b7c02a6a2fcf1412d6a2d486098eb762f5010a201819c17fd2888aec8eda20c65a3b75744de7ee5cc8ac7bfc470cbe3cb982720405a27a3c6a8c229cfe36905f881b02ed5680f6a8f05866efb9d6c5844897e631deb949ca8846041011020006050253312910000a09107b15a67f0b3ddc0347bc009f7fa35db59147469eb6f2c5aaf6428accb138b22800a0caa2f5f0874bacc5909c652a57a31beda65eddd5889c04100102000605025331cf7a000a0910a401d9f09a34f7c0316403ff46f2a5c101256627f16384d34a38fb47a6c88ba60506843e532d91614339fccae5f884a5741e7582ffaf292ba38ee10a270a05f139bde3814b6a077e8cd2db0f105ebea2a83af70d385f13b507fac2ad93ff79d84950328bb86f3074745a8b7f9b64990fb142e2a12976e27e8d09a28dc5621f957ac49091116da410ac3cbde1b88d04533121f6010400cbd785b56905e4192e2fb62a720727d43c4fa487821203cf72138b884b78b701093243e1d8c92a0248a6c0203a5a88693da34af357499abacaf4b3309c640797d03093870a323b4b6f37865f6eaa2838148a67df4735d43a90ca87942554cdf1c4a751b1e75f9fd4ce4e97e278d6c1c7ed59d33441df7d084f3f02beb68896c70011010001889f0418010200090502533121f6021b0c000a09104ef7e4beccde97f0b98b03fc0a5ccf6a372995835a2f5da33b282a7d612c0ab2a97f59cf9fff73e9110981aac2858c41399afa29624a7fd8a0add11654e3d882c0fd199e161bdad65e5e2548f7b68a437ea64293db1246e3011cbb94dc1bcdeaf0f2539bd88ff16d95547144d97cead6a8c5927660a91e6db0d16eb36b7b49a3525b54d1644e65599b032b7eb901a204533127a0110400bd3edaa09eff9809c4edc2c2a0ebe52e53c50a19c1e49ab78e6167bf61473bb08f2050d78a5cbbc6ed66aff7b42cd503f16b4a0b99fa1609681fca9b7ce2bbb1a5b3864d6cdda4d7ef7849d156d534dea30fb0efb9e4cf8959a2b2ce623905882d5430b995a15c3b9fe92906086788b891002924f94abe139b42cbbfaaabe42f00a0b65dc1a1ad27d798adbcb5b5ad02d2688c89477b03ff4eebb6f7b15a73b96a96bed201c0e5e4ea27e4c6e2dd1005b94d4b90137a5b1cf5e01c6226c070c4cc999938101578877ee76d296b9aab8246d57049caacf489e80a3f40589cade790a020b1ac146d6f7a6241184b8c7fcde680eae3188f5dcbe846d7f7bdad34f6fcfca08413e19c1d5df83fc7c7c627d493492e009c2f52a80400a2fe82de87136fd2e8845888c4431b032ba29d9a29a804277e31002a8201fb8591a3e55c7a0d0881496caf8b9fb07544a5a4879291d0dc026a0ea9e5bd88eb4aa4947bbd694b25012e208a250d65ddc6f1eea59d3aed3b4ec15fcab85e2afaa23a40ab1ef9ce3e11e1bc1c34a0e758e7aa64deb8739276df0af7d4121f834a9b88e7
0418010200090502533127a0021b02005209104ef7e4beccde97f047200419110200060502533127a0000a0910dbce4ee19529437fe045009c0b32f5ead48ee8a7e98fac0dea3d3e6c0e2c552500a0ad71fadc5007cfaf842d9b7db3335a8cdad15d3d1a6404009b08e2c68fe8f3b45c1bb72a4b3278cdf3012aa0f229883ad74aa1f6000bb90b18301b2f85372ca5d6b9bf478d235b733b1b197d19ccca48e9daf8e890cb64546b4ce1b178faccfff07003c172a2d4f5ebaba9f57153955f3f61a9b80a4f5cb959908f8b211b03b7026a8a82fc612bfedd3794969bcf458c4ce92be215a1176ab88d045331d144010400a5063000c5aaf34953c1aa3bfc95045b3aab9882b9a8027fecfe2142dc6b47ba8aca667399990244d513dd0504716908c17d92c65e74219e004f7b83fc125e575dd58efec3ab6dd22e3580106998523dea42ec75bf9aa111734c82df54630bebdff20fe981cfc36c76f865eb1c2fb62c9e85bc3a6e5015a361a2eb1c8431578d0011010001889f04280102000905025331d433021d03000a09104ef7e4beccde97f02e5503ff5e0630d1b65291f4882b6d40a29da4616bb5088717d469fbcc3648b8276de04a04988b1f1b9f3e18f52265c1f8b6c85861691c1a6b8a3a25a1809a0b32ad330aec5667cb4262f4450649184e8113849b05e5ad06a316ea80c001e8e71838190339a6e48bbde30647bcf245134b9a97fa875c1d83a9862cae87ffd7e2c4ce3a1b89013d04180102000905025331d144021b0200a809104ef7e4beccde97f09d2004190102000605025331d144000a0910677815e371c2fd23522203fe22ab62b8e7a151383cea3edd3a12995693911426f8ccf125e1f6426388c0010f88d9ca7da2224aee8d1c12135998640c5e1813d55a93df472faae75bef858457248db41b4505827590aeccf6f9eb646da7f980655dd3050c6897feddddaca90676dee856d66db8923477d251712bb9b3186b4d0114daf7d6b59272b53218dd1da94a03ff64006fcbe71211e5daecd9961fba66cdb6de3f914882c58ba5beddeba7dcb950c1156d7fba18c19ea880dccc800eae335deec34e3b84ac75ffa24864f782f87815cda1c0f634b3dd2fa67cea30811d21723d21d9551fa12ccbcfa62b6d3a15d01307b99925707992556d50065505b090aadb8579083a20fe65bd2a270da9b011"
+const missingCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
+Charset: UTF-8
+
+mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY
+ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG
+zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54
+QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ
+QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo
+9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu
+Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/
+dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R
+JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL
+ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew
+RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW
+/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu
+yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAJcXQeP+NmuciE99YcJoffxv
+2gVLU4ZXBNHEaP0mgaJ1+tmMD089vUQAcyGRvw8jfsNsVZQIOAuRxY94aHQhIRHR
+bUzBN28ofo/AJJtfx62C15xt6fDKRV6HXYqAiygrHIpEoRLyiN69iScUsjIJeyFL
+C8wa72e8pSL6dkHoaV1N9ZH/xmrJ+k0vsgkQaAh9CzYufncDxcwkoP+aOlGtX1gP
+WwWoIbz0JwLEMPHBWvDDXQcQPQTYQyj+LGC9U6f9VZHN25E94subM1MjuT9OhN9Y
+MLfWaaIc5WyhLFyQKW2Upofn9wSFi8ubyBnv640Dfd0rVmaWv7LNTZpoZ/GbJAMA
+EQEAAYkBHwQYAQIACQUCU5ygeQIbAgAKCRDt1A0FCB6SP0zCB/sEzaVR38vpx+OQ
+MMynCBJrakiqDmUZv9xtplY7zsHSQjpd6xGflbU2n+iX99Q+nav0ETQZifNUEd4N
+1ljDGQejcTyKD6Pkg6wBL3x9/RJye7Zszazm4+toJXZ8xJ3800+BtaPoI39akYJm
++ijzbskvN0v/j5GOFJwQO0pPRAFtdHqRs9Kf4YanxhedB4dIUblzlIJuKsxFit6N
+lgGRblagG3Vv2eBszbxzPbJjHCgVLR3RmrVezKOsZjr/2i7X+xLWIR0uD3IN1qOW
+CXQxLBizEEmSNVNxsp7KPGTLnqO3bPtqFirxS9PJLIMPTPLNBY7ZYuPNTMqVIUWF
+4artDmrG
+=7FfJ
+-----END PGP PUBLIC KEY BLOCK-----`
+
+const invalidCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY
+ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG
+zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54
+QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ
+QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo
+9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu
+Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/
+dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R
+JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL
+ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew
+RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW
+/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu
+yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAIINDqlj7X6jYKc6DjwrOkjQ
+UIRWbQQar0LwmNilehmt70g5DCL1SYm9q4LcgJJ2Nhxj0/5qqsYib50OSWMcKeEe
+iRXpXzv1ObpcQtI5ithp0gR53YPXBib80t3bUzomQ5UyZqAAHzMp3BKC54/vUrSK
+FeRaxDzNLrCeyI00+LHNUtwghAqHvdNcsIf8VRumK8oTm3RmDh0TyjASWYbrt9c8
+R1Um3zuoACOVy+mEIgIzsfHq0u7dwYwJB5+KeM7ZLx+HGIYdUYzHuUE1sLwVoELh
++SHIGHI1HDicOjzqgajShuIjj5hZTyQySVprrsLKiXS6NEwHAP20+XjayJ/R3tEA
+EQEAAYkCPgQYAQIBKAUCU5ygeQIbAsBdIAQZAQIABgUCU5ygeQAKCRCpVlnFZmhO
+52RJB/9uD1MSa0wjY6tHOIgquZcP3bHBvHmrHNMw9HR2wRCMO91ZkhrpdS3ZHtgb
+u3/55etj0FdvDo1tb8P8FGSVtO5Vcwf5APM8sbbqoi8L951Q3i7qt847lfhu6sMl
+w0LWFvPTOLHrliZHItPRjOltS1WAWfr2jUYhsU9ytaDAJmvf9DujxEOsN5G1YJep
+54JCKVCkM/y585Zcnn+yxk/XwqoNQ0/iJUT9qRrZWvoeasxhl1PQcwihCwss44A+
+YXaAt3hbk+6LEQuZoYS73yR3WHj+42tfm7YxRGeubXfgCEz/brETEWXMh4pe0vCL
+bfWrmfSPq2rDegYcAybxRQz0lF8PAAoJEO3UDQUIHpI/exkH/0vQfdHA8g/N4T6E
+i6b1CUVBAkvtdJpCATZjWPhXmShOw62gkDw306vHPilL4SCvEEi4KzG72zkp6VsB
+DSRcpxCwT4mHue+duiy53/aRMtSJ+vDfiV1Vhq+3sWAck/yUtfDU9/u4eFaiNok1
+8/Gd7reyuZt5CiJnpdPpjCwelK21l2w7sHAnJF55ITXdOxI8oG3BRKufz0z5lyDY
+s2tXYmhhQIggdgelN8LbcMhWs/PBbtUr6uZlNJG2lW1yscD4aI529VjwJlCeo745
+U7pO4eF05VViUJ2mmfoivL3tkhoTUWhx8xs8xCUcCg8DoEoSIhxtOmoTPR22Z9BL
+6LCg2mg=
+=Dhm4
+-----END PGP PUBLIC KEY BLOCK-----`
+
+const goodCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mI0EVUqeVwEEAMufHRrMPWK3gyvi0O0tABCs/oON9zV9KDZlr1a1M91ShCSFwCPo
+7r80PxdWVWcj0V5h50/CJYtpN3eE/mUIgW2z1uDYQF1OzrQ8ubrksfsJvpAhENom
+lTQEppv9mV8qhcM278teb7TX0pgrUHLYF5CfPdp1L957JLLXoQR/lwLVABEBAAG0
+E2dvb2Qtc2lnbmluZy1zdWJrZXmIuAQTAQIAIgUCVUqeVwIbAwYLCQgHAwIGFQgC
+CQoLBBYCAwECHgECF4AACgkQNRjL95IRWP69XQQAlH6+eyXJN4DZTLX78KGjHrsw
+6FCvxxClEPtPUjcJy/1KCRQmtLAt9PbbA78dvgzjDeZMZqRAwdjyJhjyg/fkU2OH
+7wq4ktjUu+dLcOBb+BFMEY+YjKZhf6EJuVfxoTVr5f82XNPbYHfTho9/OABKH6kv
+X70PaKZhbwnwij8Nts65AaIEVUqftREEAJ3WxZfqAX0bTDbQPf2CMT2IVMGDfhK7
+GyubOZgDFFjwUJQvHNvsrbeGLZ0xOBumLINyPO1amIfTgJNm1iiWFWfmnHReGcDl
+y5mpYG60Mb79Whdcer7CMm3AqYh/dW4g6IB02NwZMKoUHo3PXmFLxMKXnWyJ0clw
+R0LI/Qn509yXAKDh1SO20rqrBM+EAP2c5bfI98kyNwQAi3buu94qo3RR1ZbvfxgW
+CKXDVm6N99jdZGNK7FbRifXqzJJDLcXZKLnstnC4Sd3uyfyf1uFhmDLIQRryn5m+
+LBYHfDBPN3kdm7bsZDDq9GbTHiFZUfm/tChVKXWxkhpAmHhU/tH6GGzNSMXuIWSO
+aOz3Rqq0ED4NXyNKjdF9MiwD/i83S0ZBc0LmJYt4Z10jtH2B6tYdqnAK29uQaadx
+yZCX2scE09UIm32/w7pV77CKr1Cp/4OzAXS1tmFzQ+bX7DR+Gl8t4wxr57VeEMvl
+BGw4Vjh3X8//m3xynxycQU18Q1zJ6PkiMyPw2owZ/nss3hpSRKFJsxMLhW3fKmKr
+Ey2KiOcEGAECAAkFAlVKn7UCGwIAUgkQNRjL95IRWP5HIAQZEQIABgUCVUqftQAK
+CRD98VjDN10SqkWrAKDTpEY8D8HC02E/KVC5YUI01B30wgCgurpILm20kXEDCeHp
+C5pygfXw1DJrhAP+NyPJ4um/bU1I+rXaHHJYroYJs8YSweiNcwiHDQn0Engh/mVZ
+SqLHvbKh2dL/RXymC3+rjPvQf5cup9bPxNMa6WagdYBNAfzWGtkVISeaQW+cTEp/
+MtgVijRGXR/lGLGETPg2X3Afwn9N9bLMBkBprKgbBqU7lpaoPupxT61bL70=
+=vtbN
+-----END PGP PUBLIC KEY BLOCK-----`
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/compressed_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/compressed_test.go
new file mode 100644
index 000000000..cb2d70bd4
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/compressed_test.go
@@ -0,0 +1,41 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "encoding/hex"
+ "io"
+ "io/ioutil"
+ "testing"
+)
+
+func TestCompressed(t *testing.T) {
+ packet, err := Read(readerFromHex(compressedHex))
+ if err != nil {
+ t.Errorf("failed to read Compressed: %s", err)
+ return
+ }
+
+ c, ok := packet.(*Compressed)
+ if !ok {
+ t.Error("didn't find Compressed packet")
+ return
+ }
+
+ contents, err := ioutil.ReadAll(c.Body)
+ if err != nil && err != io.EOF {
+ t.Error(err)
+ return
+ }
+
+ expected, _ := hex.DecodeString(compressedExpectedHex)
+ if !bytes.Equal(expected, contents) {
+ t.Errorf("got:%x want:%x", contents, expected)
+ }
+}
+
+const compressedHex = "a3013b2d90c4e02b72e25f727e5e496a5e49b11e1700"
+const compressedExpectedHex = "cb1062004d14c8fe636f6e74656e74732e0a"
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go
new file mode 100644
index 000000000..fee14cf3c
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go
@@ -0,0 +1,146 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "crypto/rsa"
+ "encoding/hex"
+ "fmt"
+ "math/big"
+ "testing"
+)
+
+func bigFromBase10(s string) *big.Int {
+ b, ok := new(big.Int).SetString(s, 10)
+ if !ok {
+ panic("bigFromBase10 failed")
+ }
+ return b
+}
+
+var encryptedKeyPub = rsa.PublicKey{
+ E: 65537,
+ N: bigFromBase10("115804063926007623305902631768113868327816898845124614648849934718568541074358183759250136204762053879858102352159854352727097033322663029387610959884180306668628526686121021235757016368038585212410610742029286439607686208110250133174279811431933746643015923132833417396844716207301518956640020862630546868823"),
+}
+
+var encryptedKeyRSAPriv = &rsa.PrivateKey{
+ PublicKey: encryptedKeyPub,
+ D: bigFromBase10("32355588668219869544751561565313228297765464314098552250409557267371233892496951383426602439009993875125222579159850054973310859166139474359774543943714622292329487391199285040721944491839695981199720170366763547754915493640685849961780092241140181198779299712578774460837139360803883139311171713302987058393"),
+}
+
+var encryptedKeyPriv = &PrivateKey{
+ PublicKey: PublicKey{
+ PubKeyAlgo: PubKeyAlgoRSA,
+ },
+ PrivateKey: encryptedKeyRSAPriv,
+}
+
+func TestDecryptingEncryptedKey(t *testing.T) {
+ const encryptedKeyHex = "c18c032a67d68660df41c70104005789d0de26b6a50c985a02a13131ca829c413a35d0e6fa8d6842599252162808ac7439c72151c8c6183e76923fe3299301414d0c25a2f06a2257db3839e7df0ec964773f6e4c4ac7ff3b48c444237166dd46ba8ff443a5410dc670cb486672fdbe7c9dfafb75b4fea83af3a204fe2a7dfa86bd20122b4f3d2646cbeecb8f7be8"
+ const expectedKeyHex = "d930363f7e0308c333b9618617ea728963d8df993665ae7be1092d4926fd864b"
+
+ p, err := Read(readerFromHex(encryptedKeyHex))
+ if err != nil {
+ t.Errorf("error from Read: %s", err)
+ return
+ }
+ ek, ok := p.(*EncryptedKey)
+ if !ok {
+ t.Errorf("didn't parse an EncryptedKey, got %#v", p)
+ return
+ }
+
+ if ek.KeyId != 0x2a67d68660df41c7 || ek.Algo != PubKeyAlgoRSA {
+ t.Errorf("unexpected EncryptedKey contents: %#v", ek)
+ return
+ }
+
+ err = ek.Decrypt(encryptedKeyPriv, nil)
+ if err != nil {
+ t.Errorf("error from Decrypt: %s", err)
+ return
+ }
+
+ if ek.CipherFunc != CipherAES256 {
+ t.Errorf("unexpected EncryptedKey contents: %#v", ek)
+ return
+ }
+
+ keyHex := fmt.Sprintf("%x", ek.Key)
+ if keyHex != expectedKeyHex {
+ t.Errorf("bad key, got %s want %x", keyHex, expectedKeyHex)
+ }
+}
+
+func TestEncryptingEncryptedKey(t *testing.T) {
+ key := []byte{1, 2, 3, 4}
+ const expectedKeyHex = "01020304"
+ const keyId = 42
+
+ pub := &PublicKey{
+ PublicKey: &encryptedKeyPub,
+ KeyId: keyId,
+ PubKeyAlgo: PubKeyAlgoRSAEncryptOnly,
+ }
+
+ buf := new(bytes.Buffer)
+ err := SerializeEncryptedKey(buf, pub, CipherAES128, key, nil)
+ if err != nil {
+ t.Errorf("error writing encrypted key packet: %s", err)
+ }
+
+ p, err := Read(buf)
+ if err != nil {
+ t.Errorf("error from Read: %s", err)
+ return
+ }
+ ek, ok := p.(*EncryptedKey)
+ if !ok {
+ t.Errorf("didn't parse an EncryptedKey, got %#v", p)
+ return
+ }
+
+ if ek.KeyId != keyId || ek.Algo != PubKeyAlgoRSAEncryptOnly {
+ t.Errorf("unexpected EncryptedKey contents: %#v", ek)
+ return
+ }
+
+ err = ek.Decrypt(encryptedKeyPriv, nil)
+ if err != nil {
+ t.Errorf("error from Decrypt: %s", err)
+ return
+ }
+
+ if ek.CipherFunc != CipherAES128 {
+ t.Errorf("unexpected EncryptedKey contents: %#v", ek)
+ return
+ }
+
+ keyHex := fmt.Sprintf("%x", ek.Key)
+ if keyHex != expectedKeyHex {
+ t.Errorf("bad key, got %s want %x", keyHex, expectedKeyHex)
+ }
+}
+
+func TestSerializingEncryptedKey(t *testing.T) {
+ const encryptedKeyHex = "c18c032a67d68660df41c70104005789d0de26b6a50c985a02a13131ca829c413a35d0e6fa8d6842599252162808ac7439c72151c8c6183e76923fe3299301414d0c25a2f06a2257db3839e7df0ec964773f6e4c4ac7ff3b48c444237166dd46ba8ff443a5410dc670cb486672fdbe7c9dfafb75b4fea83af3a204fe2a7dfa86bd20122b4f3d2646cbeecb8f7be8"
+
+ p, err := Read(readerFromHex(encryptedKeyHex))
+ if err != nil {
+ t.Fatalf("error from Read: %s", err)
+ }
+ ek, ok := p.(*EncryptedKey)
+ if !ok {
+ t.Fatalf("didn't parse an EncryptedKey, got %#v", p)
+ }
+
+ var buf bytes.Buffer
+ ek.Serialize(&buf)
+
+ if bufHex := hex.EncodeToString(buf.Bytes()); bufHex != encryptedKeyHex {
+ t.Fatalf("serialization of encrypted key differed from original. Original was %s, but reserialized as %s", encryptedKeyHex, bufHex)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/ocfb_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/ocfb_test.go
new file mode 100644
index 000000000..91022c042
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/ocfb_test.go
@@ -0,0 +1,46 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/rand"
+ "testing"
+)
+
+var commonKey128 = []byte{0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c}
+
+func testOCFB(t *testing.T, resync OCFBResyncOption) {
+ block, err := aes.NewCipher(commonKey128)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ plaintext := []byte("this is the plaintext, which is long enough to span several blocks.")
+ randData := make([]byte, block.BlockSize())
+ rand.Reader.Read(randData)
+ ocfb, prefix := NewOCFBEncrypter(block, randData, resync)
+ ciphertext := make([]byte, len(plaintext))
+ ocfb.XORKeyStream(ciphertext, plaintext)
+
+ ocfbdec := NewOCFBDecrypter(block, prefix, resync)
+ if ocfbdec == nil {
+ t.Errorf("NewOCFBDecrypter failed (resync: %t)", resync)
+ return
+ }
+ plaintextCopy := make([]byte, len(plaintext))
+ ocfbdec.XORKeyStream(plaintextCopy, ciphertext)
+
+ if !bytes.Equal(plaintextCopy, plaintext) {
+ t.Errorf("got: %x, want: %x (resync: %t)", plaintextCopy, plaintext, resync)
+ }
+}
+
+func TestOCFB(t *testing.T) {
+ testOCFB(t, OCFBNoResync)
+ testOCFB(t, OCFBResync)
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/opaque_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/opaque_test.go
new file mode 100644
index 000000000..f27bbfe09
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/opaque_test.go
@@ -0,0 +1,67 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "encoding/hex"
+ "io"
+ "testing"
+)
+
+// Test packet.Read error handling in OpaquePacket.Parse,
+// which attempts to re-read an OpaquePacket as a supported
+// Packet type.
+func TestOpaqueParseReason(t *testing.T) {
+ buf, err := hex.DecodeString(UnsupportedKeyHex)
+ if err != nil {
+ t.Fatal(err)
+ }
+ or := NewOpaqueReader(bytes.NewBuffer(buf))
+ count := 0
+ badPackets := 0
+ var uid *UserId
+ for {
+ op, err := or.Next()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ t.Errorf("#%d: opaque read error: %v", count, err)
+ break
+ }
+ // try to parse opaque packet
+ p, err := op.Parse()
+ switch pkt := p.(type) {
+ case *UserId:
+ uid = pkt
+ case *OpaquePacket:
+ // If an OpaquePacket can't re-parse, packet.Read
+ // certainly had its reasons.
+ if pkt.Reason == nil {
+ t.Errorf("#%d: opaque packet, no reason", count)
+ } else {
+ badPackets++
+ }
+ }
+ count++
+ }
+
+ const expectedBad = 3
+ // Test post-conditions, make sure we actually parsed packets as expected.
+ if badPackets != expectedBad {
+ t.Errorf("unexpected # unparseable packets: %d (want %d)", badPackets, expectedBad)
+ }
+ if uid == nil {
+ t.Errorf("failed to find expected UID in unsupported keyring")
+ } else if uid.Id != "Armin M. Warda " {
+ t.Errorf("unexpected UID: %v", uid.Id)
+ }
+}
+
+// This key material has public key and signature packet versions modified to
+// an unsupported value (1), so that trying to parse the OpaquePacket to
+// a typed packet will get an error. It also contains a GnuPG trust packet.
+// (Created with: od -An -t x1 pubring.gpg | xargs | sed 's/ //g')
+const UnsupportedKeyHex = `988d012e7a18a20000010400d6ac00d92b89c1f4396c243abb9b76d2e9673ad63483291fed88e22b82e255e441c078c6abbbf7d2d195e50b62eeaa915b85b0ec20c225ce2c64c167cacb6e711daf2e45da4a8356a059b8160e3b3628ac0dd8437b31f06d53d6e8ea4214d4a26406a6b63e1001406ef23e0bb3069fac9a99a91f77dfafd5de0f188a5da5e3c9000511b42741726d696e204d2e205761726461203c7761726461406e657068696c696d2e727568722e64653e8900950105102e8936c705d1eb399e58489901013f0e03ff5a0c4f421e34fcfa388129166420c08cd76987bcdec6f01bd0271459a85cc22048820dd4e44ac2c7d23908d540f54facf1b36b0d9c20488781ce9dca856531e76e2e846826e9951338020a03a09b57aa5faa82e9267458bd76105399885ac35af7dc1cbb6aaed7c39e1039f3b5beda2c0e916bd38560509bab81235d1a0ead83b0020000`
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/packet_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/packet_test.go
new file mode 100644
index 000000000..1dab5c3d5
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/packet_test.go
@@ -0,0 +1,255 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "golang.org/x/crypto/openpgp/errors"
+ "io"
+ "io/ioutil"
+ "testing"
+)
+
+func TestReadFull(t *testing.T) {
+ var out [4]byte
+
+ b := bytes.NewBufferString("foo")
+ n, err := readFull(b, out[:3])
+ if n != 3 || err != nil {
+ t.Errorf("full read failed n:%d err:%s", n, err)
+ }
+
+ b = bytes.NewBufferString("foo")
+ n, err = readFull(b, out[:4])
+ if n != 3 || err != io.ErrUnexpectedEOF {
+ t.Errorf("partial read failed n:%d err:%s", n, err)
+ }
+
+ b = bytes.NewBuffer(nil)
+ n, err = readFull(b, out[:3])
+ if n != 0 || err != io.ErrUnexpectedEOF {
+ t.Errorf("empty read failed n:%d err:%s", n, err)
+ }
+}
+
+func readerFromHex(s string) io.Reader {
+ data, err := hex.DecodeString(s)
+ if err != nil {
+ panic("readerFromHex: bad input")
+ }
+ return bytes.NewBuffer(data)
+}
+
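+// readLengthTests exercises the new-format length encoding of RFC 4880,
+// section 4.2.2: a first octet below 192 is the length itself, octets
+// 192..223 begin a two-octet length ((first-192)<<8 + second + 192), 255
+// introduces a four-octet length, and 224..254 mark a partial body length
+// of 1 << (octet & 0x1f). For example, "c101" decodes to (0xc1-192)<<8 +
+// 0x01 + 192, which the table spells out as 256 + 1 + 192.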
+var readLengthTests = []struct {
+ hexInput string
+ length int64
+ isPartial bool
+ err error
+}{
+ {"", 0, false, io.ErrUnexpectedEOF},
+ {"1f", 31, false, nil},
+ {"c0", 0, false, io.ErrUnexpectedEOF},
+ {"c101", 256 + 1 + 192, false, nil},
+ {"e0", 1, true, nil},
+ {"e1", 2, true, nil},
+ {"e2", 4, true, nil},
+ {"ff", 0, false, io.ErrUnexpectedEOF},
+ {"ff00", 0, false, io.ErrUnexpectedEOF},
+ {"ff0000", 0, false, io.ErrUnexpectedEOF},
+ {"ff000000", 0, false, io.ErrUnexpectedEOF},
+ {"ff00000000", 0, false, nil},
+ {"ff01020304", 16909060, false, nil},
+}
+
+func TestReadLength(t *testing.T) {
+ for i, test := range readLengthTests {
+ length, isPartial, err := readLength(readerFromHex(test.hexInput))
+ if test.err != nil {
+ if err != test.err {
+ t.Errorf("%d: expected different error got:%s want:%s", i, err, test.err)
+ }
+ continue
+ }
+ if err != nil {
+ t.Errorf("%d: unexpected error: %s", i, err)
+ continue
+ }
+ if length != test.length || isPartial != test.isPartial {
+ t.Errorf("%d: bad result got:(%d,%t) want:(%d,%t)", i, length, isPartial, test.length, test.isPartial)
+ }
+ }
+}
+
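+// partialLengthReaderTests feeds partialLengthReader streams built from
+// partial body length chunks. "e10102e1030400" is two 2-byte chunks ("0102"
+// and "0304") followed by a terminating zero-length chunk, so the
+// reassembled body is "01020304".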
+var partialLengthReaderTests = []struct {
+ hexInput string
+ err error
+ hexOutput string
+}{
+ {"e0", io.ErrUnexpectedEOF, ""},
+ {"e001", io.ErrUnexpectedEOF, ""},
+ {"e0010102", nil, "0102"},
+ {"ff00000000", nil, ""},
+ {"e10102e1030400", nil, "01020304"},
+ {"e101", io.ErrUnexpectedEOF, ""},
+}
+
+func TestPartialLengthReader(t *testing.T) {
+ for i, test := range partialLengthReaderTests {
+ r := &partialLengthReader{readerFromHex(test.hexInput), 0, true}
+ out, err := ioutil.ReadAll(r)
+ if test.err != nil {
+ if err != test.err {
+ t.Errorf("%d: expected different error got:%s want:%s", i, err, test.err)
+ }
+ continue
+ }
+ if err != nil {
+ t.Errorf("%d: unexpected error: %s", i, err)
+ continue
+ }
+
+ got := fmt.Sprintf("%x", out)
+ if got != test.hexOutput {
+ t.Errorf("%d: got:%s want:%s", i, test.hexOutput, got)
+ }
+ }
+}
+
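+// readHeaderTests covers both header formats of RFC 4880, section 4.2.
+// Old-format headers (0x80..0xbf) keep the tag in bits 5..2 and a length
+// type in bits 1..0, with type 3 meaning indeterminate length (reported as
+// -1 here). New-format headers (0xc0..0xff) keep the tag in bits 5..0 and
+// are followed by a new-format length. "7f" has bit 7 clear and is not a
+// valid header at all, hence the expected StructuralError.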
+var readHeaderTests = []struct {
+ hexInput string
+ structuralError bool
+ unexpectedEOF bool
+ tag int
+ length int64
+ hexOutput string
+}{
+ {"", false, false, 0, 0, ""},
+ {"7f", true, false, 0, 0, ""},
+
+ // Old format headers
+ {"80", false, true, 0, 0, ""},
+ {"8001", false, true, 0, 1, ""},
+ {"800102", false, false, 0, 1, "02"},
+ {"81000102", false, false, 0, 1, "02"},
+ {"820000000102", false, false, 0, 1, "02"},
+ {"860000000102", false, false, 1, 1, "02"},
+ {"83010203", false, false, 0, -1, "010203"},
+
+ // New format headers
+ {"c0", false, true, 0, 0, ""},
+ {"c000", false, false, 0, 0, ""},
+ {"c00102", false, false, 0, 1, "02"},
+ {"c0020203", false, false, 0, 2, "0203"},
+ {"c00202", false, true, 0, 2, ""},
+ {"c3020203", false, false, 3, 2, "0203"},
+}
+
+func TestReadHeader(t *testing.T) {
+ for i, test := range readHeaderTests {
+ tag, length, contents, err := readHeader(readerFromHex(test.hexInput))
+ if test.structuralError {
+ if _, ok := err.(errors.StructuralError); ok {
+ continue
+ }
+ t.Errorf("%d: expected StructuralError, got:%s", i, err)
+ continue
+ }
+ if err != nil {
+ if len(test.hexInput) == 0 && err == io.EOF {
+ continue
+ }
+ if !test.unexpectedEOF || err != io.ErrUnexpectedEOF {
+ t.Errorf("%d: unexpected error from readHeader: %s", i, err)
+ }
+ continue
+ }
+ if int(tag) != test.tag || length != test.length {
+ t.Errorf("%d: got:(%d,%d) want:(%d,%d)", i, int(tag), length, test.tag, test.length)
+ continue
+ }
+
+ body, err := ioutil.ReadAll(contents)
+ if err != nil {
+ if !test.unexpectedEOF || err != io.ErrUnexpectedEOF {
+ t.Errorf("%d: unexpected error from contents: %s", i, err)
+ }
+ continue
+ }
+ if test.unexpectedEOF {
+ t.Errorf("%d: expected ErrUnexpectedEOF from contents but got no error", i)
+ continue
+ }
+ got := fmt.Sprintf("%x", body)
+ if got != test.hexOutput {
+ t.Errorf("%d: got:%s want:%s", i, got, test.hexOutput)
+ }
+ }
+}
+
+func TestSerializeHeader(t *testing.T) {
+ tag := packetTypePublicKey
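+ // These lengths straddle the encoding boundaries: values below 192 use a
+ // one-octet length, 192..8383 a two-octet length, and larger values the
+ // five-octet form, so every length encoding is round-tripped.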
+ lengths := []int{0, 1, 2, 64, 192, 193, 8000, 8384, 8385, 10000}
+
+ for _, length := range lengths {
+ buf := bytes.NewBuffer(nil)
+ serializeHeader(buf, tag, length)
+ tag2, length2, _, err := readHeader(buf)
+ if err != nil {
+ t.Errorf("length %d, err: %s", length, err)
+ }
+ if tag2 != tag {
+ t.Errorf("length %d, tag incorrect (got %d, want %d)", length, tag2, tag)
+ }
+ if int(length2) != length {
+ t.Errorf("length %d, length incorrect (got %d)", length, length2)
+ }
+ }
+}
+
+func TestPartialLengths(t *testing.T) {
+ buf := bytes.NewBuffer(nil)
+ w := new(partialLengthWriter)
+ w.w = noOpCloser{buf}
+
+ const maxChunkSize = 64
+
+ var b [maxChunkSize]byte
+ var n uint8
+ for l := 1; l <= maxChunkSize; l++ {
+ for i := 0; i < l; i++ {
+ b[i] = n
+ n++
+ }
+ m, err := w.Write(b[:l])
+ if m != l {
+ t.Errorf("short write got: %d want: %d", m, l)
+ }
+ if err != nil {
+ t.Errorf("error from write: %s", err)
+ }
+ }
+ w.Close()
+
+ want := (maxChunkSize * (maxChunkSize + 1)) / 2
+ copyBuf := bytes.NewBuffer(nil)
+ r := &partialLengthReader{buf, 0, true}
+ m, err := io.Copy(copyBuf, r)
+ if m != int64(want) {
+ t.Errorf("short copy got: %d want: %d", m, want)
+ }
+ if err != nil {
+ t.Errorf("error from copy: %s", err)
+ }
+
+ copyBytes := copyBuf.Bytes()
+ for i := 0; i < want; i++ {
+ if copyBytes[i] != uint8(i) {
+ t.Errorf("bad pattern in copy at %d", i)
+ break
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/private_key_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/private_key_test.go
new file mode 100644
index 000000000..25c8931f2
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/private_key_test.go
@@ -0,0 +1,69 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "testing"
+ "time"
+)
+
+var privateKeyTests = []struct {
+ privateKeyHex string
+ creationTime time.Time
+}{
+ {
+ privKeyRSAHex,
+ time.Unix(0x4cc349a8, 0),
+ },
+ {
+ privKeyElGamalHex,
+ time.Unix(0x4df9ee1a, 0),
+ },
+}
+
+func TestPrivateKeyRead(t *testing.T) {
+ for i, test := range privateKeyTests {
+ packet, err := Read(readerFromHex(test.privateKeyHex))
+ if err != nil {
+ t.Errorf("#%d: failed to parse: %s", i, err)
+ continue
+ }
+
+ privKey := packet.(*PrivateKey)
+
+ if !privKey.Encrypted {
+ t.Errorf("#%d: private key isn't encrypted", i)
+ continue
+ }
+
+ err = privKey.Decrypt([]byte("wrong password"))
+ if err == nil {
+ t.Errorf("#%d: decrypted with incorrect key", i)
+ continue
+ }
+
+ err = privKey.Decrypt([]byte("testing"))
+ if err != nil {
+ t.Errorf("#%d: failed to decrypt: %s", i, err)
+ continue
+ }
+
+ if !privKey.CreationTime.Equal(test.creationTime) || privKey.Encrypted {
+ t.Errorf("#%d: bad result, got: %#v", i, privKey)
+ }
+ }
+}
+
+func TestIssue11505(t *testing.T) {
+ // parsing a rsa private key with p or q == 1 used to panic due to a divide by zero
+ _, _ = Read(readerFromHex("9c3004303030300100000011303030000000000000010130303030303030303030303030303030303030303030303030303030303030303030303030303030303030"))
+}
+
+// Generated with `gpg --export-secret-keys "Test Key 2"`
+const privKeyRSAHex = "9501fe044cc349a8010400b70ca0010e98c090008d45d1ee8f9113bd5861fd57b88bacb7c68658747663f1e1a3b5a98f32fda6472373c024b97359cd2efc88ff60f77751adfbf6af5e615e6a1408cfad8bf0cea30b0d5f53aa27ad59089ba9b15b7ebc2777a25d7b436144027e3bcd203909f147d0e332b240cf63d3395f5dfe0df0a6c04e8655af7eacdf0011010001fe0303024a252e7d475fd445607de39a265472aa74a9320ba2dac395faa687e9e0336aeb7e9a7397e511b5afd9dc84557c80ac0f3d4d7bfec5ae16f20d41c8c84a04552a33870b930420e230e179564f6d19bb153145e76c33ae993886c388832b0fa042ddda7f133924f3854481533e0ede31d51278c0519b29abc3bf53da673e13e3e1214b52413d179d7f66deee35cac8eacb060f78379d70ef4af8607e68131ff529439668fc39c9ce6dfef8a5ac234d234802cbfb749a26107db26406213ae5c06d4673253a3cbee1fcbae58d6ab77e38d6e2c0e7c6317c48e054edadb5a40d0d48acb44643d998139a8a66bb820be1f3f80185bc777d14b5954b60effe2448a036d565c6bc0b915fcea518acdd20ab07bc1529f561c58cd044f723109b93f6fd99f876ff891d64306b5d08f48bab59f38695e9109c4dec34013ba3153488ce070268381ba923ee1eb77125b36afcb4347ec3478c8f2735b06ef17351d872e577fa95d0c397c88c71b59629a36aec"
+
+// Generated by `gpg --export-secret-keys` followed by a manual extraction of
+// the ElGamal subkey from the packets.
+const privKeyElGamalHex = "9d0157044df9ee1a100400eb8e136a58ec39b582629cdadf830bc64e0a94ed8103ca8bb247b27b11b46d1d25297ef4bcc3071785ba0c0bedfe89eabc5287fcc0edf81ab5896c1c8e4b20d27d79813c7aede75320b33eaeeaa586edc00fd1036c10133e6ba0ff277245d0d59d04b2b3421b7244aca5f4a8d870c6f1c1fbff9e1c26699a860b9504f35ca1d700030503fd1ededd3b840795be6d9ccbe3c51ee42e2f39233c432b831ddd9c4e72b7025a819317e47bf94f9ee316d7273b05d5fcf2999c3a681f519b1234bbfa6d359b4752bd9c3f77d6b6456cde152464763414ca130f4e91d91041432f90620fec0e6d6b5116076c2985d5aeaae13be492b9b329efcaf7ee25120159a0a30cd976b42d7afe030302dae7eb80db744d4960c4df930d57e87fe81412eaace9f900e6c839817a614ddb75ba6603b9417c33ea7b6c93967dfa2bcff3fa3c74a5ce2c962db65b03aece14c96cbd0038fc"
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/public_key_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/public_key_test.go
new file mode 100644
index 000000000..7ad7d9185
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/public_key_test.go
@@ -0,0 +1,202 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "encoding/hex"
+ "testing"
+ "time"
+)
+
+var pubKeyTests = []struct {
+ hexData string
+ hexFingerprint string
+ creationTime time.Time
+ pubKeyAlgo PublicKeyAlgorithm
+ keyId uint64
+ keyIdString string
+ keyIdShort string
+}{
+ {rsaPkDataHex, rsaFingerprintHex, time.Unix(0x4d3c5c10, 0), PubKeyAlgoRSA, 0xa34d7e18c20c31bb, "A34D7E18C20C31BB", "C20C31BB"},
+ {dsaPkDataHex, dsaFingerprintHex, time.Unix(0x4d432f89, 0), PubKeyAlgoDSA, 0x8e8fbe54062f19ed, "8E8FBE54062F19ED", "062F19ED"},
+ {ecdsaPkDataHex, ecdsaFingerprintHex, time.Unix(0x5071c294, 0), PubKeyAlgoECDSA, 0x43fe956c542ca00b, "43FE956C542CA00B", "542CA00B"},
+}
+
+func TestPublicKeyRead(t *testing.T) {
+ for i, test := range pubKeyTests {
+ packet, err := Read(readerFromHex(test.hexData))
+ if err != nil {
+ t.Errorf("#%d: Read error: %s", i, err)
+ continue
+ }
+ pk, ok := packet.(*PublicKey)
+ if !ok {
+ t.Errorf("#%d: failed to parse, got: %#v", i, packet)
+ continue
+ }
+ if pk.PubKeyAlgo != test.pubKeyAlgo {
+ t.Errorf("#%d: bad public key algorithm got:%x want:%x", i, pk.PubKeyAlgo, test.pubKeyAlgo)
+ }
+ if !pk.CreationTime.Equal(test.creationTime) {
+ t.Errorf("#%d: bad creation time got:%v want:%v", i, pk.CreationTime, test.creationTime)
+ }
+ expectedFingerprint, _ := hex.DecodeString(test.hexFingerprint)
+ if !bytes.Equal(expectedFingerprint, pk.Fingerprint[:]) {
+ t.Errorf("#%d: bad fingerprint got:%x want:%x", i, pk.Fingerprint[:], expectedFingerprint)
+ }
+ if pk.KeyId != test.keyId {
+ t.Errorf("#%d: bad keyid got:%x want:%x", i, pk.KeyId, test.keyId)
+ }
+ if g, e := pk.KeyIdString(), test.keyIdString; g != e {
+ t.Errorf("#%d: bad KeyIdString got:%q want:%q", i, g, e)
+ }
+ if g, e := pk.KeyIdShortString(), test.keyIdShort; g != e {
+ t.Errorf("#%d: bad KeyIdShortString got:%q want:%q", i, g, e)
+ }
+ }
+}
+
+func TestPublicKeySerialize(t *testing.T) {
+ for i, test := range pubKeyTests {
+ packet, err := Read(readerFromHex(test.hexData))
+ if err != nil {
+ t.Errorf("#%d: Read error: %s", i, err)
+ continue
+ }
+ pk, ok := packet.(*PublicKey)
+ if !ok {
+ t.Errorf("#%d: failed to parse, got: %#v", i, packet)
+ continue
+ }
+ serializeBuf := bytes.NewBuffer(nil)
+ err = pk.Serialize(serializeBuf)
+ if err != nil {
+ t.Errorf("#%d: failed to serialize: %s", i, err)
+ continue
+ }
+
+ packet, err = Read(serializeBuf)
+ if err != nil {
+ t.Errorf("#%d: Read error (from serialized data): %s", i, err)
+ continue
+ }
+ pk, ok = packet.(*PublicKey)
+ if !ok {
+ t.Errorf("#%d: failed to parse serialized data, got: %#v", i, packet)
+ continue
+ }
+ }
+}
+
+func TestEcc384Serialize(t *testing.T) {
+ r := readerFromHex(ecc384PubHex)
+ var w bytes.Buffer
+ for i := 0; i < 2; i++ {
+ // Public key
+ p, err := Read(r)
+ if err != nil {
+ t.Error(err)
+ }
+ pubkey := p.(*PublicKey)
+ if !bytes.Equal(pubkey.ec.oid, []byte{0x2b, 0x81, 0x04, 0x00, 0x22}) {
+ t.Errorf("Unexpected pubkey OID: %x", pubkey.ec.oid)
+ }
+ if !bytes.Equal(pubkey.ec.p.bytes[:5], []byte{0x04, 0xf6, 0xb8, 0xc5, 0xac}) {
+ t.Errorf("Unexpected pubkey P[:5]: %x", pubkey.ec.p.bytes)
+ }
+ if pubkey.KeyId != 0x098033880F54719F {
+ t.Errorf("Unexpected pubkey ID: %x", pubkey.KeyId)
+ }
+ err = pubkey.Serialize(&w)
+ if err != nil {
+ t.Error(err)
+ }
+ // User ID
+ p, err = Read(r)
+ if err != nil {
+ t.Error(err)
+ }
+ uid := p.(*UserId)
+ if uid.Id != "ec_dsa_dh_384 " {
+ t.Error("Unexpected UID:", uid.Id)
+ }
+ err = uid.Serialize(&w)
+ if err != nil {
+ t.Error(err)
+ }
+ // User ID Sig
+ p, err = Read(r)
+ if err != nil {
+ t.Error(err)
+ }
+ uidSig := p.(*Signature)
+ err = pubkey.VerifyUserIdSignature(uid.Id, pubkey, uidSig)
+ if err != nil {
+ t.Error(err, ": UID")
+ }
+ err = uidSig.Serialize(&w)
+ if err != nil {
+ t.Error(err)
+ }
+ // Subkey
+ p, err = Read(r)
+ if err != nil {
+ t.Error(err)
+ }
+ subkey := p.(*PublicKey)
+ if !bytes.Equal(subkey.ec.oid, []byte{0x2b, 0x81, 0x04, 0x00, 0x22}) {
+ t.Errorf("Unexpected subkey OID: %x", subkey.ec.oid)
+ }
+ if !bytes.Equal(subkey.ec.p.bytes[:5], []byte{0x04, 0x2f, 0xaa, 0x84, 0x02}) {
+ t.Errorf("Unexpected subkey P[:5]: %x", subkey.ec.p.bytes)
+ }
+ if subkey.ecdh.KdfHash != 0x09 {
+ t.Error("Expected KDF hash function SHA384 (0x09), got", subkey.ecdh.KdfHash)
+ }
+ if subkey.ecdh.KdfAlgo != 0x09 {
+ t.Error("Expected KDF symmetric alg AES256 (0x09), got", subkey.ecdh.KdfAlgo)
+ }
+ if subkey.KeyId != 0xAA8B938F9A201946 {
+ t.Errorf("Unexpected subkey ID: %x", subkey.KeyId)
+ }
+ err = subkey.Serialize(&w)
+ if err != nil {
+ t.Error(err)
+ }
+ // Subkey Sig
+ p, err = Read(r)
+ if err != nil {
+ t.Error(err)
+ }
+ subkeySig := p.(*Signature)
+ err = pubkey.VerifyKeySignature(subkey, subkeySig)
+ if err != nil {
+ t.Error(err)
+ }
+ err = subkeySig.Serialize(&w)
+ if err != nil {
+ t.Error(err)
+ }
+ // Now read back what we've written again
+ r = bytes.NewBuffer(w.Bytes())
+ w.Reset()
+ }
+}
+
+const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb"
+
+const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001"
+
+const dsaFingerprintHex = "eece4c094db002103714c63c8e8fbe54062f19ed"
+
+const dsaPkDataHex = "9901a2044d432f89110400cd581334f0d7a1e1bdc8b9d6d8c0baf68793632735d2bb0903224cbaa1dfbf35a60ee7a13b92643421e1eb41aa8d79bea19a115a677f6b8ba3c7818ce53a6c2a24a1608bd8b8d6e55c5090cbde09dd26e356267465ae25e69ec8bdd57c7bbb2623e4d73336f73a0a9098f7f16da2e25252130fd694c0e8070c55a812a423ae7f00a0ebf50e70c2f19c3520a551bd4b08d30f23530d3d03ff7d0bf4a53a64a09dc5e6e6e35854b7d70c882b0c60293401958b1bd9e40abec3ea05ba87cf64899299d4bd6aa7f459c201d3fbbd6c82004bdc5e8a9eb8082d12054cc90fa9d4ec251a843236a588bf49552441817436c4f43326966fe85447d4e6d0acf8fa1ef0f014730770603ad7634c3088dc52501c237328417c31c89ed70400b2f1a98b0bf42f11fefc430704bebbaa41d9f355600c3facee1e490f64208e0e094ea55e3a598a219a58500bf78ac677b670a14f4e47e9cf8eab4f368cc1ddcaa18cc59309d4cc62dd4f680e73e6cc3e1ce87a84d0925efbcb26c575c093fc42eecf45135fabf6403a25c2016e1774c0484e440a18319072c617cc97ac0a3bb0"
+
+const ecdsaFingerprintHex = "9892270b38b8980b05c8d56d43fe956c542ca00b"
+
+const ecdsaPkDataHex = "9893045071c29413052b8104002304230401f4867769cedfa52c325018896245443968e52e51d0c2df8d939949cb5b330f2921711fbee1c9b9dddb95d15cb0255e99badeddda7cc23d9ddcaacbc290969b9f24019375d61c2e4e3b36953a28d8b2bc95f78c3f1d592fb24499be348656a7b17e3963187b4361afe497bc5f9f81213f04069f8e1fb9e6a6290ae295ca1a92b894396cb4"
+
+// Source: https://sites.google.com/site/brainhub/pgpecckeys#TOC-ECC-NIST-P-384-key
+const ecc384PubHex = `99006f044d53059213052b81040022030304f6b8c5aced5b84ef9f4a209db2e4a9dfb70d28cb8c10ecd57674a9fa5a67389942b62d5e51367df4c7bfd3f8e500feecf07ed265a621a8ebbbe53e947ec78c677eba143bd1533c2b350e1c29f82313e1e1108eba063be1e64b10e6950e799c2db42465635f6473615f64685f333834203c6f70656e70677040627261696e6875622e6f72673e8900cb04101309005305024d530592301480000000002000077072656665727265642d656d61696c2d656e636f64696e67407067702e636f6d7067706d696d65040b090807021901051b03000000021602051e010000000415090a08000a0910098033880f54719fca2b0180aa37350968bd5f115afd8ce7bc7b103822152dbff06d0afcda835329510905b98cb469ba208faab87c7412b799e7b633017f58364ea480e8a1a3f253a0c5f22c446e8be9a9fce6210136ee30811abbd49139de28b5bdf8dc36d06ae748579e9ff503b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec9180301090989008404181309000c05024d530592051b0c000000000a0910098033880f54719f80970180eee7a6d8fcee41ee4f9289df17f9bcf9d955dca25c583b94336f3a2b2d4986dc5cf417b8d2dc86f741a9e1a6d236c0e3017d1c76575458a0cfb93ae8a2b274fcc65ceecd7a91eec83656ba13219969f06945b48c56bd04152c3a0553c5f2f4bd1267`
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/public_key_v3_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/public_key_v3_test.go
new file mode 100644
index 000000000..e06405904
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/public_key_v3_test.go
@@ -0,0 +1,82 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "encoding/hex"
+ "testing"
+ "time"
+)
+
+var pubKeyV3Test = struct {
+ hexFingerprint string
+ creationTime time.Time
+ pubKeyAlgo PublicKeyAlgorithm
+ keyId uint64
+ keyIdString string
+ keyIdShort string
+}{
+ "103BECF5BD1E837C89D19E98487767F7",
+ time.Unix(779753634, 0),
+ PubKeyAlgoRSA,
+ 0xDE0F188A5DA5E3C9,
+ "DE0F188A5DA5E3C9",
+ "5DA5E3C9"}
+
+func TestPublicKeyV3Read(t *testing.T) {
+ i, test := 0, pubKeyV3Test
+ packet, err := Read(v3KeyReader(t))
+ if err != nil {
+ t.Fatalf("#%d: Read error: %s", i, err)
+ }
+ pk, ok := packet.(*PublicKeyV3)
+ if !ok {
+ t.Fatalf("#%d: failed to parse, got: %#v", i, packet)
+ }
+ if pk.PubKeyAlgo != test.pubKeyAlgo {
+ t.Errorf("#%d: bad public key algorithm got:%x want:%x", i, pk.PubKeyAlgo, test.pubKeyAlgo)
+ }
+ if !pk.CreationTime.Equal(test.creationTime) {
+ t.Errorf("#%d: bad creation time got:%v want:%v", i, pk.CreationTime, test.creationTime)
+ }
+ expectedFingerprint, _ := hex.DecodeString(test.hexFingerprint)
+ if !bytes.Equal(expectedFingerprint, pk.Fingerprint[:]) {
+ t.Errorf("#%d: bad fingerprint got:%x want:%x", i, pk.Fingerprint[:], expectedFingerprint)
+ }
+ if pk.KeyId != test.keyId {
+ t.Errorf("#%d: bad keyid got:%x want:%x", i, pk.KeyId, test.keyId)
+ }
+ if g, e := pk.KeyIdString(), test.keyIdString; g != e {
+ t.Errorf("#%d: bad KeyIdString got:%q want:%q", i, g, e)
+ }
+ if g, e := pk.KeyIdShortString(), test.keyIdShort; g != e {
+ t.Errorf("#%d: bad KeyIdShortString got:%q want:%q", i, g, e)
+ }
+}
+
+func TestPublicKeyV3Serialize(t *testing.T) {
+ //for i, test := range pubKeyV3Tests {
+ i := 0
+ packet, err := Read(v3KeyReader(t))
+ if err != nil {
+ t.Fatalf("#%d: Read error: %s", i, err)
+ }
+ pk, ok := packet.(*PublicKeyV3)
+ if !ok {
+ t.Fatalf("#%d: failed to parse, got: %#v", i, packet)
+ }
+ var serializeBuf bytes.Buffer
+ if err = pk.Serialize(&serializeBuf); err != nil {
+ t.Fatalf("#%d: failed to serialize: %s", i, err)
+ }
+
+ if packet, err = Read(bytes.NewBuffer(serializeBuf.Bytes())); err != nil {
+ t.Fatalf("#%d: Read error (from serialized data): %s", i, err)
+ }
+ if pk, ok = packet.(*PublicKeyV3); !ok {
+ t.Fatalf("#%d: failed to parse serialized data, got: %#v", i, packet)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/signature_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/signature_test.go
new file mode 100644
index 000000000..c1bbde8b0
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/signature_test.go
@@ -0,0 +1,42 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "crypto"
+ "encoding/hex"
+ "testing"
+)
+
+func TestSignatureRead(t *testing.T) {
+ packet, err := Read(readerFromHex(signatureDataHex))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ sig, ok := packet.(*Signature)
+ if !ok || sig.SigType != SigTypeBinary || sig.PubKeyAlgo != PubKeyAlgoRSA || sig.Hash != crypto.SHA1 {
+ t.Errorf("failed to parse, got: %#v", packet)
+ }
+}
+
+func TestSignatureReserialize(t *testing.T) {
+ packet, _ := Read(readerFromHex(signatureDataHex))
+ sig := packet.(*Signature)
+ out := new(bytes.Buffer)
+ err := sig.Serialize(out)
+ if err != nil {
+ t.Errorf("error reserializing: %s", err)
+ return
+ }
+
+ expected, _ := hex.DecodeString(signatureDataHex)
+ if !bytes.Equal(expected, out.Bytes()) {
+ t.Errorf("output doesn't match input (got vs expected):\n%s\n%s", hex.Dump(out.Bytes()), hex.Dump(expected))
+ }
+}
+
+const signatureDataHex = "c2c05c04000102000605024cb45112000a0910ab105c91af38fb158f8d07ff5596ea368c5efe015bed6e78348c0f033c931d5f2ce5db54ce7f2a7e4b4ad64db758d65a7a71773edeab7ba2a9e0908e6a94a1175edd86c1d843279f045b021a6971a72702fcbd650efc393c5474d5b59a15f96d2eaad4c4c426797e0dcca2803ef41c6ff234d403eec38f31d610c344c06f2401c262f0993b2e66cad8a81ebc4322c723e0d4ba09fe917e8777658307ad8329adacba821420741009dfe87f007759f0982275d028a392c6ed983a0d846f890b36148c7358bdb8a516007fac760261ecd06076813831a36d0459075d1befa245ae7f7fb103d92ca759e9498fe60ef8078a39a3beda510deea251ea9f0a7f0df6ef42060f20780360686f3e400e"
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/signature_v3_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/signature_v3_test.go
new file mode 100644
index 000000000..ad7b62ac1
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/signature_v3_test.go
@@ -0,0 +1,92 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "crypto"
+ "encoding/hex"
+ "io"
+ "io/ioutil"
+ "testing"
+
+ "golang.org/x/crypto/openpgp/armor"
+)
+
+func TestSignatureV3Read(t *testing.T) {
+ r := v3KeyReader(t)
+ Read(r) // Skip public key
+ Read(r) // Skip uid
+ packet, err := Read(r) // Signature
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ sig, ok := packet.(*SignatureV3)
+ if !ok || sig.SigType != SigTypeGenericCert || sig.PubKeyAlgo != PubKeyAlgoRSA || sig.Hash != crypto.MD5 {
+ t.Errorf("failed to parse, got: %#v", packet)
+ }
+}
+
+func TestSignatureV3Reserialize(t *testing.T) {
+ r := v3KeyReader(t)
+ Read(r) // Skip public key
+ Read(r) // Skip uid
+ packet, err := Read(r)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ sig := packet.(*SignatureV3)
+ out := new(bytes.Buffer)
+ if err = sig.Serialize(out); err != nil {
+ t.Errorf("error reserializing: %s", err)
+ return
+ }
+ expected, err := ioutil.ReadAll(v3KeyReader(t))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ expected = expected[4+141+4+39:] // See pgpdump offsets below, this is where the sig starts
+ if !bytes.Equal(expected, out.Bytes()) {
+ t.Errorf("output doesn't match input (got vs expected):\n%s\n%s", hex.Dump(out.Bytes()), hex.Dump(expected))
+ }
+}
+
+func v3KeyReader(t *testing.T) io.Reader {
+ armorBlock, err := armor.Decode(bytes.NewBufferString(keySigV3Armor))
+ if err != nil {
+ t.Fatalf("armor Decode failed: %v", err)
+ }
+ return armorBlock.Body
+}
+
+// keySigV3Armor is some V3 public key I found in an SKS dump.
+// Old: Public Key Packet(tag 6)(141 bytes)
+// Ver 4 - new
+// Public key creation time - Fri Sep 16 17:13:54 CDT 1994
+// Pub alg - unknown(pub 0)
+// Unknown public key(pub 0)
+// Old: User ID Packet(tag 13)(39 bytes)
+// User ID - Armin M. Warda <warda@nephilim.ruhr.de>
+// Old: Signature Packet(tag 2)(149 bytes)
+// Ver 4 - new
+// Sig type - unknown(05)
+// Pub alg - ElGamal Encrypt-Only(pub 16)
+// Hash alg - unknown(hash 46)
+// Hashed Sub: unknown(sub 81, critical)(1988 bytes)
+const keySigV3Armor = `-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: SKS 1.0.10
+
+mI0CLnoYogAAAQQA1qwA2SuJwfQ5bCQ6u5t20ulnOtY0gykf7YjiK4LiVeRBwHjGq7v30tGV
+5Qti7qqRW4Ww7CDCJc4sZMFnystucR2vLkXaSoNWoFm4Fg47NiisDdhDezHwbVPW6OpCFNSi
+ZAamtj4QAUBu8j4LswafrJqZqR9336/V3g8Yil2l48kABRG0J0FybWluIE0uIFdhcmRhIDx3
+YXJkYUBuZXBoaWxpbS5ydWhyLmRlPoiVAgUQLok2xwXR6zmeWEiZAQE/DgP/WgxPQh40/Po4
+gSkWZCDAjNdph7zexvAb0CcUWahcwiBIgg3U5ErCx9I5CNVA9U+s8bNrDZwgSIeBzp3KhWUx
+524uhGgm6ZUTOAIKA6CbV6pfqoLpJnRYvXYQU5mIWsNa99wcu2qu18OeEDnztb7aLA6Ra9OF
+YFCbq4EjXRoOrYM=
+=LPjs
+-----END PGP PUBLIC KEY BLOCK-----`
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go
new file mode 100644
index 000000000..19538df77
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go
@@ -0,0 +1,103 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "encoding/hex"
+ "io"
+ "io/ioutil"
+ "testing"
+)
+
+func TestSymmetricKeyEncrypted(t *testing.T) {
+ buf := readerFromHex(symmetricallyEncryptedHex)
+ packet, err := Read(buf)
+ if err != nil {
+ t.Errorf("failed to read SymmetricKeyEncrypted: %s", err)
+ return
+ }
+ ske, ok := packet.(*SymmetricKeyEncrypted)
+ if !ok {
+ t.Error("didn't find SymmetricKeyEncrypted packet")
+ return
+ }
+ key, cipherFunc, err := ske.Decrypt([]byte("password"))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ packet, err = Read(buf)
+ if err != nil {
+ t.Errorf("failed to read SymmetricallyEncrypted: %s", err)
+ return
+ }
+ se, ok := packet.(*SymmetricallyEncrypted)
+ if !ok {
+ t.Error("didn't find SymmetricallyEncrypted packet")
+ return
+ }
+ r, err := se.Decrypt(cipherFunc, key)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ contents, err := ioutil.ReadAll(r)
+ if err != nil && err != io.EOF {
+ t.Error(err)
+ return
+ }
+
+ expectedContents, _ := hex.DecodeString(symmetricallyEncryptedContentsHex)
+ if !bytes.Equal(expectedContents, contents) {
+ t.Errorf("bad contents got:%x want:%x", contents, expectedContents)
+ }
+}
+
+const symmetricallyEncryptedHex = "8c0d04030302371a0b38d884f02060c91cf97c9973b8e58e028e9501708ccfe618fb92afef7fa2d80ddadd93cf"
+const symmetricallyEncryptedContentsHex = "cb1062004d14c4df636f6e74656e74732e0a"
+
+func TestSerializeSymmetricKeyEncrypted(t *testing.T) {
+ buf := bytes.NewBuffer(nil)
+ passphrase := []byte("testing")
+ const cipherFunc = CipherAES128
+ config := &Config{
+ DefaultCipher: cipherFunc,
+ }
+
+ key, err := SerializeSymmetricKeyEncrypted(buf, passphrase, config)
+ if err != nil {
+ t.Errorf("failed to serialize: %s", err)
+ return
+ }
+
+ p, err := Read(buf)
+ if err != nil {
+ t.Errorf("failed to reparse: %s", err)
+ return
+ }
+ ske, ok := p.(*SymmetricKeyEncrypted)
+ if !ok {
+ t.Errorf("parsed a different packet type: %#v", p)
+ return
+ }
+
+ if ske.CipherFunc != config.DefaultCipher {
+ t.Errorf("SKE cipher function is %d (expected %d)", ske.CipherFunc, config.DefaultCipher)
+ }
+ parsedKey, parsedCipherFunc, err := ske.Decrypt(passphrase)
+ if err != nil {
+ t.Errorf("failed to decrypt reparsed SKE: %s", err)
+ return
+ }
+ if !bytes.Equal(key, parsedKey) {
+ t.Errorf("keys don't match after Decrypt: %x (original) vs %x (parsed)", key, parsedKey)
+ }
+ if parsedCipherFunc != cipherFunc {
+ t.Errorf("cipher function doesn't match after Decrypt: %d (original) vs %d (parsed)", cipherFunc, parsedCipherFunc)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted_test.go
new file mode 100644
index 000000000..c5c00f7b9
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted_test.go
@@ -0,0 +1,123 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "encoding/hex"
+ "golang.org/x/crypto/openpgp/errors"
+ "io"
+ "io/ioutil"
+ "testing"
+)
+
+// testReader wraps a []byte and returns reads of a specific length.
+type testReader struct {
+ data []byte
+ stride int
+}
+
+func (t *testReader) Read(buf []byte) (n int, err error) {
+ n = t.stride
+ if n > len(t.data) {
+ n = len(t.data)
+ }
+ if n > len(buf) {
+ n = len(buf)
+ }
+ copy(buf, t.data)
+ t.data = t.data[n:]
+ if len(t.data) == 0 {
+ err = io.EOF
+ }
+ return
+}
+
+func TestMDCReader(t *testing.T) {
+ mdcPlaintext, _ := hex.DecodeString(mdcPlaintextHex)
+
+ for stride := 1; stride < len(mdcPlaintext)/2; stride++ {
+ r := &testReader{data: mdcPlaintext, stride: stride}
+ mdcReader := &seMDCReader{in: r, h: sha1.New()}
+ body, err := ioutil.ReadAll(mdcReader)
+ if err != nil {
+ t.Errorf("stride: %d, error: %s", stride, err)
+ continue
+ }
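+ // The final 22 bytes of the plaintext are the trailing MDC packet: a
+ // two-byte header (0xd3, 0x14) followed by a 20-byte SHA-1 hash. The
+ // seMDCReader consumes that trailer itself, so ReadAll returns
+ // everything before it.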
+ if !bytes.Equal(body, mdcPlaintext[:len(mdcPlaintext)-22]) {
+ t.Errorf("stride: %d: bad contents %x", stride, body)
+ continue
+ }
+
+ err = mdcReader.Close()
+ if err != nil {
+ t.Errorf("stride: %d, error on Close: %s", stride, err)
+ }
+ }
+
+ mdcPlaintext[15] ^= 80
+
+ r := &testReader{data: mdcPlaintext, stride: 2}
+ mdcReader := &seMDCReader{in: r, h: sha1.New()}
+ _, err := ioutil.ReadAll(mdcReader)
+ if err != nil {
+ t.Errorf("corruption test, error: %s", err)
+ return
+ }
+ err = mdcReader.Close()
+ if err == nil {
+ t.Error("corruption: no error")
+ } else if _, ok := err.(*errors.SignatureError); !ok {
+ t.Errorf("corruption: expected SignatureError, got: %s", err)
+ }
+}
+
+const mdcPlaintextHex = "a302789c3b2d93c4e0eb9aba22283539b3203335af44a134afb800c849cb4c4de10200aff40b45d31432c80cb384299a0655966d6939dfdeed1dddf980"
+
+func TestSerialize(t *testing.T) {
+ buf := bytes.NewBuffer(nil)
+ c := CipherAES128
+ key := make([]byte, c.KeySize())
+
+ w, err := SerializeSymmetricallyEncrypted(buf, c, key, nil)
+ if err != nil {
+ t.Errorf("error from SerializeSymmetricallyEncrypted: %s", err)
+ return
+ }
+
+ contents := []byte("hello world\n")
+
+ w.Write(contents)
+ w.Close()
+
+ p, err := Read(buf)
+ if err != nil {
+ t.Errorf("error from Read: %s", err)
+ return
+ }
+
+ se, ok := p.(*SymmetricallyEncrypted)
+ if !ok {
+ t.Errorf("didn't read a *SymmetricallyEncrypted")
+ return
+ }
+
+ r, err := se.Decrypt(c, key)
+ if err != nil {
+ t.Errorf("error from Decrypt: %s", err)
+ return
+ }
+
+ contentsCopy := bytes.NewBuffer(nil)
+ _, err = io.Copy(contentsCopy, r)
+ if err != nil {
+ t.Errorf("error from io.Copy: %s", err)
+ return
+ }
+ if !bytes.Equal(contentsCopy.Bytes(), contents) {
+ t.Errorf("contents not equal got: %x want: %x", contentsCopy.Bytes(), contents)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/userattribute_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/userattribute_test.go
new file mode 100644
index 000000000..13ca5143c
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/userattribute_test.go
@@ -0,0 +1,109 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "encoding/base64"
+ "image/color"
+ "image/jpeg"
+ "testing"
+)
+
+func TestParseUserAttribute(t *testing.T) {
+ r := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(userAttributePacket))
+ for i := 0; i < 2; i++ {
+ p, err := Read(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ uat := p.(*UserAttribute)
+ imgs := uat.ImageData()
+ if len(imgs) != 1 {
+ t.Errorf("Unexpected number of images in user attribute packet: %d", len(imgs))
+ }
+ if len(imgs[0]) != 3395 {
+ t.Errorf("Unexpected JPEG image size: %d", len(imgs[0]))
+ }
+ img, err := jpeg.Decode(bytes.NewBuffer(imgs[0]))
+ if err != nil {
+ t.Errorf("Error decoding JPEG image: %v", err)
+ }
+ // A pixel in my right eye.
+ pixel := color.NRGBAModel.Convert(img.At(56, 36))
+ ref := color.NRGBA{R: 157, G: 128, B: 124, A: 255}
+ if pixel != ref {
+ t.Errorf("Unexpected pixel color: %v", pixel)
+ }
+ w := bytes.NewBuffer(nil)
+ err = uat.Serialize(w)
+ if err != nil {
+ t.Errorf("Error writing user attribute: %v", err)
+ }
+ r = bytes.NewBuffer(w.Bytes())
+ }
+}
+
+const userAttributePacket = `
+0cyWzJQBEAABAQAAAAAAAAAAAAAAAP/Y/+AAEEpGSUYAAQIAAAEAAQAA/9sAQwAFAwQEBAMFBAQE
+BQUFBgcMCAcHBwcPCgsJDBEPEhIRDxEQExYcFxMUGhUQERghGBocHR8fHxMXIiQiHiQcHh8e/9sA
+QwEFBQUHBgcOCAgOHhQRFB4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4e
+Hh4eHh4eHh4e/8AAEQgAZABkAwEiAAIRAQMRAf/EAB8AAAEFAQEBAQEBAAAAAAAAAAABAgMEBQYH
+CAkKC//EALUQAAIBAwMCBAMFBQQEAAABfQECAwAEEQUSITFBBhNRYQcicRQygZGhCCNCscEVUtHw
+JDNicoIJChYXGBkaJSYnKCkqNDU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6
+g4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2drh4uPk
+5ebn6Onq8fLz9PX29/j5+v/EAB8BAAMBAQEBAQEBAQEAAAAAAAABAgMEBQYHCAkKC//EALURAAIB
+AgQEAwQHBQQEAAECdwABAgMRBAUhMQYSQVEHYXETIjKBCBRCkaGxwQkjM1LwFWJy0QoWJDThJfEX
+GBkaJicoKSo1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoKDhIWGh4iJipKT
+lJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uLj5OXm5+jp6vLz9PX2
+9/j5+v/aAAwDAQACEQMRAD8A5uGP06VehQ4pIox04q5EnHSvAep+hIIl4zVuMHGPWmRrUWtalaaN
+pU2oXsgSGJSxPr6ClvoitErs0Itqjc7BQOpPAFYmrfEnwjojtHNqaXEynBjtx5hH4jj9a8B8d+Od
+W8UXZjWR4LJT+7t0Jwfc+prnIdO1CWZEW2mZ3HyDactXXDB3V5s8evm1namj6r0H4weCLtxG+ova
+ueP30RA/MV6not1bX0Ed1ZzxzwyDKvGwZSPqK+Ff+ES8R8t/ZV2oHUmM10Hgbxp4m8BatEfNnWBH
+/eWshOxx9Kmpg4te49RUM1kn+8Wh9zQ4P1FaMC7l465rjPh14y0fxnoseoaXOpfaPOgJ+eI98j09
+67W19M15bi4uzPSqTU480WXkjZkAyAR61DPE6OCSOalWRRgZxjvTb598sfU4FBwx5uY4T4feIm8P
+TeJbAgc65NIM+8cX+FFeLfF3Vr3SfiNrMFrMypJMJcDPUqP8KK+kpVFyLU+ar037SXqX4hxVpMY7
+1UhPpVlT2rybKx9smWYz3NeH/EDVLzxt40j8O6bITaQybPlbKkjq39K9O8fasdH8IahfKxWQRFIy
+Ou9uB/OuE/Z/0y3j1d9TuyoZCMs5xjuea1pLli5nn46q240l13PcfhN8EvDNtpcEl/CklyVBLuMk
+mvU/Dfwo0BL/AO13FjEDD/qyV7Vn+CvGPg8zRpJrVm8ikLtEg6+1ew2dxZ3EQaJgysuQPasH7eXW
+1zzsbVhT92kk/PsYieEND+zlPs6c/wCyAPyryH4wfCPRtW0u6j+xRLOxLxSoADkDpXY+MPjJ4c0S
+9k082d3O8ZKkxw5XI96ytK+IGk+IpFjRpod+Qq3C7QT6A1E6NenaXbqRg6rlLlqS0fRnxjpd1r/w
+w8afa7GWRPKbZLGeBKmeVNfZngLxNaeKfDdprVjxHcLlkJ5Vh1H5185/tDad9h8XOsqAw3Cb0cjq
+CfX61P8AsveKf7L8T3fhe5nxa3g324YniQdh9R/KuivTdSmp9TXB1/Z1nRlsfU249QBx1pWfcwI7
+Cq6u2Ovamb9rYz16V5x7Psz5q/aJhZfibcupIElvE3H+7j+lFbXx9szP45jlUfeso8/99OKK9elL
+3EeNVopzZVharCtxVRGGMk02S5JyFOB69zWTieypnL/GksfB+0cr9oQt69awPhPpD69Y3Ky3DWth
+CWluGU4LAdq3vibGs/g68BJygVxjrwRW5+ztoRv/AAs8EeCZnO/J/hzz/Kumi4wp3kePjlOdZKPY
+ml8Mvo6WM9ppi7J0EkQYMzkb1X0wW+bJHGACa+ivg14huZPCkjXUO6SImIYOQAP6UQ2sGneHmiWF
+CYoSAAuM8etXfhBpMr+EZ3SSNRcMx6ZxWdes6ytBGSwkMNFuo7pnP614Ut9Zn1C4uLySKcwObGFA
+Qnm4+XcR71h+CfDHiKCQWuv2YWFtw+bBZQD8rcE8n2Ney+GbGGQSM6I7xvtI681rXdp8hKRRp6t3
+FYPE1VDlsY1nQjWdl+J8w/tOeDZZ/AMd/EGefTHyxxyYjwfyODXg3waRh8UtEcFh+8Jb8FNfZPxh
+Ak8J6nbPIsiyW7LnseK+Ofh99ptPHFnf2lu0y2twGcKuSEPB/Q1WHk50miq1o14TXU+xop+On61H
+NMC6Nis1LgsAcUTSt1APFcXJZn0EqmhyvxA037friTYziBV6f7Tf40Vr3k4aXLx5OMZIzRXZB2ik
+efJXbPHJJcnaD9aN2R1qoGO8/WkuLlIV+YjdjpXSonQ5lTxfiTwzqCnkeQxx9BWx+zPrQsrBFYja
+zEfrXL6lfie3khcjY6lSPUGud+G3iA6FrY0uQ/KJsA9gCa0jSvFpnBi6tpKSPu++nsIfDFxeXciR
+qIicscY4rxTwB8RUkn1axsPEf2LTYx85kTGzqCUP8VcJ47+JOs+I0Hhq1njjt/ufIeSvq1VtE+Gs
+eoaUbSHUrkHdu3WtuX5Ix81XRh7OL5jirVpV5Whdn0F8C/iX4auVn0i612T7bASoe8wjTAd89K9g
+vtSt5NMa4t5lkRhgOh3Dn6V8aaz8KZrIR3OlQ6r56LySmSxxz06Vo/CHx34h0rxBP4XvJ5AjK2RP
+nEbAEj6ZxjPrWM6fMmoswqJxqJ1VZnqHxn1NLPwveqWHmNC2BnnNcD8DfDkGi+CH1m+ijN1qMzNA
+4GSIiAMf+hVxPxU8Tapc3c0F9MGCn5GU5BX0Pau3+HmrT3XgXSIJCBHDGdgAx1NYSpezha52Yauq
+1dya2Wh2onAIwTj1p0lxxWWLkhRyCKWa5O3ORXOos9KVQluZm83j0oqi84JyWH50Vdmc7ep43d3I
+t1Z2Iz2FYdxeSTsxyRnvTdVuDNcNluM9KrKcg817NOnZGNbEXdkNckjrXGeIIprPxFFdRHAlIwem
+COtdmxrG8Q2cd/ZNExw45RvQ1bVjim+dWNzw7eaTD4mN3dndCQCo6hmI5zXpj/Ea/wBHjkh0kwRW
+xXEfl4yTxXzXZalJDL9nuWKMmRnHcV2Hh3WreCyYXW2SWQhd5P3F6n+lS43d2cTm6d7Ox9EWPxH1
+ODQxPqWpCaSU/ukUc4z3/WvKW8UhviAdaMewYZG98gj9c1ymoa8LyWOJHwkTDaVPb0qpr+q2m6Nb
+cfvNo349az9mou9iZVXNWbub3jm98/Vza2ReV7lsJg/e3dsV654UR9N0K0sZP9ZDGFbHr3rzL4P+
+H7rXfEEWr3I3W1qf3IYdW9fwqDxf4k8UeH/G95p08kscHmk25dPlZT0we9YTj7SXKjpw1aNG8mj3
+FLv5ccU959ycnmvKPDnxB82YQarGsZPAlTp+IrvIr1ZIgySKwIyCOhFYTpyg9T0qWIhVV4svzPvf
+IdhgY4orPachj81FRdmtzxqdiZmJ9aQEgdqZcPtmbJ71DJcAZ5r20kkeXJtsfPIQDwPzrG1a+S3i
+LyHAHvmp7y7HOD1rlNdm+1T7Acovf3o+J2RMpezjzMvrob67pX9o2ShZlYgg/wAWKxZLLWLZ/Ke3
+mVh14yK9M+BMC3dre2ko3LHKCB7EV7EngeGQJdQ7HyBkMKS0djgq1W3c+XtK03U522RwzsTwNiEk
+ntXoHgf4calql9El/G8UZbLfLyfr7V9FeGvh+s+0Lbxxcglu2K1NW1nwN4Gk/wBLuI57tV5jjwzE
+/QVNS+0dWYRqNvXRFv4eeCodKsY1ggVIY1G3K4z714h+1Jqul3GpwaXYeXJLbzgyyrg4b+6D+HNb
+vjz436zq9m+naHF/ZdkeGfOZXH17V4Vqt2b29K+ZuOc5bnce5zWdPBShL2lTfojSeJhy+zp/NjVz
+1Bwa6DSfFGq6fbJFDKrov8DjPFcu97ZxsUe4jVhwVJ5Bpp1mwQiLewJPXacVq6fNpYyjOUXdHoKf
+EG8VQHsInbuVcgflRXnt5fIs2FYHgcgUVi8LG+xusdW/mN7U2KgEVkTzPt60UVfQ9eHxGHrV1MGi
+iD4V25x1qvdgLAMd6KK0pbHm4x++dp8FtUubLxJ5EIjMc+A4Za+qfD8pe1JZVOBmiinW3RyRPMfi
+R8QPE638+k2l6LK0Hylbddhb6nOa80mlkcmWR2kcnlnOSaKK7qCXKcNdu5narcSrAoBxvODWJIga
+VckjDdqKKwq/EaQ0gUdbjQ6mr7QGBUcd6tPBC6gtGpOOuKKKie5qn7qIpEXd0HSiiimSf//Z`
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/userid_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/userid_test.go
new file mode 100644
index 000000000..296819389
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/packet/userid_test.go
@@ -0,0 +1,87 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "testing"
+)
+
+var userIdTests = []struct {
+ id string
+ name, comment, email string
+}{
+ {"", "", "", ""},
+ {"John Smith", "John Smith", "", ""},
+ {"John Smith ()", "John Smith", "", ""},
+ {"John Smith () <>", "John Smith", "", ""},
+ {"(comment", "", "comment", ""},
+ {"(comment)", "", "comment", ""},
+ {"<email", "", "", "email"},
+ {"<email> sdfk", "", "", "email"},
+ {" John Smith ( Comment ) asdkflj < email > lksdfj", "John Smith", "Comment", "email"},
+ {" John Smith < email > lksdfj", "John Smith", "", "email"},
+ {"(", "", "", ""},
+ {"foo", "foo", "", ""},
+}
+
+func TestParseUserId(t *testing.T) {
+ for i, test := range userIdTests {
+ name, comment, email := parseUserId([]byte(test.id))
+ if name != test.name {
+ t.Errorf("%d: name mismatch got:%s want:%s", i, name, test.name)
+ }
+ if comment != test.comment {
+ t.Errorf("%d: comment mismatch got:%s want:%s", i, comment, test.comment)
+ }
+ if email != test.email {
+ t.Errorf("%d: email mismatch got:%s want:%s", i, email, test.email)
+ }
+ }
+}
+
+var newUserIdTests = []struct {
+ name, comment, email, id string
+}{
+ {"foo", "", "", "foo"},
+ {"", "bar", "", "(bar)"},
+ {"", "", "baz", "<baz>"},
+ {"foo", "bar", "", "foo (bar)"},
+ {"foo", "", "baz", "foo <baz>"},
+ {"", "bar", "baz", "(bar) <baz>"},
+ {"foo", "bar", "baz", "foo (bar) <baz>"},
+}
+
+func TestNewUserId(t *testing.T) {
+ for i, test := range newUserIdTests {
+ uid := NewUserId(test.name, test.comment, test.email)
+ if uid == nil {
+ t.Errorf("#%d: returned nil", i)
+ continue
+ }
+ if uid.Id != test.id {
+ t.Errorf("#%d: got '%s', want '%s'", i, uid.Id, test.id)
+ }
+ }
+}
+
+var invalidNewUserIdTests = []struct {
+ name, comment, email string
+}{
+ {"foo(", "", ""},
+ {"foo<", "", ""},
+ {"", "bar)", ""},
+ {"", "bar<", ""},
+ {"", "", "baz>"},
+ {"", "", "baz)"},
+ {"", "", "baz\x00"},
+}
+
+func TestNewUserIdWithInvalidInput(t *testing.T) {
+ for i, test := range invalidNewUserIdTests {
+ if uid := NewUserId(test.name, test.comment, test.email); uid != nil {
+ t.Errorf("#%d: returned non-nil value: %#v", i, uid)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/read_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/read_test.go
new file mode 100644
index 000000000..7524a02e5
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/read_test.go
@@ -0,0 +1,512 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package openpgp
+
+import (
+ "bytes"
+ _ "crypto/sha512"
+ "encoding/hex"
+ "io"
+ "io/ioutil"
+ "strings"
+ "testing"
+
+ "golang.org/x/crypto/openpgp/errors"
+)
+
+func readerFromHex(s string) io.Reader {
+ data, err := hex.DecodeString(s)
+ if err != nil {
+ panic("readerFromHex: bad input")
+ }
+ return bytes.NewBuffer(data)
+}
+
+func TestReadKeyRing(t *testing.T) {
+ kring, err := ReadKeyRing(readerFromHex(testKeys1And2Hex))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if len(kring) != 2 || uint32(kring[0].PrimaryKey.KeyId) != 0xC20C31BB || uint32(kring[1].PrimaryKey.KeyId) != 0x1E35246B {
+ t.Errorf("bad keyring: %#v", kring)
+ }
+}
+
+func TestRereadKeyRing(t *testing.T) {
+ kring, err := ReadKeyRing(readerFromHex(testKeys1And2Hex))
+ if err != nil {
+ t.Errorf("error in initial parse: %s", err)
+ return
+ }
+ out := new(bytes.Buffer)
+ err = kring[0].Serialize(out)
+ if err != nil {
+ t.Errorf("error in serialization: %s", err)
+ return
+ }
+ kring, err = ReadKeyRing(out)
+ if err != nil {
+ t.Errorf("error in second parse: %s", err)
+ return
+ }
+
+ if len(kring) != 1 || uint32(kring[0].PrimaryKey.KeyId) != 0xC20C31BB {
+ t.Errorf("bad keyring: %#v", kring)
+ }
+}
+
+func TestReadPrivateKeyRing(t *testing.T) {
+ kring, err := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if len(kring) != 2 || uint32(kring[0].PrimaryKey.KeyId) != 0xC20C31BB || uint32(kring[1].PrimaryKey.KeyId) != 0x1E35246B || kring[0].PrimaryKey == nil {
+ t.Errorf("bad keyring: %#v", kring)
+ }
+}
+
+func TestReadDSAKey(t *testing.T) {
+ kring, err := ReadKeyRing(readerFromHex(dsaTestKeyHex))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if len(kring) != 1 || uint32(kring[0].PrimaryKey.KeyId) != 0x0CCC0360 {
+ t.Errorf("bad parse: %#v", kring)
+ }
+}
+
+func TestDSAHashTruncation(t *testing.T) {
+ // dsaKeyWithSHA512 was generated with GnuPG and --cert-digest-algo
+ // SHA512 in order to require DSA hash truncation to verify correctly.
+ _, err := ReadKeyRing(readerFromHex(dsaKeyWithSHA512))
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestGetKeyById(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex))
+
+ keys := kring.KeysById(0xa34d7e18c20c31bb)
+ if len(keys) != 1 || keys[0].Entity != kring[0] {
+ t.Errorf("bad result for 0xa34d7e18c20c31bb: %#v", keys)
+ }
+
+ keys = kring.KeysById(0xfd94408d4543314f)
+ if len(keys) != 1 || keys[0].Entity != kring[0] {
+ t.Errorf("bad result for 0xa34d7e18c20c31bb: %#v", keys)
+ }
+}
+
+func checkSignedMessage(t *testing.T, signedHex, expected string) {
+ kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex))
+
+ md, err := ReadMessage(readerFromHex(signedHex), kring, nil, nil)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ if !md.IsSigned || md.SignedByKeyId != 0xa34d7e18c20c31bb || md.SignedBy == nil || md.IsEncrypted || md.IsSymmetricallyEncrypted || len(md.EncryptedToKeyIds) != 0 {
+ t.Errorf("bad MessageDetails: %#v", md)
+ }
+
+ contents, err := ioutil.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ t.Errorf("error reading UnverifiedBody: %s", err)
+ }
+ if string(contents) != expected {
+ t.Errorf("bad UnverifiedBody got:%s want:%s", string(contents), expected)
+ }
+ if md.SignatureError != nil || md.Signature == nil {
+ t.Errorf("failed to validate: %s", md.SignatureError)
+ }
+}
+
+func TestSignedMessage(t *testing.T) {
+ checkSignedMessage(t, signedMessageHex, signedInput)
+}
+
+func TestTextSignedMessage(t *testing.T) {
+ checkSignedMessage(t, signedTextMessageHex, signedTextInput)
+}
+
+// The reader should detect "compressed quines", which are compressed
+// packets that expand into themselves and cause an infinite recursive
+// parsing loop.
+// The packet in this test case comes from Taylor R. Campbell at
+// http://mumble.net/~campbell/misc/pgp-quine/
+func TestCampbellQuine(t *testing.T) {
+ md, err := ReadMessage(readerFromHex(campbellQuine), nil, nil, nil)
+ if md != nil {
+ t.Errorf("Reading a compressed quine should not return any data: %#v", md)
+ }
+ structural, ok := err.(errors.StructuralError)
+ if !ok {
+ t.Fatalf("Unexpected class of error: %T", err)
+ }
+ if !strings.Contains(string(structural), "too many layers of packets") {
+ t.Fatalf("Unexpected error: %s", err)
+ }
+}
+
+var signedEncryptedMessageTests = []struct {
+ keyRingHex string
+ messageHex string
+ signedByKeyId uint64
+ encryptedToKeyId uint64
+}{
+ {
+ testKeys1And2PrivateHex,
+ signedEncryptedMessageHex,
+ 0xa34d7e18c20c31bb,
+ 0x2a67d68660df41c7,
+ },
+ {
+ dsaElGamalTestKeysHex,
+ signedEncryptedMessage2Hex,
+ 0x33af447ccd759b09,
+ 0xcf6a7abcd43e3673,
+ },
+}
+
+func TestSignedEncryptedMessage(t *testing.T) {
+ for i, test := range signedEncryptedMessageTests {
+ expected := "Signed and encrypted message\n"
+ kring, _ := ReadKeyRing(readerFromHex(test.keyRingHex))
+ prompt := func(keys []Key, symmetric bool) ([]byte, error) {
+ if symmetric {
+ t.Errorf("prompt: message was marked as symmetrically encrypted")
+ return nil, errors.ErrKeyIncorrect
+ }
+
+ if len(keys) == 0 {
+ t.Error("prompt: no keys requested")
+ return nil, errors.ErrKeyIncorrect
+ }
+
+ err := keys[0].PrivateKey.Decrypt([]byte("passphrase"))
+ if err != nil {
+ t.Errorf("prompt: error decrypting key: %s", err)
+ return nil, errors.ErrKeyIncorrect
+ }
+
+ return nil, nil
+ }
+
+ md, err := ReadMessage(readerFromHex(test.messageHex), kring, prompt, nil)
+ if err != nil {
+ t.Errorf("#%d: error reading message: %s", i, err)
+ return
+ }
+
+ if !md.IsSigned || md.SignedByKeyId != test.signedByKeyId || md.SignedBy == nil || !md.IsEncrypted || md.IsSymmetricallyEncrypted || len(md.EncryptedToKeyIds) == 0 || md.EncryptedToKeyIds[0] != test.encryptedToKeyId {
+ t.Errorf("#%d: bad MessageDetails: %#v", i, md)
+ }
+
+ contents, err := ioutil.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ t.Errorf("#%d: error reading UnverifiedBody: %s", i, err)
+ }
+ if string(contents) != expected {
+ t.Errorf("#%d: bad UnverifiedBody got:%s want:%s", i, string(contents), expected)
+ }
+
+ if md.SignatureError != nil || md.Signature == nil {
+ t.Errorf("#%d: failed to validate: %s", i, md.SignatureError)
+ }
+ }
+}
+
+func TestUnspecifiedRecipient(t *testing.T) {
+ expected := "Recipient unspecified\n"
+ kring, _ := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex))
+
+ md, err := ReadMessage(readerFromHex(recipientUnspecifiedHex), kring, nil, nil)
+ if err != nil {
+ t.Errorf("error reading message: %s", err)
+ return
+ }
+
+ contents, err := ioutil.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ t.Errorf("error reading UnverifiedBody: %s", err)
+ }
+ if string(contents) != expected {
+ t.Errorf("bad UnverifiedBody got:%s want:%s", string(contents), expected)
+ }
+}
+
+func TestSymmetricallyEncrypted(t *testing.T) {
+ firstTimeCalled := true
+
+ prompt := func(keys []Key, symmetric bool) ([]byte, error) {
+ if len(keys) != 0 {
+ t.Errorf("prompt: len(keys) = %d (want 0)", len(keys))
+ }
+
+ if !symmetric {
+ t.Errorf("symmetric is not set")
+ }
+
+ if firstTimeCalled {
+ firstTimeCalled = false
+ return []byte("wrongpassword"), nil
+ }
+
+ return []byte("password"), nil
+ }
+
+ md, err := ReadMessage(readerFromHex(symmetricallyEncryptedCompressedHex), nil, prompt, nil)
+ if err != nil {
+ t.Errorf("ReadMessage: %s", err)
+ return
+ }
+
+ contents, err := ioutil.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ t.Errorf("ReadAll: %s", err)
+ }
+
+ expectedCreationTime := uint32(1295992998)
+ if md.LiteralData.Time != expectedCreationTime {
+ t.Errorf("LiteralData.Time is %d, want %d", md.LiteralData.Time, expectedCreationTime)
+ }
+
+ const expected = "Symmetrically encrypted.\n"
+ if string(contents) != expected {
+ t.Errorf("contents got: %s want: %s", string(contents), expected)
+ }
+}
+
+func testDetachedSignature(t *testing.T, kring KeyRing, signature io.Reader, sigInput, tag string, expectedSignerKeyId uint64) {
+ signed := bytes.NewBufferString(sigInput)
+ signer, err := CheckDetachedSignature(kring, signed, signature)
+ if err != nil {
+ t.Errorf("%s: signature error: %s", tag, err)
+ return
+ }
+ if signer == nil {
+ t.Errorf("%s: signer is nil", tag)
+ return
+ }
+ if signer.PrimaryKey.KeyId != expectedSignerKeyId {
+ t.Errorf("%s: wrong signer got:%x want:%x", tag, signer.PrimaryKey.KeyId, expectedSignerKeyId)
+ }
+}
+
+func TestDetachedSignature(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex))
+ testDetachedSignature(t, kring, readerFromHex(detachedSignatureHex), signedInput, "binary", testKey1KeyId)
+ testDetachedSignature(t, kring, readerFromHex(detachedSignatureTextHex), signedInput, "text", testKey1KeyId)
+ testDetachedSignature(t, kring, readerFromHex(detachedSignatureV3TextHex), signedInput, "v3", testKey1KeyId)
+
+ incorrectSignedInput := signedInput + "X"
+ _, err := CheckDetachedSignature(kring, bytes.NewBufferString(incorrectSignedInput), readerFromHex(detachedSignatureHex))
+ if err == nil {
+ t.Fatal("CheckDetachedSignature returned without error for bad signature")
+ }
+ if err == errors.ErrUnknownIssuer {
+ t.Fatal("CheckDetachedSignature returned ErrUnknownIssuer when the signer was known, but the signature invalid")
+ }
+}
+
+func TestDetachedSignatureDSA(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(dsaTestKeyHex))
+ testDetachedSignature(t, kring, readerFromHex(detachedSignatureDSAHex), signedInput, "binary", testKey3KeyId)
+}
+
+func TestMultipleSignaturePacketsDSA(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(dsaTestKeyHex))
+ testDetachedSignature(t, kring, readerFromHex(missingHashFunctionHex+detachedSignatureDSAHex), signedInput, "binary", testKey3KeyId)
+}
+
+func testHashFunctionError(t *testing.T, signatureHex string) {
+ kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex))
+ _, err := CheckDetachedSignature(kring, nil, readerFromHex(signatureHex))
+ if err == nil {
+ t.Fatal("Packet with bad hash type was correctly parsed")
+ }
+ unsupported, ok := err.(errors.UnsupportedError)
+ if !ok {
+ t.Fatalf("Unexpected class of error: %s", err)
+ }
+ if !strings.Contains(string(unsupported), "hash ") {
+ t.Fatalf("Unexpected error: %s", err)
+ }
+}
+
+func TestUnknownHashFunction(t *testing.T) {
+ // unknownHashFunctionHex contains a signature packet with hash
+ // function type 153 (which isn't a real hash function id).
+ testHashFunctionError(t, unknownHashFunctionHex)
+}
+
+func TestMissingHashFunction(t *testing.T) {
+ // missingHashFunctionHex contains a signature packet that uses
+ // RIPEMD160, which isn't compiled in. Since that's the only signature
+ // packet, we don't find any suitable packets and end up with ErrUnknownIssuer.
+ kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex))
+ _, err := CheckDetachedSignature(kring, nil, readerFromHex(missingHashFunctionHex))
+ if err == nil {
+ t.Fatal("Packet with missing hash type was correctly parsed")
+ }
+ if err != errors.ErrUnknownIssuer {
+ t.Fatalf("Unexpected class of error: %s", err)
+ }
+}
+
+func TestReadingArmoredPrivateKey(t *testing.T) {
+ el, err := ReadArmoredKeyRing(bytes.NewBufferString(armoredPrivateKeyBlock))
+ if err != nil {
+ t.Error(err)
+ }
+ if len(el) != 1 {
+ t.Errorf("got %d entities, wanted 1\n", len(el))
+ }
+}
+
+func TestReadingArmoredPublicKey(t *testing.T) {
+ el, err := ReadArmoredKeyRing(bytes.NewBufferString(e2ePublicKey))
+ if err != nil {
+ t.Error(err)
+ }
+ if len(el) != 1 {
+ t.Errorf("didn't get a valid entity")
+ }
+}
+
+func TestNoArmoredData(t *testing.T) {
+ _, err := ReadArmoredKeyRing(bytes.NewBufferString("foo"))
+ if _, ok := err.(errors.InvalidArgumentError); !ok {
+ t.Errorf("error was not an InvalidArgumentError: %s", err)
+ }
+}
+
+func testReadMessageError(t *testing.T, messageHex string) {
+ buf, err := hex.DecodeString(messageHex)
+ if err != nil {
+ t.Errorf("hex.DecodeString(): %v", err)
+ }
+
+ kr, err := ReadKeyRing(new(bytes.Buffer))
+ if err != nil {
+ t.Errorf("ReadKeyring(): %v", err)
+ }
+
+ _, err = ReadMessage(bytes.NewBuffer(buf), kr,
+ func([]Key, bool) ([]byte, error) {
+ return []byte("insecure"), nil
+ }, nil)
+
+ if err == nil {
+ t.Errorf("ReadMessage(): Unexpected nil error")
+ }
+}
+
+func TestIssue11503(t *testing.T) {
+ testReadMessageError(t, "8c040402000aa430aa8228b9248b01fc899a91197130303030")
+}
+
+func TestIssue11504(t *testing.T) {
+ testReadMessageError(t, "9303000130303030303030303030983002303030303030030000000130")
+}
+
+const testKey1KeyId = 0xA34D7E18C20C31BB
+const testKey3KeyId = 0x338934250CCC0360
+
+const signedInput = "Signed message\nline 2\nline 3\n"
+const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n"
+
+const recipientUnspecifiedHex = "848c0300000000000000000103ff62d4d578d03cf40c3da998dfe216c074fa6ddec5e31c197c9666ba292830d91d18716a80f699f9d897389a90e6d62d0238f5f07a5248073c0f24920e4bc4a30c2d17ee4e0cae7c3d4aaa4e8dced50e3010a80ee692175fa0385f62ecca4b56ee6e9980aa3ec51b61b077096ac9e800edaf161268593eedb6cc7027ff5cb32745d250010d407a6221ae22ef18469b444f2822478c4d190b24d36371a95cb40087cdd42d9399c3d06a53c0673349bfb607927f20d1e122bde1e2bf3aa6cae6edf489629bcaa0689539ae3b718914d88ededc3b"
+
+const detachedSignatureHex = "889c04000102000605024d449cd1000a0910a34d7e18c20c31bb167603ff57718d09f28a519fdc7b5a68b6a3336da04df85e38c5cd5d5bd2092fa4629848a33d85b1729402a2aab39c3ac19f9d573f773cc62c264dc924c067a79dfd8a863ae06c7c8686120760749f5fd9b1e03a64d20a7df3446ddc8f0aeadeaeba7cbaee5c1e366d65b6a0c6cc749bcb912d2f15013f812795c2e29eb7f7b77f39ce77"
+
+const detachedSignatureTextHex = "889c04010102000605024d449d21000a0910a34d7e18c20c31bbc8c60400a24fbef7342603a41cb1165767bd18985d015fb72fe05db42db36cfb2f1d455967f1e491194fbf6cf88146222b23bf6ffbd50d17598d976a0417d3192ff9cc0034fd00f287b02e90418bbefe609484b09231e4e7a5f3562e199bf39909ab5276c4d37382fe088f6b5c3426fc1052865da8b3ab158672d58b6264b10823dc4b39"
+
+const detachedSignatureV3TextHex = "8900950305005255c25ca34d7e18c20c31bb0102bb3f04009f6589ef8a028d6e54f6eaf25432e590d31c3a41f4710897585e10c31e5e332c7f9f409af8512adceaff24d0da1474ab07aa7bce4f674610b010fccc5b579ae5eb00a127f272fb799f988ab8e4574c141da6dbfecfef7e6b2c478d9a3d2551ba741f260ee22bec762812f0053e05380bfdd55ad0f22d8cdf71b233fe51ae8a24"
+
+const detachedSignatureDSAHex = "884604001102000605024d6c4eac000a0910338934250ccc0360f18d00a087d743d6405ed7b87755476629600b8b694a39e900a0abff8126f46faf1547c1743c37b21b4ea15b8f83"
+
+const testKeys1And2Hex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b0020003b88d044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f0011010001889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab0020003988d044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b0020003b88d044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020003"
+
+const testKeys1And2PrivateHex = "9501d8044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd00110100010003ff4d91393b9a8e3430b14d6209df42f98dc927425b881f1209f319220841273a802a97c7bdb8b3a7740b3ab5866c4d1d308ad0d3a79bd1e883aacf1ac92dfe720285d10d08752a7efe3c609b1d00f17f2805b217be53999a7da7e493bfc3e9618fd17018991b8128aea70a05dbce30e4fbe626aa45775fa255dd9177aabf4df7cf0200c1ded12566e4bc2bb590455e5becfb2e2c9796482270a943343a7835de41080582c2be3caf5981aa838140e97afa40ad652a0b544f83eb1833b0957dce26e47b0200eacd6046741e9ce2ec5beb6fb5e6335457844fb09477f83b050a96be7da043e17f3a9523567ed40e7a521f818813a8b8a72209f1442844843ccc7eb9805442570200bdafe0438d97ac36e773c7162028d65844c4d463e2420aa2228c6e50dc2743c3d6c72d0d782a5173fe7be2169c8a9f4ef8a7cf3e37165e8c61b89c346cdc6c1799d2b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b00200009d01d8044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f00110100010003fd17a7490c22a79c59281fb7b20f5e6553ec0c1637ae382e8adaea295f50241037f8997cf42c1ce26417e015091451b15424b2c59eb8d4161b0975630408e394d3b00f88d4b4e18e2cc85e8251d4753a27c639c83f5ad4a571c4f19d7cd460b9b73c25ade730c99df09637bd173d8e3e981ac64432078263bb6dc30d3e974150dd0200d0ee05be3d4604d2146fb0457f31ba17c057560785aa804e8ca5530a7cd81d3440d0f4ba6851efcfd3954b7e68908fc0ba47f7ac37bf559c6c168b70d3a7c8cd0200da1c677c4bce06a068070f2b3733b0a714e88d62aa3f9a26c6f5216d48d5c2b5624144f3807c0df30be66b3268eeeca4df1fbded58faf49fc95dc3c35f134f8b01fd1396b6c0fc1b6c4f0eb8f5e44b8eace1e6073e20d0b8bc5385f86f1cf3f050f66af789f3ef1fc107b7f4421e19e0349c730c68f0a226981f4e889054fdb4dc149e8e889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab00200009501fe044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001fe030302e9030f3c783e14856063f16938530e148bc57a7aa3f3e4f90df9dceccdc779bc0835e1ad3d006e4a8d7b36d08b8e0de5a0d947254ecfbd22037e6572b426bcfdc517796b224b0036ff90bc574b5509bede85512f2eefb520fb4b02aa523ba739bff424a6fe81c5041f253f8d757e69a503d3563a104d0d49e9e890b9d0c26f96b55b743883b472caa7050c4acfd4a21f875bdf1258d88bd61224d303dc9df77f743137d51e6d5246b88c406780528fd9a3e15bab5452e5b93970d9dcc79f48b38651b9f15bfbcf6da452837e9cc70683d1bdca94507870f743e4ad902005812488dd342f836e72869afd00ce1850eea4cfa53ce10e3608e13d3c149394ee3cbd0e23d018fcbcb6e2ec5a1a22972d1d462ca05355d0d290dd2751e550d5efb38c6c8
9686344df64852bf4ff86638708f644e8ec6bd4af9b50d8541cb91891a431326ab2e332faa7ae86cfb6e0540aa63160c1e5cdd5a4add518b303fff0a20117c6bc77f7cfbaf36b04c865c6c2b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b00200009d01fe044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001fe030302e9030f3c783e148560f936097339ae381d63116efcf802ff8b1c9360767db5219cc987375702a4123fd8657d3e22700f23f95020d1b261eda5257e9a72f9a918e8ef22dd5b3323ae03bbc1923dd224db988cadc16acc04b120a9f8b7e84da9716c53e0334d7b66586ddb9014df604b41be1e960dcfcbc96f4ed150a1a0dd070b9eb14276b9b6be413a769a75b519a53d3ecc0c220e85cd91ca354d57e7344517e64b43b6e29823cbd87eae26e2b2e78e6dedfbb76e3e9f77bcb844f9a8932eb3db2c3f9e44316e6f5d60e9e2a56e46b72abe6b06dc9a31cc63f10023d1f5e12d2a3ee93b675c96f504af0001220991c88db759e231b3320dcedf814dcf723fd9857e3d72d66a0f2af26950b915abdf56c1596f46a325bf17ad4810d3535fb02a259b247ac3dbd4cc3ecf9c51b6c07cebb009c1506fba0a89321ec8683e3fd009a6e551d50243e2d5092fefb3321083a4bad91320dc624bd6b5dddf93553e3d53924c05bfebec1fb4bd47e89a1a889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020000"
+
+const dsaElGamalTestKeysHex = "9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a34033862261806bb2c5ce1152e28402546
97872c85441ccb7321431d75a747a4bfb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b32762b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000"
+
+const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300"
+
+const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200"
+
+const signedEncryptedMessageHex = "848c032a67d68660df41c70103ff5789d0de26b6a50c985a02a13131ca829c413a35d0e6fa8d6842599252162808ac7439c72151c8c6183e76923fe3299301414d0c25a2f06a2257db3839e7df0ec964773f6e4c4ac7ff3b48c444237166dd46ba8ff443a5410dc670cb486672fdbe7c9dfafb75b4fea83af3a204fe2a7dfa86bd20122b4f3d2646cbeecb8f7be8d2c03b018bd210b1d3791e1aba74b0f1034e122ab72e760492c192383cf5e20b5628bd043272d63df9b923f147eb6091cd897553204832aba48fec54aa447547bb16305a1024713b90e77fd0065f1918271947549205af3c74891af22ee0b56cd29bfec6d6e351901cd4ab3ece7c486f1e32a792d4e474aed98ee84b3f591c7dff37b64e0ecd68fd036d517e412dcadf85840ce184ad7921ad446c4ee28db80447aea1ca8d4f574db4d4e37688158ddd19e14ee2eab4873d46947d65d14a23e788d912cf9a19624ca7352469b72a83866b7c23cb5ace3deab3c7018061b0ba0f39ed2befe27163e5083cf9b8271e3e3d52cc7ad6e2a3bd81d4c3d7022f8d"
+
+const signedEncryptedMessage2Hex = "85010e03cf6a7abcd43e36731003fb057f5495b79db367e277cdbe4ab90d924ddee0c0381494112ff8c1238fb0184af35d1731573b01bc4c55ecacd2aafbe2003d36310487d1ecc9ac994f3fada7f9f7f5c3a64248ab7782906c82c6ff1303b69a84d9a9529c31ecafbcdb9ba87e05439897d87e8a2a3dec55e14df19bba7f7bd316291c002ae2efd24f83f9e3441203fc081c0c23dc3092a454ca8a082b27f631abf73aca341686982e8fbda7e0e7d863941d68f3de4a755c2964407f4b5e0477b3196b8c93d551dd23c8beef7d0f03fbb1b6066f78907faf4bf1677d8fcec72651124080e0b7feae6b476e72ab207d38d90b958759fdedfc3c6c35717c9dbfc979b3cfbbff0a76d24a5e57056bb88acbd2a901ef64bc6e4db02adc05b6250ff378de81dca18c1910ab257dff1b9771b85bb9bbe0a69f5989e6d1710a35e6dfcceb7d8fb5ccea8db3932b3d9ff3fe0d327597c68b3622aec8e3716c83a6c93f497543b459b58ba504ed6bcaa747d37d2ca746fe49ae0a6ce4a8b694234e941b5159ff8bd34b9023da2814076163b86f40eed7c9472f81b551452d5ab87004a373c0172ec87ea6ce42ccfa7dbdad66b745496c4873d8019e8c28d6b3"
+
+const symmetricallyEncryptedCompressedHex = "8c0d04030302eb4a03808145d0d260c92f714339e13de5a79881216431925bf67ee2898ea61815f07894cd0703c50d0a76ef64d482196f47a8bc729af9b80bb6"
+
+const dsaTestKeyHex = "9901a2044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794"
+
+const dsaTestKeyPrivateHex = "9501bb044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4d00009f592e0619d823953577d4503061706843317e4fee083db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794"
+
+const armoredPrivateKeyBlock = `-----BEGIN PGP PRIVATE KEY BLOCK-----
+Version: GnuPG v1.4.10 (GNU/Linux)
+
+lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp
+idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn
+vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB
+AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X
+0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL
+IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk
+VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn
+gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9
+TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx
+q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz
+dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA
+CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1
+ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+
+eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid
+AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV
+bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK
+/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA
+A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX
+TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc
+lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6
+rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN
+oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8
+QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU
+nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC
+AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp
+BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad
+AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL
+VrM0m72/jnpKo04=
+=zNCn
+-----END PGP PRIVATE KEY BLOCK-----`
+
+const e2ePublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
+Charset: UTF-8
+
+xv8AAABSBAAAAAATCCqGSM49AwEHAgME1LRoXSpOxtHXDUdmuvzchyg6005qIBJ4
+sfaSxX7QgH9RV2ONUhC+WiayCNADq+UMzuR/vunSr4aQffXvuGnR383/AAAAFDxk
+Z2lsQHlhaG9vLWluYy5jb20+wv8AAACGBBATCAA4/wAAAAWCVGvAG/8AAAACiwn/
+AAAACZC2VkQCOjdvYf8AAAAFlQgJCgv/AAAAA5YBAv8AAAACngEAAE1BAP0X8veD
+24IjmI5/C6ZAfVNXxgZZFhTAACFX75jUA3oD6AEAzoSwKf1aqH6oq62qhCN/pekX
++WAsVMBhNwzLpqtCRjLO/wAAAFYEAAAAABIIKoZIzj0DAQcCAwT50ain7vXiIRv8
+B1DO3x3cE/aattZ5sHNixJzRCXi2vQIA5QmOxZ6b5jjUekNbdHG3SZi1a2Ak5mfX
+fRxC/5VGAwEIB8L/AAAAZQQYEwgAGP8AAAAFglRrwBz/AAAACZC2VkQCOjdvYQAA
+FJAA9isX3xtGyMLYwp2F3nXm7QEdY5bq5VUcD/RJlj792VwA/1wH0pCzVLl4Q9F9
+ex7En5r7rHR5xwX82Msc+Rq9dSyO
+=7MrZ
+-----END PGP PUBLIC KEY BLOCK-----`
+
+const dsaKeyWithSHA512 = `9901a2044f04b07f110400db244efecc7316553ee08d179972aab87bb1214de7692593fcf5b6feb1c80fba268722dd464748539b85b81d574cd2d7ad0ca2444de4d849b8756bad7768c486c83a824f9bba4af773d11742bdfb4ac3b89ef8cc9452d4aad31a37e4b630d33927bff68e879284a1672659b8b298222fc68f370f3e24dccacc4a862442b9438b00a0ea444a24088dc23e26df7daf8f43cba3bffc4fe703fe3d6cd7fdca199d54ed8ae501c30e3ec7871ea9cdd4cf63cfe6fc82281d70a5b8bb493f922cd99fba5f088935596af087c8d818d5ec4d0b9afa7f070b3d7c1dd32a84fca08d8280b4890c8da1dde334de8e3cad8450eed2a4a4fcc2db7b8e5528b869a74a7f0189e11ef097ef1253582348de072bb07a9fa8ab838e993cef0ee203ff49298723e2d1f549b00559f886cd417a41692ce58d0ac1307dc71d85a8af21b0cf6eaa14baf2922d3a70389bedf17cc514ba0febbd107675a372fe84b90162a9e88b14d4b1c6be855b96b33fb198c46f058568817780435b6936167ebb3724b680f32bf27382ada2e37a879b3d9de2abe0c3f399350afd1ad438883f4791e2e3b4184453412068617368207472756e636174696f6e207465737488620413110a002205024f04b07f021b03060b090807030206150802090a0b0416020301021e01021780000a0910ef20e0cefca131581318009e2bf3bf047a44d75a9bacd00161ee04d435522397009a03a60d51bd8a568c6c021c8d7cf1be8d990d6417b0020003`
+
+const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101`
+
+const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101`
+
+const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000`
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/s2k/s2k_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/s2k/s2k_test.go
new file mode 100644
index 000000000..183d26056
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/s2k/s2k_test.go
@@ -0,0 +1,137 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2k
+
+import (
+ "bytes"
+ "crypto"
+ _ "crypto/md5"
+ "crypto/rand"
+ "crypto/sha1"
+ _ "crypto/sha256"
+ _ "crypto/sha512"
+ "encoding/hex"
+ "testing"
+
+ _ "golang.org/x/crypto/ripemd160"
+)
+
+var saltedTests = []struct {
+ in, out string
+}{
+ {"hello", "10295ac1"},
+ {"world", "ac587a5e"},
+ {"foo", "4dda8077"},
+ {"bar", "bd8aac6b9ea9cae04eae6a91c6133b58b5d9a61c14f355516ed9370456"},
+ {"x", "f1d3f289"},
+ {"xxxxxxxxxxxxxxxxxxxxxxx", "e00d7b45"},
+}
+
+func TestSalted(t *testing.T) {
+ h := sha1.New()
+ salt := [4]byte{1, 2, 3, 4}
+
+ for i, test := range saltedTests {
+ expected, _ := hex.DecodeString(test.out)
+ out := make([]byte, len(expected))
+ Salted(out, h, []byte(test.in), salt[:])
+ if !bytes.Equal(expected, out) {
+ t.Errorf("#%d, got: %x want: %x", i, out, expected)
+ }
+ }
+}
+
+var iteratedTests = []struct {
+ in, out string
+}{
+ {"hello", "83126105"},
+ {"world", "6fa317f9"},
+ {"foo", "8fbc35b9"},
+ {"bar", "2af5a99b54f093789fd657f19bd245af7604d0f6ae06f66602a46a08ae"},
+ {"x", "5a684dfe"},
+ {"xxxxxxxxxxxxxxxxxxxxxxx", "18955174"},
+}
+
+func TestIterated(t *testing.T) {
+ h := sha1.New()
+ salt := [4]byte{4, 3, 2, 1}
+
+ for i, test := range iteratedTests {
+ expected, _ := hex.DecodeString(test.out)
+ out := make([]byte, len(expected))
+ Iterated(out, h, []byte(test.in), salt[:], 31)
+ if !bytes.Equal(expected, out) {
+ t.Errorf("#%d, got: %x want: %x", i, out, expected)
+ }
+ }
+}
+
+var parseTests = []struct {
+ spec, in, out string
+}{
+ /* Simple with SHA1 */
+ {"0002", "hello", "aaf4c61d"},
+ /* Salted with SHA1 */
+ {"01020102030405060708", "hello", "f4f7d67e"},
+ /* Iterated with SHA1 */
+ {"03020102030405060708f1", "hello", "f2a57b7c"},
+}
+
+func TestParse(t *testing.T) {
+ for i, test := range parseTests {
+ spec, _ := hex.DecodeString(test.spec)
+ buf := bytes.NewBuffer(spec)
+ f, err := Parse(buf)
+ if err != nil {
+ t.Errorf("%d: Parse returned error: %s", i, err)
+ continue
+ }
+
+ expected, _ := hex.DecodeString(test.out)
+ out := make([]byte, len(expected))
+ f(out, []byte(test.in))
+ if !bytes.Equal(out, expected) {
+ t.Errorf("%d: output got: %x want: %x", i, out, expected)
+ }
+ if testing.Short() {
+ break
+ }
+ }
+}
+
+func TestSerialize(t *testing.T) {
+ hashes := []crypto.Hash{crypto.MD5, crypto.SHA1, crypto.RIPEMD160,
+ crypto.SHA256, crypto.SHA384, crypto.SHA512, crypto.SHA224}
+ testCounts := []int{-1, 0, 1024, 65536, 4063232, 65011712}
+ for _, h := range hashes {
+ for _, c := range testCounts {
+ testSerializeConfig(t, &Config{Hash: h, S2KCount: c})
+ }
+ }
+}
+
+func testSerializeConfig(t *testing.T, c *Config) {
+ t.Logf("Running testSerializeConfig() with config: %+v", c)
+
+ buf := bytes.NewBuffer(nil)
+ key := make([]byte, 16)
+ passphrase := []byte("testing")
+ err := Serialize(buf, key, rand.Reader, passphrase, c)
+ if err != nil {
+ t.Errorf("failed to serialize: %s", err)
+ return
+ }
+
+ f, err := Parse(buf)
+ if err != nil {
+ t.Errorf("failed to reparse: %s", err)
+ return
+ }
+ key2 := make([]byte, len(key))
+ f(key2, passphrase)
+ if !bytes.Equal(key2, key) {
+ t.Errorf("keys don't match: %x (serialied) vs %x (parsed)", key, key2)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/openpgp/write_test.go b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/write_test.go
new file mode 100644
index 000000000..8e9a33583
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/openpgp/write_test.go
@@ -0,0 +1,259 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package openpgp
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "testing"
+ "time"
+
+ "golang.org/x/crypto/openpgp/packet"
+)
+
+func TestSignDetached(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex))
+ out := bytes.NewBuffer(nil)
+ message := bytes.NewBufferString(signedInput)
+ err := DetachSign(out, kring[0], message, nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ testDetachedSignature(t, kring, out, signedInput, "check", testKey1KeyId)
+}
+
+func TestSignTextDetached(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex))
+ out := bytes.NewBuffer(nil)
+ message := bytes.NewBufferString(signedInput)
+ err := DetachSignText(out, kring[0], message, nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ testDetachedSignature(t, kring, out, signedInput, "check", testKey1KeyId)
+}
+
+func TestSignDetachedDSA(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(dsaTestKeyPrivateHex))
+ out := bytes.NewBuffer(nil)
+ message := bytes.NewBufferString(signedInput)
+ err := DetachSign(out, kring[0], message, nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ testDetachedSignature(t, kring, out, signedInput, "check", testKey3KeyId)
+}
+
+func TestNewEntity(t *testing.T) {
+ if testing.Short() {
+ return
+ }
+
+ // Check bit-length with no config.
+ e, err := NewEntity("Test User", "test", "test@example.com", nil)
+ if err != nil {
+ t.Errorf("failed to create entity: %s", err)
+ return
+ }
+ bl, err := e.PrimaryKey.BitLength()
+ if err != nil {
+ t.Errorf("failed to find bit length: %s", err)
+ }
+ if int(bl) != defaultRSAKeyBits {
+ t.Errorf("BitLength %v, expected %v", defaultRSAKeyBits)
+ }
+
+ // Check bit-length with a config.
+ cfg := &packet.Config{RSABits: 1024}
+ e, err = NewEntity("Test User", "test", "test@example.com", cfg)
+ if err != nil {
+ t.Errorf("failed to create entity: %s", err)
+ return
+ }
+ bl, err = e.PrimaryKey.BitLength()
+ if err != nil {
+ t.Errorf("failed to find bit length: %s", err)
+ }
+ if int(bl) != cfg.RSABits {
+ t.Errorf("BitLength %v, expected %v", bl, cfg.RSABits)
+ }
+
+ w := bytes.NewBuffer(nil)
+ if err := e.SerializePrivate(w, nil); err != nil {
+ t.Errorf("failed to serialize entity: %s", err)
+ return
+ }
+ serialized := w.Bytes()
+
+ el, err := ReadKeyRing(w)
+ if err != nil {
+ t.Errorf("failed to reparse entity: %s", err)
+ return
+ }
+
+ if len(el) != 1 {
+ t.Errorf("wrong number of entities found, got %d, want 1", len(el))
+ }
+
+ w = bytes.NewBuffer(nil)
+ if err := e.SerializePrivate(w, nil); err != nil {
+ t.Errorf("failed to serialize entity second time: %s", err)
+ return
+ }
+
+ if !bytes.Equal(w.Bytes(), serialized) {
+ t.Errorf("results differed")
+ }
+}
+
+func TestSymmetricEncryption(t *testing.T) {
+ buf := new(bytes.Buffer)
+ plaintext, err := SymmetricallyEncrypt(buf, []byte("testing"), nil, nil)
+ if err != nil {
+ t.Errorf("error writing headers: %s", err)
+ return
+ }
+ message := []byte("hello world\n")
+ _, err = plaintext.Write(message)
+ if err != nil {
+ t.Errorf("error writing to plaintext writer: %s", err)
+ }
+ err = plaintext.Close()
+ if err != nil {
+ t.Errorf("error closing plaintext writer: %s", err)
+ }
+
+ md, err := ReadMessage(buf, nil, func(keys []Key, symmetric bool) ([]byte, error) {
+ return []byte("testing"), nil
+ }, nil)
+ if err != nil {
+ t.Errorf("error rereading message: %s", err)
+ }
+ messageBuf := bytes.NewBuffer(nil)
+ _, err = io.Copy(messageBuf, md.UnverifiedBody)
+ if err != nil {
+ t.Errorf("error rereading message: %s", err)
+ }
+ if !bytes.Equal(message, messageBuf.Bytes()) {
+ t.Errorf("recovered message incorrect got '%s', want '%s'", messageBuf.Bytes(), message)
+ }
+}
+
+var testEncryptionTests = []struct {
+ keyRingHex string
+ isSigned bool
+}{
+ {
+ testKeys1And2PrivateHex,
+ false,
+ },
+ {
+ testKeys1And2PrivateHex,
+ true,
+ },
+ {
+ dsaElGamalTestKeysHex,
+ false,
+ },
+ {
+ dsaElGamalTestKeysHex,
+ true,
+ },
+}
+
+func TestEncryption(t *testing.T) {
+ for i, test := range testEncryptionTests {
+ kring, _ := ReadKeyRing(readerFromHex(test.keyRingHex))
+
+ passphrase := []byte("passphrase")
+ for _, entity := range kring {
+ if entity.PrivateKey != nil && entity.PrivateKey.Encrypted {
+ err := entity.PrivateKey.Decrypt(passphrase)
+ if err != nil {
+ t.Errorf("#%d: failed to decrypt key", i)
+ }
+ }
+ for _, subkey := range entity.Subkeys {
+ if subkey.PrivateKey != nil && subkey.PrivateKey.Encrypted {
+ err := subkey.PrivateKey.Decrypt(passphrase)
+ if err != nil {
+ t.Errorf("#%d: failed to decrypt subkey", i)
+ }
+ }
+ }
+ }
+
+ var signed *Entity
+ if test.isSigned {
+ signed = kring[0]
+ }
+
+ buf := new(bytes.Buffer)
+ w, err := Encrypt(buf, kring[:1], signed, nil /* no hints */, nil)
+ if err != nil {
+ t.Errorf("#%d: error in Encrypt: %s", i, err)
+ continue
+ }
+
+ const message = "testing"
+ _, err = w.Write([]byte(message))
+ if err != nil {
+ t.Errorf("#%d: error writing plaintext: %s", i, err)
+ continue
+ }
+ err = w.Close()
+ if err != nil {
+ t.Errorf("#%d: error closing WriteCloser: %s", i, err)
+ continue
+ }
+
+ md, err := ReadMessage(buf, kring, nil /* no prompt */, nil)
+ if err != nil {
+ t.Errorf("#%d: error reading message: %s", i, err)
+ continue
+ }
+
+ testTime, _ := time.Parse("2006-01-02", "2013-07-01")
+ if test.isSigned {
+ signKey, _ := kring[0].signingKey(testTime)
+ expectedKeyId := signKey.PublicKey.KeyId
+ if md.SignedByKeyId != expectedKeyId {
+ t.Errorf("#%d: message signed by wrong key id, got: %d, want: %d", i, *md.SignedBy, expectedKeyId)
+ }
+ if md.SignedBy == nil {
+ t.Errorf("#%d: failed to find the signing Entity", i)
+ }
+ }
+
+ plaintext, err := ioutil.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ t.Errorf("#%d: error reading encrypted contents: %s", i, err)
+ continue
+ }
+
+ encryptKey, _ := kring[0].encryptionKey(testTime)
+ expectedKeyId := encryptKey.PublicKey.KeyId
+ if len(md.EncryptedToKeyIds) != 1 || md.EncryptedToKeyIds[0] != expectedKeyId {
+ t.Errorf("#%d: expected message to be encrypted to %v, but got %#v", i, expectedKeyId, md.EncryptedToKeyIds)
+ }
+
+ if string(plaintext) != message {
+ t.Errorf("#%d: got: %s, want: %s", i, string(plaintext), message)
+ }
+
+ if test.isSigned {
+ if md.SignatureError != nil {
+ t.Errorf("#%d: signature error: %s", i, md.SignatureError)
+ }
+ if md.Signature == nil {
+ t.Error("signature missing")
+ }
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/agent/client_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/agent/client_test.go
new file mode 100644
index 000000000..ec7198d54
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/agent/client_test.go
@@ -0,0 +1,287 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package agent
+
+import (
+ "bytes"
+ "crypto/rand"
+ "errors"
+ "net"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "testing"
+
+ "golang.org/x/crypto/ssh"
+)
+
+// startAgent executes ssh-agent and returns an Agent interface to it.
+func startAgent(t *testing.T) (client Agent, socket string, cleanup func()) {
+ if testing.Short() {
+ // ssh-agent is not always available, and the key
+ // types supported vary by platform.
+ t.Skip("skipping test due to -short")
+ }
+
+ bin, err := exec.LookPath("ssh-agent")
+ if err != nil {
+ t.Skip("could not find ssh-agent")
+ }
+
+ cmd := exec.Command(bin, "-s")
+ out, err := cmd.Output()
+ if err != nil {
+ t.Fatalf("cmd.Output: %v", err)
+ }
+
+ /* Output looks like:
+
+ SSH_AUTH_SOCK=/tmp/ssh-P65gpcqArqvH/agent.15541; export SSH_AUTH_SOCK;
+ SSH_AGENT_PID=15542; export SSH_AGENT_PID;
+ echo Agent pid 15542;
+ */
+ fields := bytes.Split(out, []byte(";"))
+ line := bytes.SplitN(fields[0], []byte("="), 2)
+ line[0] = bytes.TrimLeft(line[0], "\n")
+ if string(line[0]) != "SSH_AUTH_SOCK" {
+ t.Fatalf("could not find key SSH_AUTH_SOCK in %q", fields[0])
+ }
+ socket = string(line[1])
+
+ line = bytes.SplitN(fields[2], []byte("="), 2)
+ line[0] = bytes.TrimLeft(line[0], "\n")
+ if string(line[0]) != "SSH_AGENT_PID" {
+ t.Fatalf("could not find key SSH_AGENT_PID in %q", fields[2])
+ }
+ pidStr := line[1]
+ pid, err := strconv.Atoi(string(pidStr))
+ if err != nil {
+ t.Fatalf("Atoi(%q): %v", pidStr, err)
+ }
+
+ conn, err := net.Dial("unix", string(socket))
+ if err != nil {
+ t.Fatalf("net.Dial: %v", err)
+ }
+
+ ac := NewClient(conn)
+ return ac, socket, func() {
+ proc, _ := os.FindProcess(pid)
+ if proc != nil {
+ proc.Kill()
+ }
+ conn.Close()
+ os.RemoveAll(filepath.Dir(socket))
+ }
+}
+
+func testAgent(t *testing.T, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) {
+ agent, _, cleanup := startAgent(t)
+ defer cleanup()
+
+ testAgentInterface(t, agent, key, cert, lifetimeSecs)
+}
+
+func testAgentInterface(t *testing.T, agent Agent, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) {
+ signer, err := ssh.NewSignerFromKey(key)
+ if err != nil {
+ t.Fatalf("NewSignerFromKey(%T): %v", key, err)
+ }
+ // The agent should start up empty.
+ if keys, err := agent.List(); err != nil {
+ t.Fatalf("RequestIdentities: %v", err)
+ } else if len(keys) > 0 {
+ t.Fatalf("got %d keys, want 0: %v", len(keys), keys)
+ }
+
+ // Attempt to insert the key, with certificate if specified.
+ var pubKey ssh.PublicKey
+ if cert != nil {
+ err = agent.Add(AddedKey{
+ PrivateKey: key,
+ Certificate: cert,
+ Comment: "comment",
+ LifetimeSecs: lifetimeSecs,
+ })
+ pubKey = cert
+ } else {
+ err = agent.Add(AddedKey{PrivateKey: key, Comment: "comment", LifetimeSecs: lifetimeSecs})
+ pubKey = signer.PublicKey()
+ }
+ if err != nil {
+ t.Fatalf("insert(%T): %v", key, err)
+ }
+
+ // Did the key get inserted successfully?
+ if keys, err := agent.List(); err != nil {
+ t.Fatalf("List: %v", err)
+ } else if len(keys) != 1 {
+ t.Fatalf("got %v, want 1 key", keys)
+ } else if keys[0].Comment != "comment" {
+ t.Fatalf("key comment: got %v, want %v", keys[0].Comment, "comment")
+ } else if !bytes.Equal(keys[0].Blob, pubKey.Marshal()) {
+ t.Fatalf("key mismatch")
+ }
+
+ // Can the agent make a valid signature?
+ data := []byte("hello")
+ sig, err := agent.Sign(pubKey, data)
+ if err != nil {
+ t.Fatalf("Sign(%s): %v", pubKey.Type(), err)
+ }
+
+ if err := pubKey.Verify(data, sig); err != nil {
+ t.Fatalf("Verify(%s): %v", pubKey.Type(), err)
+ }
+}
+
+func TestAgent(t *testing.T) {
+ for _, keyType := range []string{"rsa", "dsa", "ecdsa"} {
+ testAgent(t, testPrivateKeys[keyType], nil, 0)
+ }
+}
+
+func TestCert(t *testing.T) {
+ cert := &ssh.Certificate{
+ Key: testPublicKeys["rsa"],
+ ValidBefore: ssh.CertTimeInfinity,
+ CertType: ssh.UserCert,
+ }
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+
+ testAgent(t, testPrivateKeys["rsa"], cert, 0)
+}
+
+func TestConstraints(t *testing.T) {
+ testAgent(t, testPrivateKeys["rsa"], nil, 3600 /* lifetime in seconds */)
+}
+
+// netPipe is analogous to net.Pipe, but it uses a real net.Conn, and
+// therefore is buffered (net.Pipe deadlocks if both sides start with
+// a write.)
+func netPipe() (net.Conn, net.Conn, error) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, nil, err
+ }
+ defer listener.Close()
+ c1, err := net.Dial("tcp", listener.Addr().String())
+ if err != nil {
+ return nil, nil, err
+ }
+
+ c2, err := listener.Accept()
+ if err != nil {
+ c1.Close()
+ return nil, nil, err
+ }
+
+ return c1, c2, nil
+}
+
+func TestAuth(t *testing.T) {
+ a, b, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+
+ defer a.Close()
+ defer b.Close()
+
+ agent, _, cleanup := startAgent(t)
+ defer cleanup()
+
+ if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["rsa"], Comment: "comment"}); err != nil {
+ t.Errorf("Add: %v", err)
+ }
+
+ serverConf := ssh.ServerConfig{}
+ serverConf.AddHostKey(testSigners["rsa"])
+ serverConf.PublicKeyCallback = func(c ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
+ if bytes.Equal(key.Marshal(), testPublicKeys["rsa"].Marshal()) {
+ return nil, nil
+ }
+
+ return nil, errors.New("pubkey rejected")
+ }
+
+ go func() {
+ conn, _, _, err := ssh.NewServerConn(a, &serverConf)
+ if err != nil {
+ t.Fatalf("Server: %v", err)
+ }
+ conn.Close()
+ }()
+
+ conf := ssh.ClientConfig{}
+ conf.Auth = append(conf.Auth, ssh.PublicKeysCallback(agent.Signers))
+ conn, _, _, err := ssh.NewClientConn(b, "", &conf)
+ if err != nil {
+ t.Fatalf("NewClientConn: %v", err)
+ }
+ conn.Close()
+}
+
+func TestLockClient(t *testing.T) {
+ agent, _, cleanup := startAgent(t)
+ defer cleanup()
+ testLockAgent(agent, t)
+}
+
+func testLockAgent(agent Agent, t *testing.T) {
+ if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["rsa"], Comment: "comment 1"}); err != nil {
+ t.Errorf("Add: %v", err)
+ }
+ if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["dsa"], Comment: "comment dsa"}); err != nil {
+ t.Errorf("Add: %v", err)
+ }
+ if keys, err := agent.List(); err != nil {
+ t.Errorf("List: %v", err)
+ } else if len(keys) != 2 {
+ t.Errorf("Want 2 keys, got %v", keys)
+ }
+
+ passphrase := []byte("secret")
+ if err := agent.Lock(passphrase); err != nil {
+ t.Errorf("Lock: %v", err)
+ }
+
+ if keys, err := agent.List(); err != nil {
+ t.Errorf("List: %v", err)
+ } else if len(keys) != 0 {
+ t.Errorf("Want 0 keys, got %v", keys)
+ }
+
+ signer, _ := ssh.NewSignerFromKey(testPrivateKeys["rsa"])
+ if _, err := agent.Sign(signer.PublicKey(), []byte("hello")); err == nil {
+ t.Fatalf("Sign did not fail")
+ }
+
+ if err := agent.Remove(signer.PublicKey()); err == nil {
+ t.Fatalf("Remove did not fail")
+ }
+
+ if err := agent.RemoveAll(); err == nil {
+ t.Fatalf("RemoveAll did not fail")
+ }
+
+ if err := agent.Unlock(nil); err == nil {
+ t.Errorf("Unlock with wrong passphrase succeeded")
+ }
+ if err := agent.Unlock(passphrase); err != nil {
+ t.Errorf("Unlock: %v", err)
+ }
+
+ if err := agent.Remove(signer.PublicKey()); err != nil {
+ t.Fatalf("Remove: %v", err)
+ }
+
+ if keys, err := agent.List(); err != nil {
+ t.Errorf("List: %v", err)
+ } else if len(keys) != 1 {
+ t.Errorf("Want 1 keys, got %v", keys)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/agent/server_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/agent/server_test.go
new file mode 100644
index 000000000..ef0ab2934
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/agent/server_test.go
@@ -0,0 +1,77 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package agent
+
+import (
+ "testing"
+
+ "golang.org/x/crypto/ssh"
+)
+
+func TestServer(t *testing.T) {
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+ defer c1.Close()
+ defer c2.Close()
+ client := NewClient(c1)
+
+ go ServeAgent(NewKeyring(), c2)
+
+ testAgentInterface(t, client, testPrivateKeys["rsa"], nil, 0)
+}
+
+func TestLockServer(t *testing.T) {
+ testLockAgent(NewKeyring(), t)
+}
+
+func TestSetupForwardAgent(t *testing.T) {
+ a, b, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+
+ defer a.Close()
+ defer b.Close()
+
+ _, socket, cleanup := startAgent(t)
+ defer cleanup()
+
+ serverConf := ssh.ServerConfig{
+ NoClientAuth: true,
+ }
+ serverConf.AddHostKey(testSigners["rsa"])
+ incoming := make(chan *ssh.ServerConn, 1)
+ go func() {
+ conn, _, _, err := ssh.NewServerConn(a, &serverConf)
+ if err != nil {
+ t.Fatalf("Server: %v", err)
+ }
+ incoming <- conn
+ }()
+
+ conf := ssh.ClientConfig{}
+ conn, chans, reqs, err := ssh.NewClientConn(b, "", &conf)
+ if err != nil {
+ t.Fatalf("NewClientConn: %v", err)
+ }
+ client := ssh.NewClient(conn, chans, reqs)
+
+ if err := ForwardToRemote(client, socket); err != nil {
+ t.Fatalf("SetupForwardAgent: %v", err)
+ }
+
+ server := <-incoming
+ ch, reqs, err := server.OpenChannel(channelType, nil)
+ if err != nil {
+ t.Fatalf("OpenChannel(%q): %v", channelType, err)
+ }
+ go ssh.DiscardRequests(reqs)
+
+ agentClient := NewClient(ch)
+ testAgentInterface(t, agentClient, testPrivateKeys["rsa"], nil, 0)
+ conn.Close()
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/agent/testdata_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/agent/testdata_test.go
new file mode 100644
index 000000000..b7a8781e1
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/agent/testdata_test.go
@@ -0,0 +1,64 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// IMPLEMENTOR NOTE: To avoid a package loop, this file is in three places:
+// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three
+// instances.
+
+package agent
+
+import (
+ "crypto/rand"
+ "fmt"
+
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/crypto/ssh/testdata"
+)
+
+var (
+ testPrivateKeys map[string]interface{}
+ testSigners map[string]ssh.Signer
+ testPublicKeys map[string]ssh.PublicKey
+)
+
+func init() {
+ var err error
+
+ n := len(testdata.PEMBytes)
+ testPrivateKeys = make(map[string]interface{}, n)
+ testSigners = make(map[string]ssh.Signer, n)
+ testPublicKeys = make(map[string]ssh.PublicKey, n)
+ for t, k := range testdata.PEMBytes {
+ testPrivateKeys[t], err = ssh.ParseRawPrivateKey(k)
+ if err != nil {
+ panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err))
+ }
+ testSigners[t], err = ssh.NewSignerFromKey(testPrivateKeys[t])
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err))
+ }
+ testPublicKeys[t] = testSigners[t].PublicKey()
+ }
+
+ // Create a cert and sign it for use in tests.
+ testCert := &ssh.Certificate{
+ Nonce: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
+ ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage
+ ValidAfter: 0, // unix epoch
+ ValidBefore: ssh.CertTimeInfinity, // The end of currently representable time.
+ Reserved: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
+ Key: testPublicKeys["ecdsa"],
+ SignatureKey: testPublicKeys["rsa"],
+ Permissions: ssh.Permissions{
+ CriticalOptions: map[string]string{},
+ Extensions: map[string]string{},
+ },
+ }
+ testCert.SignCert(rand.Reader, testSigners["rsa"])
+ testPrivateKeys["cert"] = testPrivateKeys["ecdsa"]
+ testSigners["cert"], err = ssh.NewCertSigner(testCert, testSigners["ecdsa"])
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create certificate signer: %v", err))
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/benchmark_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/benchmark_test.go
new file mode 100644
index 000000000..d9f7eb9b6
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/benchmark_test.go
@@ -0,0 +1,122 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "errors"
+ "io"
+ "net"
+ "testing"
+)
+
+type server struct {
+ *ServerConn
+ chans <-chan NewChannel
+}
+
+func newServer(c net.Conn, conf *ServerConfig) (*server, error) {
+ sconn, chans, reqs, err := NewServerConn(c, conf)
+ if err != nil {
+ return nil, err
+ }
+ go DiscardRequests(reqs)
+ return &server{sconn, chans}, nil
+}
+
+func (s *server) Accept() (NewChannel, error) {
+ n, ok := <-s.chans
+ if !ok {
+ return nil, io.EOF
+ }
+ return n, nil
+}
+
+func sshPipe() (Conn, *server, error) {
+ c1, c2, err := netPipe()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ clientConf := ClientConfig{
+ User: "user",
+ }
+ serverConf := ServerConfig{
+ NoClientAuth: true,
+ }
+ serverConf.AddHostKey(testSigners["ecdsa"])
+ done := make(chan *server, 1)
+ go func() {
+ server, err := newServer(c2, &serverConf)
+ if err != nil {
+ done <- nil
+ }
+ done <- server
+ }()
+
+ client, _, reqs, err := NewClientConn(c1, "", &clientConf)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ server := <-done
+ if server == nil {
+ return nil, nil, errors.New("server handshake failed.")
+ }
+ go DiscardRequests(reqs)
+
+ return client, server, nil
+}
+
+func BenchmarkEndToEnd(b *testing.B) {
+ b.StopTimer()
+
+ client, server, err := sshPipe()
+ if err != nil {
+ b.Fatalf("sshPipe: %v", err)
+ }
+
+ defer client.Close()
+ defer server.Close()
+
+ size := (1 << 20)
+ input := make([]byte, size)
+ output := make([]byte, size)
+ b.SetBytes(int64(size))
+ done := make(chan int, 1)
+
+ go func() {
+ newCh, err := server.Accept()
+ if err != nil {
+ b.Fatalf("Client: %v", err)
+ }
+ ch, incoming, err := newCh.Accept()
+ go DiscardRequests(incoming)
+ for i := 0; i < b.N; i++ {
+ if _, err := io.ReadFull(ch, output); err != nil {
+ b.Fatalf("ReadFull: %v", err)
+ }
+ }
+ ch.Close()
+ done <- 1
+ }()
+
+ ch, in, err := client.OpenChannel("speed", nil)
+ if err != nil {
+ b.Fatalf("OpenChannel: %v", err)
+ }
+ go DiscardRequests(in)
+
+ b.ResetTimer()
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ if _, err := ch.Write(input); err != nil {
+ b.Fatalf("WriteFull: %v", err)
+ }
+ }
+ ch.Close()
+ b.StopTimer()
+
+ <-done
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/buffer_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/buffer_test.go
new file mode 100644
index 000000000..d5781cb3d
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/buffer_test.go
@@ -0,0 +1,87 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "io"
+ "testing"
+)
+
+var alphabet = []byte("abcdefghijklmnopqrstuvwxyz")
+
+func TestBufferReadwrite(t *testing.T) {
+ b := newBuffer()
+ b.write(alphabet[:10])
+ r, _ := b.Read(make([]byte, 10))
+ if r != 10 {
+ t.Fatalf("Expected written == read == 10, written: 10, read %d", r)
+ }
+
+ b = newBuffer()
+ b.write(alphabet[:5])
+ r, _ = b.Read(make([]byte, 10))
+ if r != 5 {
+ t.Fatalf("Expected written == read == 5, written: 5, read %d", r)
+ }
+
+ b = newBuffer()
+ b.write(alphabet[:10])
+ r, _ = b.Read(make([]byte, 5))
+ if r != 5 {
+ t.Fatalf("Expected written == 10, read == 5, written: 10, read %d", r)
+ }
+
+ b = newBuffer()
+ b.write(alphabet[:5])
+ b.write(alphabet[5:15])
+ r, _ = b.Read(make([]byte, 10))
+ r2, _ := b.Read(make([]byte, 10))
+ if r != 10 || r2 != 5 || 15 != r+r2 {
+ t.Fatal("Expected written == read == 15")
+ }
+}
+
+func TestBufferClose(t *testing.T) {
+ b := newBuffer()
+ b.write(alphabet[:10])
+ b.eof()
+ _, err := b.Read(make([]byte, 5))
+ if err != nil {
+ t.Fatal("expected read of 5 to not return EOF")
+ }
+ b = newBuffer()
+ b.write(alphabet[:10])
+ b.eof()
+ r, err := b.Read(make([]byte, 5))
+ r2, err2 := b.Read(make([]byte, 10))
+ if r != 5 || r2 != 5 || err != nil || err2 != nil {
+ t.Fatal("expected reads of 5 and 5")
+ }
+
+ b = newBuffer()
+ b.write(alphabet[:10])
+ b.eof()
+ r, err = b.Read(make([]byte, 5))
+ r2, err2 = b.Read(make([]byte, 10))
+ r3, err3 := b.Read(make([]byte, 10))
+ if r != 5 || r2 != 5 || r3 != 0 || err != nil || err2 != nil || err3 != io.EOF {
+ t.Fatal("expected reads of 5 and 5 and 0, with EOF")
+ }
+
+ b = newBuffer()
+ b.write(make([]byte, 5))
+ b.write(make([]byte, 10))
+ b.eof()
+ r, err = b.Read(make([]byte, 9))
+ r2, err2 = b.Read(make([]byte, 3))
+ r3, err3 = b.Read(make([]byte, 3))
+ r4, err4 := b.Read(make([]byte, 10))
+ if err != nil || err2 != nil || err3 != nil || err4 != io.EOF {
+ t.Fatalf("Expected EOF on forth read only, err=%v, err2=%v, err3=%v, err4=%v", err, err2, err3, err4)
+ }
+ if r != 9 || r2 != 3 || r3 != 3 || r4 != 0 {
+ t.Fatal("Expected written == read == 15", r, r2, r3, r4)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/certs_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/certs_test.go
new file mode 100644
index 000000000..c5f2e5330
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/certs_test.go
@@ -0,0 +1,216 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "crypto/rand"
+ "reflect"
+ "testing"
+ "time"
+)
+
+// Cert generated by ssh-keygen 6.0p1 Debian-4.
+// % ssh-keygen -s ca-key -I test user-key
+const exampleSSHCert = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgb1srW/W3ZDjYAO45xLYAwzHBDLsJ4Ux6ICFIkTjb1LEAAAADAQABAAAAYQCkoR51poH0wE8w72cqSB8Sszx+vAhzcMdCO0wqHTj7UNENHWEXGrU0E0UQekD7U+yhkhtoyjbPOVIP7hNa6aRk/ezdh/iUnCIt4Jt1v3Z1h1P+hA4QuYFMHNB+rmjPwAcAAAAAAAAAAAAAAAEAAAAEdGVzdAAAAAAAAAAAAAAAAP//////////AAAAAAAAAIIAAAAVcGVybWl0LVgxMS1mb3J3YXJkaW5nAAAAAAAAABdwZXJtaXQtYWdlbnQtZm9yd2FyZGluZwAAAAAAAAAWcGVybWl0LXBvcnQtZm9yd2FyZGluZwAAAAAAAAAKcGVybWl0LXB0eQAAAAAAAAAOcGVybWl0LXVzZXItcmMAAAAAAAAAAAAAAHcAAAAHc3NoLXJzYQAAAAMBAAEAAABhANFS2kaktpSGc+CcmEKPyw9mJC4nZKxHKTgLVZeaGbFZOvJTNzBspQHdy7Q1uKSfktxpgjZnksiu/tFF9ngyY2KFoc+U88ya95IZUycBGCUbBQ8+bhDtw/icdDGQD5WnUwAAAG8AAAAHc3NoLXJzYQAAAGC8Y9Z2LQKhIhxf52773XaWrXdxP0t3GBVo4A10vUWiYoAGepr6rQIoGGXFxT4B9Gp+nEBJjOwKDXPrAevow0T9ca8gZN+0ykbhSrXLE5Ao48rqr3zP4O1/9P7e6gp0gw8=`
+
+func TestParseCert(t *testing.T) {
+ authKeyBytes := []byte(exampleSSHCert)
+
+ key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes)
+ if err != nil {
+ t.Fatalf("ParseAuthorizedKey: %v", err)
+ }
+ if len(rest) > 0 {
+ t.Errorf("rest: got %q, want empty", rest)
+ }
+
+ if _, ok := key.(*Certificate); !ok {
+ t.Fatalf("got %v (%T), want *Certificate", key, key)
+ }
+
+ marshaled := MarshalAuthorizedKey(key)
+ // Before comparison, remove the trailing newline that
+ // MarshalAuthorizedKey adds.
+ marshaled = marshaled[:len(marshaled)-1]
+ if !bytes.Equal(authKeyBytes, marshaled) {
+ t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes)
+ }
+}
+
+// Cert generated by ssh-keygen OpenSSH_6.8p1 OS X 10.10.3
+// % ssh-keygen -s ca -I testcert -O source-address=192.168.1.0/24 -O force-command=/bin/sleep user.pub
+// user.pub key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMN
+// Critical Options:
+// force-command /bin/sleep
+// source-address 192.168.1.0/24
+// Extensions:
+// permit-X11-forwarding
+// permit-agent-forwarding
+// permit-port-forwarding
+// permit-pty
+// permit-user-rc
+const exampleSSHCertWithOptions = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgDyysCJY0XrO1n03EeRRoITnTPdjENFmWDs9X58PP3VUAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMNAAAAAAAAAAAAAAABAAAACHRlc3RjZXJ0AAAAAAAAAAAAAAAA//////////8AAABLAAAADWZvcmNlLWNvbW1hbmQAAAAOAAAACi9iaW4vc2xlZXAAAAAOc291cmNlLWFkZHJlc3MAAAASAAAADjE5Mi4xNjguMS4wLzI0AAAAggAAABVwZXJtaXQtWDExLWZvcndhcmRpbmcAAAAAAAAAF3Blcm1pdC1hZ2VudC1mb3J3YXJkaW5nAAAAAAAAABZwZXJtaXQtcG9ydC1mb3J3YXJkaW5nAAAAAAAAAApwZXJtaXQtcHR5AAAAAAAAAA5wZXJtaXQtdXNlci1yYwAAAAAAAAAAAAABFwAAAAdzc2gtcnNhAAAAAwEAAQAAAQEAwU+c5ui5A8+J/CFpjW8wCa52bEODA808WWQDCSuTG/eMXNf59v9Y8Pk0F1E9dGCosSNyVcB/hacUrc6He+i97+HJCyKavBsE6GDxrjRyxYqAlfcOXi/IVmaUGiO8OQ39d4GHrjToInKvExSUeleQyH4Y4/e27T/pILAqPFL3fyrvMLT5qU9QyIt6zIpa7GBP5+urouNavMprV3zsfIqNBbWypinOQAw823a5wN+zwXnhZrgQiHZ/USG09Y6k98y1dTVz8YHlQVR4D3lpTAsKDKJ5hCH9WU4fdf+lU8OyNGaJ/vz0XNqxcToe1l4numLTnaoSuH89pHryjqurB7lJKwAAAQ8AAAAHc3NoLXJzYQAAAQCaHvUIoPL1zWUHIXLvu96/HU1s/i4CAW2IIEuGgxCUCiFj6vyTyYtgxQxcmbfZf6eaITlS6XJZa7Qq4iaFZh75C1DXTX8labXhRSD4E2t//AIP9MC1rtQC5xo6FmbQ+BoKcDskr+mNACcbRSxs3IL3bwCfWDnIw2WbVox9ZdcthJKk4UoCW4ix4QwdHw7zlddlz++fGEEVhmTbll1SUkycGApPFBsAYRTMupUJcYPIeReBI/m8XfkoMk99bV8ZJQTAd7OekHY2/48Ff53jLmyDjP7kNw1F8OaPtkFs6dGJXta4krmaekPy87j+35In5hFj7yoOqvSbmYUkeX70/GGQ`
+
+func TestParseCertWithOptions(t *testing.T) {
+ opts := map[string]string{
+ "source-address": "192.168.1.0/24",
+ "force-command": "/bin/sleep",
+ }
+ exts := map[string]string{
+ "permit-X11-forwarding": "",
+ "permit-agent-forwarding": "",
+ "permit-port-forwarding": "",
+ "permit-pty": "",
+ "permit-user-rc": "",
+ }
+ authKeyBytes := []byte(exampleSSHCertWithOptions)
+
+ key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes)
+ if err != nil {
+ t.Fatalf("ParseAuthorizedKey: %v", err)
+ }
+ if len(rest) > 0 {
+ t.Errorf("rest: got %q, want empty", rest)
+ }
+ cert, ok := key.(*Certificate)
+ if !ok {
+ t.Fatalf("got %v (%T), want *Certificate", key, key)
+ }
+ if !reflect.DeepEqual(cert.CriticalOptions, opts) {
+ t.Errorf("unexpected critical options - got %v, want %v", cert.CriticalOptions, opts)
+ }
+ if !reflect.DeepEqual(cert.Extensions, exts) {
+ t.Errorf("unexpected Extensions - got %v, want %v", cert.Extensions, exts)
+ }
+ marshaled := MarshalAuthorizedKey(key)
+ // Before comparison, remove the trailing newline that
+ // MarshalAuthorizedKey adds.
+ marshaled = marshaled[:len(marshaled)-1]
+ if !bytes.Equal(authKeyBytes, marshaled) {
+ t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes)
+ }
+}
+
+func TestValidateCert(t *testing.T) {
+ key, _, _, _, err := ParseAuthorizedKey([]byte(exampleSSHCert))
+ if err != nil {
+ t.Fatalf("ParseAuthorizedKey: %v", err)
+ }
+ validCert, ok := key.(*Certificate)
+ if !ok {
+ t.Fatalf("got %v (%T), want *Certificate", key, key)
+ }
+ checker := CertChecker{}
+ checker.IsAuthority = func(k PublicKey) bool {
+ return bytes.Equal(k.Marshal(), validCert.SignatureKey.Marshal())
+ }
+
+ if err := checker.CheckCert("user", validCert); err != nil {
+ t.Errorf("Unable to validate certificate: %v", err)
+ }
+ invalidCert := &Certificate{
+ Key: testPublicKeys["rsa"],
+ SignatureKey: testPublicKeys["ecdsa"],
+ ValidBefore: CertTimeInfinity,
+ Signature: &Signature{},
+ }
+ if err := checker.CheckCert("user", invalidCert); err == nil {
+ t.Error("Invalid cert signature passed validation")
+ }
+}
+
+func TestValidateCertTime(t *testing.T) {
+ cert := Certificate{
+ ValidPrincipals: []string{"user"},
+ Key: testPublicKeys["rsa"],
+ ValidAfter: 50,
+ ValidBefore: 100,
+ }
+
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+
+ for ts, ok := range map[int64]bool{
+ 25: false,
+ 50: true,
+ 99: true,
+ 100: false,
+ 125: false,
+ } {
+ checker := CertChecker{
+ Clock: func() time.Time { return time.Unix(ts, 0) },
+ }
+ checker.IsAuthority = func(k PublicKey) bool {
+ return bytes.Equal(k.Marshal(),
+ testPublicKeys["ecdsa"].Marshal())
+ }
+
+ if v := checker.CheckCert("user", &cert); (v == nil) != ok {
+ t.Errorf("Authenticate(%d): %v", ts, v)
+ }
+ }
+}
+
+// TODO(hanwen): tests for
+//
+// host keys:
+// * fallbacks
+
+func TestHostKeyCert(t *testing.T) {
+ cert := &Certificate{
+ ValidPrincipals: []string{"hostname", "hostname.domain"},
+ Key: testPublicKeys["rsa"],
+ ValidBefore: CertTimeInfinity,
+ CertType: HostCert,
+ }
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+
+ checker := &CertChecker{
+ IsAuthority: func(p PublicKey) bool {
+ return bytes.Equal(testPublicKeys["ecdsa"].Marshal(), p.Marshal())
+ },
+ }
+
+ certSigner, err := NewCertSigner(cert, testSigners["rsa"])
+ if err != nil {
+ t.Errorf("NewCertSigner: %v", err)
+ }
+
+ for _, name := range []string{"hostname", "otherhost"} {
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+ defer c1.Close()
+ defer c2.Close()
+
+ errc := make(chan error)
+
+ go func() {
+ conf := ServerConfig{
+ NoClientAuth: true,
+ }
+ conf.AddHostKey(certSigner)
+ _, _, _, err := NewServerConn(c1, &conf)
+ errc <- err
+ }()
+
+ config := &ClientConfig{
+ User: "user",
+ HostKeyCallback: checker.CheckHostKey,
+ }
+ _, _, _, err = NewClientConn(c2, name, config)
+
+ succeed := name == "hostname"
+ if (err == nil) != succeed {
+ t.Fatalf("NewClientConn(%q): %v", name, err)
+ }
+
+ err = <-errc
+ if (err == nil) != succeed {
+ t.Fatalf("NewServerConn(%q): %v", name, err)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/cipher_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/cipher_test.go
new file mode 100644
index 000000000..54b92b6ed
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/cipher_test.go
@@ -0,0 +1,127 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/aes"
+ "crypto/rand"
+ "testing"
+)
+
+func TestDefaultCiphersExist(t *testing.T) {
+ for _, cipherAlgo := range supportedCiphers {
+ if _, ok := cipherModes[cipherAlgo]; !ok {
+ t.Errorf("default cipher %q is unknown", cipherAlgo)
+ }
+ }
+}
+
+func TestPacketCiphers(t *testing.T) {
+ // Still test the aes128cbc cipher although it's commented out.
+ cipherModes[aes128cbcID] = &streamCipherMode{16, aes.BlockSize, 0, nil}
+ defer delete(cipherModes, aes128cbcID)
+
+ for cipher := range cipherModes {
+ kr := &kexResult{Hash: crypto.SHA1}
+ algs := directionAlgorithms{
+ Cipher: cipher,
+ MAC: "hmac-sha1",
+ Compression: "none",
+ }
+ client, err := newPacketCipher(clientKeys, algs, kr)
+ if err != nil {
+ t.Errorf("newPacketCipher(client, %q): %v", cipher, err)
+ continue
+ }
+ server, err := newPacketCipher(clientKeys, algs, kr)
+ if err != nil {
+ t.Errorf("newPacketCipher(client, %q): %v", cipher, err)
+ continue
+ }
+
+ want := "bla bla"
+ input := []byte(want)
+ buf := &bytes.Buffer{}
+ if err := client.writePacket(0, buf, rand.Reader, input); err != nil {
+ t.Errorf("writePacket(%q): %v", cipher, err)
+ continue
+ }
+
+ packet, err := server.readPacket(0, buf)
+ if err != nil {
+ t.Errorf("readPacket(%q): %v", cipher, err)
+ continue
+ }
+
+ if string(packet) != want {
+ t.Errorf("roundtrip(%q): got %q, want %q", cipher, packet, want)
+ }
+ }
+}
+
+func TestCBCOracleCounterMeasure(t *testing.T) {
+ cipherModes[aes128cbcID] = &streamCipherMode{16, aes.BlockSize, 0, nil}
+ defer delete(cipherModes, aes128cbcID)
+
+ kr := &kexResult{Hash: crypto.SHA1}
+ algs := directionAlgorithms{
+ Cipher: aes128cbcID,
+ MAC: "hmac-sha1",
+ Compression: "none",
+ }
+ client, err := newPacketCipher(clientKeys, algs, kr)
+ if err != nil {
+ t.Fatalf("newPacketCipher(client): %v", err)
+ }
+
+ want := "bla bla"
+ input := []byte(want)
+ buf := &bytes.Buffer{}
+ if err := client.writePacket(0, buf, rand.Reader, input); err != nil {
+ t.Errorf("writePacket: %v", err)
+ }
+
+ packetSize := buf.Len()
+ buf.Write(make([]byte, 2*maxPacket))
+
+ // We corrupt each byte, but this usually will only test the
+ // 'packet too large' or 'MAC failure' cases.
+ lastRead := -1
+ for i := 0; i < packetSize; i++ {
+ server, err := newPacketCipher(clientKeys, algs, kr)
+ if err != nil {
+ t.Fatalf("newPacketCipher(client): %v", err)
+ }
+
+ fresh := &bytes.Buffer{}
+ fresh.Write(buf.Bytes())
+ fresh.Bytes()[i] ^= 0x01
+
+ before := fresh.Len()
+ _, err = server.readPacket(0, fresh)
+ if err == nil {
+ t.Errorf("corrupt byte %d: readPacket succeeded ", i)
+ continue
+ }
+ if _, ok := err.(cbcError); !ok {
+ t.Errorf("corrupt byte %d: got %v (%T), want cbcError", i, err, err)
+ continue
+ }
+
+ after := fresh.Len()
+ bytesRead := before - after
+ if bytesRead < maxPacket {
+ t.Errorf("corrupt byte %d: read %d bytes, want more than %d", i, bytesRead, maxPacket)
+ continue
+ }
+
+ if i > 0 && bytesRead != lastRead {
+ t.Errorf("corrupt byte %d: read %d bytes, want %d bytes read", i, bytesRead, lastRead)
+ }
+ lastRead = bytesRead
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/client_auth_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/client_auth_test.go
new file mode 100644
index 000000000..2ea44624f
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/client_auth_test.go
@@ -0,0 +1,393 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "strings"
+ "testing"
+)
+
+type keyboardInteractive map[string]string
+
+func (cr keyboardInteractive) Challenge(user string, instruction string, questions []string, echos []bool) ([]string, error) {
+ var answers []string
+ for _, q := range questions {
+ answers = append(answers, cr[q])
+ }
+ return answers, nil
+}
+
+// reused internally by tests
+var clientPassword = "tiger"
+
+// tryAuth runs a handshake with a given config against an SSH server
+// with config serverConfig
+func tryAuth(t *testing.T, config *ClientConfig) error {
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+ defer c1.Close()
+ defer c2.Close()
+
+ certChecker := CertChecker{
+ IsAuthority: func(k PublicKey) bool {
+ return bytes.Equal(k.Marshal(), testPublicKeys["ecdsa"].Marshal())
+ },
+ UserKeyFallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) {
+ if conn.User() == "testuser" && bytes.Equal(key.Marshal(), testPublicKeys["rsa"].Marshal()) {
+ return nil, nil
+ }
+
+ return nil, fmt.Errorf("pubkey for %q not acceptable", conn.User())
+ },
+ IsRevoked: func(c *Certificate) bool {
+ return c.Serial == 666
+ },
+ }
+
+ serverConfig := &ServerConfig{
+ PasswordCallback: func(conn ConnMetadata, pass []byte) (*Permissions, error) {
+ if conn.User() == "testuser" && string(pass) == clientPassword {
+ return nil, nil
+ }
+ return nil, errors.New("password auth failed")
+ },
+ PublicKeyCallback: certChecker.Authenticate,
+ KeyboardInteractiveCallback: func(conn ConnMetadata, challenge KeyboardInteractiveChallenge) (*Permissions, error) {
+ ans, err := challenge("user",
+ "instruction",
+ []string{"question1", "question2"},
+ []bool{true, true})
+ if err != nil {
+ return nil, err
+ }
+ ok := conn.User() == "testuser" && ans[0] == "answer1" && ans[1] == "answer2"
+ if ok {
+ challenge("user", "motd", nil, nil)
+ return nil, nil
+ }
+ return nil, errors.New("keyboard-interactive failed")
+ },
+ AuthLogCallback: func(conn ConnMetadata, method string, err error) {
+ t.Logf("user %q, method %q: %v", conn.User(), method, err)
+ },
+ }
+ serverConfig.AddHostKey(testSigners["rsa"])
+
+ go newServer(c1, serverConfig)
+ _, _, _, err = NewClientConn(c2, "", config)
+ return err
+}
+
+func TestClientAuthPublicKey(t *testing.T) {
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ PublicKeys(testSigners["rsa"]),
+ },
+ }
+ if err := tryAuth(t, config); err != nil {
+ t.Fatalf("unable to dial remote side: %s", err)
+ }
+}
+
+func TestAuthMethodPassword(t *testing.T) {
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ Password(clientPassword),
+ },
+ }
+
+ if err := tryAuth(t, config); err != nil {
+ t.Fatalf("unable to dial remote side: %s", err)
+ }
+}
+
+func TestAuthMethodFallback(t *testing.T) {
+ var passwordCalled bool
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ PublicKeys(testSigners["rsa"]),
+ PasswordCallback(
+ func() (string, error) {
+ passwordCalled = true
+ return "WRONG", nil
+ }),
+ },
+ }
+
+ if err := tryAuth(t, config); err != nil {
+ t.Fatalf("unable to dial remote side: %s", err)
+ }
+
+ if passwordCalled {
+ t.Errorf("password auth tried before public-key auth.")
+ }
+}
+
+func TestAuthMethodWrongPassword(t *testing.T) {
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ Password("wrong"),
+ PublicKeys(testSigners["rsa"]),
+ },
+ }
+
+ if err := tryAuth(t, config); err != nil {
+ t.Fatalf("unable to dial remote side: %s", err)
+ }
+}
+
+func TestAuthMethodKeyboardInteractive(t *testing.T) {
+ answers := keyboardInteractive(map[string]string{
+ "question1": "answer1",
+ "question2": "answer2",
+ })
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ KeyboardInteractive(answers.Challenge),
+ },
+ }
+
+ if err := tryAuth(t, config); err != nil {
+ t.Fatalf("unable to dial remote side: %s", err)
+ }
+}
+
+func TestAuthMethodWrongKeyboardInteractive(t *testing.T) {
+ answers := keyboardInteractive(map[string]string{
+ "question1": "answer1",
+ "question2": "WRONG",
+ })
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ KeyboardInteractive(answers.Challenge),
+ },
+ }
+
+ if err := tryAuth(t, config); err == nil {
+ t.Fatalf("wrong answers should not have authenticated with KeyboardInteractive")
+ }
+}
+
+// the mock server will only authenticate ssh-rsa keys
+func TestAuthMethodInvalidPublicKey(t *testing.T) {
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ PublicKeys(testSigners["dsa"]),
+ },
+ }
+
+ if err := tryAuth(t, config); err == nil {
+ t.Fatalf("dsa private key should not have authenticated with rsa public key")
+ }
+}
+
+// the client should authenticate with the second key
+func TestAuthMethodRSAandDSA(t *testing.T) {
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ PublicKeys(testSigners["dsa"], testSigners["rsa"]),
+ },
+ }
+ if err := tryAuth(t, config); err != nil {
+ t.Fatalf("client could not authenticate with rsa key: %v", err)
+ }
+}
+
+func TestClientHMAC(t *testing.T) {
+ for _, mac := range supportedMACs {
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ PublicKeys(testSigners["rsa"]),
+ },
+ Config: Config{
+ MACs: []string{mac},
+ },
+ }
+ if err := tryAuth(t, config); err != nil {
+ t.Fatalf("client could not authenticate with mac algo %s: %v", mac, err)
+ }
+ }
+}
+
+// issue 4285.
+func TestClientUnsupportedCipher(t *testing.T) {
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ PublicKeys(),
+ },
+ Config: Config{
+ Ciphers: []string{"aes128-cbc"}, // not currently supported
+ },
+ }
+ if err := tryAuth(t, config); err == nil {
+ t.Errorf("expected no ciphers in common")
+ }
+}
+
+func TestClientUnsupportedKex(t *testing.T) {
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ PublicKeys(),
+ },
+ Config: Config{
+ KeyExchanges: []string{"diffie-hellman-group-exchange-sha256"}, // not currently supported
+ },
+ }
+ if err := tryAuth(t, config); err == nil || !strings.Contains(err.Error(), "common algorithm") {
+ t.Errorf("got %v, expected 'common algorithm'", err)
+ }
+}
+
+func TestClientLoginCert(t *testing.T) {
+ cert := &Certificate{
+ Key: testPublicKeys["rsa"],
+ ValidBefore: CertTimeInfinity,
+ CertType: UserCert,
+ }
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+ certSigner, err := NewCertSigner(cert, testSigners["rsa"])
+ if err != nil {
+ t.Fatalf("NewCertSigner: %v", err)
+ }
+
+ clientConfig := &ClientConfig{
+ User: "user",
+ }
+ clientConfig.Auth = append(clientConfig.Auth, PublicKeys(certSigner))
+
+ t.Log("should succeed")
+ if err := tryAuth(t, clientConfig); err != nil {
+ t.Errorf("cert login failed: %v", err)
+ }
+
+ t.Log("corrupted signature")
+ cert.Signature.Blob[0]++
+ if err := tryAuth(t, clientConfig); err == nil {
+ t.Errorf("cert login passed with corrupted sig")
+ }
+
+ t.Log("revoked")
+ cert.Serial = 666
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+ if err := tryAuth(t, clientConfig); err == nil {
+ t.Errorf("revoked cert login succeeded")
+ }
+ cert.Serial = 1
+
+ t.Log("sign with wrong key")
+ cert.SignCert(rand.Reader, testSigners["dsa"])
+ if err := tryAuth(t, clientConfig); err == nil {
+ t.Errorf("cert login passed with non-authoritive key")
+ }
+
+ t.Log("host cert")
+ cert.CertType = HostCert
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+ if err := tryAuth(t, clientConfig); err == nil {
+ t.Errorf("cert login passed with wrong type")
+ }
+ cert.CertType = UserCert
+
+ t.Log("principal specified")
+ cert.ValidPrincipals = []string{"user"}
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+ if err := tryAuth(t, clientConfig); err != nil {
+ t.Errorf("cert login failed: %v", err)
+ }
+
+ t.Log("wrong principal specified")
+ cert.ValidPrincipals = []string{"fred"}
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+ if err := tryAuth(t, clientConfig); err == nil {
+ t.Errorf("cert login passed with wrong principal")
+ }
+ cert.ValidPrincipals = nil
+
+ t.Log("added critical option")
+ cert.CriticalOptions = map[string]string{"root-access": "yes"}
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+ if err := tryAuth(t, clientConfig); err == nil {
+ t.Errorf("cert login passed with unrecognized critical option")
+ }
+
+ t.Log("allowed source address")
+ cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42/24"}
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+ if err := tryAuth(t, clientConfig); err != nil {
+ t.Errorf("cert login with source-address failed: %v", err)
+ }
+
+ t.Log("disallowed source address")
+ cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42"}
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+ if err := tryAuth(t, clientConfig); err == nil {
+ t.Errorf("cert login with source-address succeeded")
+ }
+}
+
+func testPermissionsPassing(withPermissions bool, t *testing.T) {
+ serverConfig := &ServerConfig{
+ PublicKeyCallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) {
+ if conn.User() == "nopermissions" {
+ return nil, nil
+ } else {
+ return &Permissions{}, nil
+ }
+ },
+ }
+ serverConfig.AddHostKey(testSigners["rsa"])
+
+ clientConfig := &ClientConfig{
+ Auth: []AuthMethod{
+ PublicKeys(testSigners["rsa"]),
+ },
+ }
+ if withPermissions {
+ clientConfig.User = "permissions"
+ } else {
+ clientConfig.User = "nopermissions"
+ }
+
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+ defer c1.Close()
+ defer c2.Close()
+
+ go NewClientConn(c2, "", clientConfig)
+ serverConn, err := newServer(c1, serverConfig)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if p := serverConn.Permissions; (p != nil) != withPermissions {
+ t.Fatalf("withPermissions is %t, but Permissions object is %#v", withPermissions, p)
+ }
+}
+
+func TestPermissionsPassing(t *testing.T) {
+ testPermissionsPassing(true, t)
+}
+
+func TestNoPermissionsPassing(t *testing.T) {
+ testPermissionsPassing(false, t)
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/client_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/client_test.go
new file mode 100644
index 000000000..1fe790cb4
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/client_test.go
@@ -0,0 +1,39 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "net"
+ "testing"
+)
+
+func testClientVersion(t *testing.T, config *ClientConfig, expected string) {
+ clientConn, serverConn := net.Pipe()
+ defer clientConn.Close()
+ receivedVersion := make(chan string, 1)
+ go func() {
+ version, err := readVersion(serverConn)
+ if err != nil {
+ receivedVersion <- ""
+ } else {
+ receivedVersion <- string(version)
+ }
+ serverConn.Close()
+ }()
+ NewClientConn(clientConn, "", config)
+ actual := <-receivedVersion
+ if actual != expected {
+ t.Fatalf("got %s; want %s", actual, expected)
+ }
+}
+
+func TestCustomClientVersion(t *testing.T) {
+ version := "Test-Client-Version-0.0"
+ testClientVersion(t, &ClientConfig{ClientVersion: version}, version)
+}
+
+func TestDefaultClientVersion(t *testing.T) {
+ testClientVersion(t, &ClientConfig{}, packageVersion)
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/example_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/example_test.go
new file mode 100644
index 000000000..dfd9dcab6
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/example_test.go
@@ -0,0 +1,211 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh_test
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/crypto/ssh/terminal"
+)
+
+func ExampleNewServerConn() {
+ // An SSH server is represented by a ServerConfig, which holds
+ // certificate details and handles authentication of ServerConns.
+ config := &ssh.ServerConfig{
+ PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
+ // Should use constant-time compare (or better, salt+hash) in
+ // a production setting.
+ if c.User() == "testuser" && string(pass) == "tiger" {
+ return nil, nil
+ }
+ return nil, fmt.Errorf("password rejected for %q", c.User())
+ },
+ }
+
+ privateBytes, err := ioutil.ReadFile("id_rsa")
+ if err != nil {
+ panic("Failed to load private key")
+ }
+
+ private, err := ssh.ParsePrivateKey(privateBytes)
+ if err != nil {
+ panic("Failed to parse private key")
+ }
+
+ config.AddHostKey(private)
+
+ // Once a ServerConfig has been configured, connections can be
+ // accepted.
+ listener, err := net.Listen("tcp", "0.0.0.0:2022")
+ if err != nil {
+ panic("failed to listen for connection")
+ }
+ nConn, err := listener.Accept()
+ if err != nil {
+ panic("failed to accept incoming connection")
+ }
+
+ // Before use, a handshake must be performed on the incoming
+ // net.Conn.
+ _, chans, reqs, err := ssh.NewServerConn(nConn, config)
+ if err != nil {
+ panic("failed to handshake")
+ }
+ // The incoming Request channel must be serviced.
+ go ssh.DiscardRequests(reqs)
+
+ // Service the incoming Channel channel.
+ for newChannel := range chans {
+ // Channels have a type, depending on the application level
+ // protocol intended. In the case of a shell, the type is
+ // "session" and ServerShell may be used to present a simple
+ // terminal interface.
+ if newChannel.ChannelType() != "session" {
+ newChannel.Reject(ssh.UnknownChannelType, "unknown channel type")
+ continue
+ }
+ channel, requests, err := newChannel.Accept()
+ if err != nil {
+ panic("could not accept channel.")
+ }
+
+ // Sessions have out-of-band requests such as "shell",
+ // "pty-req" and "env". Here we handle only the
+ // "shell" request.
+ go func(in <-chan *ssh.Request) {
+ for req := range in {
+ ok := false
+ switch req.Type {
+ case "shell":
+ ok = true
+ if len(req.Payload) > 0 {
+ // We don't accept any
+ // commands, only the
+ // default shell.
+ ok = false
+ }
+ }
+ req.Reply(ok, nil)
+ }
+ }(requests)
+
+ term := terminal.NewTerminal(channel, "> ")
+
+ go func() {
+ defer channel.Close()
+ for {
+ line, err := term.ReadLine()
+ if err != nil {
+ break
+ }
+ fmt.Println(line)
+ }
+ }()
+ }
+}
+
+func ExampleDial() {
+ // An SSH client is represented with a ClientConn. This example
+ // authenticates using the "password" method.
+ //
+ // To authenticate with the remote server you must pass at least one
+ // implementation of AuthMethod via the Auth field in ClientConfig.
+ config := &ssh.ClientConfig{
+ User: "username",
+ Auth: []ssh.AuthMethod{
+ ssh.Password("yourpassword"),
+ },
+ }
+ client, err := ssh.Dial("tcp", "yourserver.com:22", config)
+ if err != nil {
+ panic("Failed to dial: " + err.Error())
+ }
+
+ // Each ClientConn can support multiple interactive sessions,
+ // represented by a Session.
+ session, err := client.NewSession()
+ if err != nil {
+ panic("Failed to create session: " + err.Error())
+ }
+ defer session.Close()
+
+ // Once a Session is created, you can execute a single command on
+ // the remote side using the Run method.
+ var b bytes.Buffer
+ session.Stdout = &b
+ if err := session.Run("/usr/bin/whoami"); err != nil {
+ panic("Failed to run: " + err.Error())
+ }
+ fmt.Println(b.String())
+}
+
+func ExampleClient_Listen() {
+ config := &ssh.ClientConfig{
+ User: "username",
+ Auth: []ssh.AuthMethod{
+ ssh.Password("password"),
+ },
+ }
+ // Dial your ssh server.
+ conn, err := ssh.Dial("tcp", "localhost:22", config)
+ if err != nil {
+ log.Fatalf("unable to connect: %s", err)
+ }
+ defer conn.Close()
+
+ // Request the remote side to open port 8080 on all interfaces.
+ l, err := conn.Listen("tcp", "0.0.0.0:8080")
+ if err != nil {
+ log.Fatalf("unable to register tcp forward: %v", err)
+ }
+ defer l.Close()
+
+ // Serve HTTP with your SSH server acting as a reverse proxy.
+ http.Serve(l, http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
+ fmt.Fprintf(resp, "Hello world!\n")
+ }))
+}
+
+func ExampleSession_RequestPty() {
+ // Create client config
+ config := &ssh.ClientConfig{
+ User: "username",
+ Auth: []ssh.AuthMethod{
+ ssh.Password("password"),
+ },
+ }
+ // Connect to ssh server
+ conn, err := ssh.Dial("tcp", "localhost:22", config)
+ if err != nil {
+ log.Fatalf("unable to connect: %s", err)
+ }
+ defer conn.Close()
+ // Create a session
+ session, err := conn.NewSession()
+ if err != nil {
+ log.Fatalf("unable to create session: %s", err)
+ }
+ defer session.Close()
+ // Set up terminal modes
+ modes := ssh.TerminalModes{
+ ssh.ECHO: 0, // disable echoing
+ ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud
+ ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud
+ }
+ // Request pseudo terminal
+ if err := session.RequestPty("xterm", 80, 40, modes); err != nil {
+ log.Fatalf("request for pseudo terminal failed: %s", err)
+ }
+ // Start remote shell
+ if err := session.Shell(); err != nil {
+ log.Fatalf("failed to start shell: %s", err)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/handshake_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/handshake_test.go
new file mode 100644
index 000000000..b86d369cc
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/handshake_test.go
@@ -0,0 +1,415 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "net"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+)
+
+type testChecker struct {
+ calls []string
+}
+
+func (t *testChecker) Check(dialAddr string, addr net.Addr, key PublicKey) error {
+ if dialAddr == "bad" {
+ return fmt.Errorf("dialAddr is bad")
+ }
+
+ if tcpAddr, ok := addr.(*net.TCPAddr); !ok || tcpAddr == nil {
+ return fmt.Errorf("testChecker: got %T want *net.TCPAddr", addr)
+ }
+
+ t.calls = append(t.calls, fmt.Sprintf("%s %v %s %x", dialAddr, addr, key.Type(), key.Marshal()))
+
+ return nil
+}
+
+// netPipe is analogous to net.Pipe, but it uses a real net.Conn, and
+// therefore is buffered (net.Pipe deadlocks if both sides start with
+// a write.)
+func netPipe() (net.Conn, net.Conn, error) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, nil, err
+ }
+ defer listener.Close()
+ c1, err := net.Dial("tcp", listener.Addr().String())
+ if err != nil {
+ return nil, nil, err
+ }
+
+ c2, err := listener.Accept()
+ if err != nil {
+ c1.Close()
+ return nil, nil, err
+ }
+
+ return c1, c2, nil
+}
+
+func handshakePair(clientConf *ClientConfig, addr string) (client *handshakeTransport, server *handshakeTransport, err error) {
+ a, b, err := netPipe()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ trC := newTransport(a, rand.Reader, true)
+ trS := newTransport(b, rand.Reader, false)
+ clientConf.SetDefaults()
+
+ v := []byte("version")
+ client = newClientTransport(trC, v, v, clientConf, addr, a.RemoteAddr())
+
+ serverConf := &ServerConfig{}
+ serverConf.AddHostKey(testSigners["ecdsa"])
+ serverConf.AddHostKey(testSigners["rsa"])
+ serverConf.SetDefaults()
+ server = newServerTransport(trS, v, v, serverConf)
+
+ return client, server, nil
+}
+
+func TestHandshakeBasic(t *testing.T) {
+ if runtime.GOOS == "plan9" {
+ t.Skip("see golang.org/issue/7237")
+ }
+ checker := &testChecker{}
+ trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr")
+ if err != nil {
+ t.Fatalf("handshakePair: %v", err)
+ }
+
+ defer trC.Close()
+ defer trS.Close()
+
+ go func() {
+ // Client writes a bunch of stuff, and does a key
+ // change in the middle. This should not confuse the
+ // handshake in progress
+ for i := 0; i < 10; i++ {
+ p := []byte{msgRequestSuccess, byte(i)}
+ if err := trC.writePacket(p); err != nil {
+ t.Fatalf("sendPacket: %v", err)
+ }
+ if i == 5 {
+ // halfway through, we request a key change.
+ _, _, err := trC.sendKexInit()
+ if err != nil {
+ t.Fatalf("sendKexInit: %v", err)
+ }
+ }
+ }
+ trC.Close()
+ }()
+
+ // Server checks that client messages come in cleanly
+ i := 0
+ for {
+ p, err := trS.readPacket()
+ if err != nil {
+ break
+ }
+ if p[0] == msgNewKeys {
+ continue
+ }
+ want := []byte{msgRequestSuccess, byte(i)}
+ if bytes.Compare(p, want) != 0 {
+ t.Errorf("message %d: got %q, want %q", i, p, want)
+ }
+ i++
+ }
+ if i != 10 {
+ t.Errorf("received %d messages, want 10.", i)
+ }
+
+ // If all went well, we registered exactly 1 key change.
+ if len(checker.calls) != 1 {
+ t.Fatalf("got %d host key checks, want 1", len(checker.calls))
+ }
+
+ pub := testSigners["ecdsa"].PublicKey()
+ want := fmt.Sprintf("%s %v %s %x", "addr", trC.remoteAddr, pub.Type(), pub.Marshal())
+ if want != checker.calls[0] {
+ t.Errorf("got %q want %q for host key check", checker.calls[0], want)
+ }
+}
+
+func TestHandshakeError(t *testing.T) {
+ checker := &testChecker{}
+ trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "bad")
+ if err != nil {
+ t.Fatalf("handshakePair: %v", err)
+ }
+ defer trC.Close()
+ defer trS.Close()
+
+ // send a packet
+ packet := []byte{msgRequestSuccess, 42}
+ if err := trC.writePacket(packet); err != nil {
+ t.Errorf("writePacket: %v", err)
+ }
+
+ // Now request a key change.
+ _, _, err = trC.sendKexInit()
+ if err != nil {
+ t.Errorf("sendKexInit: %v", err)
+ }
+
+ // the key change will fail, and afterwards we can't write.
+ if err := trC.writePacket([]byte{msgRequestSuccess, 43}); err == nil {
+ t.Errorf("writePacket after botched rekey succeeded.")
+ }
+
+ readback, err := trS.readPacket()
+ if err != nil {
+ t.Fatalf("server closed too soon: %v", err)
+ }
+ if bytes.Compare(readback, packet) != 0 {
+ t.Errorf("got %q want %q", readback, packet)
+ }
+ readback, err = trS.readPacket()
+ if err == nil {
+ t.Errorf("got a message %q after failed key change", readback)
+ }
+}
+
+func TestHandshakeTwice(t *testing.T) {
+ checker := &testChecker{}
+ trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr")
+ if err != nil {
+ t.Fatalf("handshakePair: %v", err)
+ }
+
+ defer trC.Close()
+ defer trS.Close()
+
+ // send a packet
+ packet := make([]byte, 5)
+ packet[0] = msgRequestSuccess
+ if err := trC.writePacket(packet); err != nil {
+ t.Errorf("writePacket: %v", err)
+ }
+
+ // Now request a key change.
+ _, _, err = trC.sendKexInit()
+ if err != nil {
+ t.Errorf("sendKexInit: %v", err)
+ }
+
+ // Send another packet. Use a fresh one, since writePacket destroys its input.
+ packet = make([]byte, 5)
+ packet[0] = msgRequestSuccess
+ if err := trC.writePacket(packet); err != nil {
+ t.Errorf("writePacket: %v", err)
+ }
+
+ // 2nd key change.
+ _, _, err = trC.sendKexInit()
+ if err != nil {
+ t.Errorf("sendKexInit: %v", err)
+ }
+
+ packet = make([]byte, 5)
+ packet[0] = msgRequestSuccess
+ if err := trC.writePacket(packet); err != nil {
+ t.Errorf("writePacket: %v", err)
+ }
+
+ packet = make([]byte, 5)
+ packet[0] = msgRequestSuccess
+ for i := 0; i < 5; i++ {
+ msg, err := trS.readPacket()
+ if err != nil {
+ t.Fatalf("server closed too soon: %v", err)
+ }
+ if msg[0] == msgNewKeys {
+ continue
+ }
+
+ if bytes.Compare(msg, packet) != 0 {
+ t.Errorf("packet %d: got %q want %q", i, msg, packet)
+ }
+ }
+ if len(checker.calls) != 2 {
+ t.Errorf("got %d key changes, want 2", len(checker.calls))
+ }
+}
+
+func TestHandshakeAutoRekeyWrite(t *testing.T) {
+ checker := &testChecker{}
+ clientConf := &ClientConfig{HostKeyCallback: checker.Check}
+ clientConf.RekeyThreshold = 500
+ trC, trS, err := handshakePair(clientConf, "addr")
+ if err != nil {
+ t.Fatalf("handshakePair: %v", err)
+ }
+ defer trC.Close()
+ defer trS.Close()
+
+ for i := 0; i < 5; i++ {
+ packet := make([]byte, 251)
+ packet[0] = msgRequestSuccess
+ if err := trC.writePacket(packet); err != nil {
+ t.Errorf("writePacket: %v", err)
+ }
+ }
+
+ j := 0
+ for ; j < 5; j++ {
+ _, err := trS.readPacket()
+ if err != nil {
+ break
+ }
+ }
+
+ if j != 5 {
+ t.Errorf("got %d, want 5 messages", j)
+ }
+
+ if len(checker.calls) != 2 {
+ t.Errorf("got %d key changes, wanted 2", len(checker.calls))
+ }
+}
+
+type syncChecker struct {
+ called chan int
+}
+
+func (t *syncChecker) Check(dialAddr string, addr net.Addr, key PublicKey) error {
+ t.called <- 1
+ return nil
+}
+
+func TestHandshakeAutoRekeyRead(t *testing.T) {
+ sync := &syncChecker{make(chan int, 2)}
+ clientConf := &ClientConfig{
+ HostKeyCallback: sync.Check,
+ }
+ clientConf.RekeyThreshold = 500
+
+ trC, trS, err := handshakePair(clientConf, "addr")
+ if err != nil {
+ t.Fatalf("handshakePair: %v", err)
+ }
+ defer trC.Close()
+ defer trS.Close()
+
+ packet := make([]byte, 501)
+ packet[0] = msgRequestSuccess
+ if err := trS.writePacket(packet); err != nil {
+ t.Fatalf("writePacket: %v", err)
+ }
+ // While we read out the packet, a key change will be
+ // initiated.
+ if _, err := trC.readPacket(); err != nil {
+ t.Fatalf("readPacket(client): %v", err)
+ }
+
+ <-sync.called
+}
+
+// errorKeyingTransport generates errors after a given number of
+// read/write operations.
+type errorKeyingTransport struct {
+ packetConn
+ readLeft, writeLeft int
+}
+
+func (n *errorKeyingTransport) prepareKeyChange(*algorithms, *kexResult) error {
+ return nil
+}
+func (n *errorKeyingTransport) getSessionID() []byte {
+ return nil
+}
+
+func (n *errorKeyingTransport) writePacket(packet []byte) error {
+ if n.writeLeft == 0 {
+ n.Close()
+ return errors.New("barf")
+ }
+
+ n.writeLeft--
+ return n.packetConn.writePacket(packet)
+}
+
+func (n *errorKeyingTransport) readPacket() ([]byte, error) {
+ if n.readLeft == 0 {
+ n.Close()
+ return nil, errors.New("barf")
+ }
+
+ n.readLeft--
+ return n.packetConn.readPacket()
+}
+
+func TestHandshakeErrorHandlingRead(t *testing.T) {
+ for i := 0; i < 20; i++ {
+ testHandshakeErrorHandlingN(t, i, -1)
+ }
+}
+
+func TestHandshakeErrorHandlingWrite(t *testing.T) {
+ for i := 0; i < 20; i++ {
+ testHandshakeErrorHandlingN(t, -1, i)
+ }
+}
+
+// testHandshakeErrorHandlingN runs handshakes, injecting errors. If
+// handshakeTransport deadlocks, the go runtime will detect it and
+// panic.
+func testHandshakeErrorHandlingN(t *testing.T, readLimit, writeLimit int) {
+ msg := Marshal(&serviceRequestMsg{strings.Repeat("x", int(minRekeyThreshold)/4)})
+
+ a, b := memPipe()
+ defer a.Close()
+ defer b.Close()
+
+ key := testSigners["ecdsa"]
+ serverConf := Config{RekeyThreshold: minRekeyThreshold}
+ serverConf.SetDefaults()
+ serverConn := newHandshakeTransport(&errorKeyingTransport{a, readLimit, writeLimit}, &serverConf, []byte{'a'}, []byte{'b'})
+ serverConn.hostKeys = []Signer{key}
+ go serverConn.readLoop()
+
+ clientConf := Config{RekeyThreshold: 10 * minRekeyThreshold}
+ clientConf.SetDefaults()
+ clientConn := newHandshakeTransport(&errorKeyingTransport{b, -1, -1}, &clientConf, []byte{'a'}, []byte{'b'})
+ clientConn.hostKeyAlgorithms = []string{key.PublicKey().Type()}
+ go clientConn.readLoop()
+
+ var wg sync.WaitGroup
+ wg.Add(4)
+
+ for _, hs := range []packetConn{serverConn, clientConn} {
+ go func(c packetConn) {
+ for {
+ err := c.writePacket(msg)
+ if err != nil {
+ break
+ }
+ }
+ wg.Done()
+ }(hs)
+ go func(c packetConn) {
+ for {
+ _, err := c.readPacket()
+ if err != nil {
+ break
+ }
+ }
+ wg.Done()
+ }(hs)
+ }
+
+ wg.Wait()
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/kex_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/kex_test.go
new file mode 100644
index 000000000..12ca0acd3
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/kex_test.go
@@ -0,0 +1,50 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+// Key exchange tests.
+
+import (
+ "crypto/rand"
+ "reflect"
+ "testing"
+)
+
+func TestKexes(t *testing.T) {
+ type kexResultErr struct {
+ result *kexResult
+ err error
+ }
+
+ for name, kex := range kexAlgoMap {
+ a, b := memPipe()
+
+ s := make(chan kexResultErr, 1)
+ c := make(chan kexResultErr, 1)
+ var magics handshakeMagics
+ go func() {
+ r, e := kex.Client(a, rand.Reader, &magics)
+ a.Close()
+ c <- kexResultErr{r, e}
+ }()
+ go func() {
+ r, e := kex.Server(b, rand.Reader, &magics, testSigners["ecdsa"])
+ b.Close()
+ s <- kexResultErr{r, e}
+ }()
+
+ clientRes := <-c
+ serverRes := <-s
+ if clientRes.err != nil {
+ t.Errorf("client: %v", clientRes.err)
+ }
+ if serverRes.err != nil {
+ t.Errorf("server: %v", serverRes.err)
+ }
+ if !reflect.DeepEqual(clientRes.result, serverRes.result) {
+ t.Errorf("kex %q: mismatch %#v, %#v", name, clientRes.result, serverRes.result)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/keys_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/keys_test.go
new file mode 100644
index 000000000..b4cceaffb
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/keys_test.go
@@ -0,0 +1,306 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "encoding/base64"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ "golang.org/x/crypto/ssh/testdata"
+)
+
+func rawKey(pub PublicKey) interface{} {
+ switch k := pub.(type) {
+ case *rsaPublicKey:
+ return (*rsa.PublicKey)(k)
+ case *dsaPublicKey:
+ return (*dsa.PublicKey)(k)
+ case *ecdsaPublicKey:
+ return (*ecdsa.PublicKey)(k)
+ case *Certificate:
+ return k
+ }
+ panic("unknown key type")
+}
+
+func TestKeyMarshalParse(t *testing.T) {
+ for _, priv := range testSigners {
+ pub := priv.PublicKey()
+ roundtrip, err := ParsePublicKey(pub.Marshal())
+ if err != nil {
+ t.Errorf("ParsePublicKey(%T): %v", pub, err)
+ }
+
+ k1 := rawKey(pub)
+ k2 := rawKey(roundtrip)
+
+ if !reflect.DeepEqual(k1, k2) {
+ t.Errorf("got %#v in roundtrip, want %#v", k2, k1)
+ }
+ }
+}
+
+func TestUnsupportedCurves(t *testing.T) {
+ raw, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader)
+ if err != nil {
+ t.Fatalf("GenerateKey: %v", err)
+ }
+
+ if _, err = NewSignerFromKey(raw); err == nil || !strings.Contains(err.Error(), "only P-256") {
+ t.Fatalf("NewPrivateKey should not succeed with P-224, got: %v", err)
+ }
+
+ if _, err = NewPublicKey(&raw.PublicKey); err == nil || !strings.Contains(err.Error(), "only P-256") {
+ t.Fatalf("NewPublicKey should not succeed with P-224, got: %v", err)
+ }
+}
+
+func TestNewPublicKey(t *testing.T) {
+ for _, k := range testSigners {
+ raw := rawKey(k.PublicKey())
+ // Skip certificates, as NewPublicKey does not support them.
+ if _, ok := raw.(*Certificate); ok {
+ continue
+ }
+ pub, err := NewPublicKey(raw)
+ if err != nil {
+ t.Errorf("NewPublicKey(%#v): %v", raw, err)
+ }
+ if !reflect.DeepEqual(k.PublicKey(), pub) {
+ t.Errorf("NewPublicKey(%#v) = %#v, want %#v", raw, pub, k.PublicKey())
+ }
+ }
+}
+
+func TestKeySignVerify(t *testing.T) {
+ for _, priv := range testSigners {
+ pub := priv.PublicKey()
+
+ data := []byte("sign me")
+ sig, err := priv.Sign(rand.Reader, data)
+ if err != nil {
+ t.Fatalf("Sign(%T): %v", priv, err)
+ }
+
+ if err := pub.Verify(data, sig); err != nil {
+ t.Errorf("publicKey.Verify(%T): %v", priv, err)
+ }
+ sig.Blob[5]++
+ if err := pub.Verify(data, sig); err == nil {
+ t.Errorf("publicKey.Verify on broken sig did not fail")
+ }
+ }
+}
+
+func TestParseRSAPrivateKey(t *testing.T) {
+ key := testPrivateKeys["rsa"]
+
+ rsa, ok := key.(*rsa.PrivateKey)
+ if !ok {
+ t.Fatalf("got %T, want *rsa.PrivateKey", rsa)
+ }
+
+ if err := rsa.Validate(); err != nil {
+ t.Errorf("Validate: %v", err)
+ }
+}
+
+func TestParseECPrivateKey(t *testing.T) {
+ key := testPrivateKeys["ecdsa"]
+
+ ecKey, ok := key.(*ecdsa.PrivateKey)
+ if !ok {
+ t.Fatalf("got %T, want *ecdsa.PrivateKey", ecKey)
+ }
+
+ if !validateECPublicKey(ecKey.Curve, ecKey.X, ecKey.Y) {
+ t.Fatalf("public key does not validate.")
+ }
+}
+
+func TestParseDSA(t *testing.T) {
+ // We actually exercise the ParsePrivateKey codepath here, as opposed to
+ // using the ParseRawPrivateKey+NewSignerFromKey path that testdata_test.go
+ // uses.
+ s, err := ParsePrivateKey(testdata.PEMBytes["dsa"])
+ if err != nil {
+ t.Fatalf("ParsePrivateKey returned error: %s", err)
+ }
+
+ data := []byte("sign me")
+ sig, err := s.Sign(rand.Reader, data)
+ if err != nil {
+ t.Fatalf("dsa.Sign: %v", err)
+ }
+
+ if err := s.PublicKey().Verify(data, sig); err != nil {
+ t.Errorf("Verify failed: %v", err)
+ }
+}
+
+// Tests for authorized_keys parsing.
+
+// getTestKey returns a public key, and its base64 encoding.
+func getTestKey() (PublicKey, string) {
+ k := testPublicKeys["rsa"]
+
+ b := &bytes.Buffer{}
+ e := base64.NewEncoder(base64.StdEncoding, b)
+ e.Write(k.Marshal())
+ e.Close()
+
+ return k, b.String()
+}
+
+func TestMarshalParsePublicKey(t *testing.T) {
+ pub, pubSerialized := getTestKey()
+ line := fmt.Sprintf("%s %s user@host", pub.Type(), pubSerialized)
+
+ authKeys := MarshalAuthorizedKey(pub)
+ actualFields := strings.Fields(string(authKeys))
+ if len(actualFields) == 0 {
+ t.Fatalf("failed authKeys: %v", authKeys)
+ }
+
+ // drop the comment
+ expectedFields := strings.Fields(line)[0:2]
+
+ if !reflect.DeepEqual(actualFields, expectedFields) {
+ t.Errorf("got %v, expected %v", actualFields, expectedFields)
+ }
+
+ actPub, _, _, _, err := ParseAuthorizedKey([]byte(line))
+ if err != nil {
+ t.Fatalf("cannot parse %v: %v", line, err)
+ }
+ if !reflect.DeepEqual(actPub, pub) {
+ t.Errorf("got %v, expected %v", actPub, pub)
+ }
+}
+
+type authResult struct {
+ pubKey PublicKey
+ options []string
+ comments string
+ rest string
+ ok bool
+}
+
+func testAuthorizedKeys(t *testing.T, authKeys []byte, expected []authResult) {
+ rest := authKeys
+ var values []authResult
+ for len(rest) > 0 {
+ var r authResult
+ var err error
+ r.pubKey, r.comments, r.options, rest, err = ParseAuthorizedKey(rest)
+ r.ok = (err == nil)
+ t.Log(err)
+ r.rest = string(rest)
+ values = append(values, r)
+ }
+
+ if !reflect.DeepEqual(values, expected) {
+ t.Errorf("got %#v, expected %#v", values, expected)
+ }
+}
+
+func TestAuthorizedKeyBasic(t *testing.T) {
+ pub, pubSerialized := getTestKey()
+ line := "ssh-rsa " + pubSerialized + " user@host"
+ testAuthorizedKeys(t, []byte(line),
+ []authResult{
+ {pub, nil, "user@host", "", true},
+ })
+}
+
+func TestAuth(t *testing.T) {
+ pub, pubSerialized := getTestKey()
+ authWithOptions := []string{
+ `# comments to ignore before any keys...`,
+ ``,
+ `env="HOME=/home/root",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`,
+ `# comments to ignore, along with a blank line`,
+ ``,
+ `env="HOME=/home/root2" ssh-rsa ` + pubSerialized + ` user2@host2`,
+ ``,
+ `# more comments, plus an invalid entry`,
+ `ssh-rsa data-that-will-not-parse user@host3`,
+ }
+ for _, eol := range []string{"\n", "\r\n"} {
+ authOptions := strings.Join(authWithOptions, eol)
+ rest2 := strings.Join(authWithOptions[3:], eol)
+ rest3 := strings.Join(authWithOptions[6:], eol)
+ testAuthorizedKeys(t, []byte(authOptions), []authResult{
+ {pub, []string{`env="HOME=/home/root"`, "no-port-forwarding"}, "user@host", rest2, true},
+ {pub, []string{`env="HOME=/home/root2"`}, "user2@host2", rest3, true},
+ {nil, nil, "", "", false},
+ })
+ }
+}
+
+func TestAuthWithQuotedSpaceInEnv(t *testing.T) {
+ pub, pubSerialized := getTestKey()
+ authWithQuotedSpaceInEnv := []byte(`env="HOME=/home/root dir",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`)
+ testAuthorizedKeys(t, []byte(authWithQuotedSpaceInEnv), []authResult{
+ {pub, []string{`env="HOME=/home/root dir"`, "no-port-forwarding"}, "user@host", "", true},
+ })
+}
+
+func TestAuthWithQuotedCommaInEnv(t *testing.T) {
+ pub, pubSerialized := getTestKey()
+ authWithQuotedCommaInEnv := []byte(`env="HOME=/home/root,dir",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`)
+ testAuthorizedKeys(t, []byte(authWithQuotedCommaInEnv), []authResult{
+ {pub, []string{`env="HOME=/home/root,dir"`, "no-port-forwarding"}, "user@host", "", true},
+ })
+}
+
+func TestAuthWithQuotedQuoteInEnv(t *testing.T) {
+ pub, pubSerialized := getTestKey()
+ authWithQuotedQuoteInEnv := []byte(`env="HOME=/home/\"root dir",no-port-forwarding` + "\t" + `ssh-rsa` + "\t" + pubSerialized + ` user@host`)
+ authWithDoubleQuotedQuote := []byte(`no-port-forwarding,env="HOME=/home/ \"root dir\"" ssh-rsa ` + pubSerialized + "\t" + `user@host`)
+ testAuthorizedKeys(t, []byte(authWithQuotedQuoteInEnv), []authResult{
+ {pub, []string{`env="HOME=/home/\"root dir"`, "no-port-forwarding"}, "user@host", "", true},
+ })
+
+ testAuthorizedKeys(t, []byte(authWithDoubleQuotedQuote), []authResult{
+ {pub, []string{"no-port-forwarding", `env="HOME=/home/ \"root dir\""`}, "user@host", "", true},
+ })
+}
+
+func TestAuthWithInvalidSpace(t *testing.T) {
+ _, pubSerialized := getTestKey()
+ authWithInvalidSpace := []byte(`env="HOME=/home/root dir", no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host
+#more to follow but still no valid keys`)
+ testAuthorizedKeys(t, []byte(authWithInvalidSpace), []authResult{
+ {nil, nil, "", "", false},
+ })
+}
+
+func TestAuthWithMissingQuote(t *testing.T) {
+ pub, pubSerialized := getTestKey()
+ authWithMissingQuote := []byte(`env="HOME=/home/root,no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host
+env="HOME=/home/root",shared-control ssh-rsa ` + pubSerialized + ` user@host`)
+
+ testAuthorizedKeys(t, []byte(authWithMissingQuote), []authResult{
+ {pub, []string{`env="HOME=/home/root"`, `shared-control`}, "user@host", "", true},
+ })
+}
+
+func TestInvalidEntry(t *testing.T) {
+ authInvalid := []byte(`ssh-rsa`)
+ _, _, _, _, err := ParseAuthorizedKey(authInvalid)
+ if err == nil {
+ t.Errorf("got valid entry for %q", authInvalid)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/mempipe_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/mempipe_test.go
new file mode 100644
index 000000000..8697cd614
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/mempipe_test.go
@@ -0,0 +1,110 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "io"
+ "sync"
+ "testing"
+)
+
+// An in-memory packetConn. It is safe to call Close and writePacket
+// from different goroutines.
+type memTransport struct {
+ eof bool
+ pending [][]byte
+ write *memTransport
+ sync.Mutex
+ *sync.Cond
+}
+
+func (t *memTransport) readPacket() ([]byte, error) {
+ t.Lock()
+ defer t.Unlock()
+ for {
+ if len(t.pending) > 0 {
+ r := t.pending[0]
+ t.pending = t.pending[1:]
+ return r, nil
+ }
+ if t.eof {
+ return nil, io.EOF
+ }
+ t.Cond.Wait()
+ }
+}
+
+func (t *memTransport) closeSelf() error {
+ t.Lock()
+ defer t.Unlock()
+ if t.eof {
+ return io.EOF
+ }
+ t.eof = true
+ t.Cond.Broadcast()
+ return nil
+}
+
+func (t *memTransport) Close() error {
+ err := t.write.closeSelf()
+ t.closeSelf()
+ return err
+}
+
+func (t *memTransport) writePacket(p []byte) error {
+ t.write.Lock()
+ defer t.write.Unlock()
+ if t.write.eof {
+ return io.EOF
+ }
+ c := make([]byte, len(p))
+ copy(c, p)
+ t.write.pending = append(t.write.pending, c)
+ t.write.Cond.Signal()
+ return nil
+}
+
+func memPipe() (a, b packetConn) {
+ t1 := memTransport{}
+ t2 := memTransport{}
+ t1.write = &t2
+ t2.write = &t1
+ t1.Cond = sync.NewCond(&t1.Mutex)
+ t2.Cond = sync.NewCond(&t2.Mutex)
+ return &t1, &t2
+}
+
+func TestMemPipe(t *testing.T) {
+ a, b := memPipe()
+ if err := a.writePacket([]byte{42}); err != nil {
+ t.Fatalf("writePacket: %v", err)
+ }
+ if err := a.Close(); err != nil {
+ t.Fatal("Close: ", err)
+ }
+ p, err := b.readPacket()
+ if err != nil {
+ t.Fatal("readPacket: ", err)
+ }
+ if len(p) != 1 || p[0] != 42 {
+ t.Fatalf("got %v, want {42}", p)
+ }
+ p, err = b.readPacket()
+ if err != io.EOF {
+ t.Fatalf("got %v, %v, want EOF", p, err)
+ }
+}
+
+func TestDoubleClose(t *testing.T) {
+ a, _ := memPipe()
+ err := a.Close()
+ if err != nil {
+ t.Errorf("Close: %v", err)
+ }
+ err = a.Close()
+ if err != io.EOF {
+ t.Errorf("expect EOF on double close.")
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/messages_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/messages_test.go
new file mode 100644
index 000000000..955b5127f
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/messages_test.go
@@ -0,0 +1,254 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "math/big"
+ "math/rand"
+ "reflect"
+ "testing"
+ "testing/quick"
+)
+
+var intLengthTests = []struct {
+ val, length int
+}{
+ {0, 4 + 0},
+ {1, 4 + 1},
+ {127, 4 + 1},
+ {128, 4 + 2},
+ {-1, 4 + 1},
+}
+
+func TestIntLength(t *testing.T) {
+ for _, test := range intLengthTests {
+ v := new(big.Int).SetInt64(int64(test.val))
+ length := intLength(v)
+ if length != test.length {
+ t.Errorf("For %d, got length %d but expected %d", test.val, length, test.length)
+ }
+ }
+}
+
+type msgAllTypes struct {
+ Bool bool `sshtype:"21"`
+ Array [16]byte
+ Uint64 uint64
+ Uint32 uint32
+ Uint8 uint8
+ String string
+ Strings []string
+ Bytes []byte
+ Int *big.Int
+ Rest []byte `ssh:"rest"`
+}
+
+func (t *msgAllTypes) Generate(rand *rand.Rand, size int) reflect.Value {
+ m := &msgAllTypes{}
+ m.Bool = rand.Intn(2) == 1
+ randomBytes(m.Array[:], rand)
+ m.Uint64 = uint64(rand.Int63n(1<<63 - 1))
+ m.Uint32 = uint32(rand.Intn((1 << 31) - 1))
+ m.Uint8 = uint8(rand.Intn(1 << 8))
+ m.String = string(m.Array[:])
+ m.Strings = randomNameList(rand)
+ m.Bytes = m.Array[:]
+ m.Int = randomInt(rand)
+ m.Rest = m.Array[:]
+ return reflect.ValueOf(m)
+}
+
+func TestMarshalUnmarshal(t *testing.T) {
+ rand := rand.New(rand.NewSource(0))
+ iface := &msgAllTypes{}
+ ty := reflect.ValueOf(iface).Type()
+
+ n := 100
+ if testing.Short() {
+ n = 5
+ }
+ for j := 0; j < n; j++ {
+ v, ok := quick.Value(ty, rand)
+ if !ok {
+ t.Errorf("failed to create value")
+ break
+ }
+
+ m1 := v.Elem().Interface()
+ m2 := iface
+
+ marshaled := Marshal(m1)
+ if err := Unmarshal(marshaled, m2); err != nil {
+ t.Errorf("Unmarshal %#v: %s", m1, err)
+ break
+ }
+
+ if !reflect.DeepEqual(v.Interface(), m2) {
+ t.Errorf("got: %#v\nwant:%#v\n%x", m2, m1, marshaled)
+ break
+ }
+ }
+}
+
+func TestUnmarshalEmptyPacket(t *testing.T) {
+ var b []byte
+ var m channelRequestSuccessMsg
+ if err := Unmarshal(b, &m); err == nil {
+ t.Fatalf("unmarshal of empty slice succeeded")
+ }
+}
+
+func TestUnmarshalUnexpectedPacket(t *testing.T) {
+ type S struct {
+ I uint32 `sshtype:"43"`
+ S string
+ B bool
+ }
+
+ s := S{11, "hello", true}
+ packet := Marshal(s)
+ packet[0] = 42
+ roundtrip := S{}
+ err := Unmarshal(packet, &roundtrip)
+ if err == nil {
+ t.Fatal("expected error, not nil")
+ }
+}
+
+func TestMarshalPtr(t *testing.T) {
+ s := struct {
+ S string
+ }{"hello"}
+
+ m1 := Marshal(s)
+ m2 := Marshal(&s)
+ if !bytes.Equal(m1, m2) {
+ t.Errorf("got %q, want %q for marshaled pointer", m2, m1)
+ }
+}
+
+func TestBareMarshalUnmarshal(t *testing.T) {
+ type S struct {
+ I uint32
+ S string
+ B bool
+ }
+
+ s := S{42, "hello", true}
+ packet := Marshal(s)
+ roundtrip := S{}
+ Unmarshal(packet, &roundtrip)
+
+ if !reflect.DeepEqual(s, roundtrip) {
+ t.Errorf("got %#v, want %#v", roundtrip, s)
+ }
+}
+
+func TestBareMarshal(t *testing.T) {
+ type S2 struct {
+ I uint32
+ }
+ s := S2{42}
+ packet := Marshal(s)
+ i, rest, ok := parseUint32(packet)
+ if len(rest) > 0 || !ok {
+ t.Errorf("parseInt(%q): parse error", packet)
+ }
+ if i != s.I {
+ t.Errorf("got %d, want %d", i, s.I)
+ }
+}
+
+func TestUnmarshalShortKexInitPacket(t *testing.T) {
+ // This used to panic.
+ // Issue 11348
+ packet := []byte{0x14, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xff, 0xff, 0xff, 0xff}
+ kim := &kexInitMsg{}
+ if err := Unmarshal(packet, kim); err == nil {
+ t.Error("truncated packet unmarshaled without error")
+ }
+}
+
+func randomBytes(out []byte, rand *rand.Rand) {
+ for i := 0; i < len(out); i++ {
+ out[i] = byte(rand.Int31())
+ }
+}
+
+func randomNameList(rand *rand.Rand) []string {
+ ret := make([]string, rand.Int31()&15)
+ for i := range ret {
+ s := make([]byte, 1+(rand.Int31()&15))
+ for j := range s {
+ s[j] = 'a' + uint8(rand.Int31()&15)
+ }
+ ret[i] = string(s)
+ }
+ return ret
+}
+
+func randomInt(rand *rand.Rand) *big.Int {
+ return new(big.Int).SetInt64(int64(int32(rand.Uint32())))
+}
+
+func (*kexInitMsg) Generate(rand *rand.Rand, size int) reflect.Value {
+ ki := &kexInitMsg{}
+ randomBytes(ki.Cookie[:], rand)
+ ki.KexAlgos = randomNameList(rand)
+ ki.ServerHostKeyAlgos = randomNameList(rand)
+ ki.CiphersClientServer = randomNameList(rand)
+ ki.CiphersServerClient = randomNameList(rand)
+ ki.MACsClientServer = randomNameList(rand)
+ ki.MACsServerClient = randomNameList(rand)
+ ki.CompressionClientServer = randomNameList(rand)
+ ki.CompressionServerClient = randomNameList(rand)
+ ki.LanguagesClientServer = randomNameList(rand)
+ ki.LanguagesServerClient = randomNameList(rand)
+ if rand.Int31()&1 == 1 {
+ ki.FirstKexFollows = true
+ }
+ return reflect.ValueOf(ki)
+}
+
+func (*kexDHInitMsg) Generate(rand *rand.Rand, size int) reflect.Value {
+ dhi := &kexDHInitMsg{}
+ dhi.X = randomInt(rand)
+ return reflect.ValueOf(dhi)
+}
+
+var (
+ _kexInitMsg = new(kexInitMsg).Generate(rand.New(rand.NewSource(0)), 10).Elem().Interface()
+ _kexDHInitMsg = new(kexDHInitMsg).Generate(rand.New(rand.NewSource(0)), 10).Elem().Interface()
+
+ _kexInit = Marshal(_kexInitMsg)
+ _kexDHInit = Marshal(_kexDHInitMsg)
+)
+
+func BenchmarkMarshalKexInitMsg(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Marshal(_kexInitMsg)
+ }
+}
+
+func BenchmarkUnmarshalKexInitMsg(b *testing.B) {
+ m := new(kexInitMsg)
+ for i := 0; i < b.N; i++ {
+ Unmarshal(_kexInit, m)
+ }
+}
+
+func BenchmarkMarshalKexDHInitMsg(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Marshal(_kexDHInitMsg)
+ }
+}
+
+func BenchmarkUnmarshalKexDHInitMsg(b *testing.B) {
+ m := new(kexDHInitMsg)
+ for i := 0; i < b.N; i++ {
+ Unmarshal(_kexDHInit, m)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/mux_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/mux_test.go
new file mode 100644
index 000000000..523038960
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/mux_test.go
@@ -0,0 +1,525 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "io"
+ "io/ioutil"
+ "sync"
+ "testing"
+)
+
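+// muxPair returns two muxes connected to each other via memPipe.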
+func muxPair() (*mux, *mux) {
+ a, b := memPipe()
+
+ s := newMux(a)
+ c := newMux(b)
+
+ return s, c
+}
+
+// channelPair returns both ends of a channel, and the mux for the
+// second channel.
+func channelPair(t *testing.T) (*channel, *channel, *mux) {
+ c, s := muxPair()
+
+ res := make(chan *channel, 1)
+ go func() {
+ newCh, ok := <-s.incomingChannels
+ if !ok {
+ t.Fatalf("No incoming channel")
+ }
+ if newCh.ChannelType() != "chan" {
+ t.Fatalf("got type %q want chan", newCh.ChannelType())
+ }
+ ch, _, err := newCh.Accept()
+ if err != nil {
+ t.Fatalf("Accept %v", err)
+ }
+ res <- ch.(*channel)
+ }()
+
+ ch, err := c.openChannel("chan", nil)
+ if err != nil {
+ t.Fatalf("OpenChannel: %v", err)
+ }
+
+ return <-res, ch, c
+}
+
+// Test that stderr and stdout can be addressed from different
+// goroutines. This is intended for use with the race detector.
+func TestMuxChannelExtendedThreadSafety(t *testing.T) {
+ writer, reader, mux := channelPair(t)
+ defer writer.Close()
+ defer reader.Close()
+ defer mux.Close()
+
+ var wr, rd sync.WaitGroup
+ magic := "hello world"
+
+ wr.Add(2)
+ go func() {
+ io.WriteString(writer, magic)
+ wr.Done()
+ }()
+ go func() {
+ io.WriteString(writer.Stderr(), magic)
+ wr.Done()
+ }()
+
+ rd.Add(2)
+ go func() {
+ c, err := ioutil.ReadAll(reader)
+ if string(c) != magic {
+ t.Fatalf("stdout read got %q, want %q (error %s)", c, magic, err)
+ }
+ rd.Done()
+ }()
+ go func() {
+ c, err := ioutil.ReadAll(reader.Stderr())
+ if string(c) != magic {
+ t.Fatalf("stderr read got %q, want %q (error %s)", c, magic, err)
+ }
+ rd.Done()
+ }()
+
+ wr.Wait()
+ writer.CloseWrite()
+ rd.Wait()
+}
+
+func TestMuxReadWrite(t *testing.T) {
+ s, c, mux := channelPair(t)
+ defer s.Close()
+ defer c.Close()
+ defer mux.Close()
+
+ magic := "hello world"
+ magicExt := "hello stderr"
+ go func() {
+ _, err := s.Write([]byte(magic))
+ if err != nil {
+ t.Fatalf("Write: %v", err)
+ }
+ _, err = s.Extended(1).Write([]byte(magicExt))
+ if err != nil {
+ t.Fatalf("Write: %v", err)
+ }
+ err = s.Close()
+ if err != nil {
+ t.Fatalf("Close: %v", err)
+ }
+ }()
+
+ var buf [1024]byte
+ n, err := c.Read(buf[:])
+ if err != nil {
+ t.Fatalf("server Read: %v", err)
+ }
+ got := string(buf[:n])
+ if got != magic {
+ t.Fatalf("server: got %q want %q", got, magic)
+ }
+
+ n, err = c.Extended(1).Read(buf[:])
+ if err != nil {
+ t.Fatalf("server Read: %v", err)
+ }
+
+ got = string(buf[:n])
+ if got != magicExt {
+		t.Fatalf("server: got %q want %q", got, magicExt)
+ }
+}
+
+func TestMuxChannelOverflow(t *testing.T) {
+ reader, writer, mux := channelPair(t)
+ defer reader.Close()
+ defer writer.Close()
+ defer mux.Close()
+
+ wDone := make(chan int, 1)
+ go func() {
+ if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil {
+ t.Errorf("could not fill window: %v", err)
+ }
+ writer.Write(make([]byte, 1))
+ wDone <- 1
+ }()
+ writer.remoteWin.waitWriterBlocked()
+
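+	// The window is already exhausted, so one extra data byte is a
+	// flow-control violation: the reader-side mux shuts down, which makes
+	// SendRequest fail and unblocks the pending write above.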
+ // Send 1 byte.
+ packet := make([]byte, 1+4+4+1)
+ packet[0] = msgChannelData
+ marshalUint32(packet[1:], writer.remoteId)
+ marshalUint32(packet[5:], uint32(1))
+ packet[9] = 42
+
+ if err := writer.mux.conn.writePacket(packet); err != nil {
+		t.Errorf("could not send packet: %v", err)
+ }
+ if _, err := reader.SendRequest("hello", true, nil); err == nil {
+ t.Errorf("SendRequest succeeded.")
+ }
+ <-wDone
+}
+
+func TestMuxChannelCloseWriteUnblock(t *testing.T) {
+ reader, writer, mux := channelPair(t)
+ defer reader.Close()
+ defer writer.Close()
+ defer mux.Close()
+
+ wDone := make(chan int, 1)
+ go func() {
+ if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil {
+ t.Errorf("could not fill window: %v", err)
+ }
+ if _, err := writer.Write(make([]byte, 1)); err != io.EOF {
+ t.Errorf("got %v, want EOF for unblock write", err)
+ }
+ wDone <- 1
+ }()
+
+ writer.remoteWin.waitWriterBlocked()
+ reader.Close()
+ <-wDone
+}
+
+func TestMuxConnectionCloseWriteUnblock(t *testing.T) {
+ reader, writer, mux := channelPair(t)
+ defer reader.Close()
+ defer writer.Close()
+ defer mux.Close()
+
+ wDone := make(chan int, 1)
+ go func() {
+ if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil {
+ t.Errorf("could not fill window: %v", err)
+ }
+ if _, err := writer.Write(make([]byte, 1)); err != io.EOF {
+ t.Errorf("got %v, want EOF for unblock write", err)
+ }
+ wDone <- 1
+ }()
+
+ writer.remoteWin.waitWriterBlocked()
+ mux.Close()
+ <-wDone
+}
+
+func TestMuxReject(t *testing.T) {
+ client, server := muxPair()
+ defer server.Close()
+ defer client.Close()
+
+ go func() {
+ ch, ok := <-server.incomingChannels
+ if !ok {
+ t.Fatalf("Accept")
+ }
+ if ch.ChannelType() != "ch" || string(ch.ExtraData()) != "extra" {
+ t.Fatalf("unexpected channel: %q, %q", ch.ChannelType(), ch.ExtraData())
+ }
+ ch.Reject(RejectionReason(42), "message")
+ }()
+
+ ch, err := client.openChannel("ch", []byte("extra"))
+ if ch != nil {
+ t.Fatal("openChannel not rejected")
+ }
+
+ ocf, ok := err.(*OpenChannelError)
+ if !ok {
+ t.Errorf("got %#v want *OpenChannelError", err)
+ } else if ocf.Reason != 42 || ocf.Message != "message" {
+ t.Errorf("got %#v, want {Reason: 42, Message: %q}", ocf, "message")
+ }
+
+ want := "ssh: rejected: unknown reason 42 (message)"
+ if err.Error() != want {
+ t.Errorf("got %q, want %q", err.Error(), want)
+ }
+}
+
+func TestMuxChannelRequest(t *testing.T) {
+ client, server, mux := channelPair(t)
+ defer server.Close()
+ defer client.Close()
+ defer mux.Close()
+
+ var received int
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ for r := range server.incomingRequests {
+ received++
+ r.Reply(r.Type == "yes", nil)
+ }
+ wg.Done()
+ }()
+ _, err := client.SendRequest("yes", false, nil)
+ if err != nil {
+ t.Fatalf("SendRequest: %v", err)
+ }
+ ok, err := client.SendRequest("yes", true, nil)
+ if err != nil {
+ t.Fatalf("SendRequest: %v", err)
+ }
+
+ if !ok {
+ t.Errorf("SendRequest(yes): %v", ok)
+ }
+
+ ok, err = client.SendRequest("no", true, nil)
+ if err != nil {
+ t.Fatalf("SendRequest: %v", err)
+ }
+ if ok {
+ t.Errorf("SendRequest(no): %v", ok)
+ }
+
+ client.Close()
+ wg.Wait()
+
+ if received != 3 {
+ t.Errorf("got %d requests, want %d", received, 3)
+ }
+}
+
+func TestMuxGlobalRequest(t *testing.T) {
+ clientMux, serverMux := muxPair()
+ defer serverMux.Close()
+ defer clientMux.Close()
+
+ var seen bool
+ go func() {
+ for r := range serverMux.incomingRequests {
+ seen = seen || r.Type == "peek"
+ if r.WantReply {
+ err := r.Reply(r.Type == "yes",
+ append([]byte(r.Type), r.Payload...))
+ if err != nil {
+ t.Errorf("AckRequest: %v", err)
+ }
+ }
+ }
+ }()
+
+ _, _, err := clientMux.SendRequest("peek", false, nil)
+ if err != nil {
+ t.Errorf("SendRequest: %v", err)
+ }
+
+ ok, data, err := clientMux.SendRequest("yes", true, []byte("a"))
+ if !ok || string(data) != "yesa" || err != nil {
+ t.Errorf("SendRequest(\"yes\", true, \"a\"): %v %v %v",
+ ok, data, err)
+ }
+ if ok, data, err := clientMux.SendRequest("yes", true, []byte("a")); !ok || string(data) != "yesa" || err != nil {
+ t.Errorf("SendRequest(\"yes\", true, \"a\"): %v %v %v",
+ ok, data, err)
+ }
+
+ if ok, data, err := clientMux.SendRequest("no", true, []byte("a")); ok || string(data) != "noa" || err != nil {
+ t.Errorf("SendRequest(\"no\", true, \"a\"): %v %v %v",
+ ok, data, err)
+ }
+
+ clientMux.Disconnect(0, "")
+ if !seen {
+ t.Errorf("never saw 'peek' request")
+ }
+}
+
+func TestMuxGlobalRequestUnblock(t *testing.T) {
+ clientMux, serverMux := muxPair()
+ defer serverMux.Close()
+ defer clientMux.Close()
+
+ result := make(chan error, 1)
+ go func() {
+ _, _, err := clientMux.SendRequest("hello", true, nil)
+ result <- err
+ }()
+
+ <-serverMux.incomingRequests
+ serverMux.conn.Close()
+ err := <-result
+
+ if err != io.EOF {
+		t.Errorf("want EOF, got %v", err)
+ }
+}
+
+func TestMuxChannelRequestUnblock(t *testing.T) {
+ a, b, connB := channelPair(t)
+ defer a.Close()
+ defer b.Close()
+ defer connB.Close()
+
+ result := make(chan error, 1)
+ go func() {
+ _, err := a.SendRequest("hello", true, nil)
+ result <- err
+ }()
+
+ <-b.incomingRequests
+ connB.conn.Close()
+ err := <-result
+
+ if err != io.EOF {
+ t.Errorf("want EOF, got %v", err)
+ }
+}
+
+func TestMuxDisconnect(t *testing.T) {
+ a, b := muxPair()
+ defer a.Close()
+ defer b.Close()
+
+ go func() {
+ for r := range b.incomingRequests {
+ r.Reply(true, nil)
+ }
+ }()
+
+ a.Disconnect(42, "whatever")
+ ok, _, err := a.SendRequest("hello", true, nil)
+ if ok || err == nil {
+ t.Errorf("got reply after disconnecting")
+ }
+ err = b.Wait()
+ if d, ok := err.(*disconnectMsg); !ok || d.Reason != 42 {
+ t.Errorf("got %#v, want disconnectMsg{Reason:42}", err)
+ }
+}
+
+func TestMuxCloseChannel(t *testing.T) {
+ r, w, mux := channelPair(t)
+ defer mux.Close()
+ defer r.Close()
+ defer w.Close()
+
+ result := make(chan error, 1)
+ go func() {
+ var b [1024]byte
+ _, err := r.Read(b[:])
+ result <- err
+ }()
+ if err := w.Close(); err != nil {
+ t.Errorf("w.Close: %v", err)
+ }
+
+ if _, err := w.Write([]byte("hello")); err != io.EOF {
+ t.Errorf("got err %v, want io.EOF after Close", err)
+ }
+
+ if err := <-result; err != io.EOF {
+ t.Errorf("got %v (%T), want io.EOF", err, err)
+ }
+}
+
+func TestMuxCloseWriteChannel(t *testing.T) {
+ r, w, mux := channelPair(t)
+ defer mux.Close()
+
+ result := make(chan error, 1)
+ go func() {
+ var b [1024]byte
+ _, err := r.Read(b[:])
+ result <- err
+ }()
+ if err := w.CloseWrite(); err != nil {
+ t.Errorf("w.CloseWrite: %v", err)
+ }
+
+ if _, err := w.Write([]byte("hello")); err != io.EOF {
+ t.Errorf("got err %v, want io.EOF after CloseWrite", err)
+ }
+
+ if err := <-result; err != io.EOF {
+ t.Errorf("got %v (%T), want io.EOF", err, err)
+ }
+}
+
+func TestMuxInvalidRecord(t *testing.T) {
+ a, b := muxPair()
+ defer a.Close()
+ defer b.Close()
+
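+	// Hand-build a msgChannelData packet (type byte, channel id, payload
+	// length, payload) addressed to a channel id that was never opened.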
+ packet := make([]byte, 1+4+4+1)
+ packet[0] = msgChannelData
+ marshalUint32(packet[1:], 29348723 /* invalid channel id */)
+ marshalUint32(packet[5:], 1)
+ packet[9] = 42
+
+ a.conn.writePacket(packet)
+ go a.SendRequest("hello", false, nil)
+ // 'a' wrote an invalid packet, so 'b' has exited.
+ req, ok := <-b.incomingRequests
+ if ok {
+ t.Errorf("got request %#v after receiving invalid packet", req)
+ }
+}
+
+func TestZeroWindowAdjust(t *testing.T) {
+ a, b, mux := channelPair(t)
+ defer a.Close()
+ defer b.Close()
+ defer mux.Close()
+
+ go func() {
+ io.WriteString(a, "hello")
+ // bogus adjust.
+ a.sendMessage(windowAdjustMsg{})
+ io.WriteString(a, "world")
+ a.Close()
+ }()
+
+ want := "helloworld"
+ c, _ := ioutil.ReadAll(b)
+ if string(c) != want {
+ t.Errorf("got %q want %q", c, want)
+ }
+}
+
+func TestMuxMaxPacketSize(t *testing.T) {
+ a, b, mux := channelPair(t)
+ defer a.Close()
+ defer b.Close()
+ defer mux.Close()
+
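+	// Craft a data packet one byte larger than the negotiated maximum
+	// payload; the receiving mux should treat it as fatal and shut down.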
+ large := make([]byte, a.maxRemotePayload+1)
+ packet := make([]byte, 1+4+4+1+len(large))
+ packet[0] = msgChannelData
+ marshalUint32(packet[1:], a.remoteId)
+ marshalUint32(packet[5:], uint32(len(large)))
+ packet[9] = 42
+
+ if err := a.mux.conn.writePacket(packet); err != nil {
+		t.Errorf("could not send packet: %v", err)
+ }
+
+ go a.SendRequest("hello", false, nil)
+
+ _, ok := <-b.incomingRequests
+ if ok {
+ t.Errorf("connection still alive after receiving large packet.")
+ }
+}
+
+// Don't ship code with debug=true.
+func TestDebug(t *testing.T) {
+ if debugMux {
+ t.Error("mux debug switched on")
+ }
+ if debugHandshake {
+ t.Error("handshake debug switched on")
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/session_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/session_test.go
new file mode 100644
index 000000000..f7f0f7642
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/session_test.go
@@ -0,0 +1,774 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+// Session tests.
+
+import (
+ "bytes"
+ crypto_rand "crypto/rand"
+ "errors"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "testing"
+
+ "golang.org/x/crypto/ssh/terminal"
+)
+
+type serverType func(Channel, <-chan *Request, *testing.T)
+
+// dial constructs a new test server and returns a *ClientConn.
+func dial(handler serverType, t *testing.T) *Client {
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+
+ go func() {
+ defer c1.Close()
+ conf := ServerConfig{
+ NoClientAuth: true,
+ }
+ conf.AddHostKey(testSigners["rsa"])
+
+ _, chans, reqs, err := NewServerConn(c1, &conf)
+ if err != nil {
+ t.Fatalf("Unable to handshake: %v", err)
+ }
+ go DiscardRequests(reqs)
+
+ for newCh := range chans {
+ if newCh.ChannelType() != "session" {
+ newCh.Reject(UnknownChannelType, "unknown channel type")
+ continue
+ }
+
+ ch, inReqs, err := newCh.Accept()
+ if err != nil {
+ t.Errorf("Accept: %v", err)
+ continue
+ }
+ go func() {
+ handler(ch, inReqs, t)
+ }()
+ }
+ }()
+
+ config := &ClientConfig{
+ User: "testuser",
+ }
+
+ conn, chans, reqs, err := NewClientConn(c2, "", config)
+ if err != nil {
+ t.Fatalf("unable to dial remote side: %v", err)
+ }
+
+ return NewClient(conn, chans, reqs)
+}
+
+// Test a simple string is returned to session.Stdout.
+func TestSessionShell(t *testing.T) {
+ conn := dial(shellHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+ stdout := new(bytes.Buffer)
+ session.Stdout = stdout
+ if err := session.Shell(); err != nil {
+ t.Fatalf("Unable to execute command: %s", err)
+ }
+ if err := session.Wait(); err != nil {
+ t.Fatalf("Remote command did not exit cleanly: %v", err)
+ }
+ actual := stdout.String()
+ if actual != "golang" {
+ t.Fatalf("Remote shell did not return expected string: expected=golang, actual=%s", actual)
+ }
+}
+
+// TODO(dfc) add support for Std{in,err}Pipe when the Server supports it.
+
+// Test a simple string is returned via StdoutPipe.
+func TestSessionStdoutPipe(t *testing.T) {
+ conn := dial(shellHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+ stdout, err := session.StdoutPipe()
+ if err != nil {
+ t.Fatalf("Unable to request StdoutPipe(): %v", err)
+ }
+ var buf bytes.Buffer
+ if err := session.Shell(); err != nil {
+ t.Fatalf("Unable to execute command: %v", err)
+ }
+ done := make(chan bool, 1)
+ go func() {
+ if _, err := io.Copy(&buf, stdout); err != nil {
+ t.Errorf("Copy of stdout failed: %v", err)
+ }
+ done <- true
+ }()
+ if err := session.Wait(); err != nil {
+ t.Fatalf("Remote command did not exit cleanly: %v", err)
+ }
+ <-done
+ actual := buf.String()
+ if actual != "golang" {
+ t.Fatalf("Remote shell did not return expected string: expected=golang, actual=%s", actual)
+ }
+}
+
+// Test that a simple string is returned via the Output helper,
+// and that stderr is discarded.
+func TestSessionOutput(t *testing.T) {
+ conn := dial(fixedOutputHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+
+ buf, err := session.Output("") // cmd is ignored by fixedOutputHandler
+ if err != nil {
+ t.Error("Remote command did not exit cleanly:", err)
+ }
+ w := "this-is-stdout."
+ g := string(buf)
+ if g != w {
+ t.Error("Remote command did not return expected string:")
+ t.Logf("want %q", w)
+ t.Logf("got %q", g)
+ }
+}
+
+// Test that both stdout and stderr are returned
+// via the CombinedOutput helper.
+func TestSessionCombinedOutput(t *testing.T) {
+ conn := dial(fixedOutputHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+
+ buf, err := session.CombinedOutput("") // cmd is ignored by fixedOutputHandler
+ if err != nil {
+ t.Error("Remote command did not exit cleanly:", err)
+ }
+ const stdout = "this-is-stdout."
+ const stderr = "this-is-stderr."
+ g := string(buf)
+ if g != stdout+stderr && g != stderr+stdout {
+ t.Error("Remote command did not return expected string:")
+ t.Logf("want %q, or %q", stdout+stderr, stderr+stdout)
+ t.Logf("got %q", g)
+ }
+}
+
+// Test non-0 exit status is returned correctly.
+func TestExitStatusNonZero(t *testing.T) {
+ conn := dial(exitStatusNonZeroHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+ if err := session.Shell(); err != nil {
+ t.Fatalf("Unable to execute command: %v", err)
+ }
+ err = session.Wait()
+ if err == nil {
+ t.Fatalf("expected command to fail but it didn't")
+ }
+ e, ok := err.(*ExitError)
+ if !ok {
+ t.Fatalf("expected *ExitError but got %T", err)
+ }
+ if e.ExitStatus() != 15 {
+ t.Fatalf("expected command to exit with 15 but got %v", e.ExitStatus())
+ }
+}
+
+// Test 0 exit status is returned correctly.
+func TestExitStatusZero(t *testing.T) {
+ conn := dial(exitStatusZeroHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+
+ if err := session.Shell(); err != nil {
+ t.Fatalf("Unable to execute command: %v", err)
+ }
+ err = session.Wait()
+ if err != nil {
+ t.Fatalf("expected nil but got %v", err)
+ }
+}
+
+// Test exit signal and status are both returned correctly.
+func TestExitSignalAndStatus(t *testing.T) {
+ conn := dial(exitSignalAndStatusHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+ if err := session.Shell(); err != nil {
+ t.Fatalf("Unable to execute command: %v", err)
+ }
+ err = session.Wait()
+ if err == nil {
+ t.Fatalf("expected command to fail but it didn't")
+ }
+ e, ok := err.(*ExitError)
+ if !ok {
+ t.Fatalf("expected *ExitError but got %T", err)
+ }
+ if e.Signal() != "TERM" || e.ExitStatus() != 15 {
+ t.Fatalf("expected command to exit with signal TERM and status 15 but got signal %s and status %v", e.Signal(), e.ExitStatus())
+ }
+}
+
+// Test exit signal and status are both returned correctly.
+func TestKnownExitSignalOnly(t *testing.T) {
+ conn := dial(exitSignalHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+ if err := session.Shell(); err != nil {
+ t.Fatalf("Unable to execute command: %v", err)
+ }
+ err = session.Wait()
+ if err == nil {
+ t.Fatalf("expected command to fail but it didn't")
+ }
+ e, ok := err.(*ExitError)
+ if !ok {
+ t.Fatalf("expected *ExitError but got %T", err)
+ }
+ if e.Signal() != "TERM" || e.ExitStatus() != 143 {
+ t.Fatalf("expected command to exit with signal TERM and status 143 but got signal %s and status %v", e.Signal(), e.ExitStatus())
+ }
+}
+
+// Test exit signal and status are both returned correctly.
+func TestUnknownExitSignal(t *testing.T) {
+ conn := dial(exitSignalUnknownHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+ if err := session.Shell(); err != nil {
+ t.Fatalf("Unable to execute command: %v", err)
+ }
+ err = session.Wait()
+ if err == nil {
+ t.Fatalf("expected command to fail but it didn't")
+ }
+ e, ok := err.(*ExitError)
+ if !ok {
+ t.Fatalf("expected *ExitError but got %T", err)
+ }
+ if e.Signal() != "SYS" || e.ExitStatus() != 128 {
+ t.Fatalf("expected command to exit with signal SYS and status 128 but got signal %s and status %v", e.Signal(), e.ExitStatus())
+ }
+}
+
+// Test WaitMsg is not returned if the channel closes abruptly.
+func TestExitWithoutStatusOrSignal(t *testing.T) {
+ conn := dial(exitWithoutSignalOrStatus, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+ if err := session.Shell(); err != nil {
+ t.Fatalf("Unable to execute command: %v", err)
+ }
+ err = session.Wait()
+ if err == nil {
+ t.Fatalf("expected command to fail but it didn't")
+ }
+ _, ok := err.(*ExitError)
+ if ok {
+ // you can't actually test for errors.errorString
+ // because it's not exported.
+ t.Fatalf("expected *errorString but got %T", err)
+ }
+}
+
+// windowTestBytes is the number of bytes that we'll send to the SSH server.
+const windowTestBytes = 16000 * 200
+
+// TestServerWindow writes random data to the server. The server is expected to echo
+// the same data back, which is compared against the original.
+func TestServerWindow(t *testing.T) {
+ origBuf := bytes.NewBuffer(make([]byte, 0, windowTestBytes))
+ io.CopyN(origBuf, crypto_rand.Reader, windowTestBytes)
+ origBytes := origBuf.Bytes()
+
+ conn := dial(echoHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer session.Close()
+ result := make(chan []byte)
+
+ go func() {
+ defer close(result)
+ echoedBuf := bytes.NewBuffer(make([]byte, 0, windowTestBytes))
+ serverStdout, err := session.StdoutPipe()
+ if err != nil {
+ t.Errorf("StdoutPipe failed: %v", err)
+ return
+ }
+ n, err := copyNRandomly("stdout", echoedBuf, serverStdout, windowTestBytes)
+ if err != nil && err != io.EOF {
+ t.Errorf("Read only %d bytes from server, expected %d: %v", n, windowTestBytes, err)
+ }
+ result <- echoedBuf.Bytes()
+ }()
+
+ serverStdin, err := session.StdinPipe()
+ if err != nil {
+ t.Fatalf("StdinPipe failed: %v", err)
+ }
+ written, err := copyNRandomly("stdin", serverStdin, origBuf, windowTestBytes)
+ if err != nil {
+ t.Fatalf("failed to copy origBuf to serverStdin: %v", err)
+ }
+ if written != windowTestBytes {
+ t.Fatalf("Wrote only %d of %d bytes to server", written, windowTestBytes)
+ }
+
+ echoedBytes := <-result
+
+ if !bytes.Equal(origBytes, echoedBytes) {
+ t.Fatalf("Echoed buffer differed from original, orig %d, echoed %d", len(origBytes), len(echoedBytes))
+ }
+}
+
+// Verify the client can handle a keepalive packet from the server.
+func TestClientHandlesKeepalives(t *testing.T) {
+ conn := dial(channelKeepaliveSender, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer session.Close()
+ if err := session.Shell(); err != nil {
+ t.Fatalf("Unable to execute command: %v", err)
+ }
+ err = session.Wait()
+ if err != nil {
+ t.Fatalf("expected nil but got: %v", err)
+ }
+}
+
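+// exitStatusMsg and exitSignalMsg mirror the payloads of the "exit-status"
+// and "exit-signal" channel requests (RFC 4254, section 6.10).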
+type exitStatusMsg struct {
+ Status uint32
+}
+
+type exitSignalMsg struct {
+ Signal string
+ CoreDumped bool
+ Errmsg string
+ Lang string
+}
+
+func handleTerminalRequests(in <-chan *Request) {
+ for req := range in {
+ ok := false
+ switch req.Type {
+ case "shell":
+ ok = true
+ if len(req.Payload) > 0 {
+ // We don't accept any commands, only the default shell.
+ ok = false
+ }
+ case "env":
+ ok = true
+ }
+ req.Reply(ok, nil)
+ }
+}
+
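+// newServerShell wraps the channel in a terminal with the given prompt and
+// services the channel's requests in a background goroutine.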
+func newServerShell(ch Channel, in <-chan *Request, prompt string) *terminal.Terminal {
+ term := terminal.NewTerminal(ch, prompt)
+ go handleTerminalRequests(in)
+ return term
+}
+
+func exitStatusZeroHandler(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ // this string is returned to stdout
+ shell := newServerShell(ch, in, "> ")
+ readLine(shell, t)
+ sendStatus(0, ch, t)
+}
+
+func exitStatusNonZeroHandler(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ shell := newServerShell(ch, in, "> ")
+ readLine(shell, t)
+ sendStatus(15, ch, t)
+}
+
+func exitSignalAndStatusHandler(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ shell := newServerShell(ch, in, "> ")
+ readLine(shell, t)
+ sendStatus(15, ch, t)
+ sendSignal("TERM", ch, t)
+}
+
+func exitSignalHandler(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ shell := newServerShell(ch, in, "> ")
+ readLine(shell, t)
+ sendSignal("TERM", ch, t)
+}
+
+func exitSignalUnknownHandler(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ shell := newServerShell(ch, in, "> ")
+ readLine(shell, t)
+ sendSignal("SYS", ch, t)
+}
+
+func exitWithoutSignalOrStatus(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ shell := newServerShell(ch, in, "> ")
+ readLine(shell, t)
+}
+
+func shellHandler(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ // this string is returned to stdout
+ shell := newServerShell(ch, in, "golang")
+ readLine(shell, t)
+ sendStatus(0, ch, t)
+}
+
+// Ignores the command, writes fixed strings to stderr and stdout.
+// Strings are "this-is-stdout." and "this-is-stderr.".
+func fixedOutputHandler(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ _, err := ch.Read(nil)
+
+ req, ok := <-in
+ if !ok {
+		t.Fatalf("expected a channel request, but the request channel was closed (Read error: %v)", err)
+ return
+ }
+
+ // ignore request, always send some text
+ req.Reply(true, nil)
+
+ _, err = io.WriteString(ch, "this-is-stdout.")
+ if err != nil {
+ t.Fatalf("error writing on server: %v", err)
+ }
+ _, err = io.WriteString(ch.Stderr(), "this-is-stderr.")
+ if err != nil {
+ t.Fatalf("error writing on server: %v", err)
+ }
+ sendStatus(0, ch, t)
+}
+
+func readLine(shell *terminal.Terminal, t *testing.T) {
+ if _, err := shell.ReadLine(); err != nil && err != io.EOF {
+ t.Errorf("unable to read line: %v", err)
+ }
+}
+
+func sendStatus(status uint32, ch Channel, t *testing.T) {
+ msg := exitStatusMsg{
+ Status: status,
+ }
+ if _, err := ch.SendRequest("exit-status", false, Marshal(&msg)); err != nil {
+ t.Errorf("unable to send status: %v", err)
+ }
+}
+
+func sendSignal(signal string, ch Channel, t *testing.T) {
+ sig := exitSignalMsg{
+ Signal: signal,
+ CoreDumped: false,
+ Errmsg: "Process terminated",
+ Lang: "en-GB-oed",
+ }
+ if _, err := ch.SendRequest("exit-signal", false, Marshal(&sig)); err != nil {
+ t.Errorf("unable to send signal: %v", err)
+ }
+}
+
+func discardHandler(ch Channel, t *testing.T) {
+ defer ch.Close()
+ io.Copy(ioutil.Discard, ch)
+}
+
+func echoHandler(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ if n, err := copyNRandomly("echohandler", ch, ch, windowTestBytes); err != nil {
+ t.Errorf("short write, wrote %d, expected %d: %v ", n, windowTestBytes, err)
+ }
+}
+
+// copyNRandomly copies n bytes from src to dst. It uses a variable, and random,
+// buffer size to exercise more code paths.
+func copyNRandomly(title string, dst io.Writer, src io.Reader, n int) (int, error) {
+ var (
+ buf = make([]byte, 32*1024)
+ written int
+ remaining = n
+ )
+ for remaining > 0 {
+ l := rand.Intn(1 << 15)
+ if remaining < l {
+ l = remaining
+ }
+ nr, er := src.Read(buf[:l])
+ nw, ew := dst.Write(buf[:nr])
+ remaining -= nw
+ written += nw
+ if ew != nil {
+ return written, ew
+ }
+ if nr != nw {
+ return written, io.ErrShortWrite
+ }
+ if er != nil && er != io.EOF {
+ return written, er
+ }
+ }
+ return written, nil
+}
+
+func channelKeepaliveSender(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ shell := newServerShell(ch, in, "> ")
+ readLine(shell, t)
+ if _, err := ch.SendRequest("keepalive@openssh.com", true, nil); err != nil {
+ t.Errorf("unable to send channel keepalive request: %v", err)
+ }
+ sendStatus(0, ch, t)
+}
+
+func TestClientWriteEOF(t *testing.T) {
+ conn := dial(simpleEchoHandler, t)
+ defer conn.Close()
+
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer session.Close()
+ stdin, err := session.StdinPipe()
+ if err != nil {
+ t.Fatalf("StdinPipe failed: %v", err)
+ }
+ stdout, err := session.StdoutPipe()
+ if err != nil {
+ t.Fatalf("StdoutPipe failed: %v", err)
+ }
+
+ data := []byte(`0000`)
+ _, err = stdin.Write(data)
+ if err != nil {
+ t.Fatalf("Write failed: %v", err)
+ }
+ stdin.Close()
+
+ res, err := ioutil.ReadAll(stdout)
+ if err != nil {
+ t.Fatalf("Read failed: %v", err)
+ }
+
+ if !bytes.Equal(data, res) {
+ t.Fatalf("Read differed from write, wrote: %v, read: %v", data, res)
+ }
+}
+
+func simpleEchoHandler(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ data, err := ioutil.ReadAll(ch)
+ if err != nil {
+ t.Errorf("handler read error: %v", err)
+ }
+ _, err = ch.Write(data)
+ if err != nil {
+ t.Errorf("handler write error: %v", err)
+ }
+}
+
+func TestSessionID(t *testing.T) {
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+ defer c1.Close()
+ defer c2.Close()
+
+ serverID := make(chan []byte, 1)
+ clientID := make(chan []byte, 1)
+
+ serverConf := &ServerConfig{
+ NoClientAuth: true,
+ }
+ serverConf.AddHostKey(testSigners["ecdsa"])
+ clientConf := &ClientConfig{
+ User: "user",
+ }
+
+ go func() {
+ conn, chans, reqs, err := NewServerConn(c1, serverConf)
+ if err != nil {
+ t.Fatalf("server handshake: %v", err)
+ }
+ serverID <- conn.SessionID()
+ go DiscardRequests(reqs)
+ for ch := range chans {
+ ch.Reject(Prohibited, "")
+ }
+ }()
+
+ go func() {
+ conn, chans, reqs, err := NewClientConn(c2, "", clientConf)
+ if err != nil {
+ t.Fatalf("client handshake: %v", err)
+ }
+ clientID <- conn.SessionID()
+ go DiscardRequests(reqs)
+ for ch := range chans {
+ ch.Reject(Prohibited, "")
+ }
+ }()
+
+ s := <-serverID
+ c := <-clientID
+ if bytes.Compare(s, c) != 0 {
+ t.Errorf("server session ID (%x) != client session ID (%x)", s, c)
+ } else if len(s) == 0 {
+ t.Errorf("client and server SessionID were empty.")
+ }
+}
+
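+// noReadConn wraps a net.Conn and records whether Read was ever called, so
+// tests can verify that NewServerConn rejects an incomplete ServerConfig
+// before touching the connection.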
+type noReadConn struct {
+ readSeen bool
+ net.Conn
+}
+
+func (c *noReadConn) Close() error {
+ return nil
+}
+
+func (c *noReadConn) Read(b []byte) (int, error) {
+ c.readSeen = true
+ return 0, errors.New("noReadConn error")
+}
+
+func TestInvalidServerConfiguration(t *testing.T) {
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+ defer c1.Close()
+ defer c2.Close()
+
+ serveConn := noReadConn{Conn: c1}
+ serverConf := &ServerConfig{}
+
+ NewServerConn(&serveConn, serverConf)
+ if serveConn.readSeen {
+ t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing host key")
+ }
+
+ serverConf.AddHostKey(testSigners["ecdsa"])
+
+ NewServerConn(&serveConn, serverConf)
+ if serveConn.readSeen {
+ t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing authentication method")
+ }
+}
+
+func TestHostKeyAlgorithms(t *testing.T) {
+ serverConf := &ServerConfig{
+ NoClientAuth: true,
+ }
+ serverConf.AddHostKey(testSigners["rsa"])
+ serverConf.AddHostKey(testSigners["ecdsa"])
+
+ connect := func(clientConf *ClientConfig, want string) {
+ var alg string
+ clientConf.HostKeyCallback = func(h string, a net.Addr, key PublicKey) error {
+ alg = key.Type()
+ return nil
+ }
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+ defer c1.Close()
+ defer c2.Close()
+
+ go NewServerConn(c1, serverConf)
+ _, _, _, err = NewClientConn(c2, "", clientConf)
+ if err != nil {
+ t.Fatalf("NewClientConn: %v", err)
+ }
+ if alg != want {
+ t.Errorf("selected key algorithm %s, want %s", alg, want)
+ }
+ }
+
+ // By default, we get the preferred algorithm, which is ECDSA 256.
+
+ clientConf := &ClientConfig{}
+ connect(clientConf, KeyAlgoECDSA256)
+
+ // Client asks for RSA explicitly.
+ clientConf.HostKeyAlgorithms = []string{KeyAlgoRSA}
+ connect(clientConf, KeyAlgoRSA)
+
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+ defer c1.Close()
+ defer c2.Close()
+
+ go NewServerConn(c1, serverConf)
+ clientConf.HostKeyAlgorithms = []string{"nonexistent-hostkey-algo"}
+ _, _, _, err = NewClientConn(c2, "", clientConf)
+ if err == nil {
+ t.Fatal("succeeded connecting with unknown hostkey algorithm")
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/tcpip_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/tcpip_test.go
new file mode 100644
index 000000000..f1265cb49
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/tcpip_test.go
@@ -0,0 +1,20 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "testing"
+)
+
+func TestAutoPortListenBroken(t *testing.T) {
+ broken := "SSH-2.0-OpenSSH_5.9hh11"
+ works := "SSH-2.0-OpenSSH_6.1"
+ if !isBrokenOpenSSHVersion(broken) {
+ t.Errorf("version %q not marked as broken", broken)
+ }
+ if isBrokenOpenSSHVersion(works) {
+ t.Errorf("version %q marked as broken", works)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/terminal_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/terminal_test.go
new file mode 100644
index 000000000..a663fe41b
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal/terminal_test.go
@@ -0,0 +1,269 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package terminal
+
+import (
+ "io"
+ "testing"
+)
+
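+// MockTerminal feeds canned input to the terminal in chunks of at most
+// bytesPerRead bytes and records everything the terminal writes back.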
+type MockTerminal struct {
+ toSend []byte
+ bytesPerRead int
+ received []byte
+}
+
+func (c *MockTerminal) Read(data []byte) (n int, err error) {
+ n = len(data)
+ if n == 0 {
+ return
+ }
+ if n > len(c.toSend) {
+ n = len(c.toSend)
+ }
+ if n == 0 {
+ return 0, io.EOF
+ }
+ if c.bytesPerRead > 0 && n > c.bytesPerRead {
+ n = c.bytesPerRead
+ }
+ copy(data, c.toSend[:n])
+ c.toSend = c.toSend[n:]
+ return
+}
+
+func (c *MockTerminal) Write(data []byte) (n int, err error) {
+ c.received = append(c.received, data...)
+ return len(data), nil
+}
+
+func TestClose(t *testing.T) {
+ c := &MockTerminal{}
+ ss := NewTerminal(c, "> ")
+ line, err := ss.ReadLine()
+ if line != "" {
+ t.Errorf("Expected empty line but got: %s", line)
+ }
+ if err != io.EOF {
+ t.Errorf("Error should have been EOF but got: %s", err)
+ }
+}
+
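+// keyPressTests feeds raw terminal input to the line editor; each case lists
+// the input bytes, the line ReadLine should return, the expected error, and
+// how many preceding lines to read and discard first.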
+var keyPressTests = []struct {
+ in string
+ line string
+ err error
+ throwAwayLines int
+}{
+ {
+ err: io.EOF,
+ },
+ {
+ in: "\r",
+ line: "",
+ },
+ {
+ in: "foo\r",
+ line: "foo",
+ },
+ {
+ in: "a\x1b[Cb\r", // right
+ line: "ab",
+ },
+ {
+ in: "a\x1b[Db\r", // left
+ line: "ba",
+ },
+ {
+ in: "a\177b\r", // backspace
+ line: "b",
+ },
+ {
+ in: "\x1b[A\r", // up
+ },
+ {
+ in: "\x1b[B\r", // down
+ },
+ {
+ in: "line\x1b[A\x1b[B\r", // up then down
+ line: "line",
+ },
+ {
+ in: "line1\rline2\x1b[A\r", // recall previous line.
+ line: "line1",
+ throwAwayLines: 1,
+ },
+ {
+ // recall two previous lines and append.
+ in: "line1\rline2\rline3\x1b[A\x1b[Axxx\r",
+ line: "line1xxx",
+ throwAwayLines: 2,
+ },
+ {
+ // Ctrl-A to move to beginning of line followed by ^K to kill
+ // line.
+ in: "a b \001\013\r",
+ line: "",
+ },
+ {
+ // Ctrl-A to move to beginning of line, Ctrl-E to move to end,
+ // finally ^K to kill nothing.
+ in: "a b \001\005\013\r",
+ line: "a b ",
+ },
+ {
+ in: "\027\r",
+ line: "",
+ },
+ {
+ in: "a\027\r",
+ line: "",
+ },
+ {
+ in: "a \027\r",
+ line: "",
+ },
+ {
+ in: "a b\027\r",
+ line: "a ",
+ },
+ {
+ in: "a b \027\r",
+ line: "a ",
+ },
+ {
+ in: "one two thr\x1b[D\027\r",
+ line: "one two r",
+ },
+ {
+ in: "\013\r",
+ line: "",
+ },
+ {
+ in: "a\013\r",
+ line: "a",
+ },
+ {
+ in: "ab\x1b[D\013\r",
+ line: "a",
+ },
+ {
+ in: "Ξεσκεπάζω\r",
+ line: "Ξεσκεπάζω",
+ },
+ {
+ in: "£\r\x1b[A\177\r", // non-ASCII char, enter, up, backspace.
+ line: "",
+ throwAwayLines: 1,
+ },
+ {
+ in: "£\r££\x1b[A\x1b[B\177\r", // non-ASCII char, enter, 2x non-ASCII, up, down, backspace, enter.
+ line: "£",
+ throwAwayLines: 1,
+ },
+ {
+ // Ctrl-D at the end of the line should be ignored.
+ in: "a\004\r",
+ line: "a",
+ },
+ {
+ // a, b, left, Ctrl-D should erase the b.
+ in: "ab\x1b[D\004\r",
+ line: "a",
+ },
+ {
+ // a, b, c, d, left, left, ^U should erase to the beginning of
+ // the line.
+ in: "abcd\x1b[D\x1b[D\025\r",
+ line: "cd",
+ },
+ {
+ // Bracketed paste mode: control sequences should be returned
+ // verbatim in paste mode.
+ in: "abc\x1b[200~de\177f\x1b[201~\177\r",
+ line: "abcde\177",
+ },
+ {
+ // Enter in bracketed paste mode should still work.
+ in: "abc\x1b[200~d\refg\x1b[201~h\r",
+ line: "efgh",
+ throwAwayLines: 1,
+ },
+ {
+ // Lines consisting entirely of pasted data should be indicated as such.
+ in: "\x1b[200~a\r",
+ line: "a",
+ err: ErrPasteIndicator,
+ },
+}
+
+func TestKeyPresses(t *testing.T) {
+ for i, test := range keyPressTests {
+ for j := 1; j < len(test.in); j++ {
+ c := &MockTerminal{
+ toSend: []byte(test.in),
+ bytesPerRead: j,
+ }
+ ss := NewTerminal(c, "> ")
+ for k := 0; k < test.throwAwayLines; k++ {
+ _, err := ss.ReadLine()
+ if err != nil {
+ t.Errorf("Throwaway line %d from test %d resulted in error: %s", k, i, err)
+ }
+ }
+ line, err := ss.ReadLine()
+ if line != test.line {
+ t.Errorf("Line resulting from test %d (%d bytes per read) was '%s', expected '%s'", i, j, line, test.line)
+ break
+ }
+ if err != test.err {
+ t.Errorf("Error resulting from test %d (%d bytes per read) was '%v', expected '%v'", i, j, err, test.err)
+ break
+ }
+ }
+ }
+}
+
+func TestPasswordNotSaved(t *testing.T) {
+ c := &MockTerminal{
+ toSend: []byte("password\r\x1b[A\r"),
+ bytesPerRead: 1,
+ }
+ ss := NewTerminal(c, "> ")
+ pw, _ := ss.ReadPassword("> ")
+ if pw != "password" {
+ t.Fatalf("failed to read password, got %s", pw)
+ }
+ line, _ := ss.ReadLine()
+ if len(line) > 0 {
+ t.Fatalf("password was saved in history")
+ }
+}
+
+var setSizeTests = []struct {
+ width, height int
+}{
+ {40, 13},
+ {80, 24},
+ {132, 43},
+}
+
+func TestTerminalSetSize(t *testing.T) {
+ for _, setSize := range setSizeTests {
+ c := &MockTerminal{
+ toSend: []byte("password\r\x1b[A\r"),
+ bytesPerRead: 1,
+ }
+ ss := NewTerminal(c, "> ")
+ ss.SetSize(setSize.width, setSize.height)
+ pw, _ := ss.ReadPassword("Password: ")
+ if pw != "password" {
+ t.Fatalf("failed to read password, got %s", pw)
+ }
+ if string(c.received) != "Password: \r\n" {
+			t.Errorf("failed to set the temporary prompt: expected %q, got %q", "Password: \r\n", c.received)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/agent_unix_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/agent_unix_test.go
new file mode 100644
index 000000000..f481253c9
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/agent_unix_test.go
@@ -0,0 +1,59 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package test
+
+import (
+ "bytes"
+ "testing"
+
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/crypto/ssh/agent"
+)
+
+func TestAgentForward(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ keyring := agent.NewKeyring()
+ if err := keyring.Add(agent.AddedKey{PrivateKey: testPrivateKeys["dsa"]}); err != nil {
+ t.Fatalf("Error adding key: %s", err)
+ }
+ if err := keyring.Add(agent.AddedKey{
+ PrivateKey: testPrivateKeys["dsa"],
+ ConfirmBeforeUse: true,
+ LifetimeSecs: 3600,
+ }); err != nil {
+ t.Fatalf("Error adding key with constraints: %s", err)
+ }
+ pub := testPublicKeys["dsa"]
+
+ sess, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("NewSession: %v", err)
+ }
+ if err := agent.RequestAgentForwarding(sess); err != nil {
+ t.Fatalf("RequestAgentForwarding: %v", err)
+ }
+
+ if err := agent.ForwardToAgent(conn, keyring); err != nil {
+ t.Fatalf("SetupForwardKeyring: %v", err)
+ }
+ out, err := sess.CombinedOutput("ssh-add -L")
+ if err != nil {
+ t.Fatalf("running ssh-add: %v, out %s", err, out)
+ }
+ key, _, _, _, err := ssh.ParseAuthorizedKey(out)
+ if err != nil {
+ t.Fatalf("ParseAuthorizedKey(%q): %v", out, err)
+ }
+
+ if !bytes.Equal(key.Marshal(), pub.Marshal()) {
+ t.Fatalf("got key %s, want %s", ssh.MarshalAuthorizedKey(key), ssh.MarshalAuthorizedKey(pub))
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/cert_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/cert_test.go
new file mode 100644
index 000000000..364790f17
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/cert_test.go
@@ -0,0 +1,47 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package test
+
+import (
+ "crypto/rand"
+ "testing"
+
+ "golang.org/x/crypto/ssh"
+)
+
+func TestCertLogin(t *testing.T) {
+ s := newServer(t)
+ defer s.Shutdown()
+
+ // Use a key different from the default.
+ clientKey := testSigners["dsa"]
+ caAuthKey := testSigners["ecdsa"]
+ cert := &ssh.Certificate{
+ Key: clientKey.PublicKey(),
+ ValidPrincipals: []string{username()},
+ CertType: ssh.UserCert,
+ ValidBefore: ssh.CertTimeInfinity,
+ }
+ if err := cert.SignCert(rand.Reader, caAuthKey); err != nil {
+ t.Fatalf("SetSignature: %v", err)
+ }
+
+ certSigner, err := ssh.NewCertSigner(cert, clientKey)
+ if err != nil {
+ t.Fatalf("NewCertSigner: %v", err)
+ }
+
+ conf := &ssh.ClientConfig{
+ User: username(),
+ }
+ conf.Auth = append(conf.Auth, ssh.PublicKeys(certSigner))
+ client, err := s.TryDial(conf)
+ if err != nil {
+ t.Fatalf("TryDial: %v", err)
+ }
+ client.Close()
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/forward_unix_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/forward_unix_test.go
new file mode 100644
index 000000000..877a88cde
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/forward_unix_test.go
@@ -0,0 +1,160 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package test
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "testing"
+ "time"
+)
+
+func TestPortForward(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ sshListener, err := conn.Listen("tcp", "localhost:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ go func() {
+ sshConn, err := sshListener.Accept()
+ if err != nil {
+ t.Fatalf("listen.Accept failed: %v", err)
+ }
+
+ _, err = io.Copy(sshConn, sshConn)
+ if err != nil && err != io.EOF {
+ t.Fatalf("ssh client copy: %v", err)
+ }
+ sshConn.Close()
+ }()
+
+ forwardedAddr := sshListener.Addr().String()
+ tcpConn, err := net.Dial("tcp", forwardedAddr)
+ if err != nil {
+ t.Fatalf("TCP dial failed: %v", err)
+ }
+
+ readChan := make(chan []byte)
+ go func() {
+ data, _ := ioutil.ReadAll(tcpConn)
+ readChan <- data
+ }()
+
+ // Invent some data.
+ data := make([]byte, 100*1000)
+ for i := range data {
+ data[i] = byte(i % 255)
+ }
+
+ var sent []byte
+ for len(sent) < 1000*1000 {
+ // Send random sized chunks
+ m := rand.Intn(len(data))
+ n, err := tcpConn.Write(data[:m])
+ if err != nil {
+ break
+ }
+ sent = append(sent, data[:n]...)
+ }
+ if err := tcpConn.(*net.TCPConn).CloseWrite(); err != nil {
+ t.Errorf("tcpConn.CloseWrite: %v", err)
+ }
+
+ read := <-readChan
+
+ if len(sent) != len(read) {
+ t.Fatalf("got %d bytes, want %d", len(read), len(sent))
+ }
+ if bytes.Compare(sent, read) != 0 {
+ t.Fatalf("read back data does not match")
+ }
+
+ if err := sshListener.Close(); err != nil {
+ t.Fatalf("sshListener.Close: %v", err)
+ }
+
+ // Check that the forward disappeared.
+ tcpConn, err = net.Dial("tcp", forwardedAddr)
+ if err == nil {
+ tcpConn.Close()
+ t.Errorf("still listening to %s after closing", forwardedAddr)
+ }
+}
+
+func TestAcceptClose(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+
+ sshListener, err := conn.Listen("tcp", "localhost:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ quit := make(chan error, 1)
+ go func() {
+ for {
+ c, err := sshListener.Accept()
+ if err != nil {
+ quit <- err
+ break
+ }
+ c.Close()
+ }
+ }()
+ sshListener.Close()
+
+ select {
+ case <-time.After(1 * time.Second):
+ t.Errorf("timeout: listener did not close.")
+ case err := <-quit:
+ t.Logf("quit as expected (error %v)", err)
+ }
+}
+
+// Check that listeners exit if the underlying client transport dies.
+func TestPortForwardConnectionClose(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+
+ sshListener, err := conn.Listen("tcp", "localhost:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ quit := make(chan error, 1)
+ go func() {
+ for {
+ c, err := sshListener.Accept()
+ if err != nil {
+ quit <- err
+ break
+ }
+ c.Close()
+ }
+ }()
+
+ // It would be even nicer if we closed the server side, but it
+ // is more involved as the fd for that side is dup()ed.
+ server.clientConn.Close()
+
+ select {
+ case <-time.After(1 * time.Second):
+ t.Errorf("timeout: listener did not close.")
+ case err := <-quit:
+ t.Logf("quit as expected (error %v)", err)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/session_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/session_test.go
new file mode 100644
index 000000000..c0e714ba9
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/session_test.go
@@ -0,0 +1,340 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !windows
+
+package test
+
+// Session functional tests.
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "strings"
+ "testing"
+
+ "golang.org/x/crypto/ssh"
+)
+
+func TestRunCommandSuccess(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("session failed: %v", err)
+ }
+ defer session.Close()
+ err = session.Run("true")
+ if err != nil {
+ t.Fatalf("session failed: %v", err)
+ }
+}
+
+func TestHostKeyCheck(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+
+ conf := clientConfig()
+ hostDB := hostKeyDB()
+ conf.HostKeyCallback = hostDB.Check
+
+ // change the keys.
+ hostDB.keys[ssh.KeyAlgoRSA][25]++
+ hostDB.keys[ssh.KeyAlgoDSA][25]++
+ hostDB.keys[ssh.KeyAlgoECDSA256][25]++
+
+ conn, err := server.TryDial(conf)
+ if err == nil {
+ conn.Close()
+ t.Fatalf("dial should have failed.")
+ } else if !strings.Contains(err.Error(), "host key mismatch") {
+ t.Fatalf("'host key mismatch' not found in %v", err)
+ }
+}
+
+func TestRunCommandStdin(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("session failed: %v", err)
+ }
+ defer session.Close()
+
+ r, w := io.Pipe()
+ defer r.Close()
+ defer w.Close()
+ session.Stdin = r
+
+ err = session.Run("true")
+ if err != nil {
+ t.Fatalf("session failed: %v", err)
+ }
+}
+
+func TestRunCommandStdinError(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("session failed: %v", err)
+ }
+ defer session.Close()
+
+ r, w := io.Pipe()
+ defer r.Close()
+ session.Stdin = r
+ pipeErr := errors.New("closing write end of pipe")
+ w.CloseWithError(pipeErr)
+
+ err = session.Run("true")
+ if err != pipeErr {
+ t.Fatalf("expected %v, found %v", pipeErr, err)
+ }
+}
+
+func TestRunCommandFailed(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("session failed: %v", err)
+ }
+ defer session.Close()
+ err = session.Run(`bash -c "kill -9 $$"`)
+ if err == nil {
+ t.Fatalf("session succeeded: %v", err)
+ }
+}
+
+func TestRunCommandWeClosed(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("session failed: %v", err)
+ }
+ err = session.Shell()
+ if err != nil {
+ t.Fatalf("shell failed: %v", err)
+ }
+ err = session.Close()
+ if err != nil {
+ t.Fatalf("shell failed: %v", err)
+ }
+}
+
+func TestFuncLargeRead(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("unable to create new session: %s", err)
+ }
+
+ stdout, err := session.StdoutPipe()
+ if err != nil {
+ t.Fatalf("unable to acquire stdout pipe: %s", err)
+ }
+
+ err = session.Start("dd if=/dev/urandom bs=2048 count=1024")
+ if err != nil {
+ t.Fatalf("unable to execute remote command: %s", err)
+ }
+
+ buf := new(bytes.Buffer)
+ n, err := io.Copy(buf, stdout)
+ if err != nil {
+ t.Fatalf("error reading from remote stdout: %s", err)
+ }
+
+ if n != 2048*1024 {
+		t.Fatalf("Expected %d bytes but read only %d from remote command", 2048*1024, n)
+ }
+}
+
+func TestKeyChange(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conf := clientConfig()
+ hostDB := hostKeyDB()
+ conf.HostKeyCallback = hostDB.Check
+ conf.RekeyThreshold = 1024
+ conn := server.Dial(conf)
+ defer conn.Close()
+
+ for i := 0; i < 4; i++ {
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("unable to create new session: %s", err)
+ }
+
+ stdout, err := session.StdoutPipe()
+ if err != nil {
+ t.Fatalf("unable to acquire stdout pipe: %s", err)
+ }
+
+ err = session.Start("dd if=/dev/urandom bs=1024 count=1")
+ if err != nil {
+ t.Fatalf("unable to execute remote command: %s", err)
+ }
+ buf := new(bytes.Buffer)
+ n, err := io.Copy(buf, stdout)
+ if err != nil {
+ t.Fatalf("error reading from remote stdout: %s", err)
+ }
+
+ want := int64(1024)
+ if n != want {
+ t.Fatalf("Expected %d bytes but read only %d from remote command", want, n)
+ }
+ }
+
+ if changes := hostDB.checkCount; changes < 4 {
+ t.Errorf("got %d key changes, want 4", changes)
+ }
+}
+
+func TestInvalidTerminalMode(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("session failed: %v", err)
+ }
+ defer session.Close()
+
+ if err = session.RequestPty("vt100", 80, 40, ssh.TerminalModes{255: 1984}); err == nil {
+ t.Fatalf("req-pty failed: successful request with invalid mode")
+ }
+}
+
+func TestValidTerminalMode(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("session failed: %v", err)
+ }
+ defer session.Close()
+
+ stdout, err := session.StdoutPipe()
+ if err != nil {
+ t.Fatalf("unable to acquire stdout pipe: %s", err)
+ }
+
+ stdin, err := session.StdinPipe()
+ if err != nil {
+ t.Fatalf("unable to acquire stdin pipe: %s", err)
+ }
+
+ tm := ssh.TerminalModes{ssh.ECHO: 0}
+ if err = session.RequestPty("xterm", 80, 40, tm); err != nil {
+ t.Fatalf("req-pty failed: %s", err)
+ }
+
+ err = session.Shell()
+ if err != nil {
+ t.Fatalf("session failed: %s", err)
+ }
+
+ stdin.Write([]byte("stty -a && exit\n"))
+
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, stdout); err != nil {
+ t.Fatalf("reading failed: %s", err)
+ }
+
+ if sttyOutput := buf.String(); !strings.Contains(sttyOutput, "-echo ") {
+ t.Fatalf("terminal mode failure: expected -echo in stty output, got %s", sttyOutput)
+ }
+}
+
+func TestCiphers(t *testing.T) {
+ var config ssh.Config
+ config.SetDefaults()
+ cipherOrder := config.Ciphers
+	// This cipher is not tested if it is commented out in cipher.go; the
+	// handshake falls back to the next available cipher, as per line 292.
+ cipherOrder = append(cipherOrder, "aes128-cbc")
+
+ for _, ciph := range cipherOrder {
+ server := newServer(t)
+ defer server.Shutdown()
+ conf := clientConfig()
+ conf.Ciphers = []string{ciph}
+		// Don't fail if sshd doesn't have the cipher.
+ conf.Ciphers = append(conf.Ciphers, cipherOrder...)
+ conn, err := server.TryDial(conf)
+ if err == nil {
+ conn.Close()
+ } else {
+ t.Fatalf("failed for cipher %q", ciph)
+ }
+ }
+}
+
+func TestMACs(t *testing.T) {
+ var config ssh.Config
+ config.SetDefaults()
+ macOrder := config.MACs
+
+ for _, mac := range macOrder {
+ server := newServer(t)
+ defer server.Shutdown()
+ conf := clientConfig()
+ conf.MACs = []string{mac}
+		// Don't fail if sshd doesn't have the MAC.
+ conf.MACs = append(conf.MACs, macOrder...)
+ if conn, err := server.TryDial(conf); err == nil {
+ conn.Close()
+ } else {
+ t.Fatalf("failed for MAC %q", mac)
+ }
+ }
+}
+
+func TestKeyExchanges(t *testing.T) {
+ var config ssh.Config
+ config.SetDefaults()
+ kexOrder := config.KeyExchanges
+ for _, kex := range kexOrder {
+ server := newServer(t)
+ defer server.Shutdown()
+ conf := clientConfig()
+		// Don't fail if sshd doesn't have the kex.
+ conf.KeyExchanges = append([]string{kex}, kexOrder...)
+ conn, err := server.TryDial(conf)
+ if err == nil {
+ conn.Close()
+ } else {
+ t.Errorf("failed for kex %q", kex)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/tcpip_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/tcpip_test.go
new file mode 100644
index 000000000..a2eb9358d
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/tcpip_test.go
@@ -0,0 +1,46 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !windows
+
+package test
+
+// direct-tcpip functional tests
+
+import (
+ "io"
+ "net"
+ "testing"
+)
+
+func TestDial(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ sshConn := server.Dial(clientConfig())
+ defer sshConn.Close()
+
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("Listen: %v", err)
+ }
+ defer l.Close()
+
+ go func() {
+ for {
+ c, err := l.Accept()
+ if err != nil {
+ break
+ }
+
+ io.WriteString(c, c.RemoteAddr().String())
+ c.Close()
+ }
+ }()
+
+ conn, err := sshConn.Dial("tcp", l.Addr().String())
+ if err != nil {
+ t.Fatalf("Dial: %v", err)
+ }
+ defer conn.Close()
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/test_unix_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/test_unix_test.go
new file mode 100644
index 000000000..f1fc50b2e
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/test_unix_test.go
@@ -0,0 +1,261 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd plan9
+
+package test
+
+// functional test harness for unix.
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "os"
+ "os/exec"
+ "os/user"
+ "path/filepath"
+ "testing"
+ "text/template"
+
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/crypto/ssh/testdata"
+)
+
+const sshd_config = `
+Protocol 2
+HostKey {{.Dir}}/id_rsa
+HostKey {{.Dir}}/id_dsa
+HostKey {{.Dir}}/id_ecdsa
+Pidfile {{.Dir}}/sshd.pid
+#UsePrivilegeSeparation no
+KeyRegenerationInterval 3600
+ServerKeyBits 768
+SyslogFacility AUTH
+LogLevel DEBUG2
+LoginGraceTime 120
+PermitRootLogin no
+StrictModes no
+RSAAuthentication yes
+PubkeyAuthentication yes
+AuthorizedKeysFile {{.Dir}}/id_user.pub
+TrustedUserCAKeys {{.Dir}}/id_ecdsa.pub
+IgnoreRhosts yes
+RhostsRSAAuthentication no
+HostbasedAuthentication no
+`
+
+var configTmpl = template.Must(template.New("").Parse(sshd_config))
+
+type server struct {
+ t *testing.T
+ cleanup func() // executed during Shutdown
+ configfile string
+ cmd *exec.Cmd
+ output bytes.Buffer // holds stderr from sshd process
+
+ // Client half of the network connection.
+ clientConn net.Conn
+}
+
+func username() string {
+ var username string
+ if user, err := user.Current(); err == nil {
+ username = user.Username
+ } else {
+ // user.Current() currently requires cgo. If an error is
+		// returned, attempt to get the username from the environment.
+ log.Printf("user.Current: %v; falling back on $USER", err)
+ username = os.Getenv("USER")
+ }
+ if username == "" {
+ panic("Unable to get username")
+ }
+ return username
+}
+
+type storedHostKey struct {
+ // keys map from an algorithm string to binary key data.
+ keys map[string][]byte
+
+ // checkCount counts the Check calls. Used for testing
+ // rekeying.
+ checkCount int
+}
+
+func (k *storedHostKey) Add(key ssh.PublicKey) {
+ if k.keys == nil {
+ k.keys = map[string][]byte{}
+ }
+ k.keys[key.Type()] = key.Marshal()
+}
+
+func (k *storedHostKey) Check(addr string, remote net.Addr, key ssh.PublicKey) error {
+ k.checkCount++
+ algo := key.Type()
+
+	if k.keys == nil || !bytes.Equal(key.Marshal(), k.keys[algo]) {
+ return fmt.Errorf("host key mismatch. Got %q, want %q", key, k.keys[algo])
+ }
+ return nil
+}
+
+func hostKeyDB() *storedHostKey {
+ keyChecker := &storedHostKey{}
+ keyChecker.Add(testPublicKeys["ecdsa"])
+ keyChecker.Add(testPublicKeys["rsa"])
+ keyChecker.Add(testPublicKeys["dsa"])
+ return keyChecker
+}
+
+func clientConfig() *ssh.ClientConfig {
+ config := &ssh.ClientConfig{
+ User: username(),
+ Auth: []ssh.AuthMethod{
+ ssh.PublicKeys(testSigners["user"]),
+ },
+ HostKeyCallback: hostKeyDB().Check,
+ }
+ return config
+}
+
+// unixConnection creates two halves of a connected net.UnixConn. It
+// is used for connecting the Go SSH client with sshd without opening
+// ports.
+func unixConnection() (*net.UnixConn, *net.UnixConn, error) {
+ dir, err := ioutil.TempDir("", "unixConnection")
+ if err != nil {
+ return nil, nil, err
+ }
+ defer os.Remove(dir)
+
+ addr := filepath.Join(dir, "ssh")
+ listener, err := net.Listen("unix", addr)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer listener.Close()
+ c1, err := net.Dial("unix", addr)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ c2, err := listener.Accept()
+ if err != nil {
+ c1.Close()
+ return nil, nil, err
+ }
+
+ return c1.(*net.UnixConn), c2.(*net.UnixConn), nil
+}
+
+func (s *server) TryDial(config *ssh.ClientConfig) (*ssh.Client, error) {
+ sshd, err := exec.LookPath("sshd")
+ if err != nil {
+ s.t.Skipf("skipping test: %v", err)
+ }
+
+ c1, c2, err := unixConnection()
+ if err != nil {
+ s.t.Fatalf("unixConnection: %v", err)
+ }
+
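+	// sshd flags: -f uses the generated config file, -i runs sshd in
+	// inetd mode so the SSH protocol is spoken over stdin/stdout, and
+	// -e sends the server log to stderr, captured in s.output below.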
+ s.cmd = exec.Command(sshd, "-f", s.configfile, "-i", "-e")
+ f, err := c2.File()
+ if err != nil {
+ s.t.Fatalf("UnixConn.File: %v", err)
+ }
+ defer f.Close()
+ s.cmd.Stdin = f
+ s.cmd.Stdout = f
+ s.cmd.Stderr = &s.output
+ if err := s.cmd.Start(); err != nil {
+ s.t.Fail()
+ s.Shutdown()
+ s.t.Fatalf("s.cmd.Start: %v", err)
+ }
+ s.clientConn = c1
+ conn, chans, reqs, err := ssh.NewClientConn(c1, "", config)
+ if err != nil {
+ return nil, err
+ }
+ return ssh.NewClient(conn, chans, reqs), nil
+}
+
+func (s *server) Dial(config *ssh.ClientConfig) *ssh.Client {
+ conn, err := s.TryDial(config)
+ if err != nil {
+ s.t.Fail()
+ s.Shutdown()
+ s.t.Fatalf("ssh.Client: %v", err)
+ }
+ return conn
+}
+
+func (s *server) Shutdown() {
+ if s.cmd != nil && s.cmd.Process != nil {
+ // Don't check for errors; if it fails it's most
+ // likely "os: process already finished", and we don't
+ // care about that. Use os.Interrupt, so child
+ // processes are killed too.
+ s.cmd.Process.Signal(os.Interrupt)
+ s.cmd.Wait()
+ }
+ if s.t.Failed() {
+ // log any output from sshd process
+ s.t.Logf("sshd: %s", s.output.String())
+ }
+ s.cleanup()
+}
+
+func writeFile(path string, contents []byte) {
+ f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0600)
+ if err != nil {
+ panic(err)
+ }
+ defer f.Close()
+ if _, err := f.Write(contents); err != nil {
+ panic(err)
+ }
+}
+
+// newServer returns a new mock ssh server.
+func newServer(t *testing.T) *server {
+ if testing.Short() {
+ t.Skip("skipping test due to -short")
+ }
+ dir, err := ioutil.TempDir("", "sshtest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ f, err := os.Create(filepath.Join(dir, "sshd_config"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = configTmpl.Execute(f, map[string]string{
+ "Dir": dir,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.Close()
+
+ for k, v := range testdata.PEMBytes {
+ filename := "id_" + k
+ writeFile(filepath.Join(dir, filename), v)
+ writeFile(filepath.Join(dir, filename+".pub"), ssh.MarshalAuthorizedKey(testPublicKeys[k]))
+ }
+
+ return &server{
+ t: t,
+ configfile: f.Name(),
+ cleanup: func() {
+ if err := os.RemoveAll(dir); err != nil {
+ t.Error(err)
+ }
+ },
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/testdata_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/testdata_test.go
new file mode 100644
index 000000000..ae48c7516
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/test/testdata_test.go
@@ -0,0 +1,64 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// IMPLEMENTOR NOTE: To avoid a package loop, this file is in three places:
+// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three
+// instances.
+
+package test
+
+import (
+ "crypto/rand"
+ "fmt"
+
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/crypto/ssh/testdata"
+)
+
+var (
+ testPrivateKeys map[string]interface{}
+ testSigners map[string]ssh.Signer
+ testPublicKeys map[string]ssh.PublicKey
+)
+
+func init() {
+ var err error
+
+ n := len(testdata.PEMBytes)
+ testPrivateKeys = make(map[string]interface{}, n)
+ testSigners = make(map[string]ssh.Signer, n)
+ testPublicKeys = make(map[string]ssh.PublicKey, n)
+ for t, k := range testdata.PEMBytes {
+ testPrivateKeys[t], err = ssh.ParseRawPrivateKey(k)
+ if err != nil {
+ panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err))
+ }
+ testSigners[t], err = ssh.NewSignerFromKey(testPrivateKeys[t])
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err))
+ }
+ testPublicKeys[t] = testSigners[t].PublicKey()
+ }
+
+ // Create a cert and sign it for use in tests.
+ testCert := &ssh.Certificate{
+ Nonce: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
+ ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage
+ ValidAfter: 0, // unix epoch
+ ValidBefore: ssh.CertTimeInfinity, // The end of currently representable time.
+ Reserved: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
+ Key: testPublicKeys["ecdsa"],
+ SignatureKey: testPublicKeys["rsa"],
+ Permissions: ssh.Permissions{
+ CriticalOptions: map[string]string{},
+ Extensions: map[string]string{},
+ },
+ }
+ testCert.SignCert(rand.Reader, testSigners["rsa"])
+ testPrivateKeys["cert"] = testPrivateKeys["ecdsa"]
+ testSigners["cert"], err = ssh.NewCertSigner(testCert, testSigners["ecdsa"])
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create certificate signer: %v", err))
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/testdata/doc.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/testdata/doc.go
new file mode 100644
index 000000000..ae7bd8b89
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/testdata/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testdata contains test data shared between the various subpackages of
+// the golang.org/x/crypto/ssh package. Under no circumstances should
+// this data be used for production code.
+package testdata
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/testdata/keys.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/testdata/keys.go
new file mode 100644
index 000000000..5ff1c0e03
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/testdata/keys.go
@@ -0,0 +1,43 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testdata
+
+var PEMBytes = map[string][]byte{
+ "dsa": []byte(`-----BEGIN DSA PRIVATE KEY-----
+MIIBuwIBAAKBgQD6PDSEyXiI9jfNs97WuM46MSDCYlOqWw80ajN16AohtBncs1YB
+lHk//dQOvCYOsYaE+gNix2jtoRjwXhDsc25/IqQbU1ahb7mB8/rsaILRGIbA5WH3
+EgFtJmXFovDz3if6F6TzvhFpHgJRmLYVR8cqsezL3hEZOvvs2iH7MorkxwIVAJHD
+nD82+lxh2fb4PMsIiaXudAsBAoGAQRf7Q/iaPRn43ZquUhd6WwvirqUj+tkIu6eV
+2nZWYmXLlqFQKEy4Tejl7Wkyzr2OSYvbXLzo7TNxLKoWor6ips0phYPPMyXld14r
+juhT24CrhOzuLMhDduMDi032wDIZG4Y+K7ElU8Oufn8Sj5Wge8r6ANmmVgmFfynr
+FhdYCngCgYEA3ucGJ93/Mx4q4eKRDxcWD3QzWyqpbRVRRV1Vmih9Ha/qC994nJFz
+DQIdjxDIT2Rk2AGzMqFEB68Zc3O+Wcsmz5eWWzEwFxaTwOGWTyDqsDRLm3fD+QYj
+nOwuxb0Kce+gWI8voWcqC9cyRm09jGzu2Ab3Bhtpg8JJ8L7gS3MRZK4CFEx4UAfY
+Fmsr0W6fHB9nhS4/UXM8
+-----END DSA PRIVATE KEY-----
+`),
+ "ecdsa": []byte(`-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEINGWx0zo6fhJ/0EAfrPzVFyFC9s18lBt3cRoEDhS3ARooAoGCCqGSM49
+AwEHoUQDQgAEi9Hdw6KvZcWxfg2IDhA7UkpDtzzt6ZqJXSsFdLd+Kx4S3Sx4cVO+
+6/ZOXRnPmNAlLUqjShUsUBBngG0u2fqEqA==
+-----END EC PRIVATE KEY-----
+`),
+ "rsa": []byte(`-----BEGIN RSA PRIVATE KEY-----
+MIIBOwIBAAJBALdGZxkXDAjsYk10ihwU6Id2KeILz1TAJuoq4tOgDWxEEGeTrcld
+r/ZwVaFzjWzxaf6zQIJbfaSEAhqD5yo72+sCAwEAAQJBAK8PEVU23Wj8mV0QjwcJ
+tZ4GcTUYQL7cF4+ezTCE9a1NrGnCP2RuQkHEKxuTVrxXt+6OF15/1/fuXnxKjmJC
+nxkCIQDaXvPPBi0c7vAxGwNY9726x01/dNbHCE0CBtcotobxpwIhANbbQbh3JHVW
+2haQh4fAG5mhesZKAGcxTyv4mQ7uMSQdAiAj+4dzMpJWdSzQ+qGHlHMIBvVHLkqB
+y2VdEyF7DPCZewIhAI7GOI/6LDIFOvtPo6Bj2nNmyQ1HU6k/LRtNIXi4c9NJAiAr
+rrxx26itVhJmcvoUhOjwuzSlP2bE5VHAvkGB352YBg==
+-----END RSA PRIVATE KEY-----
+`),
+ "user": []byte(`-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEILYCAeq8f7V4vSSypRw7pxy8yz3V5W4qg8kSC3zJhqpQoAoGCCqGSM49
+AwEHoUQDQgAEYcO2xNKiRUYOLEHM7VYAp57HNyKbOdYtHD83Z4hzNPVC4tM5mdGD
+PLL8IEwvYu2wq+lpXfGQnNMbzYf9gspG0w==
+-----END EC PRIVATE KEY-----
+`),
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/testdata_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/testdata_test.go
new file mode 100644
index 000000000..f2828c1b5
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/testdata_test.go
@@ -0,0 +1,63 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// IMPLEMENTOR NOTE: To avoid a package loop, this file is in three places:
+// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three
+// instances.
+
+package ssh
+
+import (
+ "crypto/rand"
+ "fmt"
+
+ "golang.org/x/crypto/ssh/testdata"
+)
+
+var (
+ testPrivateKeys map[string]interface{}
+ testSigners map[string]Signer
+ testPublicKeys map[string]PublicKey
+)
+
+func init() {
+ var err error
+
+ n := len(testdata.PEMBytes)
+ testPrivateKeys = make(map[string]interface{}, n)
+ testSigners = make(map[string]Signer, n)
+ testPublicKeys = make(map[string]PublicKey, n)
+ for t, k := range testdata.PEMBytes {
+ testPrivateKeys[t], err = ParseRawPrivateKey(k)
+ if err != nil {
+ panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err))
+ }
+ testSigners[t], err = NewSignerFromKey(testPrivateKeys[t])
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err))
+ }
+ testPublicKeys[t] = testSigners[t].PublicKey()
+ }
+
+ // Create a cert and sign it for use in tests.
+ testCert := &Certificate{
+ Nonce: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
+ ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage
+ ValidAfter: 0, // unix epoch
+ ValidBefore: CertTimeInfinity, // The end of currently representable time.
+ Reserved: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
+ Key: testPublicKeys["ecdsa"],
+ SignatureKey: testPublicKeys["rsa"],
+ Permissions: Permissions{
+ CriticalOptions: map[string]string{},
+ Extensions: map[string]string{},
+ },
+ }
+ testCert.SignCert(rand.Reader, testSigners["rsa"])
+ testPrivateKeys["cert"] = testPrivateKeys["ecdsa"]
+ testSigners["cert"], err = NewCertSigner(testCert, testSigners["ecdsa"])
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create certificate signer: %v", err))
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/transport_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/transport_test.go
new file mode 100644
index 000000000..92d83abf9
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/crypto/ssh/transport_test.go
@@ -0,0 +1,109 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/binary"
+ "strings"
+ "testing"
+)
+
+func TestReadVersion(t *testing.T) {
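+	// RFC 4253 caps the identification string at 255 bytes including the
+	// trailing CRLF, so 253 bytes of version text is the longest value
+	// that must still be accepted.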
+ longversion := strings.Repeat("SSH-2.0-bla", 50)[:253]
+ cases := map[string]string{
+ "SSH-2.0-bla\r\n": "SSH-2.0-bla",
+ "SSH-2.0-bla\n": "SSH-2.0-bla",
+ longversion + "\r\n": longversion,
+ }
+
+ for in, want := range cases {
+ result, err := readVersion(bytes.NewBufferString(in))
+ if err != nil {
+ t.Errorf("readVersion(%q): %s", in, err)
+ }
+ got := string(result)
+ if got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+ }
+}
+
+func TestReadVersionError(t *testing.T) {
+ longversion := strings.Repeat("SSH-2.0-bla", 50)[:253]
+ cases := []string{
+ longversion + "too-long\r\n",
+ }
+ for _, in := range cases {
+ if _, err := readVersion(bytes.NewBufferString(in)); err == nil {
+ t.Errorf("readVersion(%q) should have failed", in)
+ }
+ }
+}
+
+func TestExchangeVersionsBasic(t *testing.T) {
+ v := "SSH-2.0-bla"
+ buf := bytes.NewBufferString(v + "\r\n")
+ them, err := exchangeVersions(buf, []byte("xyz"))
+ if err != nil {
+ t.Errorf("exchangeVersions: %v", err)
+ }
+
+ if want := "SSH-2.0-bla"; string(them) != want {
+ t.Errorf("got %q want %q for our version", them, want)
+ }
+}
+
+func TestExchangeVersions(t *testing.T) {
+ cases := []string{
+ "not\x000allowed",
+ "not allowed\n",
+ }
+ for _, c := range cases {
+ buf := bytes.NewBufferString("SSH-2.0-bla\r\n")
+ if _, err := exchangeVersions(buf, []byte(c)); err == nil {
+ t.Errorf("exchangeVersions(%q): should have failed", c)
+ }
+ }
+}
+
+type closerBuffer struct {
+ bytes.Buffer
+}
+
+func (b *closerBuffer) Close() error {
+ return nil
+}
+
+func TestTransportMaxPacketWrite(t *testing.T) {
+ buf := &closerBuffer{}
+ tr := newTransport(buf, rand.Reader, true)
+ huge := make([]byte, maxPacket+1)
+ err := tr.writePacket(huge)
+ if err == nil {
+ t.Errorf("transport accepted write for a huge packet.")
+ }
+}
+
+func TestTransportMaxPacketReader(t *testing.T) {
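+	// Hand-assemble an SSH binary packet header (RFC 4253, section 6):
+	// a uint32 packet length followed by a one-byte padding length.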
+ var header [5]byte
+ huge := make([]byte, maxPacket+128)
+ binary.BigEndian.PutUint32(header[0:], uint32(len(huge)))
+ // padding.
+ header[4] = 0
+
+ buf := &closerBuffer{}
+ buf.Write(header[:])
+ buf.Write(huge)
+
+ tr := newTransport(buf, rand.Reader, true)
+ _, err := tr.readPacket()
+ if err == nil {
+ t.Errorf("transport succeeded reading huge packet.")
+ } else if !strings.Contains(err.Error(), "large") {
+ t.Errorf("got %q, should mention %q", err.Error(), "large")
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/net/context/context_test.go b/Godeps/_workspace/src/golang.org/x/net/context/context_test.go
new file mode 100644
index 000000000..05345fc5e
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/net/context/context_test.go
@@ -0,0 +1,575 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "fmt"
+ "math/rand"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+)
+
+// otherContext is a Context that's not one of the types defined in context.go.
+// This lets us test code paths that differ based on the underlying type of the
+// Context.
+type otherContext struct {
+ Context
+}
+
+func TestBackground(t *testing.T) {
+ c := Background()
+ if c == nil {
+ t.Fatalf("Background returned nil")
+ }
+ select {
+ case x := <-c.Done():
+ t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
+ default:
+ }
+ if got, want := fmt.Sprint(c), "context.Background"; got != want {
+ t.Errorf("Background().String() = %q want %q", got, want)
+ }
+}
+
+func TestTODO(t *testing.T) {
+ c := TODO()
+ if c == nil {
+ t.Fatalf("TODO returned nil")
+ }
+ select {
+ case x := <-c.Done():
+ t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
+ default:
+ }
+ if got, want := fmt.Sprint(c), "context.TODO"; got != want {
+ t.Errorf("TODO().String() = %q want %q", got, want)
+ }
+}
+
+func TestWithCancel(t *testing.T) {
+ c1, cancel := WithCancel(Background())
+
+ if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want {
+ t.Errorf("c1.String() = %q want %q", got, want)
+ }
+
+ o := otherContext{c1}
+ c2, _ := WithCancel(o)
+ contexts := []Context{c1, o, c2}
+
+ for i, c := range contexts {
+ if d := c.Done(); d == nil {
+ t.Errorf("c[%d].Done() == %v want non-nil", i, d)
+ }
+ if e := c.Err(); e != nil {
+ t.Errorf("c[%d].Err() == %v want nil", i, e)
+ }
+
+ select {
+ case x := <-c.Done():
+ t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
+ default:
+ }
+ }
+
+ cancel()
+ time.Sleep(100 * time.Millisecond) // let cancelation propagate
+
+ for i, c := range contexts {
+ select {
+ case <-c.Done():
+ default:
+ t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i)
+ }
+ if e := c.Err(); e != Canceled {
+ t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled)
+ }
+ }
+}
+
+func TestParentFinishesChild(t *testing.T) {
+ // Context tree:
+ // parent -> cancelChild
+ // parent -> valueChild -> timerChild
+ parent, cancel := WithCancel(Background())
+ cancelChild, stop := WithCancel(parent)
+ defer stop()
+ valueChild := WithValue(parent, "key", "value")
+ timerChild, stop := WithTimeout(valueChild, 10000*time.Hour)
+ defer stop()
+
+ select {
+ case x := <-parent.Done():
+ t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
+ case x := <-cancelChild.Done():
+ t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x)
+ case x := <-timerChild.Done():
+ t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x)
+ case x := <-valueChild.Done():
+ t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x)
+ default:
+ }
+
+ // The parent's children should contain the two cancelable children.
+ pc := parent.(*cancelCtx)
+ cc := cancelChild.(*cancelCtx)
+ tc := timerChild.(*timerCtx)
+ pc.mu.Lock()
+ if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] {
+ t.Errorf("bad linkage: pc.children = %v, want %v and %v",
+ pc.children, cc, tc)
+ }
+ pc.mu.Unlock()
+
+ if p, ok := parentCancelCtx(cc.Context); !ok || p != pc {
+ t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc)
+ }
+ if p, ok := parentCancelCtx(tc.Context); !ok || p != pc {
+ t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc)
+ }
+
+ cancel()
+
+ pc.mu.Lock()
+ if len(pc.children) != 0 {
+ t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children)
+ }
+ pc.mu.Unlock()
+
+ // parent and children should all be finished.
+ check := func(ctx Context, name string) {
+ select {
+ case <-ctx.Done():
+ default:
+ t.Errorf("<-%s.Done() blocked, but shouldn't have", name)
+ }
+ if e := ctx.Err(); e != Canceled {
+ t.Errorf("%s.Err() == %v want %v", name, e, Canceled)
+ }
+ }
+ check(parent, "parent")
+ check(cancelChild, "cancelChild")
+ check(valueChild, "valueChild")
+ check(timerChild, "timerChild")
+
+	// A child derived from an already-canceled parent should start out canceled.
+ precanceledChild := WithValue(parent, "key", "value")
+ select {
+ case <-precanceledChild.Done():
+ default:
+ t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have")
+ }
+ if e := precanceledChild.Err(); e != Canceled {
+ t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled)
+ }
+}
+
+func TestChildFinishesFirst(t *testing.T) {
+ cancelable, stop := WithCancel(Background())
+ defer stop()
+ for _, parent := range []Context{Background(), cancelable} {
+ child, cancel := WithCancel(parent)
+
+ select {
+ case x := <-parent.Done():
+ t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
+ case x := <-child.Done():
+ t.Errorf("<-child.Done() == %v want nothing (it should block)", x)
+ default:
+ }
+
+ cc := child.(*cancelCtx)
+ pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background()
+ if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) {
+ t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok)
+ }
+
+ if pcok {
+ pc.mu.Lock()
+ if len(pc.children) != 1 || !pc.children[cc] {
+ t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc)
+ }
+ pc.mu.Unlock()
+ }
+
+ cancel()
+
+ if pcok {
+ pc.mu.Lock()
+ if len(pc.children) != 0 {
+ t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children)
+ }
+ pc.mu.Unlock()
+ }
+
+ // child should be finished.
+ select {
+ case <-child.Done():
+ default:
+ t.Errorf("<-child.Done() blocked, but shouldn't have")
+ }
+ if e := child.Err(); e != Canceled {
+ t.Errorf("child.Err() == %v want %v", e, Canceled)
+ }
+
+ // parent should not be finished.
+ select {
+ case x := <-parent.Done():
+ t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
+ default:
+ }
+ if e := parent.Err(); e != nil {
+ t.Errorf("parent.Err() == %v want nil", e)
+ }
+ }
+}
+
+func testDeadline(c Context, wait time.Duration, t *testing.T) {
+ select {
+ case <-time.After(wait):
+ t.Fatalf("context should have timed out")
+ case <-c.Done():
+ }
+ if e := c.Err(); e != DeadlineExceeded {
+ t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded)
+ }
+}
+
+func TestDeadline(t *testing.T) {
+ c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
+ if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
+ t.Errorf("c.String() = %q want prefix %q", got, prefix)
+ }
+ testDeadline(c, 200*time.Millisecond, t)
+
+ c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
+ o := otherContext{c}
+ testDeadline(o, 200*time.Millisecond, t)
+
+ c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
+ o = otherContext{c}
+ c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond))
+ testDeadline(c, 200*time.Millisecond, t)
+}
+
+func TestTimeout(t *testing.T) {
+ c, _ := WithTimeout(Background(), 100*time.Millisecond)
+ if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
+ t.Errorf("c.String() = %q want prefix %q", got, prefix)
+ }
+ testDeadline(c, 200*time.Millisecond, t)
+
+ c, _ = WithTimeout(Background(), 100*time.Millisecond)
+ o := otherContext{c}
+ testDeadline(o, 200*time.Millisecond, t)
+
+ c, _ = WithTimeout(Background(), 100*time.Millisecond)
+ o = otherContext{c}
+ c, _ = WithTimeout(o, 300*time.Millisecond)
+ testDeadline(c, 200*time.Millisecond, t)
+}
+
+func TestCanceledTimeout(t *testing.T) {
+ c, _ := WithTimeout(Background(), 200*time.Millisecond)
+ o := otherContext{c}
+ c, cancel := WithTimeout(o, 400*time.Millisecond)
+ cancel()
+ time.Sleep(100 * time.Millisecond) // let cancelation propagate
+ select {
+ case <-c.Done():
+ default:
+ t.Errorf("<-c.Done() blocked, but shouldn't have")
+ }
+ if e := c.Err(); e != Canceled {
+ t.Errorf("c.Err() == %v want %v", e, Canceled)
+ }
+}
+
+type key1 int
+type key2 int
+
+var k1 = key1(1)
+var k2 = key2(1) // same int as k1, different type
+var k3 = key2(3) // same type as k2, different int
+
+func TestValues(t *testing.T) {
+ check := func(c Context, nm, v1, v2, v3 string) {
+ if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 {
+ t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0)
+ }
+ if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 {
+ t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0)
+ }
+ if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 {
+ t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0)
+ }
+ }
+
+ c0 := Background()
+ check(c0, "c0", "", "", "")
+
+ c1 := WithValue(Background(), k1, "c1k1")
+ check(c1, "c1", "c1k1", "", "")
+
+ if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want {
+ t.Errorf("c.String() = %q want %q", got, want)
+ }
+
+ c2 := WithValue(c1, k2, "c2k2")
+ check(c2, "c2", "c1k1", "c2k2", "")
+
+ c3 := WithValue(c2, k3, "c3k3")
+	check(c3, "c3", "c1k1", "c2k2", "c3k3")
+
+ c4 := WithValue(c3, k1, nil)
+ check(c4, "c4", "", "c2k2", "c3k3")
+
+ o0 := otherContext{Background()}
+ check(o0, "o0", "", "", "")
+
+ o1 := otherContext{WithValue(Background(), k1, "c1k1")}
+ check(o1, "o1", "c1k1", "", "")
+
+ o2 := WithValue(o1, k2, "o2k2")
+ check(o2, "o2", "c1k1", "o2k2", "")
+
+ o3 := otherContext{c4}
+ check(o3, "o3", "", "c2k2", "c3k3")
+
+ o4 := WithValue(o3, k3, nil)
+ check(o4, "o4", "", "c2k2", "")
+}
+
+func TestAllocs(t *testing.T) {
+ bg := Background()
+ for _, test := range []struct {
+ desc string
+ f func()
+ limit float64
+ gccgoLimit float64
+ }{
+ {
+ desc: "Background()",
+ f: func() { Background() },
+ limit: 0,
+ gccgoLimit: 0,
+ },
+ {
+ desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1),
+ f: func() {
+ c := WithValue(bg, k1, nil)
+ c.Value(k1)
+ },
+ limit: 3,
+ gccgoLimit: 3,
+ },
+ {
+ desc: "WithTimeout(bg, 15*time.Millisecond)",
+ f: func() {
+ c, _ := WithTimeout(bg, 15*time.Millisecond)
+ <-c.Done()
+ },
+ limit: 8,
+ gccgoLimit: 15,
+ },
+ {
+ desc: "WithCancel(bg)",
+ f: func() {
+ c, cancel := WithCancel(bg)
+ cancel()
+ <-c.Done()
+ },
+ limit: 5,
+ gccgoLimit: 8,
+ },
+ {
+ desc: "WithTimeout(bg, 100*time.Millisecond)",
+ f: func() {
+ c, cancel := WithTimeout(bg, 100*time.Millisecond)
+ cancel()
+ <-c.Done()
+ },
+ limit: 8,
+ gccgoLimit: 25,
+ },
+ } {
+ limit := test.limit
+ if runtime.Compiler == "gccgo" {
+ // gccgo does not yet do escape analysis.
+			// TODO(iant): Remove this when gccgo does escape analysis.
+ limit = test.gccgoLimit
+ }
+ if n := testing.AllocsPerRun(100, test.f); n > limit {
+ t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit))
+ }
+ }
+}
+
+func TestSimultaneousCancels(t *testing.T) {
+ root, cancel := WithCancel(Background())
+ m := map[Context]CancelFunc{root: cancel}
+ q := []Context{root}
+ // Create a tree of contexts.
+ for len(q) != 0 && len(m) < 100 {
+ parent := q[0]
+ q = q[1:]
+ for i := 0; i < 4; i++ {
+ ctx, cancel := WithCancel(parent)
+ m[ctx] = cancel
+ q = append(q, ctx)
+ }
+ }
+ // Start all the cancels in a random order.
+ var wg sync.WaitGroup
+ wg.Add(len(m))
+ for _, cancel := range m {
+ go func(cancel CancelFunc) {
+ cancel()
+ wg.Done()
+ }(cancel)
+ }
+ // Wait on all the contexts in a random order.
+ for ctx := range m {
+ select {
+ case <-ctx.Done():
+ case <-time.After(1 * time.Second):
+ buf := make([]byte, 10<<10)
+ n := runtime.Stack(buf, true)
+ t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n])
+ }
+ }
+ // Wait for all the cancel functions to return.
+ done := make(chan struct{})
+ go func() {
+ wg.Wait()
+ close(done)
+ }()
+ select {
+ case <-done:
+ case <-time.After(1 * time.Second):
+ buf := make([]byte, 10<<10)
+ n := runtime.Stack(buf, true)
+ t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n])
+ }
+}
+
+func TestInterlockedCancels(t *testing.T) {
+ parent, cancelParent := WithCancel(Background())
+ child, cancelChild := WithCancel(parent)
+ go func() {
+		<-parent.Done()
+ cancelChild()
+ }()
+ cancelParent()
+ select {
+ case <-child.Done():
+ case <-time.After(1 * time.Second):
+ buf := make([]byte, 10<<10)
+ n := runtime.Stack(buf, true)
+ t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n])
+ }
+}
+
+func TestLayersCancel(t *testing.T) {
+ testLayers(t, time.Now().UnixNano(), false)
+}
+
+func TestLayersTimeout(t *testing.T) {
+ testLayers(t, time.Now().UnixNano(), true)
+}
+
+func testLayers(t *testing.T, seed int64, testTimeout bool) {
+ rand.Seed(seed)
+ errorf := func(format string, a ...interface{}) {
+ t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...)
+ }
+ const (
+ timeout = 200 * time.Millisecond
+ minLayers = 30
+ )
+ type value int
+ var (
+ vals []*value
+ cancels []CancelFunc
+ numTimers int
+ ctx = Background()
+ )
+ for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ {
+ switch rand.Intn(3) {
+ case 0:
+ v := new(value)
+ ctx = WithValue(ctx, v, v)
+ vals = append(vals, v)
+ case 1:
+ var cancel CancelFunc
+ ctx, cancel = WithCancel(ctx)
+ cancels = append(cancels, cancel)
+ case 2:
+ var cancel CancelFunc
+ ctx, cancel = WithTimeout(ctx, timeout)
+ cancels = append(cancels, cancel)
+ numTimers++
+ }
+ }
+ checkValues := func(when string) {
+ for _, key := range vals {
+ if val := ctx.Value(key).(*value); key != val {
+ errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key)
+ }
+ }
+ }
+ select {
+ case <-ctx.Done():
+ errorf("ctx should not be canceled yet")
+ default:
+ }
+ if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) {
+ t.Errorf("ctx.String() = %q want prefix %q", s, prefix)
+ }
+ t.Log(ctx)
+ checkValues("before cancel")
+ if testTimeout {
+ select {
+ case <-ctx.Done():
+ case <-time.After(timeout + 100*time.Millisecond):
+ errorf("ctx should have timed out")
+ }
+ checkValues("after timeout")
+ } else {
+ cancel := cancels[rand.Intn(len(cancels))]
+ cancel()
+ select {
+ case <-ctx.Done():
+ default:
+ errorf("ctx should be canceled")
+ }
+ checkValues("after cancel")
+ }
+}
+
+func TestCancelRemoves(t *testing.T) {
+ checkChildren := func(when string, ctx Context, want int) {
+ if got := len(ctx.(*cancelCtx).children); got != want {
+ t.Errorf("%s: context has %d children, want %d", when, got, want)
+ }
+ }
+
+ ctx, _ := WithCancel(Background())
+ checkChildren("after creation", ctx, 0)
+ _, cancel := WithCancel(ctx)
+	checkChildren("with WithCancel child", ctx, 1)
+ cancel()
+ checkChildren("after cancelling WithCancel child", ctx, 0)
+
+ ctx, _ = WithCancel(Background())
+ checkChildren("after creation", ctx, 0)
+ _, cancel = WithTimeout(ctx, 60*time.Minute)
+	checkChildren("with WithTimeout child", ctx, 1)
+ cancel()
+ checkChildren("after cancelling WithTimeout child", ctx, 0)
+}
diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go
new file mode 100644
index 000000000..47b53d7f1
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go
@@ -0,0 +1,72 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ctxhttp
+
+import (
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+const (
+ requestDuration = 100 * time.Millisecond
+ requestBody = "ok"
+)
+
+func TestNoTimeout(t *testing.T) {
+ ctx := context.Background()
+ resp, err := doRequest(ctx)
+
+ if resp == nil || err != nil {
+ t.Fatalf("error received from client: %v %v", err, resp)
+ }
+}
+func TestCancel(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ time.Sleep(requestDuration / 2)
+ cancel()
+ }()
+
+ resp, err := doRequest(ctx)
+
+ if resp != nil || err == nil {
+ t.Fatalf("expected error, didn't get one. resp: %v", resp)
+ }
+ if err != ctx.Err() {
+ t.Fatalf("expected error from context but got: %v", err)
+ }
+}
+
+func TestCancelAfterRequest(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+
+	resp, err := doRequest(ctx)
+	if err != nil {
+		t.Fatalf("doRequest: %v", err)
+	}
+
+ // Cancel before reading the body.
+ // Request.Body should still be readable after the context is canceled.
+ cancel()
+
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil || string(b) != requestBody {
+ t.Fatalf("could not read body: %q %v", b, err)
+ }
+}
+
+func doRequest(ctx context.Context) (*http.Response, error) {
+ var okHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ time.Sleep(requestDuration)
+ w.Write([]byte(requestBody))
+ })
+
+ serv := httptest.NewServer(okHandler)
+ defer serv.Close()
+
+ return Get(ctx, nil, serv.URL)
+}
diff --git a/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go b/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go
new file mode 100644
index 000000000..a6754dc36
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go
@@ -0,0 +1,26 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context_test
+
+import (
+ "fmt"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+func ExampleWithTimeout() {
+ // Pass a context with a timeout to tell a blocking function that it
+ // should abandon its work after the timeout elapses.
+ ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)
+ select {
+ case <-time.After(200 * time.Millisecond):
+ fmt.Println("overslept")
+ case <-ctx.Done():
+ fmt.Println(ctx.Err()) // prints "context deadline exceeded"
+ }
+ // Output:
+ // context deadline exceeded
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go
new file mode 100644
index 000000000..5a0170a95
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go
@@ -0,0 +1,96 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package clientcredentials
+
+import (
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "golang.org/x/oauth2"
+)
+
+func newConf(url string) *Config {
+ return &Config{
+ ClientID: "CLIENT_ID",
+ ClientSecret: "CLIENT_SECRET",
+ Scopes: []string{"scope1", "scope2"},
+ TokenURL: url + "/token",
+ }
+}
+
+type mockTransport struct {
+ rt func(req *http.Request) (resp *http.Response, err error)
+}
+
+func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
+ return t.rt(req)
+}
+
+func TestTokenRequest(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.String() != "/token" {
+ t.Errorf("authenticate client request URL = %q; want %q", r.URL, "/token")
+ }
+ headerAuth := r.Header.Get("Authorization")
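+		// "Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" is base64("CLIENT_ID:CLIENT_SECRET"),
+		// the expected HTTP Basic credentials for this client.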
+ if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" {
+			t.Errorf("unexpected Authorization header %q", headerAuth)
+ }
+ if got, want := r.Header.Get("Content-Type"), "application/x-www-form-urlencoded"; got != want {
+ t.Errorf("Content-Type header = %q; want %q", got, want)
+ }
+		body, err := ioutil.ReadAll(r.Body)
+		if err != nil {
+			r.Body.Close()
+			t.Errorf("failed reading request body: %v", err)
+		}
+ if string(body) != "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2" {
+ t.Errorf("payload = %q; want %q", string(body), "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2")
+ }
+ w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
+ w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&token_type=bearer"))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ tok, err := conf.Token(oauth2.NoContext)
+ if err != nil {
+ t.Error(err)
+ }
+ if !tok.Valid() {
+ t.Fatalf("token invalid. got: %#v", tok)
+ }
+ if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
+ t.Errorf("Access token = %q; want %q", tok.AccessToken, "90d64460d14870c08c81352a05dedd3465940a7c")
+ }
+ if tok.TokenType != "bearer" {
+ t.Errorf("token type = %q; want %q", tok.TokenType, "bearer")
+ }
+}
+
+func TestTokenRefreshRequest(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.String() == "/somethingelse" {
+ return
+ }
+ if r.URL.String() != "/token" {
+			t.Errorf("token request URL = %q; want %q", r.URL, "/token")
+ }
+ headerContentType := r.Header.Get("Content-Type")
+ if headerContentType != "application/x-www-form-urlencoded" {
+			t.Errorf("Content-Type header = %q; want %q", headerContentType, "application/x-www-form-urlencoded")
+ }
+ body, _ := ioutil.ReadAll(r.Body)
+ if string(body) != "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2" {
+			t.Errorf("unexpected token request payload %q", string(body))
+ }
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ c := conf.Client(oauth2.NoContext)
+ c.Get(ts.URL + "/somethingelse")
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/example_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/example_test.go
new file mode 100644
index 000000000..33b305c62
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/example_test.go
@@ -0,0 +1,45 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2_test
+
+import (
+ "fmt"
+ "log"
+
+ "golang.org/x/oauth2"
+)
+
+func ExampleConfig() {
+ conf := &oauth2.Config{
+ ClientID: "YOUR_CLIENT_ID",
+ ClientSecret: "YOUR_CLIENT_SECRET",
+ Scopes: []string{"SCOPE1", "SCOPE2"},
+ Endpoint: oauth2.Endpoint{
+ AuthURL: "https://provider.com/o/oauth2/auth",
+ TokenURL: "https://provider.com/o/oauth2/token",
+ },
+ }
+
+ // Redirect user to consent page to ask for permission
+ // for the scopes specified above.
+ url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
+ fmt.Printf("Visit the URL for the auth dialog: %v", url)
+
+ // Use the authorization code that is pushed to the redirect URL.
+	// Exchange will do the handshake to retrieve the initial
+	// access token; the client returned by conf.Client below
+	// authorizes and authenticates requests with that token.
+ var code string
+ if _, err := fmt.Scan(&code); err != nil {
+ log.Fatal(err)
+ }
+ tok, err := conf.Exchange(oauth2.NoContext, code)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ client := conf.Client(oauth2.NoContext, tok)
+ client.Get("...")
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go
new file mode 100644
index 000000000..9745be192
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go
@@ -0,0 +1,150 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appenginevm !appengine
+
+package google_test
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ "golang.org/x/oauth2/jwt"
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/urlfetch"
+)
+
+func ExampleDefaultClient() {
+ client, err := google.DefaultClient(oauth2.NoContext,
+ "https://www.googleapis.com/auth/devstorage.full_control")
+ if err != nil {
+ log.Fatal(err)
+ }
+ client.Get("...")
+}
+
+func Example_webServer() {
+ // Your credentials should be obtained from the Google
+ // Developer Console (https://console.developers.google.com).
+ conf := &oauth2.Config{
+ ClientID: "YOUR_CLIENT_ID",
+ ClientSecret: "YOUR_CLIENT_SECRET",
+ RedirectURL: "YOUR_REDIRECT_URL",
+ Scopes: []string{
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/blogger",
+ },
+ Endpoint: google.Endpoint,
+ }
+ // Redirect user to Google's consent page to ask for permission
+ // for the scopes specified above.
+ url := conf.AuthCodeURL("state")
+ fmt.Printf("Visit the URL for the auth dialog: %v", url)
+
+ // Handle the exchange code to initiate a transport.
+ tok, err := conf.Exchange(oauth2.NoContext, "authorization-code")
+ if err != nil {
+ log.Fatal(err)
+ }
+ client := conf.Client(oauth2.NoContext, tok)
+ client.Get("...")
+}
+
+func ExampleJWTConfigFromJSON() {
+ // Your credentials should be obtained from the Google
+ // Developer Console (https://console.developers.google.com).
+ // Navigate to your project, then see the "Credentials" page
+ // under "APIs & Auth".
+ // To create a service account client, click "Create new Client ID",
+ // select "Service Account", and click "Create Client ID". A JSON
+ // key file will then be downloaded to your computer.
+ data, err := ioutil.ReadFile("/path/to/your-project-key.json")
+ if err != nil {
+ log.Fatal(err)
+ }
+ conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/bigquery")
+ if err != nil {
+ log.Fatal(err)
+ }
+ // Initiate an http.Client. The following GET request will be
+	// authorized and authenticated on behalf of
+	// your service account.
+ client := conf.Client(oauth2.NoContext)
+ client.Get("...")
+}
+
+func ExampleSDKConfig() {
+ // The credentials will be obtained from the first account that
+ // has been authorized with `gcloud auth login`.
+ conf, err := google.NewSDKConfig("")
+ if err != nil {
+ log.Fatal(err)
+ }
+ // Initiate an http.Client. The following GET request will be
+	// authorized and authenticated on behalf of the SDK user.
+ client := conf.Client(oauth2.NoContext)
+ client.Get("...")
+}
+
+func Example_serviceAccount() {
+ // Your credentials should be obtained from the Google
+ // Developer Console (https://console.developers.google.com).
+ conf := &jwt.Config{
+ Email: "xxx@developer.gserviceaccount.com",
+ // The contents of your RSA private key or your PEM file
+ // that contains a private key.
+ // If you have a p12 file instead, you
+ // can use `openssl` to export the private key into a pem file.
+ //
+ // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
+ //
+ // The field only supports PEM containers with no passphrase.
+ // The openssl command will convert p12 keys to passphrase-less PEM containers.
+ PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."),
+ Scopes: []string{
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/blogger",
+ },
+ TokenURL: google.JWTTokenURL,
+ // If you would like to impersonate a user, you can
+ // create a transport with a subject. The following GET
+		// request will be made on behalf of user@example.com.
+ // Optional.
+ Subject: "user@example.com",
+ }
+	// Initiate an http.Client; the following GET request will be
+	// authorized and authenticated on behalf of user@example.com.
+ client := conf.Client(oauth2.NoContext)
+ client.Get("...")
+}
+
+func ExampleAppEngineTokenSource() {
+ var req *http.Request // from the ServeHTTP handler
+ ctx := appengine.NewContext(req)
+ client := &http.Client{
+ Transport: &oauth2.Transport{
+ Source: google.AppEngineTokenSource(ctx, "https://www.googleapis.com/auth/bigquery"),
+ Base: &urlfetch.Transport{
+ Context: ctx,
+ },
+ },
+ }
+ client.Get("...")
+}
+
+func ExampleComputeTokenSource() {
+ client := &http.Client{
+ Transport: &oauth2.Transport{
+ // Fetch from Google Compute Engine's metadata server to retrieve
+ // an access token for the provided account.
+ // If no account is specified, "default" is used.
+ Source: google.ComputeTokenSource(""),
+ },
+ }
+ client.Get("...")
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/google_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/google_test.go
new file mode 100644
index 000000000..74080edea
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/google_test.go
@@ -0,0 +1,67 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "strings"
+ "testing"
+)
+
+var webJSONKey = []byte(`
+{
+ "web": {
+ "auth_uri": "https://google.com/o/oauth2/auth",
+ "client_secret": "3Oknc4jS_wA2r9i",
+ "token_uri": "https://google.com/o/oauth2/token",
+ "client_email": "222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com",
+ "redirect_uris": ["https://www.example.com/oauth2callback"],
+ "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com",
+ "client_id": "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com",
+ "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+ "javascript_origins": ["https://www.example.com"]
+ }
+}`)
+
+var installedJSONKey = []byte(`{
+ "installed": {
+ "client_id": "222-installed.apps.googleusercontent.com",
+ "redirect_uris": ["https://www.example.com/oauth2callback"]
+ }
+}`)
+
+func TestConfigFromJSON(t *testing.T) {
+ conf, err := ConfigFromJSON(webJSONKey, "scope1", "scope2")
+ if err != nil {
+ t.Error(err)
+ }
+ if got, want := conf.ClientID, "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com"; got != want {
+ t.Errorf("ClientID = %q; want %q", got, want)
+ }
+ if got, want := conf.ClientSecret, "3Oknc4jS_wA2r9i"; got != want {
+ t.Errorf("ClientSecret = %q; want %q", got, want)
+ }
+ if got, want := conf.RedirectURL, "https://www.example.com/oauth2callback"; got != want {
+		t.Errorf("RedirectURL = %q; want %q", got, want)
+ }
+ if got, want := strings.Join(conf.Scopes, ","), "scope1,scope2"; got != want {
+ t.Errorf("Scopes = %q; want %q", got, want)
+ }
+ if got, want := conf.Endpoint.AuthURL, "https://google.com/o/oauth2/auth"; got != want {
+ t.Errorf("AuthURL = %q; want %q", got, want)
+ }
+ if got, want := conf.Endpoint.TokenURL, "https://google.com/o/oauth2/token"; got != want {
+ t.Errorf("TokenURL = %q; want %q", got, want)
+ }
+}
+
+func TestConfigFromJSON_Installed(t *testing.T) {
+ conf, err := ConfigFromJSON(installedJSONKey)
+ if err != nil {
+ t.Error(err)
+ }
+ if got, want := conf.ClientID, "222-installed.apps.googleusercontent.com"; got != want {
+ t.Errorf("ClientID = %q; want %q", got, want)
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk_test.go
new file mode 100644
index 000000000..a5aa2a646
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk_test.go
@@ -0,0 +1,46 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import "testing"
+
+func TestSDKConfig(t *testing.T) {
+ sdkConfigPath = func() (string, error) {
+ return "testdata/gcloud", nil
+ }
+
+ tests := []struct {
+ account string
+ accessToken string
+ err bool
+ }{
+ {"", "bar_access_token", false},
+ {"foo@example.com", "foo_access_token", false},
+ {"bar@example.com", "bar_access_token", false},
+ {"baz@serviceaccount.example.com", "", true},
+ }
+ for _, tt := range tests {
+ c, err := NewSDKConfig(tt.account)
+ if got, want := err != nil, tt.err; got != want {
+ if !tt.err {
+				t.Errorf("expected no error, got error: %v", err)
+ } else {
+ t.Errorf("expected error, got none")
+ }
+ continue
+ }
+ if err != nil {
+ continue
+ }
+ tok := c.initialToken
+ if tok == nil {
+ t.Errorf("expected token %q, got: nil", tt.accessToken)
+ continue
+ }
+ if tok.AccessToken != tt.accessToken {
+ t.Errorf("expected token %q, got: %q", tt.accessToken, tok.AccessToken)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials b/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials
new file mode 100644
index 000000000..ff5eefbd0
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials
@@ -0,0 +1,122 @@
+{
+ "data": [
+ {
+ "credential": {
+ "_class": "OAuth2Credentials",
+ "_module": "oauth2client.client",
+ "access_token": "foo_access_token",
+ "client_id": "foo_client_id",
+ "client_secret": "foo_client_secret",
+ "id_token": {
+ "at_hash": "foo_at_hash",
+ "aud": "foo_aud",
+ "azp": "foo_azp",
+ "cid": "foo_cid",
+ "email": "foo@example.com",
+ "email_verified": true,
+ "exp": 1420573614,
+ "iat": 1420569714,
+ "id": "1337",
+ "iss": "accounts.google.com",
+ "sub": "1337",
+ "token_hash": "foo_token_hash",
+ "verified_email": true
+ },
+ "invalid": false,
+ "refresh_token": "foo_refresh_token",
+ "revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
+ "token_expiry": "2015-01-09T00:51:51Z",
+ "token_response": {
+ "access_token": "foo_access_token",
+ "expires_in": 3600,
+ "id_token": "foo_id_token",
+ "token_type": "Bearer"
+ },
+ "token_uri": "https://accounts.google.com/o/oauth2/token",
+ "user_agent": "Cloud SDK Command Line Tool"
+ },
+ "key": {
+ "account": "foo@example.com",
+ "clientId": "foo_client_id",
+ "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
+ "type": "google-cloud-sdk"
+ }
+ },
+ {
+ "credential": {
+ "_class": "OAuth2Credentials",
+ "_module": "oauth2client.client",
+ "access_token": "bar_access_token",
+ "client_id": "bar_client_id",
+ "client_secret": "bar_client_secret",
+ "id_token": {
+ "at_hash": "bar_at_hash",
+ "aud": "bar_aud",
+ "azp": "bar_azp",
+ "cid": "bar_cid",
+ "email": "bar@example.com",
+ "email_verified": true,
+ "exp": 1420573614,
+ "iat": 1420569714,
+ "id": "1337",
+ "iss": "accounts.google.com",
+ "sub": "1337",
+ "token_hash": "bar_token_hash",
+ "verified_email": true
+ },
+ "invalid": false,
+ "refresh_token": "bar_refresh_token",
+ "revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
+ "token_expiry": "2015-01-09T00:51:51Z",
+ "token_response": {
+ "access_token": "bar_access_token",
+ "expires_in": 3600,
+ "id_token": "bar_id_token",
+ "token_type": "Bearer"
+ },
+ "token_uri": "https://accounts.google.com/o/oauth2/token",
+ "user_agent": "Cloud SDK Command Line Tool"
+ },
+ "key": {
+ "account": "bar@example.com",
+ "clientId": "bar_client_id",
+ "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
+ "type": "google-cloud-sdk"
+ }
+ },
+ {
+ "credential": {
+ "_class": "ServiceAccountCredentials",
+ "_kwargs": {},
+ "_module": "oauth2client.client",
+ "_private_key_id": "00000000000000000000000000000000",
+ "_private_key_pkcs8_text": "-----BEGIN RSA PRIVATE KEY-----\nMIICWwIBAAKBgQCt3fpiynPSaUhWSIKMGV331zudwJ6GkGmvQtwsoK2S2LbvnSwU\nNxgj4fp08kIDR5p26wF4+t/HrKydMwzftXBfZ9UmLVJgRdSswmS5SmChCrfDS5OE\nvFFcN5+6w1w8/Nu657PF/dse8T0bV95YrqyoR0Osy8WHrUOMSIIbC3hRuwIDAQAB\nAoGAJrGE/KFjn0sQ7yrZ6sXmdLawrM3mObo/2uI9T60+k7SpGbBX0/Pi6nFrJMWZ\nTVONG7P3Mu5aCPzzuVRYJB0j8aldSfzABTY3HKoWCczqw1OztJiEseXGiYz4QOyr\nYU3qDyEpdhS6q6wcoLKGH+hqRmz6pcSEsc8XzOOu7s4xW8kCQQDkc75HjhbarCnd\nJJGMe3U76+6UGmdK67ltZj6k6xoB5WbTNChY9TAyI2JC+ppYV89zv3ssj4L+02u3\nHIHFGxsHAkEAwtU1qYb1tScpchPobnYUFiVKJ7KA8EZaHVaJJODW/cghTCV7BxcJ\nbgVvlmk4lFKn3lPKAgWw7PdQsBTVBUcCrQJATPwoIirizrv3u5soJUQxZIkENAqV\nxmybZx9uetrzP7JTrVbFRf0SScMcyN90hdLJiQL8+i4+gaszgFht7sNMnwJAAbfj\nq0UXcauQwALQ7/h2oONfTg5S+MuGC/AxcXPSMZbMRGGoPh3D5YaCv27aIuS/ukQ+\n6dmm/9AGlCb64fsIWQJAPaokbjIifo+LwC5gyK73Mc4t8nAOSZDenzd/2f6TCq76\nS1dcnKiPxaED7W/y6LJiuBT2rbZiQ2L93NJpFZD/UA==\n-----END RSA PRIVATE KEY-----\n",
+ "_revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
+ "_scopes": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
+ "_service_account_email": "baz@serviceaccount.example.com",
+ "_service_account_id": "baz.serviceaccount.example.com",
+ "_token_uri": "https://accounts.google.com/o/oauth2/token",
+ "_user_agent": "Cloud SDK Command Line Tool",
+ "access_token": null,
+ "assertion_type": null,
+ "client_id": null,
+ "client_secret": null,
+ "id_token": null,
+ "invalid": false,
+ "refresh_token": null,
+ "revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
+ "service_account_name": "baz@serviceaccount.example.com",
+ "token_expiry": null,
+ "token_response": null,
+ "user_agent": "Cloud SDK Command Line Tool"
+ },
+ "key": {
+ "account": "baz@serviceaccount.example.com",
+ "clientId": "baz_client_id",
+ "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
+ "type": "google-cloud-sdk"
+ }
+ }
+ ],
+ "file_version": 1
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties b/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties
new file mode 100644
index 000000000..025de886c
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties
@@ -0,0 +1,2 @@
+[core]
+account = bar@example.com
\ No newline at end of file
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2_test.go
new file mode 100644
index 000000000..c61585542
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2_test.go
@@ -0,0 +1,62 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func TestParseINI(t *testing.T) {
+ tests := []struct {
+ ini string
+ want map[string]map[string]string
+ }{
+ {
+ `root = toor
+[foo]
+bar = hop
+ini = nin
+`,
+ map[string]map[string]string{
+ "": map[string]string{"root": "toor"},
+ "foo": map[string]string{"bar": "hop", "ini": "nin"},
+ },
+ },
+ {
+ `[empty]
+[section]
+empty=
+`,
+ map[string]map[string]string{
+ "": map[string]string{},
+ "empty": map[string]string{},
+ "section": map[string]string{"empty": ""},
+ },
+ },
+ {
+ `ignore
+[invalid
+=stuff
+;comment=true
+`,
+ map[string]map[string]string{
+ "": map[string]string{},
+ },
+ },
+ }
+ for _, tt := range tests {
+ result, err := ParseINI(strings.NewReader(tt.ini))
+ if err != nil {
+ t.Errorf("ParseINI(%q) error %v, want: no error", tt.ini, err)
+ continue
+ }
+ if !reflect.DeepEqual(result, tt.want) {
+ t.Errorf("ParseINI(%q) = %#v, want: %#v", tt.ini, result, tt.want)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/token_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/token_test.go
new file mode 100644
index 000000000..626e93354
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/internal/token_test.go
@@ -0,0 +1,28 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+ "fmt"
+ "testing"
+)
+
+func Test_providerAuthHeaderWorks(t *testing.T) {
+ for _, p := range brokenAuthHeaderProviders {
+ if providerAuthHeaderWorks(p) {
+ t.Errorf("URL: %s not found in list", p)
+ }
+ p := fmt.Sprintf("%ssomesuffix", p)
+ if providerAuthHeaderWorks(p) {
+ t.Errorf("URL: %s not found in list", p)
+ }
+ }
+ p := "https://api.not-in-the-list-example.com/"
+ if !providerAuthHeaderWorks(p) {
+ t.Errorf("URL: %s found in list", p)
+ }
+
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jwt/example_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/example_test.go
new file mode 100644
index 000000000..a9533e85f
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/example_test.go
@@ -0,0 +1,31 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jwt_test
+
+import (
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/jwt"
+)
+
+func ExampleJWTConfig() {
+ conf := &jwt.Config{
+ Email: "xxx@developer.com",
+ // The contents of your RSA private key or your PEM file
+ // that contains a private key.
+ // If you have a p12 file instead, you
+ // can use `openssl` to export the private key into a pem file.
+ //
+ // $ openssl pkcs12 -in key.p12 -out key.pem -nodes
+ //
+ // It only supports PEM containers with no passphrase.
+ PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."),
+ Subject: "user@example.com",
+ TokenURL: "https://provider.com/o/oauth2/token",
+ }
+ // Initiate an http.Client; the following GET request will be
+ // authorized and authenticated on behalf of user@example.com.
+ client := conf.Client(oauth2.NoContext)
+ client.Get("...")
+}
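+
+// A minimal variation on the example above (illustrative only): requested
+// scopes can be supplied through the Scopes field of jwt.Config before the
+// client is built. The scope URL below is a placeholder.
+//
+//     conf := &jwt.Config{
+//         Email:      "xxx@developer.com",
+//         PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."),
+//         Scopes:     []string{"https://www.googleapis.com/auth/example"},
+//         TokenURL:   "https://provider.com/o/oauth2/token",
+//     }
+//     client := conf.Client(oauth2.NoContext)
+//     _ = client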
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt_test.go
new file mode 100644
index 000000000..a9c126b47
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt_test.go
@@ -0,0 +1,134 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jwt
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "golang.org/x/oauth2"
+)
+
+var dummyPrivateKey = []byte(`-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE
+DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY
+fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK
+1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr
+k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9
+/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt
+3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn
+2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3
+nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK
+6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf
+5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e
+DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1
+M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g
+z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y
+1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK
+J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U
+f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx
+QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA
+cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr
+Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw
+5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg
+KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84
+OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd
+mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ
+5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg==
+-----END RSA PRIVATE KEY-----`)
+
+func TestJWTFetch_JSONResponse(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{
+ "access_token": "90d64460d14870c08c81352a05dedd3465940a7c",
+ "scope": "user",
+ "token_type": "bearer",
+ "expires_in": 3600
+ }`))
+ }))
+ defer ts.Close()
+
+ conf := &Config{
+ Email: "aaa@xxx.com",
+ PrivateKey: dummyPrivateKey,
+ TokenURL: ts.URL,
+ }
+ tok, err := conf.TokenSource(oauth2.NoContext).Token()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !tok.Valid() {
+ t.Errorf("Token invalid")
+ }
+ if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
+ t.Errorf("Unexpected access token, %#v", tok.AccessToken)
+ }
+ if tok.TokenType != "bearer" {
+ t.Errorf("Unexpected token type, %#v", tok.TokenType)
+ }
+ if tok.Expiry.IsZero() {
+ t.Errorf("Unexpected token expiry, %#v", tok.Expiry)
+ }
+ scope := tok.Extra("scope")
+ if scope != "user" {
+ t.Errorf("Unexpected value for scope: %v", scope)
+ }
+}
+
+func TestJWTFetch_BadResponse(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`))
+ }))
+ defer ts.Close()
+
+ conf := &Config{
+ Email: "aaa@xxx.com",
+ PrivateKey: dummyPrivateKey,
+ TokenURL: ts.URL,
+ }
+ tok, err := conf.TokenSource(oauth2.NoContext).Token()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if tok == nil {
+ t.Fatalf("token is nil")
+ }
+ if tok.Valid() {
+ t.Errorf("token is valid. want invalid.")
+ }
+ if tok.AccessToken != "" {
+ t.Errorf("Unexpected non-empty access token %q.", tok.AccessToken)
+ }
+ if want := "bearer"; tok.TokenType != want {
+ t.Errorf("TokenType = %q; want %q", tok.TokenType, want)
+ }
+ scope := tok.Extra("scope")
+ if want := "user"; scope != want {
+ t.Errorf("token scope = %q; want %q", scope, want)
+ }
+}
+
+func TestJWTFetch_BadResponseType(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`))
+ }))
+ defer ts.Close()
+ conf := &Config{
+ Email: "aaa@xxx.com",
+ PrivateKey: dummyPrivateKey,
+ TokenURL: ts.URL,
+ }
+ tok, err := conf.TokenSource(oauth2.NoContext).Token()
+ if err == nil {
+ t.Error("got a token; expected error")
+ if tok.AccessToken != "" {
+ t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
+ }
+ }
+}
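+
+// Note, descriptive only and not part of the upstream file: in these tests the
+// TokenSource built from Config signs a JWT assertion with dummyPrivateKey and
+// posts it to the httptest server standing in for TokenURL; the checks above
+// only exercise how the JSON token response is parsed.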
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go
new file mode 100644
index 000000000..448673b51
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go
@@ -0,0 +1,471 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "reflect"
+ "strconv"
+ "testing"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+type mockTransport struct {
+ rt func(req *http.Request) (resp *http.Response, err error)
+}
+
+func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
+ return t.rt(req)
+}
+
+type mockCache struct {
+ token *Token
+ readErr error
+}
+
+func (c *mockCache) ReadToken() (*Token, error) {
+ return c.token, c.readErr
+}
+
+func (c *mockCache) WriteToken(*Token) {
+ // do nothing
+}
+
+func newConf(url string) *Config {
+ return &Config{
+ ClientID: "CLIENT_ID",
+ ClientSecret: "CLIENT_SECRET",
+ RedirectURL: "REDIRECT_URL",
+ Scopes: []string{"scope1", "scope2"},
+ Endpoint: Endpoint{
+ AuthURL: url + "/auth",
+ TokenURL: url + "/token",
+ },
+ }
+}
+
+func TestAuthCodeURL(t *testing.T) {
+ conf := newConf("server")
+ url := conf.AuthCodeURL("foo", AccessTypeOffline, ApprovalForce)
+ if url != "server/auth?access_type=offline&approval_prompt=force&client_id=CLIENT_ID&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=foo" {
+ t.Errorf("Auth code URL doesn't match the expected, found: %v", url)
+ }
+}
+
+func TestAuthCodeURL_CustomParam(t *testing.T) {
+ conf := newConf("server")
+ param := SetAuthURLParam("foo", "bar")
+ url := conf.AuthCodeURL("baz", param)
+ if url != "server/auth?client_id=CLIENT_ID&foo=bar&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=baz" {
+ t.Errorf("Auth code URL doesn't match the expected, found: %v", url)
+ }
+}
+
+func TestAuthCodeURL_Optional(t *testing.T) {
+ conf := &Config{
+ ClientID: "CLIENT_ID",
+ Endpoint: Endpoint{
+ AuthURL: "/auth-url",
+ TokenURL: "/token-url",
+ },
+ }
+ url := conf.AuthCodeURL("")
+ if url != "/auth-url?client_id=CLIENT_ID&response_type=code" {
+ t.Fatalf("Auth code URL doesn't match the expected, found: %v", url)
+ }
+}
+
+func TestExchangeRequest(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.String() != "/token" {
+ t.Errorf("Unexpected exchange request URL, %v is found.", r.URL)
+ }
+ headerAuth := r.Header.Get("Authorization")
+ if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" {
+ t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
+ }
+ headerContentType := r.Header.Get("Content-Type")
+ if headerContentType != "application/x-www-form-urlencoded" {
+ t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
+ }
+ body, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Errorf("Failed reading request body: %s.", err)
+ }
+ if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" {
+ t.Errorf("Unexpected exchange payload, %v is found.", string(body))
+ }
+ w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
+ w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer"))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ tok, err := conf.Exchange(NoContext, "exchange-code")
+ if err != nil {
+ t.Error(err)
+ }
+ if !tok.Valid() {
+ t.Fatalf("Token invalid. Got: %#v", tok)
+ }
+ if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
+ t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
+ }
+ if tok.TokenType != "bearer" {
+ t.Errorf("Unexpected token type, %#v.", tok.TokenType)
+ }
+ scope := tok.Extra("scope")
+ if scope != "user" {
+ t.Errorf("Unexpected value for scope: %v", scope)
+ }
+}
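+
+// For reference, an illustrative sketch (not part of the upstream tests) of the
+// three-legged flow this test exercises, using the same Config helpers:
+//
+//     conf := newConf("https://provider.example.com")
+//     authURL := conf.AuthCodeURL("state")         // 1. redirect the user here
+//     tok, err := conf.Exchange(NoContext, "code") // 2. swap the returned code for a token
+//     if err == nil {
+//         client := conf.Client(NoContext, tok)    // 3. authenticated HTTP client
+//         _, _ = client, authURL
+//     }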
+
+func TestExchangeRequest_JSONResponse(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.String() != "/token" {
+ t.Errorf("Unexpected exchange request URL, %v is found.", r.URL)
+ }
+ headerAuth := r.Header.Get("Authorization")
+ if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" {
+ t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
+ }
+ headerContentType := r.Header.Get("Content-Type")
+ if headerContentType != "application/x-www-form-urlencoded" {
+ t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
+ }
+ body, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Errorf("Failed reading request body: %s.", err)
+ }
+ if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" {
+ t.Errorf("Unexpected exchange payload, %v is found.", string(body))
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{"access_token": "90d64460d14870c08c81352a05dedd3465940a7c", "scope": "user", "token_type": "bearer", "expires_in": 86400}`))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ tok, err := conf.Exchange(NoContext, "exchange-code")
+ if err != nil {
+ t.Error(err)
+ }
+ if !tok.Valid() {
+ t.Fatalf("Token invalid. Got: %#v", tok)
+ }
+ if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
+ t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
+ }
+ if tok.TokenType != "bearer" {
+ t.Errorf("Unexpected token type, %#v.", tok.TokenType)
+ }
+ scope := tok.Extra("scope")
+ if scope != "user" {
+ t.Errorf("Unexpected value for scope: %v", scope)
+ }
+ expiresIn := tok.Extra("expires_in")
+ if expiresIn != float64(86400) {
+ t.Errorf("Unexpected non-numeric value for expires_in: %v", expiresIn)
+ }
+}
+
+func TestExtraValueRetrieval(t *testing.T) {
+ values := url.Values{}
+
+ kvmap := map[string]string{
+ "scope": "user", "token_type": "bearer", "expires_in": "86400.92",
+ "server_time": "1443571905.5606415", "referer_ip": "10.0.0.1",
+ "etag": "\"afZYj912P4alikMz_P11982\"", "request_id": "86400",
+ "untrimmed": " untrimmed ",
+ }
+
+ for key, value := range kvmap {
+ values.Set(key, value)
+ }
+
+ tok := Token{
+ raw: values,
+ }
+
+ scope := tok.Extra("scope")
+ if scope != "user" {
+ t.Errorf("Unexpected scope %v wanted \"user\"", scope)
+ }
+ serverTime := tok.Extra("server_time")
+ if serverTime != 1443571905.5606415 {
+ t.Errorf("Unexpected non-float64 value for server_time: %v", serverTime)
+ }
+ refererIp := tok.Extra("referer_ip")
+ if refererIp != "10.0.0.1" {
+ t.Errorf("Unexpected non-string value for referer_ip: %v", refererIp)
+ }
+ expires_in := tok.Extra("expires_in")
+ if expires_in != 86400.92 {
+ t.Errorf("Unexpected value for expires_in, wanted 86400 got %v", expires_in)
+ }
+ requestId := tok.Extra("request_id")
+ if requestId != int64(86400) {
+ t.Errorf("Unexpected non-int64 value for request_id: %v", requestId)
+ }
+ untrimmed := tok.Extra("untrimmed")
+ if untrimmed != " untrimmed " {
+ t.Errorf("Unexpected value for untrimmed, got %q expected \" untrimmed \"", untrimmed)
+ }
+}
+
+const day = 24 * time.Hour
+
+func TestExchangeRequest_JSONResponse_Expiry(t *testing.T) {
+ seconds := int32(day.Seconds())
+ jsonNumberType := reflect.TypeOf(json.Number("0"))
+ for _, c := range []struct {
+ expires string
+ expect error
+ }{
+ {fmt.Sprintf(`"expires_in": %d`, seconds), nil},
+ {fmt.Sprintf(`"expires_in": "%d"`, seconds), nil}, // PayPal case
+ {fmt.Sprintf(`"expires": %d`, seconds), nil}, // Facebook case
+ {`"expires": false`, &json.UnmarshalTypeError{Value: "bool", Type: jsonNumberType}}, // wrong type
+ {`"expires": {}`, &json.UnmarshalTypeError{Value: "object", Type: jsonNumberType}}, // wrong type
+ {`"expires": "zzz"`, &strconv.NumError{Func: "ParseInt", Num: "zzz", Err: strconv.ErrSyntax}}, // wrong value
+ } {
+ testExchangeRequest_JSONResponse_expiry(t, c.expires, c.expect)
+ }
+}
+
+func testExchangeRequest_JSONResponse_expiry(t *testing.T, exp string, expect error) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(fmt.Sprintf(`{"access_token": "90d", "scope": "user", "token_type": "bearer", %s}`, exp)))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ t1 := time.Now().Add(day)
+ tok, err := conf.Exchange(NoContext, "exchange-code")
+ t2 := time.Now().Add(day)
+ // Do a fmt.Sprint comparison so either side can be
+ // nil. fmt.Sprint just stringifies them to "", and no
+ // non-nil expected error ever stringifies as "", so this
+ // isn't terribly disgusting. We do this because Go 1.4 and
+ // Go 1.5 return a different deep value for
+ // json.UnmarshalTypeError. In Go 1.5, the
+ // json.UnmarshalTypeError contains a new field with a new
+ // non-zero value. Rather than ignore it here with reflect or
+ // add new files and +build tags, just look at the strings.
+ if fmt.Sprint(err) != fmt.Sprint(expect) {
+ t.Errorf("Error = %v; want %v", err, expect)
+ }
+ if err != nil {
+ return
+ }
+ if !tok.Valid() {
+ t.Fatalf("Token invalid. Got: %#v", tok)
+ }
+ expiry := tok.Expiry
+ if expiry.Before(t1) || expiry.After(t2) {
+ t.Errorf("Unexpected value for Expiry: %v (shold be between %v and %v)", expiry, t1, t2)
+ }
+}
+
+func TestExchangeRequest_BadResponse(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ tok, err := conf.Exchange(NoContext, "code")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if tok.AccessToken != "" {
+ t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
+ }
+}
+
+func TestExchangeRequest_BadResponseType(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ _, err := conf.Exchange(NoContext, "exchange-code")
+ if err == nil {
+ t.Error("expected error from invalid access_token type")
+ }
+}
+
+func TestExchangeRequest_NonBasicAuth(t *testing.T) {
+ tr := &mockTransport{
+ rt: func(r *http.Request) (w *http.Response, err error) {
+ headerAuth := r.Header.Get("Authorization")
+ if headerAuth != "" {
+ t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
+ }
+ return nil, errors.New("no response")
+ },
+ }
+ c := &http.Client{Transport: tr}
+ conf := &Config{
+ ClientID: "CLIENT_ID",
+ Endpoint: Endpoint{
+ AuthURL: "https://accounts.google.com/auth",
+ TokenURL: "https://accounts.google.com/token",
+ },
+ }
+
+ ctx := context.WithValue(context.Background(), HTTPClient, c)
+ conf.Exchange(ctx, "code")
+}
+
+func TestPasswordCredentialsTokenRequest(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defer r.Body.Close()
+ expected := "/token"
+ if r.URL.String() != expected {
+ t.Errorf("URL = %q; want %q", r.URL, expected)
+ }
+ headerAuth := r.Header.Get("Authorization")
+ expected = "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ="
+ if headerAuth != expected {
+ t.Errorf("Authorization header = %q; want %q", headerAuth, expected)
+ }
+ headerContentType := r.Header.Get("Content-Type")
+ expected = "application/x-www-form-urlencoded"
+ if headerContentType != expected {
+ t.Errorf("Content-Type header = %q; want %q", headerContentType, expected)
+ }
+ body, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Errorf("Failed reading request body: %s.", err)
+ }
+ expected = "client_id=CLIENT_ID&grant_type=password&password=password1&scope=scope1+scope2&username=user1"
+ if string(body) != expected {
+ t.Errorf("res.Body = %q; want %q", string(body), expected)
+ }
+ w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
+ w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer"))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ tok, err := conf.PasswordCredentialsToken(NoContext, "user1", "password1")
+ if err != nil {
+ t.Error(err)
+ }
+ if !tok.Valid() {
+ t.Fatalf("Token invalid. Got: %#v", tok)
+ }
+ expected := "90d64460d14870c08c81352a05dedd3465940a7c"
+ if tok.AccessToken != expected {
+ t.Errorf("AccessToken = %q; want %q", tok.AccessToken, expected)
+ }
+ expected = "bearer"
+ if tok.TokenType != expected {
+ t.Errorf("TokenType = %q; want %q", tok.TokenType, expected)
+ }
+}
+
+func TestTokenRefreshRequest(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.String() == "/somethingelse" {
+ return
+ }
+ if r.URL.String() != "/token" {
+ t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL)
+ }
+ headerContentType := r.Header.Get("Content-Type")
+ if headerContentType != "application/x-www-form-urlencoded" {
+ t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
+ }
+ body, _ := ioutil.ReadAll(r.Body)
+ if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" {
+ t.Errorf("Unexpected refresh token payload, %v is found.", string(body))
+ }
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ c := conf.Client(NoContext, &Token{RefreshToken: "REFRESH_TOKEN"})
+ c.Get(ts.URL + "/somethingelse")
+}
+
+func TestFetchWithNoRefreshToken(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.String() == "/somethingelse" {
+ return
+ }
+ if r.URL.String() != "/token" {
+ t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL)
+ }
+ headerContentType := r.Header.Get("Content-Type")
+ if headerContentType != "application/x-www-form-urlencoded" {
+ t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
+ }
+ body, _ := ioutil.ReadAll(r.Body)
+ if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" {
+ t.Errorf("Unexpected refresh token payload, %v is found.", string(body))
+ }
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ c := conf.Client(NoContext, nil)
+ _, err := c.Get(ts.URL + "/somethingelse")
+ if err == nil {
+ t.Errorf("Fetch should return an error if no refresh token is set")
+ }
+}
+
+func TestRefreshToken_RefreshTokenReplacement(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{"access_token":"ACCESS TOKEN", "scope": "user", "token_type": "bearer", "refresh_token": "NEW REFRESH TOKEN"}`))
+ return
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ tkr := tokenRefresher{
+ conf: conf,
+ ctx: NoContext,
+ refreshToken: "OLD REFRESH TOKEN",
+ }
+ tk, err := tkr.Token()
+ if err != nil {
+ t.Errorf("Unexpected refreshToken error returned: %v", err)
+ return
+ }
+ if tk.RefreshToken != tkr.refreshToken {
+ t.Errorf("tokenRefresher.refresh_token = %s; want %s", tkr.refreshToken, tk.RefreshToken)
+ }
+}
+
+func TestConfigClientWithToken(t *testing.T) {
+ tok := &Token{
+ AccessToken: "abc123",
+ }
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if got, want := r.Header.Get("Authorization"), fmt.Sprintf("Bearer %s", tok.AccessToken); got != want {
+ t.Errorf("Authorization header = %q; want %q", got, want)
+ }
+ return
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+
+ c := conf.Client(NoContext, tok)
+ req, err := http.NewRequest("GET", ts.URL, nil)
+ if err != nil {
+ t.Error(err)
+ }
+ _, err = c.Do(req)
+ if err != nil {
+ t.Error(err)
+ }
+}
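+
+// Illustrative note, not part of the upstream file: when a token is already in
+// hand, a client can also be built from a TokenSource directly, for example:
+//
+//     src := StaticTokenSource(&Token{AccessToken: "abc123"})
+//     client := NewClient(NoContext, src)
+//     _ = client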
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/token_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/token_test.go
new file mode 100644
index 000000000..80db83c29
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/token_test.go
@@ -0,0 +1,72 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "testing"
+ "time"
+)
+
+func TestTokenExtra(t *testing.T) {
+ type testCase struct {
+ key string
+ val interface{}
+ want interface{}
+ }
+ const key = "extra-key"
+ cases := []testCase{
+ {key: key, val: "abc", want: "abc"},
+ {key: key, val: 123, want: 123},
+ {key: key, val: "", want: ""},
+ {key: "other-key", val: "def", want: nil},
+ }
+ for _, tc := range cases {
+ extra := make(map[string]interface{})
+ extra[tc.key] = tc.val
+ tok := &Token{raw: extra}
+ if got, want := tok.Extra(key), tc.want; got != want {
+ t.Errorf("Extra(%q) = %q; want %q", key, got, want)
+ }
+ }
+}
+
+func TestTokenExpiry(t *testing.T) {
+ now := time.Now()
+ cases := []struct {
+ name string
+ tok *Token
+ want bool
+ }{
+ {name: "12 seconds", tok: &Token{Expiry: now.Add(12 * time.Second)}, want: false},
+ {name: "10 seconds", tok: &Token{Expiry: now.Add(expiryDelta)}, want: true},
+ {name: "-1 hour", tok: &Token{Expiry: now.Add(-1 * time.Hour)}, want: true},
+ }
+ for _, tc := range cases {
+ if got, want := tc.tok.expired(), tc.want; got != want {
+ t.Errorf("expired (%q) = %v; want %v", tc.name, got, want)
+ }
+ }
+}
+
+func TestTokenTypeMethod(t *testing.T) {
+ cases := []struct {
+ name string
+ tok *Token
+ want string
+ }{
+ {name: "bearer-mixed_case", tok: &Token{TokenType: "beAREr"}, want: "Bearer"},
+ {name: "default-bearer", tok: &Token{}, want: "Bearer"},
+ {name: "basic", tok: &Token{TokenType: "basic"}, want: "Basic"},
+ {name: "basic-capitalized", tok: &Token{TokenType: "Basic"}, want: "Basic"},
+ {name: "mac", tok: &Token{TokenType: "mac"}, want: "MAC"},
+ {name: "mac-caps", tok: &Token{TokenType: "MAC"}, want: "MAC"},
+ {name: "mac-mixed_case", tok: &Token{TokenType: "mAc"}, want: "MAC"},
+ }
+ for _, tc := range cases {
+ if got, want := tc.tok.Type(), tc.want; got != want {
+ t.Errorf("TokenType(%q) = %v; want %v", tc.name, got, want)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go
new file mode 100644
index 000000000..35cb25ed5
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go
@@ -0,0 +1,86 @@
+package oauth2
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+)
+
+type tokenSource struct{ token *Token }
+
+func (t *tokenSource) Token() (*Token, error) {
+ return t.token, nil
+}
+
+func TestTransportTokenSource(t *testing.T) {
+ ts := &tokenSource{
+ token: &Token{
+ AccessToken: "abc",
+ },
+ }
+ tr := &Transport{
+ Source: ts,
+ }
+ server := newMockServer(func(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("Authorization") != "Bearer abc" {
+ t.Errorf("Transport doesn't set the Authorization header from the fetched token")
+ }
+ })
+ defer server.Close()
+ client := http.Client{Transport: tr}
+ client.Get(server.URL)
+}
+
+// Test for case-sensitive token types, per https://github.com/golang/oauth2/issues/113
+func TestTransportTokenSourceTypes(t *testing.T) {
+ const val = "abc"
+ tests := []struct {
+ key string
+ val string
+ want string
+ }{
+ {key: "bearer", val: val, want: "Bearer abc"},
+ {key: "mac", val: val, want: "MAC abc"},
+ {key: "basic", val: val, want: "Basic abc"},
+ }
+ for _, tc := range tests {
+ ts := &tokenSource{
+ token: &Token{
+ AccessToken: tc.val,
+ TokenType: tc.key,
+ },
+ }
+ tr := &Transport{
+ Source: ts,
+ }
+ server := newMockServer(func(w http.ResponseWriter, r *http.Request) {
+ if got, want := r.Header.Get("Authorization"), tc.want; got != want {
+ t.Errorf("Authorization header (%q) = %q; want %q", val, got, want)
+ }
+ })
+ defer server.Close()
+ client := http.Client{Transport: tr}
+ client.Get(server.URL)
+ }
+}
+
+func TestTokenValidNoAccessToken(t *testing.T) {
+ token := &Token{}
+ if token.Valid() {
+ t.Errorf("Token should not be valid with no access token")
+ }
+}
+
+func TestExpiredWithExpiry(t *testing.T) {
+ token := &Token{
+ Expiry: time.Now().Add(-5 * time.Hour),
+ }
+ if token.Valid() {
+ t.Errorf("Token should not be valid if it expired in the past")
+ }
+}
+
+func newMockServer(handler func(w http.ResponseWriter, r *http.Request)) *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(handler))
+}
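+
+// Illustrative sketch, not part of the upstream tests: Transport also takes an
+// optional Base RoundTripper that performs the request once the Authorization
+// header has been set from the Source.
+//
+//     tr := &Transport{
+//         Source: &tokenSource{token: &Token{AccessToken: "abc"}},
+//         Base:   http.DefaultTransport,
+//     }
+//     client := &http.Client{Transport: tr}
+//     _ = client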
diff --git a/Godeps/_workspace/src/gopkg.in/asn1-ber.v1/ber_test.go b/Godeps/_workspace/src/gopkg.in/asn1-ber.v1/ber_test.go
new file mode 100644
index 000000000..bbd22db6d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/asn1-ber.v1/ber_test.go
@@ -0,0 +1,168 @@
+package ber
+
+import (
+ "bytes"
+ "math"
+
+ "io"
+ "testing"
+)
+
+func TestEncodeDecodeInteger(t *testing.T) {
+ for _, v := range []int64{0, 10, 128, 1024, math.MaxInt64, -1, -100, -128, -1024, math.MinInt64} {
+ enc := encodeInteger(v)
+ dec, err := parseInt64(enc)
+ if err != nil {
+ t.Fatalf("Error decoding %d : %s", v, err)
+ }
+ if v != dec {
+ t.Error("TestEncodeDecodeInteger failed for %d (got %d)", v, dec)
+ }
+
+ }
+}
+
+func TestBoolean(t *testing.T) {
+ var value bool = true
+
+ packet := NewBoolean(ClassUniversal, TypePrimitive, TagBoolean, value, "first Packet, True")
+
+ newBoolean, ok := packet.Value.(bool)
+ if !ok || newBoolean != value {
+ t.Error("error during creating packet")
+ }
+
+ encodedPacket := packet.Bytes()
+
+ newPacket := DecodePacket(encodedPacket)
+
+ newBoolean, ok = newPacket.Value.(bool)
+ if !ok || newBoolean != value {
+ t.Error("error during decoding packet")
+ }
+
+}
+
+func TestInteger(t *testing.T) {
+ var value int64 = 10
+
+ packet := NewInteger(ClassUniversal, TypePrimitive, TagInteger, value, "Integer, 10")
+
+ {
+ newInteger, ok := packet.Value.(int64)
+ if !ok || newInteger != value {
+ t.Error("error creating packet")
+ }
+ }
+
+ encodedPacket := packet.Bytes()
+
+ newPacket := DecodePacket(encodedPacket)
+
+ {
+ newInteger, ok := newPacket.Value.(int64)
+ if !ok || int64(newInteger) != value {
+ t.Error("error decoding packet")
+ }
+ }
+}
+
+func TestString(t *testing.T) {
+ var value string = "Hic sunt dracones"
+
+ packet := NewString(ClassUniversal, TypePrimitive, TagOctetString, value, "String")
+
+ newValue, ok := packet.Value.(string)
+ if !ok || newValue != value {
+ t.Error("error during creating packet")
+ }
+
+ encodedPacket := packet.Bytes()
+
+ newPacket := DecodePacket(encodedPacket)
+
+ newValue, ok = newPacket.Value.(string)
+ if !ok || newValue != value {
+ t.Error("error during decoding packet")
+ }
+
+}
+
+func TestSequenceAndAppendChild(t *testing.T) {
+
+ values := []string{
+ "HIC SVNT LEONES",
+ "Iñtërnâtiônàlizætiøn",
+ "Terra Incognita",
+ }
+
+ sequence := NewSequence("a sequence")
+ for _, s := range values {
+ sequence.AppendChild(NewString(ClassUniversal, TypePrimitive, TagOctetString, s, "String"))
+ }
+
+ if len(sequence.Children) != len(values) {
+ t.Errorf("wrong length for children array should be %d, got %d", len(values), len(sequence.Children))
+ }
+
+ encodedSequence := sequence.Bytes()
+
+ decodedSequence := DecodePacket(encodedSequence)
+ if len(decodedSequence.Children) != len(values) {
+ t.Errorf("wrong length for children array should be %d => %d", len(values), len(decodedSequence.Children))
+ }
+
+ for i, s := range values {
+ if decodedSequence.Children[i].Value.(string) != s {
+ t.Errorf("expected %d to be %q, got %q", i, s, decodedSequence.Children[i].Value.(string))
+ }
+ }
+}
+
+func TestReadPacket(t *testing.T) {
+ packet := NewString(ClassUniversal, TypePrimitive, TagOctetString, "Ad impossibilia nemo tenetur", "string")
+ var buffer io.ReadWriter
+ buffer = new(bytes.Buffer)
+
+ buffer.Write(packet.Bytes())
+
+ newPacket, err := ReadPacket(buffer)
+ if err != nil {
+ t.Error("error during ReadPacket", err)
+ }
+ newPacket.ByteValue = nil
+ if !bytes.Equal(newPacket.ByteValue, packet.ByteValue) {
+ t.Error("packets should be the same")
+ }
+}
+
+func TestBinaryInteger(t *testing.T) {
+ // data src : http://luca.ntop.org/Teaching/Appunti/asn1.html 5.7
+ var data = []struct {
+ v int64
+ e []byte
+ }{
+ {v: 0, e: []byte{0x02, 0x01, 0x00}},
+ {v: 127, e: []byte{0x02, 0x01, 0x7F}},
+ {v: 128, e: []byte{0x02, 0x02, 0x00, 0x80}},
+ {v: 256, e: []byte{0x02, 0x02, 0x01, 0x00}},
+ {v: -128, e: []byte{0x02, 0x01, 0x80}},
+ {v: -129, e: []byte{0x02, 0x02, 0xFF, 0x7F}},
+ {v: math.MaxInt64, e: []byte{0x02, 0x08, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}},
+ {v: math.MinInt64, e: []byte{0x02, 0x08, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
+ }
+
+ for _, d := range data {
+ if b := NewInteger(ClassUniversal, TypePrimitive, TagInteger, int64(d.v), "").Bytes(); !bytes.Equal(d.e, b) {
+ t.Errorf("Wrong binary generated for %d : got % X, expected % X", d.v, b, d.e)
+ }
+ }
+}
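+
+// Descriptive note, not part of the upstream file: each expected encoding above
+// follows the BER TLV layout, a tag octet (0x02 for INTEGER), a length octet,
+// and the minimal big-endian two's-complement content octets. For example, 128
+// needs a leading 0x00 so it is not read back as -128: 0x02 0x02 0x00 0x80.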
+
+func TestBinaryOctetString(t *testing.T) {
+ // data src : http://luca.ntop.org/Teaching/Appunti/asn1.html 5.10
+
+ if !bytes.Equal([]byte{0x04, 0x08, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, NewString(ClassUniversal, TypePrimitive, TagOctetString, "\x01\x23\x45\x67\x89\xab\xcd\xef", "").Bytes()) {
+ t.Error("wrong binary generated")
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/asn1-ber.v1/header_test.go b/Godeps/_workspace/src/gopkg.in/asn1-ber.v1/header_test.go
new file mode 100644
index 000000000..cac1e2e2b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/asn1-ber.v1/header_test.go
@@ -0,0 +1,135 @@
+package ber
+
+import (
+ "bytes"
+ "io"
+ "testing"
+)
+
+func TestReadHeader(t *testing.T) {
+ testcases := map[string]struct {
+ Data []byte
+ ExpectedIdentifier Identifier
+ ExpectedLength int
+ ExpectedBytesRead int
+ ExpectedError string
+ }{
+ "empty": {
+ Data: []byte{},
+ ExpectedIdentifier: Identifier{},
+ ExpectedLength: 0,
+ ExpectedBytesRead: 0,
+ ExpectedError: io.ErrUnexpectedEOF.Error(),
+ },
+
+ "valid short form": {
+ Data: []byte{
+ byte(ClassUniversal) | byte(TypePrimitive) | byte(TagCharacterString),
+ 127,
+ },
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypePrimitive,
+ Tag: TagCharacterString,
+ },
+ ExpectedLength: 127,
+ ExpectedBytesRead: 2,
+ ExpectedError: "",
+ },
+
+ "valid long form": {
+ Data: []byte{
+ // 2-byte encoding of tag
+ byte(ClassUniversal) | byte(TypePrimitive) | byte(HighTag),
+ byte(TagCharacterString),
+
+ // 2-byte encoding of length
+ LengthLongFormBitmask | 1,
+ 127,
+ },
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypePrimitive,
+ Tag: TagCharacterString,
+ },
+ ExpectedLength: 127,
+ ExpectedBytesRead: 4,
+ ExpectedError: "",
+ },
+
+ "valid indefinite length": {
+ Data: []byte{
+ byte(ClassUniversal) | byte(TypeConstructed) | byte(TagCharacterString),
+ LengthLongFormBitmask,
+ },
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: TagCharacterString,
+ },
+ ExpectedLength: LengthIndefinite,
+ ExpectedBytesRead: 2,
+ ExpectedError: "",
+ },
+
+ "invalid indefinite length": {
+ Data: []byte{
+ byte(ClassUniversal) | byte(TypePrimitive) | byte(TagCharacterString),
+ LengthLongFormBitmask,
+ },
+ ExpectedIdentifier: Identifier{},
+ ExpectedLength: 0,
+ ExpectedBytesRead: 2,
+ ExpectedError: "indefinite length used with primitive type",
+ },
+ }
+
+ for k, tc := range testcases {
+ reader := bytes.NewBuffer(tc.Data)
+ identifier, length, read, err := readHeader(reader)
+
+ if err != nil {
+ if tc.ExpectedError == "" {
+ t.Errorf("%s: unexpected error: %v", k, err)
+ } else if err.Error() != tc.ExpectedError {
+ t.Errorf("%s: expected error %v, got %v", k, tc.ExpectedError, err)
+ }
+ } else if tc.ExpectedError != "" {
+ t.Errorf("%s: expected error %v, got none", k, tc.ExpectedError)
+ continue
+ }
+
+ if read != tc.ExpectedBytesRead {
+ t.Errorf("%s: expected read %d, got %d", k, tc.ExpectedBytesRead, read)
+ }
+
+ if identifier.ClassType != tc.ExpectedIdentifier.ClassType {
+ t.Errorf("%s: expected class type %d (%s), got %d (%s)", k,
+ tc.ExpectedIdentifier.ClassType,
+ ClassMap[tc.ExpectedIdentifier.ClassType],
+ identifier.ClassType,
+ ClassMap[identifier.ClassType],
+ )
+ }
+ if identifier.TagType != tc.ExpectedIdentifier.TagType {
+ t.Errorf("%s: expected tag type %d (%s), got %d (%s)", k,
+ tc.ExpectedIdentifier.TagType,
+ TypeMap[tc.ExpectedIdentifier.TagType],
+ identifier.TagType,
+ TypeMap[identifier.TagType],
+ )
+ }
+ if identifier.Tag != tc.ExpectedIdentifier.Tag {
+ t.Errorf("%s: expected tag %d (%s), got %d (%s)", k,
+ tc.ExpectedIdentifier.Tag,
+ tagMap[tc.ExpectedIdentifier.Tag],
+ identifier.Tag,
+ tagMap[identifier.Tag],
+ )
+ }
+
+ if length != tc.ExpectedLength {
+ t.Errorf("%s: expected length %d, got %d", k, tc.ExpectedLength, length)
+ }
+ }
+}
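+
+// Descriptive note, not part of the upstream file: a BER header is the
+// identifier octets followed by the length octets, so ExpectedBytesRead is the
+// sum of the two. The "invalid indefinite length" case reflects the rule that
+// indefinite length (a bare length octet of 0x80) is only permitted for
+// constructed encodings.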
diff --git a/Godeps/_workspace/src/gopkg.in/asn1-ber.v1/identifier_test.go b/Godeps/_workspace/src/gopkg.in/asn1-ber.v1/identifier_test.go
new file mode 100644
index 000000000..7169362e2
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/asn1-ber.v1/identifier_test.go
@@ -0,0 +1,344 @@
+package ber
+
+import (
+ "bytes"
+ "io"
+ "math"
+ "testing"
+)
+
+func TestReadIdentifier(t *testing.T) {
+ testcases := map[string]struct {
+ Data []byte
+
+ ExpectedIdentifier Identifier
+ ExpectedBytesRead int
+ ExpectedError string
+ }{
+ "empty": {
+ Data: []byte{},
+ ExpectedBytesRead: 0,
+ ExpectedError: io.ErrUnexpectedEOF.Error(),
+ },
+
+ "universal primitive eoc": {
+ Data: []byte{byte(ClassUniversal) | byte(TypePrimitive) | byte(TagEOC)},
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypePrimitive,
+ Tag: TagEOC,
+ },
+ ExpectedBytesRead: 1,
+ },
+ "universal primitive character string": {
+ Data: []byte{byte(ClassUniversal) | byte(TypePrimitive) | byte(TagCharacterString)},
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypePrimitive,
+ Tag: TagCharacterString,
+ },
+ ExpectedBytesRead: 1,
+ },
+
+ "universal constructed bit string": {
+ Data: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(TagBitString)},
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: TagBitString,
+ },
+ ExpectedBytesRead: 1,
+ },
+ "universal constructed character string": {
+ Data: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(TagCharacterString)},
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: TagCharacterString,
+ },
+ ExpectedBytesRead: 1,
+ },
+
+ "application constructed object descriptor": {
+ Data: []byte{byte(ClassApplication) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassApplication,
+ TagType: TypeConstructed,
+ Tag: TagObjectDescriptor,
+ },
+ ExpectedBytesRead: 1,
+ },
+ "context constructed object descriptor": {
+ Data: []byte{byte(ClassContext) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassContext,
+ TagType: TypeConstructed,
+ Tag: TagObjectDescriptor,
+ },
+ ExpectedBytesRead: 1,
+ },
+ "private constructed object descriptor": {
+ Data: []byte{byte(ClassPrivate) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassPrivate,
+ TagType: TypeConstructed,
+ Tag: TagObjectDescriptor,
+ },
+ ExpectedBytesRead: 1,
+ },
+
+ "high-tag-number tag missing bytes": {
+ Data: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag)},
+ ExpectedError: io.ErrUnexpectedEOF.Error(),
+ ExpectedBytesRead: 1,
+ },
+ "high-tag-number tag invalid first byte": {
+ Data: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag), 0x0},
+ ExpectedError: "invalid first high-tag-number tag byte",
+ ExpectedBytesRead: 2,
+ },
+ "high-tag-number tag invalid first byte with continue bit": {
+ Data: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag), byte(HighTagContinueBitmask)},
+ ExpectedError: "invalid first high-tag-number tag byte",
+ ExpectedBytesRead: 2,
+ },
+ "high-tag-number tag continuation missing bytes": {
+ Data: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag), byte(HighTagContinueBitmask | 0x1)},
+ ExpectedError: io.ErrUnexpectedEOF.Error(),
+ ExpectedBytesRead: 2,
+ },
+ "high-tag-number tag overflow": {
+ Data: []byte{
+ byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
+ byte(HighTagContinueBitmask | 0x1),
+ byte(HighTagContinueBitmask | 0x1),
+ byte(HighTagContinueBitmask | 0x1),
+ byte(HighTagContinueBitmask | 0x1),
+ byte(HighTagContinueBitmask | 0x1),
+ byte(HighTagContinueBitmask | 0x1),
+ byte(HighTagContinueBitmask | 0x1),
+ byte(HighTagContinueBitmask | 0x1),
+ byte(HighTagContinueBitmask | 0x1),
+ byte(0x1),
+ },
+ ExpectedError: "high-tag-number tag overflow",
+ ExpectedBytesRead: 11,
+ },
+ "max high-tag-number tag": {
+ Data: []byte{
+ byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(0x7f),
+ },
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: Tag(0x7FFFFFFFFFFFFFFF), // 01111111...(63)...11111b
+ },
+ ExpectedBytesRead: 10,
+ },
+ "high-tag-number encoding of low-tag value": {
+ Data: []byte{
+ byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
+ byte(TagObjectDescriptor),
+ },
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: TagObjectDescriptor,
+ },
+ ExpectedBytesRead: 2,
+ },
+ "max high-tag-number tag ignores extra data": {
+ Data: []byte{
+ byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(0x7f),
+ byte(0x01), // extra data, shouldn't be read
+ byte(0x02), // extra data, shouldn't be read
+ byte(0x03), // extra data, shouldn't be read
+ },
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: Tag(0x7FFFFFFFFFFFFFFF), // 01111111...(63)...11111b
+ },
+ ExpectedBytesRead: 10,
+ },
+ }
+
+ for k, tc := range testcases {
+ reader := bytes.NewBuffer(tc.Data)
+ identifier, read, err := readIdentifier(reader)
+
+ if err != nil {
+ if tc.ExpectedError == "" {
+ t.Errorf("%s: unexpected error: %v", k, err)
+ } else if err.Error() != tc.ExpectedError {
+ t.Errorf("%s: expected error %v, got %v", k, tc.ExpectedError, err)
+ }
+ } else if tc.ExpectedError != "" {
+ t.Errorf("%s: expected error %v, got none", k, tc.ExpectedError)
+ continue
+ }
+
+ if read != tc.ExpectedBytesRead {
+ t.Errorf("%s: expected read %d, got %d", k, tc.ExpectedBytesRead, read)
+ }
+
+ if identifier.ClassType != tc.ExpectedIdentifier.ClassType {
+ t.Errorf("%s: expected class type %d (%s), got %d (%s)", k,
+ tc.ExpectedIdentifier.ClassType,
+ ClassMap[tc.ExpectedIdentifier.ClassType],
+ identifier.ClassType,
+ ClassMap[identifier.ClassType],
+ )
+ }
+ if identifier.TagType != tc.ExpectedIdentifier.TagType {
+ t.Errorf("%s: expected tag type %d (%s), got %d (%s)", k,
+ tc.ExpectedIdentifier.TagType,
+ TypeMap[tc.ExpectedIdentifier.TagType],
+ identifier.TagType,
+ TypeMap[identifier.TagType],
+ )
+ }
+ if identifier.Tag != tc.ExpectedIdentifier.Tag {
+ t.Errorf("%s: expected tag %d (%s), got %d (%s)", k,
+ tc.ExpectedIdentifier.Tag,
+ tagMap[tc.ExpectedIdentifier.Tag],
+ identifier.Tag,
+ tagMap[identifier.Tag],
+ )
+ }
+ }
+}
+
+func TestEncodeIdentifier(t *testing.T) {
+ testcases := map[string]struct {
+ Identifier Identifier
+ ExpectedBytes []byte
+ }{
+ "universal primitive eoc": {
+ Identifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypePrimitive,
+ Tag: TagEOC,
+ },
+ ExpectedBytes: []byte{byte(ClassUniversal) | byte(TypePrimitive) | byte(TagEOC)},
+ },
+ "universal primitive character string": {
+ Identifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypePrimitive,
+ Tag: TagCharacterString,
+ },
+ ExpectedBytes: []byte{byte(ClassUniversal) | byte(TypePrimitive) | byte(TagCharacterString)},
+ },
+
+ "universal constructed bit string": {
+ Identifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: TagBitString,
+ },
+ ExpectedBytes: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(TagBitString)},
+ },
+ "universal constructed character string": {
+ Identifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: TagCharacterString,
+ },
+ ExpectedBytes: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(TagCharacterString)},
+ },
+
+ "application constructed object descriptor": {
+ Identifier: Identifier{
+ ClassType: ClassApplication,
+ TagType: TypeConstructed,
+ Tag: TagObjectDescriptor,
+ },
+ ExpectedBytes: []byte{byte(ClassApplication) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
+ },
+ "context constructed object descriptor": {
+ Identifier: Identifier{
+ ClassType: ClassContext,
+ TagType: TypeConstructed,
+ Tag: TagObjectDescriptor,
+ },
+ ExpectedBytes: []byte{byte(ClassContext) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
+ },
+ "private constructed object descriptor": {
+ Identifier: Identifier{
+ ClassType: ClassPrivate,
+ TagType: TypeConstructed,
+ Tag: TagObjectDescriptor,
+ },
+ ExpectedBytes: []byte{byte(ClassPrivate) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
+ },
+
+ "max low-tag-number tag": {
+ Identifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: TagBMPString,
+ },
+ ExpectedBytes: []byte{
+ byte(ClassUniversal) | byte(TypeConstructed) | byte(TagBMPString),
+ },
+ },
+
+ "min high-tag-number tag": {
+ Identifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: TagBMPString + 1,
+ },
+ ExpectedBytes: []byte{
+ byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
+ byte(TagBMPString + 1),
+ },
+ },
+
+ "max high-tag-number tag": {
+ Identifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: Tag(math.MaxInt64),
+ },
+ ExpectedBytes: []byte{
+ byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(0x7f),
+ },
+ },
+ }
+
+ for k, tc := range testcases {
+ b := encodeIdentifier(tc.Identifier)
+ if bytes.Compare(tc.ExpectedBytes, b) != 0 {
+ t.Errorf("%s: Expected\n\t%#v\ngot\n\t%#v", k, tc.ExpectedBytes, b)
+ }
+ }
+}
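+
+// Descriptive note, not part of the upstream file: tags above TagBMPString use
+// the high-tag-number form seen in the cases above. The low five bits of the
+// first identifier octet are all ones (HighTag) and the tag value follows in
+// base-128, most significant group first, with HighTagContinueBitmask set on
+// every octet except the last; nine 7-bit groups cover Tag(math.MaxInt64).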
diff --git a/Godeps/_workspace/src/gopkg.in/asn1-ber.v1/length_test.go b/Godeps/_workspace/src/gopkg.in/asn1-ber.v1/length_test.go
new file mode 100644
index 000000000..afe0e8037
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/asn1-ber.v1/length_test.go
@@ -0,0 +1,158 @@
+package ber
+
+import (
+ "bytes"
+ "io"
+ "math"
+ "testing"
+)
+
+func TestReadLength(t *testing.T) {
+ testcases := map[string]struct {
+ Data []byte
+
+ ExpectedLength int
+ ExpectedBytesRead int
+ ExpectedError string
+ }{
+ "empty": {
+ Data: []byte{},
+ ExpectedBytesRead: 0,
+ ExpectedError: io.ErrUnexpectedEOF.Error(),
+ },
+ "invalid first byte": {
+ Data: []byte{0xFF},
+ ExpectedBytesRead: 1,
+ ExpectedError: "invalid length byte 0xff",
+ },
+
+ "indefinite form": {
+ Data: []byte{LengthLongFormBitmask},
+ ExpectedLength: LengthIndefinite,
+ ExpectedBytesRead: 1,
+ },
+
+ "short-definite-form zero length": {
+ Data: []byte{0},
+ ExpectedLength: 0,
+ ExpectedBytesRead: 1,
+ },
+ "short-definite-form length 1": {
+ Data: []byte{1},
+ ExpectedLength: 1,
+ ExpectedBytesRead: 1,
+ },
+ "short-definite-form max length": {
+ Data: []byte{127},
+ ExpectedLength: 127,
+ ExpectedBytesRead: 1,
+ },
+
+ "long-definite-form missing bytes": {
+ Data: []byte{LengthLongFormBitmask | 1},
+ ExpectedBytesRead: 1,
+ ExpectedError: io.ErrUnexpectedEOF.Error(),
+ },
+ "long-definite-form overflow": {
+ Data: []byte{LengthLongFormBitmask | 9},
+ ExpectedBytesRead: 1,
+ ExpectedError: "long-form length overflow",
+ },
+ "long-definite-form zero length": {
+ Data: []byte{LengthLongFormBitmask | 1, 0x0},
+ ExpectedLength: 0,
+ ExpectedBytesRead: 2,
+ },
+ "long-definite-form length 127": {
+ Data: []byte{LengthLongFormBitmask | 1, 127},
+ ExpectedLength: 127,
+ ExpectedBytesRead: 2,
+ },
+ "long-definite-form max length": {
+ Data: []byte{
+ LengthLongFormBitmask | 8,
+ 0x7F,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ },
+ ExpectedLength: math.MaxInt64,
+ ExpectedBytesRead: 9,
+ },
+ }
+
+ for k, tc := range testcases {
+ reader := bytes.NewBuffer(tc.Data)
+ length, read, err := readLength(reader)
+
+ if err != nil {
+ if tc.ExpectedError == "" {
+ t.Errorf("%s: unexpected error: %v", k, err)
+ } else if err.Error() != tc.ExpectedError {
+ t.Errorf("%s: expected error %v, got %v", k, tc.ExpectedError, err)
+ }
+ } else if tc.ExpectedError != "" {
+ t.Errorf("%s: expected error %v, got none", k, tc.ExpectedError)
+ continue
+ }
+
+ if read != tc.ExpectedBytesRead {
+ t.Errorf("%s: expected read %d, got %d", k, tc.ExpectedBytesRead, read)
+ }
+
+ if length != tc.ExpectedLength {
+ t.Errorf("%s: expected length %d, got %d", k, tc.ExpectedLength, length)
+ }
+ }
+}
+
+func TestEncodeLength(t *testing.T) {
+ testcases := map[string]struct {
+ Length int
+ ExpectedBytes []byte
+ }{
+ "0": {
+ Length: 0,
+ ExpectedBytes: []byte{0},
+ },
+ "1": {
+ Length: 1,
+ ExpectedBytes: []byte{1},
+ },
+
+ "max short-form length": {
+ Length: 127,
+ ExpectedBytes: []byte{127},
+ },
+ "min long-form length": {
+ Length: 128,
+ ExpectedBytes: []byte{LengthLongFormBitmask | 1, 128},
+ },
+
+ "max long-form length": {
+ Length: math.MaxInt64,
+ ExpectedBytes: []byte{
+ LengthLongFormBitmask | 8,
+ 0x7F,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ },
+ },
+ }
+
+ for k, tc := range testcases {
+ b := encodeLength(tc.Length)
+ if bytes.Compare(tc.ExpectedBytes, b) != 0 {
+ t.Errorf("%s: Expected\n\t%#v\ngot\n\t%#v", k, tc.ExpectedBytes, b)
+ }
+ }
+}
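+
+// Descriptive note, not part of the upstream file: lengths 0 through 127 use the
+// short definite form (a single octet holding the length). Longer lengths use
+// the long form: the first octet is LengthLongFormBitmask ORed with the number
+// of following length octets, and those octets hold the length big-endian, as
+// in the "min long-form length" and "max long-form length" cases above.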
diff --git a/Godeps/_workspace/src/gopkg.in/asn1-ber.v1/suite_test.go b/Godeps/_workspace/src/gopkg.in/asn1-ber.v1/suite_test.go
new file mode 100644
index 000000000..ace8e6705
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/asn1-ber.v1/suite_test.go
@@ -0,0 +1,182 @@
+package ber
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "testing"
+)
+
+var errEOF = io.ErrUnexpectedEOF.Error()
+
+// Tests from http://www.strozhevsky.com/free_docs/free_asn1_testsuite_descr.pdf
+// Source files and descriptions at http://www.strozhevsky.com/free_docs/TEST_SUITE.zip
+var testcases = []struct {
+ // File contains the path to the BER-encoded file
+ File string
+ // Error indicates whether a decoding error is expected
+ Error string
+ // AbnormalEncoding indicates whether a normalized re-encoding is expected to differ from the original source
+ AbnormalEncoding bool
+ // IndefiniteEncoding indicates the source file used indefinite-length encoding, so the re-encoding is expected to differ (since the length is known)
+ IndefiniteEncoding bool
+}{
+ // Common blocks
+ {File: "tests/tc1.ber", Error: "high-tag-number tag overflow"},
+ {File: "tests/tc2.ber", Error: errEOF},
+ {File: "tests/tc3.ber", Error: errEOF},
+ {File: "tests/tc4.ber", Error: "invalid length byte 0xff"},
+ {File: "tests/tc5.ber", Error: "", AbnormalEncoding: true},
+ // Real numbers (some expected failures are disabled until support is added)
+ {File: "tests/tc6.ber", Error: ""}, // Error: "REAL value +0 must be encoded with zero-length value block"},
+ {File: "tests/tc7.ber", Error: ""}, // Error: "REAL value -0 must be encoded as a special value"},
+ {File: "tests/tc8.ber", Error: ""},
+ {File: "tests/tc9.ber", Error: ""}, // Error: "Bits 6 and 5 of information octet for REAL are equal to 11"
+ {File: "tests/tc10.ber", Error: ""},
+ {File: "tests/tc11.ber", Error: ""}, // Error: "Incorrect NR form"
+ {File: "tests/tc12.ber", Error: ""}, // Error: "Encoding of "special value" not from ASN.1 standard"
+ {File: "tests/tc13.ber", Error: errEOF},
+ {File: "tests/tc14.ber", Error: errEOF},
+ {File: "tests/tc15.ber", Error: ""}, // Error: "Too big value of exponent"
+ {File: "tests/tc16.ber", Error: ""}, // Error: "Too big value of mantissa"
+ {File: "tests/tc17.ber", Error: ""}, // Error: "Too big values for exponent and mantissa + using of "scaling factor" value"
+ // Integers
+ {File: "tests/tc18.ber", Error: ""},
+ {File: "tests/tc19.ber", Error: errEOF},
+ {File: "tests/tc20.ber", Error: ""},
+ // Object identifiers
+ {File: "tests/tc21.ber", Error: ""},
+ {File: "tests/tc22.ber", Error: ""},
+ {File: "tests/tc23.ber", Error: errEOF},
+ {File: "tests/tc24.ber", Error: ""},
+ // Booleans
+ {File: "tests/tc25.ber", Error: ""},
+ {File: "tests/tc26.ber", Error: ""},
+ {File: "tests/tc27.ber", Error: errEOF},
+ {File: "tests/tc28.ber", Error: ""},
+ {File: "tests/tc29.ber", Error: ""},
+ // Null
+ {File: "tests/tc30.ber", Error: ""},
+ {File: "tests/tc31.ber", Error: errEOF},
+ {File: "tests/tc32.ber", Error: ""},
+ // Bitstring (some expected failures are disabled until support is added)
+ {File: "tests/tc33.ber", Error: ""}, // Error: "Too big value for "unused bits""
+ {File: "tests/tc34.ber", Error: errEOF},
+ {File: "tests/tc35.ber", Error: "", IndefiniteEncoding: true}, // Error: "Using of different from BIT STRING types as internal types for constructive encoding"
+ {File: "tests/tc36.ber", Error: "", IndefiniteEncoding: true}, // Error: "Using of "unused bits" in internal BIT STRINGs with constructive form of encoding"
+ {File: "tests/tc37.ber", Error: ""},
+ {File: "tests/tc38.ber", Error: "", IndefiniteEncoding: true},
+ {File: "tests/tc39.ber", Error: ""},
+ {File: "tests/tc40.ber", Error: ""},
+ // Octet string (some expected failures are disabled until support is added)
+ {File: "tests/tc41.ber", Error: "", IndefiniteEncoding: true}, // Error: "Using of different from OCTET STRING types as internal types for constructive encoding"
+ {File: "tests/tc42.ber", Error: errEOF},
+ {File: "tests/tc43.ber", Error: errEOF},
+ {File: "tests/tc44.ber", Error: ""},
+ {File: "tests/tc45.ber", Error: ""},
+ // Bitstring
+ {File: "tests/tc46.ber", Error: "indefinite length used with primitive type"},
+ {File: "tests/tc47.ber", Error: "eoc child not allowed with definite length"},
+ {File: "tests/tc48.ber", Error: "", IndefiniteEncoding: true}, // Error: "Using of more than 7 "unused bits" in BIT STRING with constrictive encoding form"
+}
+
+func TestSuiteDecodePacket(t *testing.T) {
+ // Debug = true
+ for _, tc := range testcases {
+ file := tc.File
+
+ dataIn, err := ioutil.ReadFile(file)
+ if err != nil {
+ t.Errorf("%s: %v", file, err)
+ continue
+ }
+
+ // fmt.Printf("%s: decode %d\n", file, len(dataIn))
+ packet, err := DecodePacketErr(dataIn)
+ if err != nil {
+ if tc.Error == "" {
+ t.Errorf("%s: unexpected error during DecodePacket: %v", file, err)
+ } else if tc.Error != err.Error() {
+ t.Errorf("%s: expected error %q during DecodePacket, got %q", file, tc.Error, err)
+ }
+ continue
+ }
+ if tc.Error != "" {
+ t.Errorf("%s: expected error %q, got none", file, tc.Error)
+ continue
+ }
+
+ dataOut := packet.Bytes()
+ if tc.AbnormalEncoding || tc.IndefiniteEncoding {
+ // Abnormal encodings and encodings that used indefinite length should re-encode differently
+ if bytes.Equal(dataOut, dataIn) {
+ t.Errorf("%s: data should have been re-encoded differently", file)
+ }
+ } else if !bytes.Equal(dataOut, dataIn) {
+ // Make sure the serialized data matches the source
+ t.Errorf("%s: data should be the same", file)
+ }
+
+ packet, err = DecodePacketErr(dataOut)
+ if err != nil {
+ t.Errorf("%s: unexpected error: %v", file, err)
+ continue
+ }
+
+ // Make sure the re-serialized data matches our original serialization
+ dataOut2 := packet.Bytes()
+ if !bytes.Equal(dataOut, dataOut2) {
+ t.Errorf("%s: data should be the same", file)
+ }
+ }
+}
+
+func TestSuiteReadPacket(t *testing.T) {
+ for _, tc := range testcases {
+ file := tc.File
+
+ dataIn, err := ioutil.ReadFile(file)
+ if err != nil {
+ t.Errorf("%s: %v", file, err)
+ continue
+ }
+
+ buffer := bytes.NewBuffer(dataIn)
+ packet, err := ReadPacket(buffer)
+ if err != nil {
+ if tc.Error == "" {
+ t.Errorf("%s: unexpected error during ReadPacket: %v", file, err)
+ } else if tc.Error != err.Error() {
+ t.Errorf("%s: expected error %q during ReadPacket, got %q", file, tc.Error, err)
+ }
+ continue
+ }
+ if tc.Error != "" {
+ t.Errorf("%s: expected error %q, got none", file, tc.Error)
+ continue
+ }
+
+ dataOut := packet.Bytes()
+ if tc.AbnormalEncoding || tc.IndefiniteEncoding {
+ // Abnormal encodings and encodings that used indefinite length should re-encode differently
+ if bytes.Equal(dataOut, dataIn) {
+ t.Errorf("%s: data should have been re-encoded differently", file)
+ }
+ } else if !bytes.Equal(dataOut, dataIn) {
+ // Make sure the serialized data matches the source
+ t.Errorf("%s: data should be the same", file)
+ }
+
+ packet, err = DecodePacketErr(dataOut)
+ if err != nil {
+ t.Errorf("%s: unexpected error: %v", file, err)
+ continue
+ }
+
+ // Make sure the re-serialized data matches our original serialization
+ dataOut2 := packet.Bytes()
+ if !bytes.Equal(dataOut, dataOut2) {
+ t.Errorf("%s: data should be the same", file)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/inf.v0/benchmark_test.go b/Godeps/_workspace/src/gopkg.in/inf.v0/benchmark_test.go
new file mode 100644
index 000000000..27071da0e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/inf.v0/benchmark_test.go
@@ -0,0 +1,210 @@
+package inf
+
+import (
+ "fmt"
+ "math/big"
+ "math/rand"
+ "sync"
+ "testing"
+)
+
+const maxcap = 1024 * 1024
+const bits = 256
+const maxscale = 32
+
+var once sync.Once
+
+var decInput [][2]Dec
+var intInput [][2]big.Int
+
+var initBench = func() {
+ decInput = make([][2]Dec, maxcap)
+ intInput = make([][2]big.Int, maxcap)
+ max := new(big.Int).Lsh(big.NewInt(1), bits)
+ r := rand.New(rand.NewSource(0))
+ for i := 0; i < cap(decInput); i++ {
+ decInput[i][0].SetUnscaledBig(new(big.Int).Rand(r, max)).
+ SetScale(Scale(r.Int31n(int32(2*maxscale-1)) - int32(maxscale)))
+ decInput[i][1].SetUnscaledBig(new(big.Int).Rand(r, max)).
+ SetScale(Scale(r.Int31n(int32(2*maxscale-1)) - int32(maxscale)))
+ }
+ for i := 0; i < cap(intInput); i++ {
+ intInput[i][0].Rand(r, max)
+ intInput[i][1].Rand(r, max)
+ }
+}
+
+func doBenchmarkDec1(b *testing.B, f func(z *Dec)) {
+ once.Do(initBench)
+ b.ResetTimer()
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ f(&decInput[i%maxcap][0])
+ }
+}
+
+func doBenchmarkDec2(b *testing.B, f func(x, y *Dec)) {
+ once.Do(initBench)
+ b.ResetTimer()
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ f(&decInput[i%maxcap][0], &decInput[i%maxcap][1])
+ }
+}
+
+func doBenchmarkInt1(b *testing.B, f func(z *big.Int)) {
+ once.Do(initBench)
+ b.ResetTimer()
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ f(&intInput[i%maxcap][0])
+ }
+}
+
+func doBenchmarkInt2(b *testing.B, f func(x, y *big.Int)) {
+ once.Do(initBench)
+ b.ResetTimer()
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ f(&intInput[i%maxcap][0], &intInput[i%maxcap][1])
+ }
+}
+
+func Benchmark_Dec_String(b *testing.B) {
+ doBenchmarkDec1(b, func(x *Dec) {
+ x.String()
+ })
+}
+
+func Benchmark_Dec_StringScan(b *testing.B) {
+ doBenchmarkDec1(b, func(x *Dec) {
+ s := x.String()
+ d := new(Dec)
+ fmt.Sscan(s, d)
+ })
+}
+
+func Benchmark_Dec_GobEncode(b *testing.B) {
+ doBenchmarkDec1(b, func(x *Dec) {
+ x.GobEncode()
+ })
+}
+
+func Benchmark_Dec_GobEnDecode(b *testing.B) {
+ doBenchmarkDec1(b, func(x *Dec) {
+ g, _ := x.GobEncode()
+ new(Dec).GobDecode(g)
+ })
+}
+
+func Benchmark_Dec_Add(b *testing.B) {
+ doBenchmarkDec2(b, func(x, y *Dec) {
+ ys := y.Scale()
+ y.SetScale(x.Scale())
+ _ = new(Dec).Add(x, y)
+ y.SetScale(ys)
+ })
+}
+
+func Benchmark_Dec_AddMixed(b *testing.B) {
+ doBenchmarkDec2(b, func(x, y *Dec) {
+ _ = new(Dec).Add(x, y)
+ })
+}
+
+func Benchmark_Dec_Sub(b *testing.B) {
+ doBenchmarkDec2(b, func(x, y *Dec) {
+ ys := y.Scale()
+ y.SetScale(x.Scale())
+ _ = new(Dec).Sub(x, y)
+ y.SetScale(ys)
+ })
+}
+
+func Benchmark_Dec_SubMixed(b *testing.B) {
+ doBenchmarkDec2(b, func(x, y *Dec) {
+ _ = new(Dec).Sub(x, y)
+ })
+}
+
+func Benchmark_Dec_Mul(b *testing.B) {
+ doBenchmarkDec2(b, func(x, y *Dec) {
+ _ = new(Dec).Mul(x, y)
+ })
+}
+
+func Benchmark_Dec_Mul_QuoExact(b *testing.B) {
+ doBenchmarkDec2(b, func(x, y *Dec) {
+ v := new(Dec).Mul(x, y)
+ _ = new(Dec).QuoExact(v, y)
+ })
+}
+
+func Benchmark_Dec_QuoRound_Fixed_Down(b *testing.B) {
+ doBenchmarkDec2(b, func(x, y *Dec) {
+ _ = new(Dec).QuoRound(x, y, 0, RoundDown)
+ })
+}
+
+func Benchmark_Dec_QuoRound_Fixed_HalfUp(b *testing.B) {
+ doBenchmarkDec2(b, func(x, y *Dec) {
+ _ = new(Dec).QuoRound(x, y, 0, RoundHalfUp)
+ })
+}
+
+func Benchmark_Int_String(b *testing.B) {
+ doBenchmarkInt1(b, func(x *big.Int) {
+ x.String()
+ })
+}
+
+func Benchmark_Int_StringScan(b *testing.B) {
+ doBenchmarkInt1(b, func(x *big.Int) {
+ s := x.String()
+ d := new(big.Int)
+ fmt.Sscan(s, d)
+ })
+}
+
+func Benchmark_Int_GobEncode(b *testing.B) {
+ doBenchmarkInt1(b, func(x *big.Int) {
+ x.GobEncode()
+ })
+}
+
+func Benchmark_Int_GobEnDecode(b *testing.B) {
+ doBenchmarkInt1(b, func(x *big.Int) {
+ g, _ := x.GobEncode()
+ new(big.Int).GobDecode(g)
+ })
+}
+
+func Benchmark_Int_Add(b *testing.B) {
+ doBenchmarkInt2(b, func(x, y *big.Int) {
+ _ = new(big.Int).Add(x, y)
+ })
+}
+
+func Benchmark_Int_Sub(b *testing.B) {
+ doBenchmarkInt2(b, func(x, y *big.Int) {
+ _ = new(big.Int).Sub(x, y)
+ })
+}
+
+func Benchmark_Int_Mul(b *testing.B) {
+ doBenchmarkInt2(b, func(x, y *big.Int) {
+ _ = new(big.Int).Mul(x, y)
+ })
+}
+
+func Benchmark_Int_Quo(b *testing.B) {
+ doBenchmarkInt2(b, func(x, y *big.Int) {
+ _ = new(big.Int).Quo(x, y)
+ })
+}
+
+func Benchmark_Int_QuoRem(b *testing.B) {
+ doBenchmarkInt2(b, func(x, y *big.Int) {
+ _, _ = new(big.Int).QuoRem(x, y, new(big.Int))
+ })
+}
diff --git a/Godeps/_workspace/src/gopkg.in/inf.v0/dec_go1_2_test.go b/Godeps/_workspace/src/gopkg.in/inf.v0/dec_go1_2_test.go
new file mode 100644
index 000000000..5df0f7b55
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/inf.v0/dec_go1_2_test.go
@@ -0,0 +1,33 @@
+// +build go1.2
+
+package inf
+
+import (
+ "encoding"
+ "encoding/json"
+ "testing"
+)
+
+var _ encoding.TextMarshaler = new(Dec)
+var _ encoding.TextUnmarshaler = new(Dec)
+
+type Obj struct {
+ Val *Dec
+}
+
+func TestDecJsonMarshalUnmarshal(t *testing.T) {
+ o := Obj{Val: NewDec(123, 2)}
+ js, err := json.Marshal(o)
+ if err != nil {
+ t.Fatalf("json.Marshal(%v): got %v, want ok", o, err)
+ }
+ o2 := &Obj{}
+ err = json.Unmarshal(js, o2)
+ if err != nil {
+ t.Fatalf("json.Unmarshal(%#q): got %v, want ok", js, err)
+ }
+ if o.Val.Scale() != o2.Val.Scale() ||
+ o.Val.UnscaledBig().Cmp(o2.Val.UnscaledBig()) != 0 {
+ t.Fatalf("json.Unmarshal(json.Marshal(%v)): want %v, got %v", o, o, o2)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/inf.v0/dec_internal_test.go b/Godeps/_workspace/src/gopkg.in/inf.v0/dec_internal_test.go
new file mode 100644
index 000000000..d4fbe3e5b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/inf.v0/dec_internal_test.go
@@ -0,0 +1,40 @@
+package inf
+
+import (
+ "math/big"
+ "testing"
+)
+
+var decQuoRemZZZ = []struct {
+ z, x, y *Dec
+ r *big.Rat
+ srA, srB int
+}{
+ // basic examples
+ {NewDec(1, 0), NewDec(2, 0), NewDec(2, 0), big.NewRat(0, 1), 0, 1},
+ {NewDec(15, 1), NewDec(3, 0), NewDec(2, 0), big.NewRat(0, 1), 0, 1},
+ {NewDec(1, 1), NewDec(1, 0), NewDec(10, 0), big.NewRat(0, 1), 0, 1},
+ {NewDec(0, 0), NewDec(2, 0), NewDec(3, 0), big.NewRat(2, 3), 1, 1},
+ {NewDec(0, 0), NewDec(2, 0), NewDec(6, 0), big.NewRat(1, 3), 1, 1},
+ {NewDec(1, 1), NewDec(2, 0), NewDec(12, 0), big.NewRat(2, 3), 1, 1},
+
+ // examples from the Go Language Specification
+ {NewDec(1, 0), NewDec(5, 0), NewDec(3, 0), big.NewRat(2, 3), 1, 1},
+ {NewDec(-1, 0), NewDec(-5, 0), NewDec(3, 0), big.NewRat(-2, 3), -1, 1},
+ {NewDec(-1, 0), NewDec(5, 0), NewDec(-3, 0), big.NewRat(-2, 3), 1, -1},
+ {NewDec(1, 0), NewDec(-5, 0), NewDec(-3, 0), big.NewRat(2, 3), -1, -1},
+}
+
+func TestDecQuoRem(t *testing.T) {
+ for i, a := range decQuoRemZZZ {
+ z, rA, rB := new(Dec), new(big.Int), new(big.Int)
+ s := scaleQuoExact{}.Scale(a.x, a.y)
+ z.quoRem(a.x, a.y, s, true, rA, rB)
+ if a.z.Cmp(z) != 0 || a.r.Cmp(new(big.Rat).SetFrac(rA, rB)) != 0 {
+ t.Errorf("#%d QuoRemZZZ got %v, %v, %v; expected %v, %v", i, z, rA, rB, a.z, a.r)
+ }
+ if a.srA != rA.Sign() || a.srB != rB.Sign() {
+ t.Errorf("#%d QuoRemZZZ wrong signs, got %v, %v; expected %v, %v", i, rA.Sign(), rB.Sign(), a.srA, a.srB)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/inf.v0/dec_test.go b/Godeps/_workspace/src/gopkg.in/inf.v0/dec_test.go
new file mode 100644
index 000000000..e4b09b3fd
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/inf.v0/dec_test.go
@@ -0,0 +1,379 @@
+package inf_test
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ "math/big"
+ "strings"
+ "testing"
+
+ "gopkg.in/inf.v0"
+)
+
+type decFunZZ func(z, x, y *inf.Dec) *inf.Dec
+type decArgZZ struct {
+ z, x, y *inf.Dec
+}
+
+var decSumZZ = []decArgZZ{
+ {inf.NewDec(0, 0), inf.NewDec(0, 0), inf.NewDec(0, 0)},
+ {inf.NewDec(1, 0), inf.NewDec(1, 0), inf.NewDec(0, 0)},
+ {inf.NewDec(1111111110, 0), inf.NewDec(123456789, 0), inf.NewDec(987654321, 0)},
+ {inf.NewDec(-1, 0), inf.NewDec(-1, 0), inf.NewDec(0, 0)},
+ {inf.NewDec(864197532, 0), inf.NewDec(-123456789, 0), inf.NewDec(987654321, 0)},
+ {inf.NewDec(-1111111110, 0), inf.NewDec(-123456789, 0), inf.NewDec(-987654321, 0)},
+ {inf.NewDec(12, 2), inf.NewDec(1, 1), inf.NewDec(2, 2)},
+}
+
+var decProdZZ = []decArgZZ{
+ {inf.NewDec(0, 0), inf.NewDec(0, 0), inf.NewDec(0, 0)},
+ {inf.NewDec(0, 0), inf.NewDec(1, 0), inf.NewDec(0, 0)},
+ {inf.NewDec(1, 0), inf.NewDec(1, 0), inf.NewDec(1, 0)},
+ {inf.NewDec(-991*991, 0), inf.NewDec(991, 0), inf.NewDec(-991, 0)},
+ {inf.NewDec(2, 3), inf.NewDec(1, 1), inf.NewDec(2, 2)},
+ {inf.NewDec(2, -3), inf.NewDec(1, -1), inf.NewDec(2, -2)},
+ {inf.NewDec(2, 3), inf.NewDec(1, 1), inf.NewDec(2, 2)},
+}
+
+func TestDecSignZ(t *testing.T) {
+ var zero inf.Dec
+ for _, a := range decSumZZ {
+ s := a.z.Sign()
+ e := a.z.Cmp(&zero)
+ if s != e {
+ t.Errorf("got %d; want %d for z = %v", s, e, a.z)
+ }
+ }
+}
+
+func TestDecAbsZ(t *testing.T) {
+ var zero inf.Dec
+ for _, a := range decSumZZ {
+ var z inf.Dec
+ z.Abs(a.z)
+ var e inf.Dec
+ e.Set(a.z)
+ if e.Cmp(&zero) < 0 {
+ e.Sub(&zero, &e)
+ }
+ if z.Cmp(&e) != 0 {
+ t.Errorf("got z = %v; want %v", z, e)
+ }
+ }
+}
+
+func testDecFunZZ(t *testing.T, msg string, f decFunZZ, a decArgZZ) {
+ var z inf.Dec
+ f(&z, a.x, a.y)
+ if (&z).Cmp(a.z) != 0 {
+ t.Errorf("%s%+v\n\tgot z = %v; want %v", msg, a, &z, a.z)
+ }
+}
+
+func TestDecSumZZ(t *testing.T) {
+ AddZZ := func(z, x, y *inf.Dec) *inf.Dec { return z.Add(x, y) }
+ SubZZ := func(z, x, y *inf.Dec) *inf.Dec { return z.Sub(x, y) }
+ for _, a := range decSumZZ {
+ arg := a
+ testDecFunZZ(t, "AddZZ", AddZZ, arg)
+
+ arg = decArgZZ{a.z, a.y, a.x}
+ testDecFunZZ(t, "AddZZ symmetric", AddZZ, arg)
+
+ arg = decArgZZ{a.x, a.z, a.y}
+ testDecFunZZ(t, "SubZZ", SubZZ, arg)
+
+ arg = decArgZZ{a.y, a.z, a.x}
+ testDecFunZZ(t, "SubZZ symmetric", SubZZ, arg)
+ }
+}
+
+func TestDecProdZZ(t *testing.T) {
+ MulZZ := func(z, x, y *inf.Dec) *inf.Dec { return z.Mul(x, y) }
+ for _, a := range decProdZZ {
+ arg := a
+ testDecFunZZ(t, "MulZZ", MulZZ, arg)
+
+ arg = decArgZZ{a.z, a.y, a.x}
+ testDecFunZZ(t, "MulZZ symmetric", MulZZ, arg)
+ }
+}
+
+var decUnscaledTests = []struct {
+ d *inf.Dec
+ u int64 // ignored when ok == false
+ ok bool
+}{
+ {new(inf.Dec), 0, true},
+ {inf.NewDec(-1<<63, 0), -1 << 63, true},
+ {inf.NewDec(-(-1<<63 + 1), 0), -(-1<<63 + 1), true},
+ {new(inf.Dec).Neg(inf.NewDec(-1<<63, 0)), 0, false},
+ {new(inf.Dec).Sub(inf.NewDec(-1<<63, 0), inf.NewDec(1, 0)), 0, false},
+ {inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), 0, false},
+}
+
+func TestDecUnscaled(t *testing.T) {
+ for i, tt := range decUnscaledTests {
+ u, ok := tt.d.Unscaled()
+ if ok != tt.ok {
+ t.Errorf("#%d Unscaled: got %v, expected %v", i, ok, tt.ok)
+ } else if ok && u != tt.u {
+ t.Errorf("#%d Unscaled: got %v, expected %v", i, u, tt.u)
+ }
+ }
+}
+
+var decRoundTests = [...]struct {
+ in *inf.Dec
+ s inf.Scale
+ r inf.Rounder
+ exp *inf.Dec
+}{
+ {inf.NewDec(123424999999999993, 15), 2, inf.RoundHalfUp, inf.NewDec(12342, 2)},
+ {inf.NewDec(123425000000000001, 15), 2, inf.RoundHalfUp, inf.NewDec(12343, 2)},
+ {inf.NewDec(123424999999999993, 15), 15, inf.RoundHalfUp, inf.NewDec(123424999999999993, 15)},
+ {inf.NewDec(123424999999999993, 15), 16, inf.RoundHalfUp, inf.NewDec(1234249999999999930, 16)},
+ {inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -1, inf.RoundHalfUp, inf.NewDec(1844674407370955162, -1)},
+ {inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -2, inf.RoundHalfUp, inf.NewDec(184467440737095516, -2)},
+ {inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -3, inf.RoundHalfUp, inf.NewDec(18446744073709552, -3)},
+ {inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -4, inf.RoundHalfUp, inf.NewDec(1844674407370955, -4)},
+ {inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -5, inf.RoundHalfUp, inf.NewDec(184467440737096, -5)},
+ {inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -6, inf.RoundHalfUp, inf.NewDec(18446744073710, -6)},
+}
+
+func TestDecRound(t *testing.T) {
+ for i, tt := range decRoundTests {
+ z := new(inf.Dec).Round(tt.in, tt.s, tt.r)
+ if tt.exp.Cmp(z) != 0 {
+ t.Errorf("#%d Round got %v; expected %v", i, z, tt.exp)
+ }
+ }
+}
+
+var decStringTests = []struct {
+ in string
+ out string
+ val int64
+ scale inf.Scale // skip SetString if negative
+ ok bool
+ scanOk bool
+}{
+ {in: "", ok: false, scanOk: false},
+ {in: "a", ok: false, scanOk: false},
+ {in: "z", ok: false, scanOk: false},
+ {in: "+", ok: false, scanOk: false},
+ {in: "-", ok: false, scanOk: false},
+ {in: "g", ok: false, scanOk: false},
+ {in: ".", ok: false, scanOk: false},
+ {in: ".-0", ok: false, scanOk: false},
+ {in: ".+0", ok: false, scanOk: false},
+ // Scannable but not SetStringable
+ {"0b", "ignored", 0, 0, false, true},
+ {"0x", "ignored", 0, 0, false, true},
+ {"0xg", "ignored", 0, 0, false, true},
+ {"0.0g", "ignored", 0, 1, false, true},
+ // examples from godoc for Dec
+ {"0", "0", 0, 0, true, true},
+ {"0.00", "0.00", 0, 2, true, true},
+ {"ignored", "0", 0, -2, true, false},
+ {"1", "1", 1, 0, true, true},
+ {"1.00", "1.00", 100, 2, true, true},
+ {"10", "10", 10, 0, true, true},
+ {"ignored", "10", 1, -1, true, false},
+ // other tests
+ {"+0", "0", 0, 0, true, true},
+ {"-0", "0", 0, 0, true, true},
+ {"0.0", "0.0", 0, 1, true, true},
+ {"0.1", "0.1", 1, 1, true, true},
+ {"0.", "0", 0, 0, true, true},
+ {"-10", "-10", -1, -1, true, true},
+ {"-1", "-1", -1, 0, true, true},
+ {"-0.1", "-0.1", -1, 1, true, true},
+ {"-0.01", "-0.01", -1, 2, true, true},
+ {"+0.", "0", 0, 0, true, true},
+ {"-0.", "0", 0, 0, true, true},
+ {".0", "0.0", 0, 1, true, true},
+ {"+.0", "0.0", 0, 1, true, true},
+ {"-.0", "0.0", 0, 1, true, true},
+ {"0.0000000000", "0.0000000000", 0, 10, true, true},
+ {"0.0000000001", "0.0000000001", 1, 10, true, true},
+ {"-0.0000000000", "0.0000000000", 0, 10, true, true},
+ {"-0.0000000001", "-0.0000000001", -1, 10, true, true},
+ {"-10", "-10", -10, 0, true, true},
+ {"+10", "10", 10, 0, true, true},
+ {"00", "0", 0, 0, true, true},
+ {"023", "23", 23, 0, true, true}, // decimal, not octal
+ {"-02.3", "-2.3", -23, 1, true, true}, // decimal, not octal
+}
+
+func TestDecGetString(t *testing.T) {
+ z := new(inf.Dec)
+ for i, test := range decStringTests {
+ if !test.ok {
+ continue
+ }
+ z.SetUnscaled(test.val)
+ z.SetScale(test.scale)
+
+ s := z.String()
+ if s != test.out {
+ t.Errorf("#%da got %s; want %s", i, s, test.out)
+ }
+
+ s = fmt.Sprintf("%d", z)
+ if s != test.out {
+ t.Errorf("#%db got %s; want %s", i, s, test.out)
+ }
+ }
+}
+
+func TestDecSetString(t *testing.T) {
+ tmp := new(inf.Dec)
+ for i, test := range decStringTests {
+ if test.scale < 0 {
+ // SetString only supports scale >= 0
+ continue
+ }
+ // initialize to a non-zero value so that issues with parsing
+ // 0 are detected
+ tmp.Set(inf.NewDec(1234567890, 123))
+ n1, ok1 := new(inf.Dec).SetString(test.in)
+ n2, ok2 := tmp.SetString(test.in)
+ expected := inf.NewDec(test.val, test.scale)
+ if ok1 != test.ok || ok2 != test.ok {
+ t.Errorf("#%d (input '%s') ok incorrect (should be %t)", i, test.in, test.ok)
+ continue
+ }
+ if !ok1 {
+ if n1 != nil {
+ t.Errorf("#%d (input '%s') n1 != nil", i, test.in)
+ }
+ continue
+ }
+ if !ok2 {
+ if n2 != nil {
+ t.Errorf("#%d (input '%s') n2 != nil", i, test.in)
+ }
+ continue
+ }
+
+ if n1.Cmp(expected) != 0 {
+ t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n1, test.val)
+ }
+ if n2.Cmp(expected) != 0 {
+ t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n2, test.val)
+ }
+ }
+}
+
+func TestDecScan(t *testing.T) {
+ tmp := new(inf.Dec)
+ for i, test := range decStringTests {
+ if test.scale < 0 {
+ // SetString only supports scale >= 0
+ continue
+ }
+ // initialize to a non-zero value so that issues with parsing
+ // 0 are detected
+ tmp.Set(inf.NewDec(1234567890, 123))
+ n1, n2 := new(inf.Dec), tmp
+ nn1, err1 := fmt.Sscan(test.in, n1)
+ nn2, err2 := fmt.Sscan(test.in, n2)
+ if !test.scanOk {
+ if err1 == nil || err2 == nil {
+ t.Errorf("#%d (input '%s') ok incorrect, should be %t", i, test.in, test.scanOk)
+ }
+ continue
+ }
+ expected := inf.NewDec(test.val, test.scale)
+ if nn1 != 1 || err1 != nil || nn2 != 1 || err2 != nil {
+ t.Errorf("#%d (input '%s') error %d %v, %d %v", i, test.in, nn1, err1, nn2, err2)
+ continue
+ }
+ if n1.Cmp(expected) != 0 {
+ t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n1, test.val)
+ }
+ if n2.Cmp(expected) != 0 {
+ t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n2, test.val)
+ }
+ }
+}
+
+var decScanNextTests = []struct {
+ in string
+ ok bool
+ next rune
+}{
+ {"", false, 0},
+ {"a", false, 'a'},
+ {"z", false, 'z'},
+ {"+", false, 0},
+ {"-", false, 0},
+ {"g", false, 'g'},
+ {".", false, 0},
+ {".-0", false, '-'},
+ {".+0", false, '+'},
+ {"0b", true, 'b'},
+ {"0x", true, 'x'},
+ {"0xg", true, 'x'},
+ {"0.0g", true, 'g'},
+}
+
+func TestDecScanNext(t *testing.T) {
+ for i, test := range decScanNextTests {
+ rdr := strings.NewReader(test.in)
+ n1 := new(inf.Dec)
+ nn1, _ := fmt.Fscan(rdr, n1)
+ if (test.ok && nn1 == 0) || (!test.ok && nn1 > 0) {
+ t.Errorf("#%d (input '%s') ok incorrect should be %t", i, test.in, test.ok)
+ continue
+ }
+ r := rune(0)
+ nn2, err := fmt.Fscanf(rdr, "%c", &r)
+ if test.next != r {
+ t.Errorf("#%d (input '%s') next incorrect, got %c should be %c, %d, %v", i, test.in, r, test.next, nn2, err)
+ }
+ }
+}
+
+var decGobEncodingTests = []string{
+ "0",
+ "1",
+ "2",
+ "10",
+ "42",
+ "1234567890",
+ "298472983472983471903246121093472394872319615612417471234712061",
+}
+
+func TestDecGobEncoding(t *testing.T) {
+ var medium bytes.Buffer
+ enc := gob.NewEncoder(&medium)
+ dec := gob.NewDecoder(&medium)
+ for i, test := range decGobEncodingTests {
+ for j := 0; j < 2; j++ {
+ for k := inf.Scale(-5); k <= 5; k++ {
+ medium.Reset() // empty buffer for each test case (in case of failures)
+ stest := test
+ if j != 0 {
+ // negative numbers
+ stest = "-" + test
+ }
+ var tx inf.Dec
+ tx.SetString(stest)
+ tx.SetScale(k) // test with positive, negative, and zero scale
+ if err := enc.Encode(&tx); err != nil {
+ t.Errorf("#%d%c: encoding failed: %s", i, 'a'+j, err)
+ }
+ var rx inf.Dec
+ if err := dec.Decode(&rx); err != nil {
+ t.Errorf("#%d%c: decoding failed: %s", i, 'a'+j, err)
+ }
+ if rx.Cmp(&tx) != 0 {
+ t.Errorf("#%d%c: transmission failed: got %s want %s", i, 'a'+j, &rx, &tx)
+ }
+ }
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/inf.v0/example_test.go b/Godeps/_workspace/src/gopkg.in/inf.v0/example_test.go
new file mode 100644
index 000000000..fa1e54d16
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/inf.v0/example_test.go
@@ -0,0 +1,62 @@
+package inf_test
+
+import (
+ "fmt"
+ "log"
+)
+
+import "gopkg.in/inf.v0"
+
+func ExampleDec_SetString() {
+ d := new(inf.Dec)
+ d.SetString("012345.67890") // decimal; leading 0 ignored; trailing 0 kept
+ fmt.Println(d)
+ // Output: 12345.67890
+}
+
+func ExampleDec_Scan() {
+ // The Scan function is rarely used directly;
+ // the fmt package recognizes it as an implementation of fmt.Scanner.
+ d := new(inf.Dec)
+ _, err := fmt.Sscan("184467440.73709551617", d)
+ if err != nil {
+ log.Println("error scanning value:", err)
+ } else {
+ fmt.Println(d)
+ }
+ // Output: 184467440.73709551617
+}
+
+func ExampleDec_QuoRound_scale2RoundDown() {
+ // 10 / 3 is an infinite decimal; it has no exact Dec representation
+ x, y := inf.NewDec(10, 0), inf.NewDec(3, 0)
+ // use 2 digits beyond the decimal point, round towards 0
+ z := new(inf.Dec).QuoRound(x, y, 2, inf.RoundDown)
+ fmt.Println(z)
+ // Output: 3.33
+}
+
+func ExampleDec_QuoRound_scale2RoundCeil() {
+	// -42 / 400 is a finite decimal with 3 digits beyond the decimal point
+ x, y := inf.NewDec(-42, 0), inf.NewDec(400, 0)
+ // use 2 digits beyond decimal point, round towards positive infinity
+ z := new(inf.Dec).QuoRound(x, y, 2, inf.RoundCeil)
+ fmt.Println(z)
+ // Output: -0.10
+}
+
+func ExampleDec_QuoExact_ok() {
+	// 1 / 25 is a finite decimal; it has an exact Dec representation
+ x, y := inf.NewDec(1, 0), inf.NewDec(25, 0)
+ z := new(inf.Dec).QuoExact(x, y)
+ fmt.Println(z)
+ // Output: 0.04
+}
+
+func ExampleDec_QuoExact_fail() {
+ // 1 / 3 is an infinite decimal; it has no exact Dec representation
+ x, y := inf.NewDec(1, 0), inf.NewDec(3, 0)
+ z := new(inf.Dec).QuoExact(x, y)
+ fmt.Println(z)
+ // Output:
+}
diff --git a/Godeps/_workspace/src/gopkg.in/inf.v0/rounder_example_test.go b/Godeps/_workspace/src/gopkg.in/inf.v0/rounder_example_test.go
new file mode 100644
index 000000000..803c1d7ee
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/inf.v0/rounder_example_test.go
@@ -0,0 +1,72 @@
+package inf_test
+
+import (
+ "fmt"
+ "os"
+ "text/tabwriter"
+
+ "gopkg.in/inf.v0"
+)
+
+// This example displays the results of Dec.Round with each of the Rounders.
+//
+func ExampleRounder() {
+ var vals = []struct {
+ x string
+ s inf.Scale
+ }{
+ {"-0.18", 1}, {"-0.15", 1}, {"-0.12", 1}, {"-0.10", 1},
+ {"-0.08", 1}, {"-0.05", 1}, {"-0.02", 1}, {"0.00", 1},
+ {"0.02", 1}, {"0.05", 1}, {"0.08", 1}, {"0.10", 1},
+ {"0.12", 1}, {"0.15", 1}, {"0.18", 1},
+ }
+
+ var rounders = []struct {
+ name string
+ rounder inf.Rounder
+ }{
+ {"RoundDown", inf.RoundDown}, {"RoundUp", inf.RoundUp},
+ {"RoundCeil", inf.RoundCeil}, {"RoundFloor", inf.RoundFloor},
+ {"RoundHalfDown", inf.RoundHalfDown}, {"RoundHalfUp", inf.RoundHalfUp},
+ {"RoundHalfEven", inf.RoundHalfEven}, {"RoundExact", inf.RoundExact},
+ }
+
+ fmt.Println("The results of new(inf.Dec).Round(x, s, inf.RoundXXX):\n")
+ w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', tabwriter.AlignRight)
+ fmt.Fprint(w, "x\ts\t|\t")
+ for _, r := range rounders {
+ fmt.Fprintf(w, "%s\t", r.name[5:])
+ }
+ fmt.Fprintln(w)
+ for _, v := range vals {
+ fmt.Fprintf(w, "%s\t%d\t|\t", v.x, v.s)
+ for _, r := range rounders {
+ x, _ := new(inf.Dec).SetString(v.x)
+ z := new(inf.Dec).Round(x, v.s, r.rounder)
+ fmt.Fprintf(w, "%d\t", z)
+ }
+ fmt.Fprintln(w)
+ }
+ w.Flush()
+
+ // Output:
+ // The results of new(inf.Dec).Round(x, s, inf.RoundXXX):
+ //
+ // x s | Down Up Ceil Floor HalfDown HalfUp HalfEven Exact
+ // -0.18 1 | -0.1 -0.2 -0.1 -0.2 -0.2 -0.2 -0.2
+ // -0.15 1 | -0.1 -0.2 -0.1 -0.2 -0.1 -0.2 -0.2
+ // -0.12 1 | -0.1 -0.2 -0.1 -0.2 -0.1 -0.1 -0.1
+ // -0.10 1 | -0.1 -0.1 -0.1 -0.1 -0.1 -0.1 -0.1 -0.1
+ // -0.08 1 | 0.0 -0.1 0.0 -0.1 -0.1 -0.1 -0.1
+ // -0.05 1 | 0.0 -0.1 0.0 -0.1 0.0 -0.1 0.0
+ // -0.02 1 | 0.0 -0.1 0.0 -0.1 0.0 0.0 0.0
+ // 0.00 1 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
+ // 0.02 1 | 0.0 0.1 0.1 0.0 0.0 0.0 0.0
+ // 0.05 1 | 0.0 0.1 0.1 0.0 0.0 0.1 0.0
+ // 0.08 1 | 0.0 0.1 0.1 0.0 0.1 0.1 0.1
+ // 0.10 1 | 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1
+ // 0.12 1 | 0.1 0.2 0.2 0.1 0.1 0.1 0.1
+ // 0.15 1 | 0.1 0.2 0.2 0.1 0.1 0.2 0.2
+ // 0.18 1 | 0.1 0.2 0.2 0.1 0.2 0.2 0.2
+
+}
diff --git a/Godeps/_workspace/src/gopkg.in/inf.v0/rounder_test.go b/Godeps/_workspace/src/gopkg.in/inf.v0/rounder_test.go
new file mode 100644
index 000000000..d7e14c58c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/inf.v0/rounder_test.go
@@ -0,0 +1,109 @@
+package inf_test
+
+import (
+ "math/big"
+ "testing"
+
+ "gopkg.in/inf.v0"
+)
+
+var decRounderInputs = [...]struct {
+ quo *inf.Dec
+ rA, rB *big.Int
+}{
+ // examples from go language spec
+ {inf.NewDec(1, 0), big.NewInt(2), big.NewInt(3)}, // 5 / 3
+ {inf.NewDec(-1, 0), big.NewInt(-2), big.NewInt(3)}, // -5 / 3
+ {inf.NewDec(-1, 0), big.NewInt(2), big.NewInt(-3)}, // 5 / -3
+ {inf.NewDec(1, 0), big.NewInt(-2), big.NewInt(-3)}, // -5 / -3
+ // examples from godoc
+ {inf.NewDec(-1, 1), big.NewInt(-8), big.NewInt(10)},
+ {inf.NewDec(-1, 1), big.NewInt(-5), big.NewInt(10)},
+ {inf.NewDec(-1, 1), big.NewInt(-2), big.NewInt(10)},
+ {inf.NewDec(0, 1), big.NewInt(-8), big.NewInt(10)},
+ {inf.NewDec(0, 1), big.NewInt(-5), big.NewInt(10)},
+ {inf.NewDec(0, 1), big.NewInt(-2), big.NewInt(10)},
+ {inf.NewDec(0, 1), big.NewInt(0), big.NewInt(1)},
+ {inf.NewDec(0, 1), big.NewInt(2), big.NewInt(10)},
+ {inf.NewDec(0, 1), big.NewInt(5), big.NewInt(10)},
+ {inf.NewDec(0, 1), big.NewInt(8), big.NewInt(10)},
+ {inf.NewDec(1, 1), big.NewInt(2), big.NewInt(10)},
+ {inf.NewDec(1, 1), big.NewInt(5), big.NewInt(10)},
+ {inf.NewDec(1, 1), big.NewInt(8), big.NewInt(10)},
+}
+
+var decRounderResults = [...]struct {
+ rounder inf.Rounder
+ results [len(decRounderInputs)]*inf.Dec
+}{
+ {inf.RoundExact, [...]*inf.Dec{nil, nil, nil, nil,
+ nil, nil, nil, nil, nil, nil,
+ inf.NewDec(0, 1), nil, nil, nil, nil, nil, nil}},
+ {inf.RoundDown, [...]*inf.Dec{
+ inf.NewDec(1, 0), inf.NewDec(-1, 0), inf.NewDec(-1, 0), inf.NewDec(1, 0),
+ inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
+ inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
+ inf.NewDec(0, 1),
+ inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
+ inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1)}},
+ {inf.RoundUp, [...]*inf.Dec{
+ inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
+ inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-2, 1),
+ inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
+ inf.NewDec(0, 1),
+ inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1),
+ inf.NewDec(2, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
+ {inf.RoundHalfDown, [...]*inf.Dec{
+ inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
+ inf.NewDec(-2, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
+ inf.NewDec(-1, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
+ inf.NewDec(0, 1),
+ inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(1, 1),
+ inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(2, 1)}},
+ {inf.RoundHalfUp, [...]*inf.Dec{
+ inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
+ inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-1, 1),
+ inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(0, 1),
+ inf.NewDec(0, 1),
+ inf.NewDec(0, 1), inf.NewDec(1, 1), inf.NewDec(1, 1),
+ inf.NewDec(1, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
+ {inf.RoundHalfEven, [...]*inf.Dec{
+ inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
+ inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-1, 1),
+ inf.NewDec(-1, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
+ inf.NewDec(0, 1),
+ inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(1, 1),
+ inf.NewDec(1, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
+ {inf.RoundFloor, [...]*inf.Dec{
+ inf.NewDec(1, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(1, 0),
+ inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-2, 1),
+ inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
+ inf.NewDec(0, 1),
+ inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
+ inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1)}},
+ {inf.RoundCeil, [...]*inf.Dec{
+ inf.NewDec(2, 0), inf.NewDec(-1, 0), inf.NewDec(-1, 0), inf.NewDec(2, 0),
+ inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
+ inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
+ inf.NewDec(0, 1),
+ inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1),
+ inf.NewDec(2, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
+}
+
+func TestDecRounders(t *testing.T) {
+ for i, a := range decRounderResults {
+ for j, input := range decRounderInputs {
+ q := new(inf.Dec).Set(input.quo)
+ rA, rB := new(big.Int).Set(input.rA), new(big.Int).Set(input.rB)
+ res := a.rounder.Round(new(inf.Dec), q, rA, rB)
+ if a.results[j] == nil && res == nil {
+ continue
+ }
+ if (a.results[j] == nil && res != nil) ||
+ (a.results[j] != nil && res == nil) ||
+ a.results[j].Cmp(res) != 0 {
+ t.Errorf("#%d,%d Rounder got %v; expected %v", i, j, res, a.results[j])
+ }
+ }
+ }
+}