Merge branch 'f-job-diff2' into f-plan-endpoint

commit ccc3caae4a
@@ -3,6 +3,7 @@
BUG FIXES:

  * core: Updated User, Meta or Resources in a task cause create/destroy updates [GH-1128]
  * discovery: Ensure service and check names are unique [GH-1143, GH-1144]

## 0.3.2 (April 22, 2016)
@@ -2,7 +2,7 @@ PACKAGES = $(shell go list ./... | grep -v '/vendor/')
VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods \
    -nilfunc -printf -rangeloops -shift -structtags -unsafeptr
EXTERNAL_TOOLS=\
    github.com/tools/godep \
    github.com/kardianos/govendor \
    github.com/mitchellh/gox \
    golang.org/x/tools/cmd/cover \
    golang.org/x/tools/cmd/vet \
@@ -1,608 +0,0 @@
{
|
||||
"ImportPath": "github.com/hashicorp/nomad",
|
||||
"GoVersion": "go1.6",
|
||||
"GodepVersion": "v62",
|
||||
"Packages": [
|
||||
"./..."
|
||||
],
|
||||
"Deps": [
|
||||
{
|
||||
"ImportPath": "github.com/DataDog/datadog-go/statsd",
|
||||
"Rev": "bc97e0770ad4edae1c9dc14beb40b79b2dde32f8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/Sirupsen/logrus",
|
||||
"Comment": "v0.8.7-87-g4b6ea73",
|
||||
"Rev": "4b6ea7319e214d98c938f12692336f7ca9348d6b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/StackExchange/wmi",
|
||||
"Rev": "f3e2bae1e0cb5aef83e319133eabfee30013a4a5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/armon/circbuf",
|
||||
"Rev": "bbbad097214e2918d8543d5201d12bfd7bca254d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/armon/go-metrics",
|
||||
"Rev": "06b60999766278efd6d2b5d8418a58c3d5b99e87"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/armon/go-metrics/datadog",
|
||||
"Rev": "06b60999766278efd6d2b5d8418a58c3d5b99e87"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/armon/go-metrics/prometheus",
|
||||
"Rev": "06b60999766278efd6d2b5d8418a58c3d5b99e87"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/armon/go-radix",
|
||||
"Rev": "4239b77079c7b5d1243b7b4736304ce8ddb6f0f2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/awserr",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/client",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/defaults",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/request",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/session",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/endpoints",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/signer/v4",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/waiter",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/service/s3",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3iface",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3manager",
|
||||
"Comment": "v1.0.6-2-g80dd495",
|
||||
"Rev": "80dd4951fdb3f711d31843b8d87871130ef2df67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/beorn7/perks/quantile",
|
||||
"Rev": "b965b613227fddccbfffe13eae360ed3fa822f8d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/bgentry/speakeasy",
|
||||
"Rev": "36e9cfdd690967f4f690c6edcc9ffacd006014a0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/bgentry/speakeasy/example",
|
||||
"Rev": "36e9cfdd690967f4f690c6edcc9ffacd006014a0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/boltdb/bolt",
|
||||
"Comment": "v1.2.0",
|
||||
"Rev": "c6ba97b89e0454fec9aa92e1d33a4e2c5fc1f631"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/go-systemd/dbus",
|
||||
"Comment": "v4-40-g2ed5b50",
|
||||
"Rev": "2ed5b5012ccde5f057c197890a2c801295941149"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/go-systemd/util",
|
||||
"Comment": "v4-40-g2ed5b50",
|
||||
"Rev": "2ed5b5012ccde5f057c197890a2c801295941149"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/davecgh/go-spew/spew",
|
||||
"Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/docker/pkg/mount",
|
||||
"Comment": "v1.4.1-9713-g35ef3ef",
|
||||
"Rev": "35ef3efe9af64c22c7efbe826f8f63b025639130"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/go-units",
|
||||
"Comment": "v0.1.0-23-g5d2041e",
|
||||
"Rev": "5d2041e26a699eaca682e2ea41c8f891e1060444"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/dustin/go-humanize",
|
||||
"Rev": "8929fe90cee4b2cb9deb468b51fb34eba64d1bf0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient",
|
||||
"Rev": "7c07ffce0f7e14a4da49ce92a2842d4e87be1c1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus",
|
||||
"Rev": "7c07ffce0f7e14a4da49ce92a2842d4e87be1c1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts",
|
||||
"Rev": "7c07ffce0f7e14a4da49ce92a2842d4e87be1c1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive",
|
||||
"Rev": "7c07ffce0f7e14a4da49ce92a2842d4e87be1c1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils",
|
||||
"Rev": "7c07ffce0f7e14a4da49ce92a2842d4e87be1c1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir",
|
||||
"Rev": "7c07ffce0f7e14a4da49ce92a2842d4e87be1c1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools",
|
||||
"Rev": "7c07ffce0f7e14a4da49ce92a2842d4e87be1c1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils",
|
||||
"Rev": "7c07ffce0f7e14a4da49ce92a2842d4e87be1c1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath",
|
||||
"Rev": "7c07ffce0f7e14a4da49ce92a2842d4e87be1c1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools",
|
||||
"Rev": "7c07ffce0f7e14a4da49ce92a2842d4e87be1c1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise",
|
||||
"Rev": "7c07ffce0f7e14a4da49ce92a2842d4e87be1c1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy",
|
||||
"Rev": "7c07ffce0f7e14a4da49ce92a2842d4e87be1c1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system",
|
||||
"Rev": "7c07ffce0f7e14a4da49ce92a2842d4e87be1c1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/go-units",
|
||||
"Rev": "7c07ffce0f7e14a4da49ce92a2842d4e87be1c1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp",
|
||||
"Rev": "7c07ffce0f7e14a4da49ce92a2842d4e87be1c1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user",
|
||||
"Rev": "7c07ffce0f7e14a4da49ce92a2842d4e87be1c1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/golang.org/x/net/context",
|
||||
"Rev": "7c07ffce0f7e14a4da49ce92a2842d4e87be1c1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix",
|
||||
"Rev": "7c07ffce0f7e14a4da49ce92a2842d4e87be1c1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/go-ini/ini",
|
||||
"Comment": "v1.8.5-2-g6ec4abd",
|
||||
"Rev": "6ec4abd8f8d587536da56f730858f0e27aeb4126"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/go-ole/go-ole",
|
||||
"Comment": "v1.2.0-4-g5005588",
|
||||
"Rev": "50055884d646dd9434f16bbb5c9801749b9bafe4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/go-ole/go-ole/oleutil",
|
||||
"Comment": "v1.2.0-4-g5005588",
|
||||
"Rev": "50055884d646dd9434f16bbb5c9801749b9bafe4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/godbus/dbus",
|
||||
"Comment": "v3-10-ge4593d6",
|
||||
"Rev": "e4593d66e29678c26f84166fe231a03e0268ced5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/godbus/dbus/introspect",
|
||||
"Comment": "v3-10-ge4593d6",
|
||||
"Rev": "e4593d66e29678c26f84166fe231a03e0268ced5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/godbus/dbus/prop",
|
||||
"Comment": "v3-10-ge4593d6",
|
||||
"Rev": "e4593d66e29678c26f84166fe231a03e0268ced5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/proto",
|
||||
"Rev": "0dfe8f37844c14cb32c7247925270e0f7ba90973"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/gorhill/cronexpr",
|
||||
"Comment": "1.0.0",
|
||||
"Rev": "a557574d6c024ed6e36acc8b610f5f211c91568a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/gorhill/cronexpr/cronexpr",
|
||||
"Comment": "1.0.0",
|
||||
"Rev": "a557574d6c024ed6e36acc8b610f5f211c91568a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/consul/api",
|
||||
"Comment": "v0.6.3-363-gae32a3c",
|
||||
"Rev": "ae32a3ceae9fddb431b933ed7b2a82110e41e1bf"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/consul/tlsutil",
|
||||
"Comment": "v0.6.3-363-gae32a3c",
|
||||
"Rev": "ae32a3ceae9fddb431b933ed7b2a82110e41e1bf"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/errwrap",
|
||||
"Rev": "7554cd9344cec97297fa6649b055a8c98c2a1e55"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/go-checkpoint",
|
||||
"Rev": "e4b2dc34c0f698ee04750bf2035d8b9384233e1b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/go-cleanhttp",
|
||||
"Rev": "875fb671b3ddc66f8e2f0acc33829c8cb989a38d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/go-getter",
|
||||
"Rev": "3142ddc1d627a166970ddd301bc09cb510c74edc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/go-getter/helper/url",
|
||||
"Rev": "3142ddc1d627a166970ddd301bc09cb510c74edc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/go-immutable-radix",
|
||||
"Rev": "8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/go-memdb",
|
||||
"Rev": "2cc5518f24b906e7cccfc808817ba479f5489821"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/go-msgpack/codec",
|
||||
"Rev": "fa3f63826f7c23912c15263591e65d54d080b458"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/go-multierror",
|
||||
"Rev": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/go-plugin",
|
||||
"Rev": "cccb4a1328abbb89898f3ecf4311a05bddc4de6d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/go-syslog",
|
||||
"Rev": "42a2b573b664dbf281bd48c3cc12c086b17a39ba"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/go-version",
|
||||
"Rev": "2e7f5ea8e27bb3fdf9baa0881d16757ac4637332"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/golang-lru/simplelru",
|
||||
"Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/hcl",
|
||||
"Rev": "1c284ec98f4b398443cbabb0d9197f7f4cc0077c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/hcl/hcl/ast",
|
||||
"Rev": "1c284ec98f4b398443cbabb0d9197f7f4cc0077c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/hcl/hcl/parser",
|
||||
"Rev": "1c284ec98f4b398443cbabb0d9197f7f4cc0077c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/hcl/hcl/scanner",
|
||||
"Rev": "1c284ec98f4b398443cbabb0d9197f7f4cc0077c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/hcl/hcl/strconv",
|
||||
"Rev": "1c284ec98f4b398443cbabb0d9197f7f4cc0077c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/hcl/hcl/token",
|
||||
"Rev": "1c284ec98f4b398443cbabb0d9197f7f4cc0077c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/hcl/json/parser",
|
||||
"Rev": "1c284ec98f4b398443cbabb0d9197f7f4cc0077c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/hcl/json/scanner",
|
||||
"Rev": "1c284ec98f4b398443cbabb0d9197f7f4cc0077c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/hcl/json/token",
|
||||
"Rev": "1c284ec98f4b398443cbabb0d9197f7f4cc0077c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/logutils",
|
||||
"Rev": "0dc08b1671f34c4250ce212759ebd880f743d883"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/memberlist",
|
||||
"Rev": "88ac4de0d1a0ca6def284b571342db3b777a4c37"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/net-rpc-msgpackrpc",
|
||||
"Rev": "a14192a58a694c123d8fe5481d4a4727d6ae82f3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/raft",
|
||||
"Rev": "057b893fd996696719e98b6c44649ea14968c811"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/raft-boltdb",
|
||||
"Rev": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/scada-client",
|
||||
"Rev": "84989fd23ad4cc0e7ad44d6a871fd793eb9beb0a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/serf/coordinate",
|
||||
"Comment": "v0.7.0-18-gc4c55f1",
|
||||
"Rev": "c4c55f16bae1aed9b355ad655d3ebf0215734461"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/serf/serf",
|
||||
"Comment": "v0.7.0-18-gc4c55f1",
|
||||
"Rev": "c4c55f16bae1aed9b355ad655d3ebf0215734461"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/yamux",
|
||||
"Rev": "df949784da9ed028ee76df44652e42d37a09d7e4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/jmespath/go-jmespath",
|
||||
"Comment": "0.2.2-2-gc01cf91",
|
||||
"Rev": "c01cf91b011868172fdcd9f41838e80c9d716264"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/jmespath/go-jmespath/fuzz",
|
||||
"Comment": "0.2.2-2-gc01cf91",
|
||||
"Rev": "c01cf91b011868172fdcd9f41838e80c9d716264"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/kardianos/osext",
|
||||
"Rev": "29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mattn/go-isatty",
|
||||
"Rev": "56b76bdf51f7708750eac80fa38b952bb9f32639"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
|
||||
"Rev": "d0c3fe89de86839aecf2e0579c40ba3bb336a453"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/miekg/dns",
|
||||
"Rev": "7e024ce8ce18b21b475ac6baf8fa3c42536bf2fa"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mitchellh/cli",
|
||||
"Rev": "cb6853d606ea4a12a15ac83cc43503df99fd28fb"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mitchellh/copystructure",
|
||||
"Rev": "80adcec1955ee4e97af357c30dee61aadcc02c10"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mitchellh/hashstructure",
|
||||
"Rev": "1ef5c71b025aef149d12346356ac5973992860bc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mitchellh/mapstructure",
|
||||
"Rev": "281073eb9eb092240d33ef253c404f1cca550309"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mitchellh/reflectwalk",
|
||||
"Rev": "eecf4c70c626c7cfbb95c90195bc34d386c74ac6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups",
|
||||
"Comment": "v0.0.9-108-g89ab7f2",
|
||||
"Rev": "89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/fs",
|
||||
"Comment": "v0.0.9-108-g89ab7f2",
|
||||
"Rev": "89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/systemd",
|
||||
"Comment": "v0.0.9-108-g89ab7f2",
|
||||
"Rev": "89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/configs",
|
||||
"Comment": "v0.0.9-108-g89ab7f2",
|
||||
"Rev": "89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/system",
|
||||
"Comment": "v0.0.9-108-g89ab7f2",
|
||||
"Rev": "89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/utils",
|
||||
"Comment": "v0.0.9-108-g89ab7f2",
|
||||
"Rev": "89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/client_golang/prometheus",
|
||||
"Comment": "0.7.0-70-g15006a7",
|
||||
"Rev": "15006a7ed88e73201c4e6142a2e66b54ae5fdf00"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/client_model/go",
|
||||
"Comment": "model-0.0.2-12-gfa8ad6f",
|
||||
"Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/common/expfmt",
|
||||
"Rev": "23070236b1ebff452f494ae831569545c2b61d26"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
|
||||
"Rev": "23070236b1ebff452f494ae831569545c2b61d26"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/common/model",
|
||||
"Rev": "23070236b1ebff452f494ae831569545c2b61d26"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/procfs",
|
||||
"Rev": "406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/ryanuber/columnize",
|
||||
"Comment": "v2.0.1-8-g983d3a5",
|
||||
"Rev": "983d3a5fab1bf04d1b412465d2d9f8430e2e917e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/shirou/gopsutil/cpu",
|
||||
"Comment": "1.0.0-230-gf58654f",
|
||||
"Rev": "f58654fa1c30aab9b8c503ecea4922e80abcd2bf"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/shirou/gopsutil/host",
|
||||
"Comment": "1.0.0-230-gf58654f",
|
||||
"Rev": "f58654fa1c30aab9b8c503ecea4922e80abcd2bf"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/shirou/gopsutil/internal/common",
|
||||
"Comment": "1.0.0-230-gf58654f",
|
||||
"Rev": "f58654fa1c30aab9b8c503ecea4922e80abcd2bf"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/shirou/gopsutil/mem",
|
||||
"Comment": "1.0.0-230-gf58654f",
|
||||
"Rev": "f58654fa1c30aab9b8c503ecea4922e80abcd2bf"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/shirou/gopsutil/net",
|
||||
"Comment": "1.0.0-230-gf58654f",
|
||||
"Rev": "f58654fa1c30aab9b8c503ecea4922e80abcd2bf"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/shirou/gopsutil/process",
|
||||
"Comment": "1.0.0-230-gf58654f",
|
||||
"Rev": "f58654fa1c30aab9b8c503ecea4922e80abcd2bf"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/shirou/w32",
|
||||
"Rev": "ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/ugorji/go/codec",
|
||||
"Rev": "03b46f3d7a8e0457836a5ecd906b4961a5815a63"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/ugorji/go/codec/codecgen",
|
||||
"Rev": "03b46f3d7a8e0457836a5ecd906b4961a5815a63"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/sys/unix",
|
||||
"Rev": "50c6bc5e4292a1d4e65c6e9be5f53be28bcbe28e"
|
||||
}
|
||||
]
|
||||
}
|
|
@@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
@@ -5,6 +5,9 @@
VAGRANTFILE_API_VERSION = "2"

$script = <<SCRIPT
GO_VERSION="1.6.2"
CONSUL_VERSION="0.6.4"

# Install Prereq Packages
sudo apt-get update
sudo apt-get install -y build-essential curl git-core mercurial bzr libpcre3-dev pkg-config zip default-jre qemu libc6-dev-i386 silversearcher-ag jq htop vim unzip

@@ -18,8 +21,8 @@ ARCH=`uname -m | sed 's|i686|386|' | sed 's|x86_64|amd64|'`

# Install Go
cd /tmp
wget -q https://storage.googleapis.com/golang/go1.6.linux-${ARCH}.tar.gz
tar -xf go1.6.linux-${ARCH}.tar.gz
wget -q https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${ARCH}.tar.gz
tar -xf go${GO_VERSION}.linux-${ARCH}.tar.gz
sudo mv go $SRCROOT
sudo chmod 775 $SRCROOT
sudo chown vagrant:vagrant $SRCROOT

@@ -42,7 +45,7 @@ source /etc/profile.d/gopath.sh

echo Fetching Consul...
cd /tmp/
wget https://releases.hashicorp.com/consul/0.6.4/consul_0.6.4_linux_amd64.zip -O consul.zip
wget https://releases.hashicorp.com/consul/${CONSUL_VERSION}/consul_${CONSUL_VERSION}_linux_amd64.zip -O consul.zip
echo Installing Consul...
unzip consul.zip
sudo chmod +x consul
@@ -70,34 +73,53 @@ bash scripts/install_rkt.sh
grep "cd /opt/gopath/src/github.com/hashicorp/nomad" ~/.profile || echo "cd /opt/gopath/src/github.com/hashicorp/nomad" >> ~/.profile
SCRIPT

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "cbednarski/ubuntu-1404"
  config.vm.hostname = "nomad"
def configureVM(vmCfg)
  vmCfg.vm.box = "cbednarski/ubuntu-1404"

  config.vm.provision "shell", inline: $script, privileged: false
  config.vm.synced_folder '.', '/opt/gopath/src/github.com/hashicorp/nomad'
  vmCfg.vm.provision "shell", inline: $script, privileged: false
  vmCfg.vm.synced_folder '.', '/opt/gopath/src/github.com/hashicorp/nomad'

  # We're going to compile go and run a concurrent system, so give ourselves
  # some extra resources. Nomad will have trouble working correctly with <2 CPUs
  # so we should use at least that many.
  # some extra resources. Nomad will have trouble working correctly with <2
  # CPUs so we should use at least that many.
  cpus = 2
  memory = 2048

  config.vm.provider "parallels" do |p, o|
  vmCfg.vm.provider "parallels" do |p, o|
    o.vm.box = "parallels/ubuntu-14.04"
    p.memory = memory
    p.cpus = cpus
  end

  config.vm.provider "virtualbox" do |v|
  vmCfg.vm.provider "virtualbox" do |v|
    v.memory = memory
    v.cpus = cpus
  end

  ["vmware_fusion", "vmware_workstation"].each do |p|
    config.vm.provider p do |v|
    vmCfg.vm.provider p do |v|
      v.gui = false
      v.memory = memory
      v.cpus = cpus
    end
  end
  return vmCfg
end

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  1.upto(3) do |n|
    vmName = "nomad-server%02d" % [n]
    config.vm.define vmName, autostart: (n == 1 ? true : false), primary: (n == 1 ? true : false) do |vmCfg|
      vmCfg.vm.hostname = vmName
      vmCfg = configureVM(vmCfg)
    end
  end

  1.upto(3) do |n|
    vmName = "nomad-client%02d" % [n]
    config.vm.define vmName, autostart: false, primary: false do |vmCfg|
      vmCfg.vm.hostname = vmName
      vmCfg = configureVM(vmCfg)
    end
  end
end
@@ -80,6 +80,7 @@ type Check interface {
    Run() *cstructs.CheckResult
    ID() string
    Interval() time.Duration
    Timeout() time.Duration
}

// Returns a random stagger interval between 0 and the duration
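The hunk above extends the Check interface with a Timeout() accessor next to Run, ID, and Interval. Below is a minimal, hedged sketch of a custom check satisfying the extended contract; the CheckResult stand-in only carries the fields this commit shows, and pingCheck is a hypothetical type used purely for illustration (the real cstructs types live elsewhere in the repository).

// Sketch only: a stand-in CheckResult and Check interface modeled on this commit.
package main

import (
    "fmt"
    "time"
)

type CheckResult struct {
    ExitCode  int
    Output    string
    Timestamp time.Time
    Duration  time.Duration
    Err       error
}

type Check interface {
    Run() *CheckResult
    ID() string
    Interval() time.Duration
    Timeout() time.Duration
}

const defaultCheckTimeout = 30 * time.Second

// pingCheck is a hypothetical check used only to illustrate the contract.
type pingCheck struct {
    id       string
    interval time.Duration
    timeout  time.Duration
}

func (p *pingCheck) ID() string              { return p.id }
func (p *pingCheck) Interval() time.Duration { return p.interval }

// Timeout falls back to the package default when unset, mirroring the
// DockerScriptCheck/ExecScriptCheck implementations in this diff.
func (p *pingCheck) Timeout() time.Duration {
    if p.timeout == 0 {
        return defaultCheckTimeout
    }
    return p.timeout
}

func (p *pingCheck) Run() *CheckResult {
    start := time.Now()
    // ... perform the actual probe here ...
    return &CheckResult{ExitCode: 0, Output: "ok", Timestamp: start, Duration: time.Since(start)}
}

func main() {
    var c Check = &pingCheck{id: "ping", interval: 10 * time.Second}
    fmt.Println(c.ID(), c.Interval(), c.Timeout())
}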
@@ -429,6 +429,9 @@ func (c *ConsulService) consulPresent() bool {
// runCheck runs a check and updates the corresponding ttl check in consul
func (c *ConsulService) runCheck(check Check) {
    res := check.Run()
    if res.Duration >= check.Timeout() {
        c.logger.Printf("[DEBUG] consul.sync: check took time: %v, timeout: %v", res.Duration, check.Timeout())
    }
    state := consul.HealthCritical
    output := res.Output
    switch res.ExitCode {

@@ -445,7 +448,7 @@ func (c *ConsulService) runCheck(check Check) {
    }
    if err := c.client.Agent().UpdateTTL(check.ID(), output, state); err != nil {
        if c.availble {
            c.logger.Printf("[DEBUG] error updating ttl check for check %q: %v", check.ID(), err)
            c.logger.Printf("[DEBUG] consul.sync: error updating ttl check for check %q: %v", check.ID(), err)
            c.availble = false
        } else {
            c.availble = true
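runCheck now consults an availability flag (spelled availble in the diff) so that an unreachable Consul agent does not produce the same TTL-update error on every check run. A simplified sketch of that log-dampening idea follows; updateTTL is a hypothetical stand-in for the Consul agent API call, and the exact toggle logic in the diff differs slightly.

// Sketch of the "log once while unavailable" pattern, under the assumptions above.
package main

import (
    "errors"
    "log"
    "os"
)

type syncer struct {
    logger    *log.Logger
    available bool // stand-in for the diff's availble field
}

// updateTTL is a hypothetical stand-in for c.client.Agent().UpdateTTL.
func updateTTL(id, output, state string) error {
    return errors.New("connection refused")
}

func (s *syncer) runCheck(id, output, state string) {
    if err := updateTTL(id, output, state); err != nil {
        if s.available {
            // Only log the transition from available to unavailable.
            s.logger.Printf("[DEBUG] consul.sync: error updating ttl check for check %q: %v", id, err)
            s.available = false
        }
        return
    }
    s.available = true
}

func main() {
    s := &syncer{logger: log.New(os.Stderr, "", log.LstdFlags), available: true}
    s.runCheck("check-1", "ok", "passing")
    s.runCheck("check-1", "ok", "passing") // second consecutive failure is not logged again
}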
@@ -20,20 +20,26 @@ var (
    client *docker.Client
)

const (
    // The default check timeout
    defaultCheckTimeout = 30 * time.Second
)

// DockerScriptCheck runs nagios compatible scripts in a docker container and
// provides the check result
type DockerScriptCheck struct {
    id          string
    interval    time.Duration
    containerID string
    id          string        // id of the check
    interval    time.Duration // interval of the check
    timeout     time.Duration // timeout of the check
    containerID string        // container id in which the check will be invoked
    logger      *log.Logger
    cmd         string
    args        []string
    cmd         string   // check command
    args        []string // check command arguments

    dockerEndpoint string
    tlsCert        string
    tlsCa          string
    tlsKey         string
    dockerEndpoint string // docker endpoint
    tlsCert        string // path to tls certificate
    tlsCa          string // path to tls ca
    tlsKey         string // path to tls key
}

// dockerClient creates the client to interact with the docker daemon

@@ -117,15 +123,24 @@ func (d *DockerScriptCheck) Interval() time.Duration {
    return d.interval
}

// Timeout returns the duration after which a check is timed out.
func (d *DockerScriptCheck) Timeout() time.Duration {
    if d.timeout == 0 {
        return defaultCheckTimeout
    }
    return d.timeout
}

// ExecScriptCheck runs a nagios compatible script and returns the check result
type ExecScriptCheck struct {
    id       string
    interval time.Duration
    cmd      string
    args     []string
    taskDir  string
    id       string        // id of the script check
    interval time.Duration // interval at which the check is invoked
    timeout  time.Duration // timeout duration of the check
    cmd      string        // command of the check
    args     []string      // args passed to the check
    taskDir  string        // the root directory of the check

    FSIsolation bool
    FSIsolation bool // indicates whether the check has to be run within a chroot
}

// Run runs an exec script check

@@ -146,6 +161,7 @@ func (e *ExecScriptCheck) Run() *cstructs.CheckResult {
    for {
        select {
        case err := <-errCh:
            endTime := time.Now()
            if err == nil {
                return &cstructs.CheckResult{
                    ExitCode: 0,

@@ -163,8 +179,9 @@ func (e *ExecScriptCheck) Run() *cstructs.CheckResult {
                ExitCode:  exitCode,
                Output:    string(buf.Bytes()),
                Timestamp: ts,
                Duration:  endTime.Sub(ts),
            }
        case <-time.After(30 * time.Second):
        case <-time.After(e.Timeout()):
            errCh <- fmt.Errorf("timed out after waiting 30s")
        }
    }

@@ -180,3 +197,11 @@ func (e *ExecScriptCheck) ID() string {
func (e *ExecScriptCheck) Interval() time.Duration {
    return e.interval
}

// Timeout returns the duration after which a check is timed out.
func (e *ExecScriptCheck) Timeout() time.Duration {
    if e.timeout == 0 {
        return defaultCheckTimeout
    }
    return e.timeout
}
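ExecScriptCheck.Run previously raced the script against a hard-coded 30-second timer; it now races it against e.Timeout(). A standalone sketch of that select-on-result-or-timeout pattern is below. runWithTimeout is a hypothetical helper standing in for the diff's buffered-output plumbing, which also loops and feeds the timeout error back through errCh.

// Sketch: racing a command against a configurable timeout.
package main

import (
    "fmt"
    "os/exec"
    "time"
)

func runWithTimeout(cmd string, args []string, timeout time.Duration) error {
    errCh := make(chan error, 1)
    go func() {
        // exec.Command + Run stands in for the output capture done in the diff.
        errCh <- exec.Command(cmd, args...).Run()
    }()

    select {
    case err := <-errCh:
        return err
    case <-time.After(timeout):
        return fmt.Errorf("timed out after waiting %v", timeout)
    }
}

func main() {
    if err := runWithTimeout("sleep", []string{"2"}, 500*time.Millisecond); err != nil {
        fmt.Println("check failed:", err)
    }
}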
@@ -566,6 +566,7 @@ func (e *UniversalExecutor) createCheck(check *structs.ServiceCheck, checkID str
        return &DockerScriptCheck{
            id:          checkID,
            interval:    check.Interval,
            timeout:     check.Timeout,
            containerID: e.consulCtx.ContainerID,
            logger:      e.logger,
            cmd:         check.Command,

@@ -577,6 +578,7 @@ func (e *UniversalExecutor) createCheck(check *structs.ServiceCheck, checkID str
        return &ExecScriptCheck{
            id:       checkID,
            interval: check.Interval,
            timeout:  check.Timeout,
            cmd:      check.Command,
            args:     check.Args,
            taskDir:  e.taskDir,
@@ -68,8 +68,19 @@ func (r *RecoverableError) Error() string {

// CheckResult encapsulates the result of a check
type CheckResult struct {
    ExitCode int
    Output   string

    // ExitCode is the exit code of the check
    ExitCode int

    // Output is the output of the check script
    Output string

    // Timestamp is the time at which the check was executed
    Timestamp time.Time
    Err       error

    // Duration is the time it took the check to run
    Duration time.Duration

    // Err is the error that a check returned
    Err error
}
@@ -200,7 +200,7 @@ func (r *TaskRunner) setState(state string, event *structs.TaskEvent) {
// setTaskEnv sets the task environment. It returns an error if it could not be
// created.
func (r *TaskRunner) setTaskEnv() error {
    taskEnv, err := driver.GetTaskEnv(r.ctx.AllocDir, r.config.Node, r.task, r.alloc)
    taskEnv, err := driver.GetTaskEnv(r.ctx.AllocDir, r.config.Node, r.task.Copy(), r.alloc)
    if err != nil {
        return err
    }
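setTaskEnv now hands GetTaskEnv a copy of the task rather than the shared pointer, so any interpolation done while building the environment cannot mutate the task stored on the TaskRunner. A small sketch of that defensive-copy idea follows, using a hypothetical Task type and buildEnv helper rather than the real structs.Task and driver.GetTaskEnv.

// Sketch: interpolate into a copy so the canonical task is left untouched.
package main

import (
    "fmt"
    "strings"
)

type Task struct {
    Name string
    Env  map[string]string
}

// Copy returns a deep copy, playing the role of structs.Task.Copy in the diff.
func (t *Task) Copy() *Task {
    env := make(map[string]string, len(t.Env))
    for k, v := range t.Env {
        env[k] = v
    }
    return &Task{Name: t.Name, Env: env}
}

// buildEnv is a hypothetical stand-in for driver.GetTaskEnv: it rewrites
// placeholders in place on whichever task it is handed.
func buildEnv(t *Task, nodeName string) map[string]string {
    for k, v := range t.Env {
        t.Env[k] = strings.ReplaceAll(v, "${node.name}", nodeName)
    }
    return t.Env
}

func main() {
    task := &Task{Name: "web", Env: map[string]string{"ADDR": "${node.name}:8080"}}
    env := buildEnv(task.Copy(), "client-1") // the original task keeps its template
    fmt.Println(env["ADDR"], task.Env["ADDR"])
}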
@@ -339,7 +339,12 @@ func (c *Command) checkpointResults(results *checkpoint.CheckResponse, err error
        return
    }
    if results.Outdated {
        c.Ui.Error(fmt.Sprintf("Newer Nomad version available: %s", results.CurrentVersion))
        versionStr := c.Version
        if c.VersionPrerelease != "" {
            versionStr += fmt.Sprintf("-%s", c.VersionPrerelease)
        }

        c.Ui.Error(fmt.Sprintf("Newer Nomad version available: %s (currently running: %s)", results.CurrentVersion, versionStr))
    }
    for _, alert := range results.Alerts {
        switch alert.Level {
@@ -8,14 +8,19 @@ import (
// Flatten takes an object and returns a flat map of the object. The keys of the
// map is the path of the field names until a primitive field is reached and the
// value is a string representation of the terminal field.
func Flatten(obj interface{}) map[string]string {
func Flatten(obj interface{}, filter []string, primitiveOnly bool) map[string]string {
    flat := make(map[string]string)
    v := reflect.ValueOf(obj)
    if !v.IsValid() {
        return nil
    }

    flatten("", v, flat)
    flatten("", v, primitiveOnly, false, flat)
    for _, f := range filter {
        if _, ok := flat[f]; ok {
            delete(flat, f)
        }
    }
    return flat
}

@@ -23,7 +28,7 @@ func Flatten(obj interface{}) map[string]string {
// passed value. The results are stored into the output map and the keys are
// the fields prepended with the passed prefix.
// XXX: A current restriction is that maps only support string keys.
func flatten(prefix string, v reflect.Value, output map[string]string) {
func flatten(prefix string, v reflect.Value, primitiveOnly, enteredStruct bool, output map[string]string) {
    switch v.Kind() {
    case reflect.Bool:
        output[prefix] = fmt.Sprintf("%v", v.Bool())

@@ -40,11 +45,15 @@ func flatten(prefix string, v reflect.Value, output map[string]string) {
    case reflect.Invalid:
        output[prefix] = "nil"
    case reflect.Ptr:
        if primitiveOnly && enteredStruct {
            return
        }

        e := v.Elem()
        if !e.IsValid() {
            output[prefix] = "nil"
        }
        flatten(prefix, e, output)
        flatten(prefix, e, primitiveOnly, enteredStruct, output)
    case reflect.Map:
        for _, k := range v.MapKeys() {
            if k.Kind() == reflect.Interface {

@@ -55,9 +64,14 @@ func flatten(prefix string, v reflect.Value, output map[string]string) {
                panic(fmt.Sprintf("%q: map key is not string: %s", prefix, k))
            }

            flatten(getSubPrefix(prefix, k.String()), v.MapIndex(k), output)
            flatten(getSubKeyPrefix(prefix, k.String()), v.MapIndex(k), primitiveOnly, enteredStruct, output)
        }
    case reflect.Struct:
        if primitiveOnly && enteredStruct {
            return
        }
        enteredStruct = true

        t := v.Type()
        for i := 0; i < v.NumField(); i++ {
            name := t.Field(i).Name

@@ -66,22 +80,30 @@ func flatten(prefix string, v reflect.Value, output map[string]string) {
                val = val.Elem()
            }

            flatten(getSubPrefix(prefix, name), val, output)
            flatten(getSubPrefix(prefix, name), val, primitiveOnly, enteredStruct, output)
        }
    case reflect.Interface:
        if primitiveOnly {
            return
        }

        e := v.Elem()
        if !e.IsValid() {
            output[prefix] = "nil"
            return
        }
        flatten(prefix, e, output)
        flatten(prefix, e, primitiveOnly, enteredStruct, output)
    case reflect.Array, reflect.Slice:
        if primitiveOnly {
            return
        }

        if v.Kind() == reflect.Slice && v.IsNil() {
            output[prefix] = "nil"
            return
        }
        for i := 0; i < v.Len(); i++ {
            flatten(fmt.Sprintf("%s[%d]", prefix, i), v.Index(i), output)
            flatten(fmt.Sprintf("%s[%d]", prefix, i), v.Index(i), primitiveOnly, enteredStruct, output)
        }
    default:
        panic(fmt.Sprintf("prefix %q; unsupported type %v", prefix, v.Kind()))

@@ -99,3 +121,15 @@ func getSubPrefix(curPrefix, subField string) string {
    }
    return newPrefix
}

// getSubKeyPrefix takes the current prefix and the next subfield and returns an
// appropriate prefix for a map field.
func getSubKeyPrefix(curPrefix, subField string) string {
    newPrefix := ""
    if curPrefix != "" {
        newPrefix = fmt.Sprintf("%s[%s]", curPrefix, subField)
    } else {
        newPrefix = fmt.Sprintf("%s", subField)
    }
    return newPrefix
}
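Flatten now takes a filter of keys to drop from the result and a primitiveOnly flag that keeps only top-level primitive fields, refusing to descend into nested structs, interfaces, and slices. A small usage sketch against the new signature is below; the Network and Port types are illustrative only, and it assumes the helper/flatmap package from this commit is importable. The expected output in the comments follows the semantics shown in the tests above.

// Sketch: exercising the new Flatten(obj, filter, primitiveOnly) signature.
package main

import (
    "fmt"

    "github.com/hashicorp/nomad/helper/flatmap"
)

type Port struct {
    Label string
    Value int
}

type Network struct {
    Device string
    Ports  []Port
}

func main() {
    n := &Network{Device: "eth0", Ports: []Port{{Label: "http", Value: 8080}}}

    // Full walk: nested slices and structs are expanded into flat keys.
    full := flatmap.Flatten(n, nil, false)
    fmt.Println(full) // map[Device:eth0 Ports[0].Label:http Ports[0].Value:8080]

    // primitiveOnly stops at the first nested struct/slice; filter drops keys.
    top := flatmap.Flatten(n, []string{"Device"}, true)
    fmt.Println(top) // map[]
}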
@@ -40,8 +40,10 @@ type interfaceHolder struct {

func TestFlatMap(t *testing.T) {
    cases := []struct {
        Input    interface{}
        Expected map[string]string
        Input         interface{}
        Expected      map[string]string
        Filter        []string
        PrimitiveOnly bool
    }{
        {
            Input: nil,

@@ -85,6 +87,42 @@ func TestFlatMap(t *testing.T) {
                "s":    "foobar",
            },
        },
        {
            Input: &simpleTypes{
                b:    true,
                i:    -10,
                i8:   88,
                i16:  1616,
                i32:  3232,
                i64:  6464,
                ui:   10,
                ui8:  88,
                ui16: 1616,
                ui32: 3232,
                ui64: 6464,
                f32:  3232,
                f64:  6464,
                c64:  64,
                c128: 128,
                s:    "foobar",
            },
            Filter: []string{"i", "i8", "i16"},
            Expected: map[string]string{
                "b":    "true",
                "i32":  "3232",
                "i64":  "6464",
                "ui":   "10",
                "ui8":  "88",
                "ui16": "1616",
                "ui32": "3232",
                "ui64": "6464",
                "f32":  "3232",
                "f64":  "6464",
                "c64":  "(64+0i)",
                "c128": "(128+0i)",
                "s":    "foobar",
            },
        },
        {
            Input: &linkedList{
                value: "foo",

@@ -99,6 +137,32 @@ func TestFlatMap(t *testing.T) {
                "next.next": "nil",
            },
        },
        {
            Input: &linkedList{
                value: "foo",
                next: &linkedList{
                    value: "bar",
                    next:  nil,
                },
            },
            PrimitiveOnly: true,
            Expected: map[string]string{
                "value": "foo",
            },
        },
        {
            Input: linkedList{
                value: "foo",
                next: &linkedList{
                    value: "bar",
                    next:  nil,
                },
            },
            PrimitiveOnly: true,
            Expected: map[string]string{
                "value": "foo",
            },
        },
        {
            Input: &containers{
                myslice: []int{1, 2},

@@ -112,14 +176,29 @@ func TestFlatMap(t *testing.T) {
                },
            },
            Expected: map[string]string{
                "myslice[0]":      "1",
                "myslice[1]":      "2",
                "mymap.foo.value": "l1",
                "mymap.foo.next":  "nil",
                "mymap.bar.value": "l2",
                "mymap.bar.next":  "nil",
                "myslice[0]":       "1",
                "myslice[1]":       "2",
                "mymap[foo].value": "l1",
                "mymap[foo].next":  "nil",
                "mymap[bar].value": "l2",
                "mymap[bar].next":  "nil",
            },
        },
        {
            Input: &containers{
                myslice: []int{1, 2},
                mymap: map[string]linkedList{
                    "foo": linkedList{
                        value: "l1",
                    },
                    "bar": linkedList{
                        value: "l2",
                    },
                },
            },
            PrimitiveOnly: true,
            Expected:      map[string]string{},
        },
        {
            Input: &interfaceHolder{
                value: &linkedList{

@@ -132,10 +211,20 @@ func TestFlatMap(t *testing.T) {
                "value.next": "nil",
            },
        },
        {
            Input: &interfaceHolder{
                value: &linkedList{
                    value: "foo",
                    next:  nil,
                },
            },
            PrimitiveOnly: true,
            Expected:      map[string]string{},
        },
    }

    for i, c := range cases {
        act := Flatten(c.Input)
        act := Flatten(c.Input, c.Filter, c.PrimitiveOnly)
        if !reflect.DeepEqual(act, c.Expected) {
            t.Fatalf("case %d: got %#v; want %#v", i+1, act, c.Expected)
        }
@@ -0,0 +1,944 @@
package structs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/nomad/helper/flatmap"
|
||||
"github.com/mitchellh/hashstructure"
|
||||
)
|
||||
|
||||
const (
|
||||
// AnnotationForcesDestructiveUpdate marks a diff as causing a destructive
|
||||
// update.
|
||||
AnnotationForcesDestructiveUpdate = "forces create/destroy update"
|
||||
|
||||
// AnnotationForcesInplaceUpdate marks a diff as causing an in-place
|
||||
// update.
|
||||
AnnotationForcesInplaceUpdate = "forces in-place update"
|
||||
)
|
||||
|
||||
// UpdateTypes denote the type of update to occur against the task group.
|
||||
const (
|
||||
UpdateTypeIgnore = "ignore"
|
||||
UpdateTypeCreate = "create"
|
||||
UpdateTypeDestroy = "destroy"
|
||||
UpdateTypeMigrate = "migrate"
|
||||
UpdateTypeInplaceUpdate = "in-place update"
|
||||
UpdateTypeDestructiveUpdate = "create/destroy update"
|
||||
)
|
||||
|
||||
// DiffType denotes the type of a diff object.
|
||||
type DiffType string
|
||||
|
||||
var (
|
||||
DiffTypeNone DiffType = "None"
|
||||
DiffTypeAdded DiffType = "Added"
|
||||
DiffTypeDeleted DiffType = "Deleted"
|
||||
DiffTypeEdited DiffType = "Edited"
|
||||
)
|
||||
|
||||
func (d DiffType) Less(other DiffType) bool {
|
||||
// Edited > Added > Deleted > None
|
||||
// But we do a reverse sort
|
||||
if d == other {
|
||||
return false
|
||||
}
|
||||
|
||||
if d == DiffTypeEdited {
|
||||
return true
|
||||
} else if other == DiffTypeEdited {
|
||||
return false
|
||||
} else if d == DiffTypeAdded {
|
||||
return true
|
||||
} else if other == DiffTypeAdded {
|
||||
return false
|
||||
} else if d == DiffTypeDeleted {
|
||||
return true
|
||||
} else if other == DiffTypeDeleted {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// JobDiff contains the diff of two jobs.
|
||||
type JobDiff struct {
|
||||
Type DiffType
|
||||
ID string
|
||||
Fields []*FieldDiff
|
||||
Objects []*ObjectDiff
|
||||
TaskGroups []*TaskGroupDiff
|
||||
}
|
||||
|
||||
// Diff returns a diff of two jobs and a potential error if the Jobs are not
|
||||
// diffable. If contextual diff is enabled, objects within the job will contain
|
||||
// field information even if unchanged.
|
||||
func (j *Job) Diff(other *Job, contextual bool) (*JobDiff, error) {
|
||||
diff := &JobDiff{Type: DiffTypeNone}
|
||||
var oldPrimitiveFlat, newPrimitiveFlat map[string]string
|
||||
filter := []string{"ID", "Status", "StatusDescription", "CreateIndex", "ModifyIndex", "JobModifyIndex"}
|
||||
|
||||
if j == nil && other == nil {
|
||||
return diff, nil
|
||||
} else if j == nil {
|
||||
j = &Job{}
|
||||
diff.Type = DiffTypeAdded
|
||||
newPrimitiveFlat = flatmap.Flatten(other, filter, true)
|
||||
diff.ID = other.ID
|
||||
} else if other == nil {
|
||||
other = &Job{}
|
||||
diff.Type = DiffTypeDeleted
|
||||
oldPrimitiveFlat = flatmap.Flatten(j, filter, true)
|
||||
diff.ID = j.ID
|
||||
} else {
|
||||
if !reflect.DeepEqual(j, other) {
|
||||
diff.Type = DiffTypeEdited
|
||||
}
|
||||
|
||||
if j.ID != other.ID {
|
||||
return nil, fmt.Errorf("can not diff jobs with different IDs: %q and %q", j.ID, other.ID)
|
||||
}
|
||||
|
||||
oldPrimitiveFlat = flatmap.Flatten(j, filter, true)
|
||||
newPrimitiveFlat = flatmap.Flatten(other, filter, true)
|
||||
diff.ID = other.ID
|
||||
}
|
||||
|
||||
// Diff the primitive fields.
|
||||
diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, false)
|
||||
|
||||
// Datacenters diff
|
||||
if setDiff := stringSetDiff(j.Datacenters, other.Datacenters, "Datacenters"); setDiff != nil {
|
||||
diff.Objects = append(diff.Objects, setDiff)
|
||||
}
|
||||
|
||||
// Constraints diff
|
||||
conDiff := primitiveObjectSetDiff(
|
||||
interfaceSlice(j.Constraints),
|
||||
interfaceSlice(other.Constraints),
|
||||
[]string{"str"},
|
||||
"Constraint",
|
||||
contextual)
|
||||
if conDiff != nil {
|
||||
diff.Objects = append(diff.Objects, conDiff...)
|
||||
}
|
||||
|
||||
// Task groups diff
|
||||
tgs, err := taskGroupDiffs(j.TaskGroups, other.TaskGroups, contextual)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
diff.TaskGroups = tgs
|
||||
|
||||
// Update diff
|
||||
if uDiff := primitiveObjectDiff(j.Update, other.Update, nil, "Update", contextual); uDiff != nil {
|
||||
diff.Objects = append(diff.Objects, uDiff)
|
||||
}
|
||||
|
||||
// Periodic diff
|
||||
if pDiff := primitiveObjectDiff(j.Periodic, other.Periodic, nil, "Periodic", contextual); pDiff != nil {
|
||||
diff.Objects = append(diff.Objects, pDiff)
|
||||
}
|
||||
|
||||
return diff, nil
|
||||
}
|
||||
|
||||
func (j *JobDiff) GoString() string {
|
||||
out := fmt.Sprintf("Job %q (%s):\n", j.ID, j.Type)
|
||||
|
||||
for _, f := range j.Fields {
|
||||
out += fmt.Sprintf("%#v\n", f)
|
||||
}
|
||||
|
||||
for _, o := range j.Objects {
|
||||
out += fmt.Sprintf("%#v\n", o)
|
||||
}
|
||||
|
||||
for _, tg := range j.TaskGroups {
|
||||
out += fmt.Sprintf("%#v\n", tg)
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// TaskGroupDiff contains the diff of two task groups.
|
||||
type TaskGroupDiff struct {
|
||||
Type DiffType
|
||||
Name string
|
||||
Fields []*FieldDiff
|
||||
Objects []*ObjectDiff
|
||||
Tasks []*TaskDiff
|
||||
Updates map[string]int
|
||||
}
|
||||
|
||||
// Diff returns a diff of two task groups. If contextual diff is enabled,
|
||||
// objects' fields will be stored even if no diff occured as long as one field
|
||||
// changed.
|
||||
func (tg *TaskGroup) Diff(other *TaskGroup, contextual bool) (*TaskGroupDiff, error) {
|
||||
diff := &TaskGroupDiff{Type: DiffTypeNone}
|
||||
var oldPrimitiveFlat, newPrimitiveFlat map[string]string
|
||||
filter := []string{"Name"}
|
||||
|
||||
if tg == nil && other == nil {
|
||||
return diff, nil
|
||||
} else if tg == nil {
|
||||
tg = &TaskGroup{}
|
||||
diff.Type = DiffTypeAdded
|
||||
diff.Name = other.Name
|
||||
newPrimitiveFlat = flatmap.Flatten(other, filter, true)
|
||||
} else if other == nil {
|
||||
other = &TaskGroup{}
|
||||
diff.Type = DiffTypeDeleted
|
||||
diff.Name = tg.Name
|
||||
oldPrimitiveFlat = flatmap.Flatten(tg, filter, true)
|
||||
} else {
|
||||
if !reflect.DeepEqual(tg, other) {
|
||||
diff.Type = DiffTypeEdited
|
||||
}
|
||||
if tg.Name != other.Name {
|
||||
return nil, fmt.Errorf("can not diff task groups with different names: %q and %q", tg.Name, other.Name)
|
||||
}
|
||||
diff.Name = other.Name
|
||||
oldPrimitiveFlat = flatmap.Flatten(tg, filter, true)
|
||||
newPrimitiveFlat = flatmap.Flatten(other, filter, true)
|
||||
}
|
||||
|
||||
// Diff the primitive fields.
|
||||
diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, false)
|
||||
|
||||
// Constraints diff
|
||||
conDiff := primitiveObjectSetDiff(
|
||||
interfaceSlice(tg.Constraints),
|
||||
interfaceSlice(other.Constraints),
|
||||
[]string{"str"},
|
||||
"Constraint",
|
||||
contextual)
|
||||
if conDiff != nil {
|
||||
diff.Objects = append(diff.Objects, conDiff...)
|
||||
}
|
||||
|
||||
// Restart policy diff
|
||||
rDiff := primitiveObjectDiff(tg.RestartPolicy, other.RestartPolicy, nil, "RestartPolicy", contextual)
|
||||
if rDiff != nil {
|
||||
diff.Objects = append(diff.Objects, rDiff)
|
||||
}
|
||||
|
||||
// Tasks diff
|
||||
tasks, err := taskDiffs(tg.Tasks, other.Tasks, contextual)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
diff.Tasks = tasks
|
||||
|
||||
return diff, nil
|
||||
}
|
||||
|
||||
func (tg *TaskGroupDiff) GoString() string {
|
||||
out := fmt.Sprintf("Group %q (%s):\n", tg.Name, tg.Type)
|
||||
|
||||
if len(tg.Updates) != 0 {
|
||||
out += "Updates {\n"
|
||||
for update, count := range tg.Updates {
|
||||
out += fmt.Sprintf("%d %s\n", count, update)
|
||||
}
|
||||
out += "}\n"
|
||||
}
|
||||
|
||||
for _, f := range tg.Fields {
|
||||
out += fmt.Sprintf("%#v\n", f)
|
||||
}
|
||||
|
||||
for _, o := range tg.Objects {
|
||||
out += fmt.Sprintf("%#v\n", o)
|
||||
}
|
||||
|
||||
for _, t := range tg.Tasks {
|
||||
out += fmt.Sprintf("%#v\n", t)
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// TaskGroupDiffs diffs two sets of task groups. If contextual diff is enabled,
|
||||
// objects' fields will be stored even if no diff occured as long as one field
|
||||
// changed.
|
||||
func taskGroupDiffs(old, new []*TaskGroup, contextual bool) ([]*TaskGroupDiff, error) {
|
||||
oldMap := make(map[string]*TaskGroup, len(old))
|
||||
newMap := make(map[string]*TaskGroup, len(new))
|
||||
for _, o := range old {
|
||||
oldMap[o.Name] = o
|
||||
}
|
||||
for _, n := range new {
|
||||
newMap[n.Name] = n
|
||||
}
|
||||
|
||||
var diffs []*TaskGroupDiff
|
||||
for name, oldGroup := range oldMap {
|
||||
// Diff the same, deleted and edited
|
||||
diff, err := oldGroup.Diff(newMap[name], contextual)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
diffs = append(diffs, diff)
|
||||
}
|
||||
|
||||
for name, newGroup := range newMap {
|
||||
// Diff the added
|
||||
if old, ok := oldMap[name]; !ok {
|
||||
diff, err := old.Diff(newGroup, contextual)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
diffs = append(diffs, diff)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(TaskGroupDiffs(diffs))
|
||||
return diffs, nil
|
||||
}
|
||||
|
||||
// For sorting TaskGroupDiffs
|
||||
type TaskGroupDiffs []*TaskGroupDiff
|
||||
|
||||
func (tg TaskGroupDiffs) Len() int { return len(tg) }
|
||||
func (tg TaskGroupDiffs) Swap(i, j int) { tg[i], tg[j] = tg[j], tg[i] }
|
||||
func (tg TaskGroupDiffs) Less(i, j int) bool { return tg[i].Name < tg[j].Name }
|
||||
|
||||
// TaskDiff contains the diff of two Tasks
|
||||
type TaskDiff struct {
|
||||
Type DiffType
|
||||
Name string
|
||||
Fields []*FieldDiff
|
||||
Objects []*ObjectDiff
|
||||
Annotations []string
|
||||
}
|
||||
|
||||
// Diff returns a diff of two tasks. If contextual diff is enabled, objects
|
||||
// within the task will contain field information even if unchanged.
|
||||
func (t *Task) Diff(other *Task, contextual bool) (*TaskDiff, error) {
|
||||
diff := &TaskDiff{Type: DiffTypeNone}
|
||||
var oldPrimitiveFlat, newPrimitiveFlat map[string]string
|
||||
filter := []string{"Name", "Config"}
|
||||
|
||||
if t == nil && other == nil {
|
||||
return diff, nil
|
||||
} else if t == nil {
|
||||
t = &Task{}
|
||||
diff.Type = DiffTypeAdded
|
||||
diff.Name = other.Name
|
||||
newPrimitiveFlat = flatmap.Flatten(other, filter, true)
|
||||
} else if other == nil {
|
||||
other = &Task{}
|
||||
diff.Type = DiffTypeDeleted
|
||||
diff.Name = t.Name
|
||||
oldPrimitiveFlat = flatmap.Flatten(t, filter, true)
|
||||
} else {
|
||||
if !reflect.DeepEqual(t, other) {
|
||||
diff.Type = DiffTypeEdited
|
||||
}
|
||||
if t.Name != other.Name {
|
||||
return nil, fmt.Errorf("can not diff tasks with different names: %q and %q", t.Name, other.Name)
|
||||
}
|
||||
diff.Name = other.Name
|
||||
oldPrimitiveFlat = flatmap.Flatten(t, filter, true)
|
||||
newPrimitiveFlat = flatmap.Flatten(other, filter, true)
|
||||
}
|
||||
|
||||
// Diff the primitive fields.
|
||||
diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, false)
|
||||
|
||||
// Constraints diff
|
||||
conDiff := primitiveObjectSetDiff(
|
||||
interfaceSlice(t.Constraints),
|
||||
interfaceSlice(other.Constraints),
|
||||
[]string{"str"},
|
||||
"Constraint",
|
||||
contextual)
|
||||
if conDiff != nil {
|
||||
diff.Objects = append(diff.Objects, conDiff...)
|
||||
}
|
||||
|
||||
// Config diff
|
||||
if cDiff := configDiff(t.Config, other.Config, contextual); cDiff != nil {
|
||||
diff.Objects = append(diff.Objects, cDiff)
|
||||
}
|
||||
|
||||
// Resources diff
|
||||
if rDiff := t.Resources.Diff(other.Resources, contextual); rDiff != nil {
|
||||
diff.Objects = append(diff.Objects, rDiff)
|
||||
}
|
||||
|
||||
// LogConfig diff
|
||||
lDiff := primitiveObjectDiff(t.LogConfig, other.LogConfig, nil, "LogConfig", contextual)
|
||||
if lDiff != nil {
|
||||
diff.Objects = append(diff.Objects, lDiff)
|
||||
}
|
||||
|
||||
// Artifacts diff
|
||||
diffs := primitiveObjectSetDiff(
|
||||
interfaceSlice(t.Artifacts),
|
||||
interfaceSlice(other.Artifacts),
|
||||
nil,
|
||||
"Artifact",
|
||||
contextual)
|
||||
if diffs != nil {
|
||||
diff.Objects = append(diff.Objects, diffs...)
|
||||
}
|
||||
|
||||
return diff, nil
|
||||
}
|
||||
|
||||
func (t *TaskDiff) GoString() string {
|
||||
var out string
|
||||
if len(t.Annotations) == 0 {
|
||||
out = fmt.Sprintf("Task %q (%s):\n", t.Name, t.Type)
|
||||
} else {
|
||||
out = fmt.Sprintf("Task %q (%s) (%s):\n", t.Name, t.Type, strings.Join(t.Annotations, ","))
|
||||
}
|
||||
|
||||
for _, f := range t.Fields {
|
||||
out += fmt.Sprintf("%#v\n", f)
|
||||
}
|
||||
|
||||
for _, o := range t.Objects {
|
||||
out += fmt.Sprintf("%#v\n", o)
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// taskDiffs diffs a set of tasks. If contextual diff is enabled, unchanged
|
||||
// fields within objects nested in the tasks will be returned.
|
||||
func taskDiffs(old, new []*Task, contextual bool) ([]*TaskDiff, error) {
|
||||
oldMap := make(map[string]*Task, len(old))
|
||||
newMap := make(map[string]*Task, len(new))
|
||||
for _, o := range old {
|
||||
oldMap[o.Name] = o
|
||||
}
|
||||
for _, n := range new {
|
||||
newMap[n.Name] = n
|
||||
}
|
||||
|
||||
var diffs []*TaskDiff
|
||||
for name, oldGroup := range oldMap {
|
||||
// Diff the same, deleted and edited
|
||||
diff, err := oldGroup.Diff(newMap[name], contextual)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
diffs = append(diffs, diff)
|
||||
}
|
||||
|
||||
for name, newGroup := range newMap {
|
||||
// Diff the added
|
||||
if old, ok := oldMap[name]; !ok {
|
||||
diff, err := old.Diff(newGroup, contextual)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
diffs = append(diffs, diff)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(TaskDiffs(diffs))
|
||||
return diffs, nil
|
||||
}
|
||||
|
||||
// For sorting TaskDiffs
|
||||
type TaskDiffs []*TaskDiff
|
||||
|
||||
func (t TaskDiffs) Len() int { return len(t) }
|
||||
func (t TaskDiffs) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
|
||||
func (t TaskDiffs) Less(i, j int) bool { return t[i].Name < t[j].Name }
|
||||
|
||||
// Diff returns a diff of two resource objects. If contextual diff is enabled,
|
||||
// non-changed fields will still be returned.
|
||||
func (r *Resources) Diff(other *Resources, contextual bool) *ObjectDiff {
|
||||
diff := &ObjectDiff{Type: DiffTypeNone, Name: "Resources"}
|
||||
var oldPrimitiveFlat, newPrimitiveFlat map[string]string
|
||||
|
||||
if reflect.DeepEqual(r, other) {
|
||||
return nil
|
||||
} else if r == nil {
|
||||
r = &Resources{}
|
||||
diff.Type = DiffTypeAdded
|
||||
newPrimitiveFlat = flatmap.Flatten(other, nil, true)
|
||||
} else if other == nil {
|
||||
other = &Resources{}
|
||||
diff.Type = DiffTypeDeleted
|
||||
oldPrimitiveFlat = flatmap.Flatten(r, nil, true)
|
||||
} else {
|
||||
diff.Type = DiffTypeEdited
|
||||
oldPrimitiveFlat = flatmap.Flatten(r, nil, true)
|
||||
newPrimitiveFlat = flatmap.Flatten(other, nil, true)
|
||||
}
|
||||
|
||||
// Diff the primitive fields.
|
||||
diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual)
|
||||
|
||||
// Network Resources diff
|
||||
if nDiffs := networkResourceDiffs(r.Networks, other.Networks, contextual); nDiffs != nil {
|
||||
diff.Objects = append(diff.Objects, nDiffs...)
|
||||
}
|
||||
|
||||
return diff
|
||||
}
|
||||
|
||||
// Diff returns a diff of two network resources. If contextual diff is enabled,
|
||||
// non-changed fields will still be returned.
|
||||
func (r *NetworkResource) Diff(other *NetworkResource, contextual bool) *ObjectDiff {
|
||||
diff := &ObjectDiff{Type: DiffTypeNone, Name: "Network"}
|
||||
var oldPrimitiveFlat, newPrimitiveFlat map[string]string
|
||||
filter := []string{"Device", "CIDR", "IP"}
|
||||
|
||||
if reflect.DeepEqual(r, other) {
|
||||
return nil
|
||||
} else if r == nil {
|
||||
r = &NetworkResource{}
|
||||
diff.Type = DiffTypeAdded
|
||||
newPrimitiveFlat = flatmap.Flatten(other, filter, true)
|
||||
} else if other == nil {
|
||||
other = &NetworkResource{}
|
||||
diff.Type = DiffTypeDeleted
|
||||
oldPrimitiveFlat = flatmap.Flatten(r, filter, true)
|
||||
} else {
|
||||
diff.Type = DiffTypeEdited
|
||||
oldPrimitiveFlat = flatmap.Flatten(r, filter, true)
|
||||
newPrimitiveFlat = flatmap.Flatten(other, filter, true)
|
||||
}
|
||||
|
||||
// Diff the primitive fields.
|
||||
diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual)
|
||||
|
||||
// Port diffs
|
||||
resPorts := portDiffs(r.ReservedPorts, other.ReservedPorts, false, contextual)
|
||||
dynPorts := portDiffs(r.DynamicPorts, other.DynamicPorts, true, contextual)
|
||||
if resPorts != nil {
|
||||
diff.Objects = append(diff.Objects, resPorts...)
|
||||
}
|
||||
if dynPorts != nil {
|
||||
diff.Objects = append(diff.Objects, dynPorts...)
|
||||
}
|
||||
|
||||
return diff
|
||||
}
|
||||
|
||||
// networkResourceDiffs diffs a set of NetworkResources. If contextual diff is enabled,
|
||||
// non-changed fields will still be returned.
|
||||
func networkResourceDiffs(old, new []*NetworkResource, contextual bool) []*ObjectDiff {
|
||||
makeSet := func(objects []*NetworkResource) map[string]*NetworkResource {
|
||||
objMap := make(map[string]*NetworkResource, len(objects))
|
||||
for _, obj := range objects {
|
||||
hash, err := hashstructure.Hash(obj, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
objMap[fmt.Sprintf("%d", hash)] = obj
|
||||
}
|
||||
|
||||
return objMap
|
||||
}
|
||||
|
||||
oldSet := makeSet(old)
|
||||
newSet := makeSet(new)
|
||||
|
||||
var diffs []*ObjectDiff
|
||||
for k, oldV := range oldSet {
|
||||
if newV, ok := newSet[k]; !ok {
|
||||
if diff := oldV.Diff(newV, contextual); diff != nil {
|
||||
diffs = append(diffs, diff)
|
||||
}
|
||||
}
|
||||
}
|
||||
for k, newV := range newSet {
|
||||
if oldV, ok := oldSet[k]; !ok {
|
||||
if diff := oldV.Diff(newV, contextual); diff != nil {
|
||||
diffs = append(diffs, diff)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(ObjectDiffs(diffs))
|
||||
return diffs
|
||||
|
||||
}
|
||||
|
||||
// portDiffs returns the diff of two sets of ports. The dynamic flag marks the
|
||||
// set of ports as being Dynamic ports versus Static ports. If contextual diff is enabled,
|
||||
// non-changed fields will still be returned.
|
||||
func portDiffs(old, new []Port, dynamic bool, contextual bool) []*ObjectDiff {
|
||||
makeSet := func(ports []Port) map[string]Port {
|
||||
portMap := make(map[string]Port, len(ports))
|
||||
for _, port := range ports {
|
||||
portMap[port.Label] = port
|
||||
}
|
||||
|
||||
return portMap
|
||||
}
|
||||
|
||||
oldPorts := makeSet(old)
|
||||
newPorts := makeSet(new)
|
||||
|
||||
var filter []string
|
||||
name := "Static Port"
|
||||
if dynamic {
|
||||
filter = []string{"Value"}
|
||||
name = "Dynamic Port"
|
||||
}
|
||||
|
||||
var diffs []*ObjectDiff
|
||||
for portLabel, oldPort := range oldPorts {
|
||||
// Diff the same, deleted and edited
|
||||
if newPort, ok := newPorts[portLabel]; ok {
|
||||
diff := primitiveObjectDiff(oldPort, newPort, filter, name, contextual)
|
||||
if diff != nil {
|
||||
diffs = append(diffs, diff)
|
||||
}
|
||||
} else {
|
||||
diff := primitiveObjectDiff(oldPort, nil, filter, name, contextual)
|
||||
if diff != nil {
|
||||
diffs = append(diffs, diff)
|
||||
}
|
||||
}
|
||||
}
|
||||
for label, newPort := range newPorts {
|
||||
// Diff the added
|
||||
if _, ok := oldPorts[label]; !ok {
|
||||
diff := primitiveObjectDiff(nil, newPort, filter, name, contextual)
|
||||
if diff != nil {
|
||||
diffs = append(diffs, diff)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(ObjectDiffs(diffs))
|
||||
return diffs
|
||||
|
||||
}
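// Illustrative sketch of the distinction above (the port labels and values are
// made-up examples): adding a reserved port {Label: "http", Value: 80} produces
// a "Static Port" Added object whose fields include Value, whereas dynamic
// ports are diffed with the "Value" field filtered out, so a dynamic port only
// shows up as a "Dynamic Port" diff when its set of labels changes.
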
|
||||
|
||||
// configDiff returns the diff of two Task Config objects. If contextual diff is
// enabled, all fields will be returned, even if no diff occurred.
func configDiff(old, new map[string]interface{}, contextual bool) *ObjectDiff {
	diff := &ObjectDiff{Type: DiffTypeNone, Name: "Config"}
	if reflect.DeepEqual(old, new) {
		return nil
	} else if len(old) == 0 {
		diff.Type = DiffTypeAdded
	} else if len(new) == 0 {
		diff.Type = DiffTypeDeleted
	} else {
		diff.Type = DiffTypeEdited
	}

	// Diff the primitive fields.
	oldPrimitiveFlat := flatmap.Flatten(old, nil, false)
	newPrimitiveFlat := flatmap.Flatten(new, nil, false)
	diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual)
	return diff
}

// ObjectDiff contains the diff of two generic objects.
|
||||
type ObjectDiff struct {
|
||||
Type DiffType
|
||||
Name string
|
||||
Fields []*FieldDiff
|
||||
Objects []*ObjectDiff
|
||||
}
|
||||
|
||||
func (o *ObjectDiff) GoString() string {
|
||||
out := fmt.Sprintf("\n%q (%s) {\n", o.Name, o.Type)
|
||||
for _, f := range o.Fields {
|
||||
out += fmt.Sprintf("%#v\n", f)
|
||||
}
|
||||
for _, o := range o.Objects {
|
||||
out += fmt.Sprintf("%#v\n", o)
|
||||
}
|
||||
out += "}"
|
||||
return out
|
||||
}
|
||||
|
||||
func (o *ObjectDiff) Less(other *ObjectDiff) bool {
|
||||
if reflect.DeepEqual(o, other) {
|
||||
return false
|
||||
} else if other == nil {
|
||||
return false
|
||||
} else if o == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
if o.Name != other.Name {
|
||||
return o.Name < other.Name
|
||||
}
|
||||
|
||||
if o.Type != other.Type {
|
||||
return o.Type.Less(other.Type)
|
||||
}
|
||||
|
||||
if lO, lOther := len(o.Fields), len(other.Fields); lO != lOther {
|
||||
return lO < lOther
|
||||
}
|
||||
|
||||
if lO, lOther := len(o.Objects), len(other.Objects); lO != lOther {
|
||||
return lO < lOther
|
||||
}
|
||||
|
||||
// Check each field
|
||||
sort.Sort(FieldDiffs(o.Fields))
|
||||
sort.Sort(FieldDiffs(other.Fields))
|
||||
|
||||
for i, oV := range o.Fields {
|
||||
if oV.Less(other.Fields[i]) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Check each object
|
||||
sort.Sort(ObjectDiffs(o.Objects))
|
||||
sort.Sort(ObjectDiffs(other.Objects))
|
||||
for i, oV := range o.Objects {
|
||||
if oV.Less(other.Objects[i]) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// For sorting ObjectDiffs
|
||||
type ObjectDiffs []*ObjectDiff
|
||||
|
||||
func (o ObjectDiffs) Len() int { return len(o) }
|
||||
func (o ObjectDiffs) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
|
||||
func (o ObjectDiffs) Less(i, j int) bool { return o[i].Less(o[j]) }
|
||||
|
||||
type FieldDiff struct {
	Type     DiffType
	Name     string
	Old, New string
}

// fieldDiff returns a FieldDiff if old and new are different; otherwise it
// returns nil. If contextual diff is enabled, even non-changed fields will be
// returned.
func fieldDiff(old, new, name string, contextual bool) *FieldDiff {
	diff := &FieldDiff{Name: name, Type: DiffTypeNone}
	if old == new {
		if !contextual {
			return nil
		}
		diff.Old, diff.New = old, new
		return diff
	}

	if old == "" {
		diff.Type = DiffTypeAdded
		diff.New = new
	} else if new == "" {
		diff.Type = DiffTypeDeleted
		diff.Old = old
	} else {
		diff.Type = DiffTypeEdited
		diff.Old = old
		diff.New = new
	}
	return diff
}

func (f *FieldDiff) GoString() string {
	return fmt.Sprintf("%q (%s): %q => %q", f.Name, f.Type, f.Old, f.New)
}

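// Illustrative sketch of the behavior above (assuming DiffTypeEdited renders
// as the string "Edited"): a call like
//
//	fieldDiff("256", "512", "CPU", false)
//
// yields a FieldDiff whose GoString is `"CPU" (Edited): "256" => "512"`, while
// identical inputs return nil unless contextual is true, in which case a
// DiffTypeNone entry is returned so unchanged fields can still be rendered.
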
func (f *FieldDiff) Less(other *FieldDiff) bool {
|
||||
if reflect.DeepEqual(f, other) {
|
||||
return false
|
||||
} else if other == nil {
|
||||
return false
|
||||
} else if f == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
if f.Name != other.Name {
|
||||
return f.Name < other.Name
|
||||
} else if f.Old != other.Old {
|
||||
return f.Old < other.Old
|
||||
}
|
||||
|
||||
return f.New < other.New
|
||||
}
|
||||
|
||||
// For sorting FieldDiffs
|
||||
type FieldDiffs []*FieldDiff
|
||||
|
||||
func (f FieldDiffs) Len() int { return len(f) }
|
||||
func (f FieldDiffs) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
|
||||
func (f FieldDiffs) Less(i, j int) bool { return f[i].Less(f[j]) }
|
||||
|
||||
// fieldDiffs takes a map of field names to their values and returns a set of
|
||||
// field diffs. If contextual diff is enabled, even non-changed fields will be
|
||||
// returned.
|
||||
func fieldDiffs(old, new map[string]string, contextual bool) []*FieldDiff {
|
||||
var diffs []*FieldDiff
|
||||
visited := make(map[string]struct{})
|
||||
for k, oldV := range old {
|
||||
visited[k] = struct{}{}
|
||||
newV := new[k]
|
||||
if diff := fieldDiff(oldV, newV, k, contextual); diff != nil {
|
||||
diffs = append(diffs, diff)
|
||||
}
|
||||
}
|
||||
|
||||
for k, newV := range new {
|
||||
if _, ok := visited[k]; !ok {
|
||||
if diff := fieldDiff("", newV, k, contextual); diff != nil {
|
||||
diffs = append(diffs, diff)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(FieldDiffs(diffs))
|
||||
return diffs
|
||||
}
|
||||
|
||||
// stringSetDiff diffs two sets of strings with the given name.
|
||||
func stringSetDiff(old, new []string, name string) *ObjectDiff {
|
||||
oldMap := make(map[string]struct{}, len(old))
|
||||
newMap := make(map[string]struct{}, len(new))
|
||||
for _, o := range old {
|
||||
oldMap[o] = struct{}{}
|
||||
}
|
||||
for _, n := range new {
|
||||
newMap[n] = struct{}{}
|
||||
}
|
||||
if reflect.DeepEqual(oldMap, newMap) {
|
||||
return nil
|
||||
}
|
||||
|
||||
diff := &ObjectDiff{Name: name}
|
||||
var added, removed bool
|
||||
for k := range oldMap {
|
||||
if _, ok := newMap[k]; !ok {
|
||||
diff.Fields = append(diff.Fields, fieldDiff(k, "", name, false))
|
||||
removed = true
|
||||
}
|
||||
}
|
||||
|
||||
for k := range newMap {
|
||||
if _, ok := oldMap[k]; !ok {
|
||||
diff.Fields = append(diff.Fields, fieldDiff("", k, name, false))
|
||||
added = true
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(FieldDiffs(diff.Fields))
|
||||
|
||||
// Determine the type
|
||||
if added && removed {
|
||||
diff.Type = DiffTypeEdited
|
||||
} else if added {
|
||||
diff.Type = DiffTypeAdded
|
||||
} else {
|
||||
diff.Type = DiffTypeDeleted
|
||||
}
|
||||
|
||||
return diff
|
||||
}
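// Illustrative sketch of the set diff above (the string values are made-up
// examples): stringSetDiff([]string{"a", "b"}, []string{"b", "c"}, "Tags") is
// expected to return a "Tags" ObjectDiff of type DiffTypeEdited containing a
// Deleted field for "a" and an Added field for "c"; "b" is unchanged and is
// omitted because the individual field diffs are built with contextual false.
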
|
||||
|
||||
// primitiveObjectDiff returns a diff of the passed objects' primitive fields.
// The filter field can be used to exclude fields from the diff. The name is the
// name of the objects. If contextual is set, non-changed fields will also be
// stored in the object diff.
func primitiveObjectDiff(old, new interface{}, filter []string, name string, contextual bool) *ObjectDiff {
	oldPrimitiveFlat := flatmap.Flatten(old, filter, true)
	newPrimitiveFlat := flatmap.Flatten(new, filter, true)
	delete(oldPrimitiveFlat, "")
	delete(newPrimitiveFlat, "")

	diff := &ObjectDiff{Name: name}
	diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual)

	var added, deleted, edited bool
	for _, f := range diff.Fields {
		switch f.Type {
		case DiffTypeEdited:
			edited = true
			break
		case DiffTypeDeleted:
			deleted = true
		case DiffTypeAdded:
			added = true
		}
	}

	if edited || added && deleted {
		diff.Type = DiffTypeEdited
	} else if added {
		diff.Type = DiffTypeAdded
	} else if deleted {
		diff.Type = DiffTypeDeleted
	} else {
		return nil
	}

	return diff
}

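// Illustrative sketch of the helper above (the Port values and the flattened
// key names are assumptions for the example, not taken from the diff itself):
// diffing Port{Label: "http", Value: 8080} against Port{Label: "http", Value: 9090}
// with name "Static Port" is expected to yield a DiffTypeEdited ObjectDiff whose
// Fields contain a "Value" entry of "8080" => "9090"; with contextual set, the
// unchanged "Label" field would also be included as a DiffTypeNone entry.
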
// primitiveObjectSetDiff does a set difference of the old and new sets. The
|
||||
// filter parameter can be used to filter a set of primitive fields in the
|
||||
// passed structs. The name corresponds to the name of the passed objects. If
|
||||
// contextual diff is enabled, objects' primitive fields will be returned even if
|
||||
// no diff exists.
|
||||
func primitiveObjectSetDiff(old, new []interface{}, filter []string, name string, contextual bool) []*ObjectDiff {
|
||||
makeSet := func(objects []interface{}) map[string]interface{} {
|
||||
objMap := make(map[string]interface{}, len(objects))
|
||||
for _, obj := range objects {
|
||||
hash, err := hashstructure.Hash(obj, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
objMap[fmt.Sprintf("%d", hash)] = obj
|
||||
}
|
||||
|
||||
return objMap
|
||||
}
|
||||
|
||||
oldSet := makeSet(old)
|
||||
newSet := makeSet(new)
|
||||
|
||||
var diffs []*ObjectDiff
|
||||
for k, v := range oldSet {
|
||||
// Deleted
|
||||
if _, ok := newSet[k]; !ok {
|
||||
diffs = append(diffs, primitiveObjectDiff(v, nil, filter, name, contextual))
|
||||
}
|
||||
}
|
||||
for k, v := range newSet {
|
||||
// Added
|
||||
if _, ok := oldSet[k]; !ok {
|
||||
diffs = append(diffs, primitiveObjectDiff(nil, v, filter, name, contextual))
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(ObjectDiffs(diffs))
|
||||
return diffs
|
||||
}
|
||||
|
||||
// interfaceSlice is a helper method that takes a slice of typed elements and
|
||||
// returns a slice of interface. This method will panic if given a non-slice
|
||||
// input.
|
||||
func interfaceSlice(slice interface{}) []interface{} {
|
||||
s := reflect.ValueOf(slice)
|
||||
if s.Kind() != reflect.Slice {
|
||||
panic("InterfaceSlice() given a non-slice type")
|
||||
}
|
||||
|
||||
ret := make([]interface{}, s.Len())
|
||||
|
||||
for i := 0; i < s.Len(); i++ {
|
||||
ret[i] = s.Index(i).Interface()
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
File diff suppressed because it is too large
@ -1801,17 +1801,32 @@ func (t *Task) Validate() error {
 func validateServices(t *Task) error {
 	var mErr multierror.Error
 
-	// Ensure that services don't ask for non-existent ports.
+	// Ensure that services don't ask for non-existent ports and their names are
+	// unique.
 	servicePorts := make(map[string][]string)
+	knownServices := make(map[string]struct{})
 	for i, service := range t.Services {
 		if err := service.Validate(); err != nil {
 			outer := fmt.Errorf("service %d validation failed: %s", i, err)
 			mErr.Errors = append(mErr.Errors, outer)
 		}
+		if _, ok := knownServices[service.Name]; ok {
+			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q is duplicate", service.Name))
+		}
+		knownServices[service.Name] = struct{}{}
 
 		if service.PortLabel != "" {
 			servicePorts[service.PortLabel] = append(servicePorts[service.PortLabel], service.Name)
 		}
+
+		// Ensure that check names are unique.
+		knownChecks := make(map[string]struct{})
+		for _, check := range service.Checks {
+			if _, ok := knownChecks[check.Name]; ok {
+				mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is duplicate", check.Name))
+			}
+			knownChecks[check.Name] = struct{}{}
+		}
 	}
 
 	// Get the set of port labels.

@ -202,7 +202,7 @@ func TestTask_Validate(t *testing.T) {
 }
 
 func TestTask_Validate_Services(t *testing.T) {
-	s := &Service{
+	s1 := &Service{
 		Name:      "service-name",
 		PortLabel: "bar",
 		Checks: []*ServiceCheck{
@ -210,9 +210,17 @@ func TestTask_Validate_Services(t *testing.T) {
 				Name: "check-name",
 				Type: ServiceCheckTCP,
 			},
+			{
+				Name: "check-name",
+				Type: ServiceCheckTCP,
+			},
 		},
 	}
 
+	s2 := &Service{
+		Name: "service-name",
+	}
+
 	task := &Task{
 		Name:   "web",
 		Driver: "docker",
@ -222,7 +230,7 @@ func TestTask_Validate_Services(t *testing.T) {
 			MemoryMB: 100,
 			IOPS:     10,
 		},
-		Services: []*Service{s},
+		Services: []*Service{s1, s2},
 	}
 	err := task.Validate()
 	if err == nil {
@ -231,6 +239,14 @@ func TestTask_Validate_Services(t *testing.T) {
 	if !strings.Contains(err.Error(), "referenced by services service-name does not exist") {
 		t.Fatalf("err: %s", err)
 	}
+
+	if !strings.Contains(err.Error(), "service \"service-name\" is duplicate") {
+		t.Fatalf("err: %v", err)
+	}
+
+	if !strings.Contains(err.Error(), "check \"check-name\" is duplicate") {
+		t.Fatalf("err: %v", err)
+	}
 }
 
 func TestTask_Validate_LogConfig(t *testing.T) {

@ -1,19 +0,0 @@
|
|||
Copyright (c) 2015 Datadog, Inc
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -1,45 +0,0 @@
|
|||
## Overview
|
||||
|
||||
Package `statsd` provides a Go [dogstatsd](http://docs.datadoghq.com/guides/dogstatsd/) client. Dogstatsd extends Statsd, adding tags
|
||||
and histograms.
|
||||
|
||||
## Get the code
|
||||
|
||||
$ go get github.com/DataDog/datadog-go/statsd
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
// Create the client
|
||||
c, err := statsd.New("127.0.0.1:8125")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// Prefix every metric with the app name
|
||||
c.Namespace = "flubber."
|
||||
// Send the EC2 availability zone as a tag with every metric
|
||||
c.Tags = append(c.Tags, "us-east-1a")
|
||||
err = c.Gauge("request.duration", 1.2, nil, 1)
|
||||
```
|
||||
|
||||
## Buffering Client
|
||||
|
||||
Dogstatsd accepts packets with multiple statsd payloads in them. Using the buffered client created via `NewBuffered` will buffer up commands and send them when the buffer is full or after 100msec.
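
A minimal sketch of the buffered client (the address and buffer size are placeholder values; `NewBuffered` is the buffered constructor defined by this package):

```go
// Flushes once 256 commands are queued, or after the 100ms flush interval.
c, err := statsd.NewBuffered("127.0.0.1:8125", 256)
if err != nil {
    log.Fatal(err)
}
defer c.Close()
c.Count("page.views", 1, nil, 1)
```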
|
||||
|
||||
## Development
|
||||
|
||||
Run the tests with:
|
||||
|
||||
$ go test
|
||||
|
||||
## Documentation
|
||||
|
||||
Please see: http://godoc.org/github.com/DataDog/datadog-go/statsd
|
||||
|
||||
## License
|
||||
|
||||
go-dogstatsd is released under the [MIT license](http://www.opensource.org/licenses/mit-license.php).
|
||||
|
||||
## Credits
|
||||
|
||||
Original code by [ooyala](https://github.com/ooyala/go-dogstatsd).
|
|
@ -1,353 +0,0 @@
|
|||
// Copyright 2013 Ooyala, Inc.
|
||||
|
||||
/*
|
||||
Package statsd provides a Go dogstatsd client. Dogstatsd extends the popular statsd,
|
||||
adding tags and histograms and pushing upstream to Datadog.
|
||||
|
||||
Refer to http://docs.datadoghq.com/guides/dogstatsd/ for information about DogStatsD.
|
||||
|
||||
Example Usage:
|
||||
|
||||
// Create the client
|
||||
c, err := statsd.New("127.0.0.1:8125")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// Prefix every metric with the app name
|
||||
c.Namespace = "flubber."
|
||||
// Send the EC2 availability zone as a tag with every metric
|
||||
c.Tags = append(c.Tags, "us-east-1a")
|
||||
err = c.Gauge("request.duration", 1.2, nil, 1)
|
||||
|
||||
statsd is based on go-statsd-client.
|
||||
*/
|
||||
package statsd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A Client is a handle for sending udp messages to dogstatsd. It is safe to
|
||||
// use one Client from multiple goroutines simultaneously.
|
||||
type Client struct {
|
||||
conn net.Conn
|
||||
// Namespace to prepend to all statsd calls
|
||||
Namespace string
|
||||
// Tags are global tags to be added to every statsd call
|
||||
Tags []string
|
||||
// BufferLength is the length of the buffer in commands.
|
||||
bufferLength int
|
||||
flushTime time.Duration
|
||||
commands []string
|
||||
stop bool
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
// New returns a pointer to a new Client given an addr in the format "hostname:port".
|
||||
func New(addr string) (*Client, error) {
|
||||
udpAddr, err := net.ResolveUDPAddr("udp", addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conn, err := net.DialUDP("udp", nil, udpAddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := &Client{conn: conn}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// NewBuffered returns a Client that buffers its output and sends it in chunks.
|
||||
// Buflen is the length of the buffer in number of commands.
|
||||
func NewBuffered(addr string, buflen int) (*Client, error) {
|
||||
client, err := New(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.bufferLength = buflen
|
||||
client.commands = make([]string, 0, buflen)
|
||||
client.flushTime = time.Millisecond * 100
|
||||
go client.watch()
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// format a message from its name, value, tags and rate. Also adds global
|
||||
// namespace and tags.
|
||||
func (c *Client) format(name, value string, tags []string, rate float64) string {
|
||||
var buf bytes.Buffer
|
||||
if c.Namespace != "" {
|
||||
buf.WriteString(c.Namespace)
|
||||
}
|
||||
buf.WriteString(name)
|
||||
buf.WriteString(":")
|
||||
buf.WriteString(value)
|
||||
if rate < 1 {
|
||||
buf.WriteString(`|@`)
|
||||
buf.WriteString(strconv.FormatFloat(rate, 'f', -1, 64))
|
||||
}
|
||||
|
||||
tags = append(c.Tags, tags...)
|
||||
if len(tags) > 0 {
|
||||
buf.WriteString("|#")
|
||||
buf.WriteString(tags[0])
|
||||
for _, tag := range tags[1:] {
|
||||
buf.WriteString(",")
|
||||
buf.WriteString(tag)
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
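// Illustrative sketch of the wire format produced above (the metric name,
// namespace, rate and tags are made-up examples): with Namespace set to
// "flubber." and global tag "us-east-1a", a gauge sampled at rate 0.5 with tag
// "endpoint:home" comes out roughly as
//
//	flubber.request.duration:1.200000|g|@0.5|#us-east-1a,endpoint:home
//
// where the "|g" type suffix is already part of the value string passed in by
// Gauge and the "|@0.5" sample-rate section is only added when rate < 1.
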
|
||||
|
||||
func (c *Client) watch() {
|
||||
for _ = range time.Tick(c.flushTime) {
|
||||
if c.stop {
|
||||
return
|
||||
}
|
||||
c.Lock()
|
||||
if len(c.commands) > 0 {
|
||||
// FIXME: eating error here
|
||||
c.flush()
|
||||
}
|
||||
c.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) append(cmd string) error {
|
||||
c.Lock()
|
||||
c.commands = append(c.commands, cmd)
|
||||
// if we should flush, lets do it
|
||||
if len(c.commands) == c.bufferLength {
|
||||
if err := c.flush(); err != nil {
|
||||
c.Unlock()
|
||||
return err
|
||||
}
|
||||
}
|
||||
c.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// flush the commands in the buffer. Lock must be held by caller.
|
||||
func (c *Client) flush() error {
|
||||
data := strings.Join(c.commands, "\n")
|
||||
_, err := c.conn.Write([]byte(data))
|
||||
// clear the slice with a slice op, doesn't realloc
|
||||
c.commands = c.commands[:0]
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Client) sendMsg(msg string) error {
|
||||
// if this client is buffered, then we'll just append this
|
||||
if c.bufferLength > 0 {
|
||||
return c.append(msg)
|
||||
}
|
||||
c.Lock()
|
||||
_, err := c.conn.Write([]byte(msg))
|
||||
c.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
// send handles sampling and sends the message over UDP. It also adds global namespace prefixes and tags.
|
||||
func (c *Client) send(name, value string, tags []string, rate float64) error {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
if rate < 1 && rand.Float64() > rate {
|
||||
return nil
|
||||
}
|
||||
data := c.format(name, value, tags, rate)
|
||||
return c.sendMsg(data)
|
||||
}
|
||||
|
||||
// Gauge measures the value of a metric at a particular time.
|
||||
func (c *Client) Gauge(name string, value float64, tags []string, rate float64) error {
|
||||
stat := fmt.Sprintf("%f|g", value)
|
||||
return c.send(name, stat, tags, rate)
|
||||
}
|
||||
|
||||
// Count tracks how many times something happened per second.
|
||||
func (c *Client) Count(name string, value int64, tags []string, rate float64) error {
|
||||
stat := fmt.Sprintf("%d|c", value)
|
||||
return c.send(name, stat, tags, rate)
|
||||
}
|
||||
|
||||
// Histogram tracks the statistical distribution of a set of values.
|
||||
func (c *Client) Histogram(name string, value float64, tags []string, rate float64) error {
|
||||
stat := fmt.Sprintf("%f|h", value)
|
||||
return c.send(name, stat, tags, rate)
|
||||
}
|
||||
|
||||
// Set counts the number of unique elements in a group.
|
||||
func (c *Client) Set(name string, value string, tags []string, rate float64) error {
|
||||
stat := fmt.Sprintf("%s|s", value)
|
||||
return c.send(name, stat, tags, rate)
|
||||
}
|
||||
|
||||
// TimeInMilliseconds sends timing information in milliseconds.
|
||||
// It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing)
|
||||
func (c *Client) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error {
|
||||
stat := fmt.Sprintf("%f|ms", value)
|
||||
return c.send(name, stat, tags, rate)
|
||||
}
|
||||
|
||||
// Event sends the provided Event.
|
||||
func (c *Client) Event(e *Event) error {
|
||||
stat, err := e.Encode(c.Tags...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.sendMsg(stat)
|
||||
}
|
||||
|
||||
// SimpleEvent sends an event with the provided title and text.
|
||||
func (c *Client) SimpleEvent(title, text string) error {
|
||||
e := NewEvent(title, text)
|
||||
return c.Event(e)
|
||||
}
|
||||
|
||||
// Close the client connection.
|
||||
func (c *Client) Close() error {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
c.stop = true
|
||||
return c.conn.Close()
|
||||
}
|
||||
|
||||
// Events support
|
||||
|
||||
type eventAlertType string
|
||||
|
||||
const (
|
||||
// Info is the "info" AlertType for events
|
||||
Info eventAlertType = "info"
|
||||
// Error is the "error" AlertType for events
|
||||
Error eventAlertType = "error"
|
||||
// Warning is the "warning" AlertType for events
|
||||
Warning eventAlertType = "warning"
|
||||
// Success is the "success" AlertType for events
|
||||
Success eventAlertType = "success"
|
||||
)
|
||||
|
||||
type eventPriority string
|
||||
|
||||
const (
|
||||
// Normal is the "normal" Priority for events
|
||||
Normal eventPriority = "normal"
|
||||
// Low is the "low" Priority for events
|
||||
Low eventPriority = "low"
|
||||
)
|
||||
|
||||
// An Event is an object that can be posted to your DataDog event stream.
|
||||
type Event struct {
|
||||
// Title of the event. Required.
|
||||
Title string
|
||||
// Text is the description of the event. Required.
|
||||
Text string
|
||||
// Timestamp is a timestamp for the event. If not provided, the dogstatsd
|
||||
// server will set this to the current time.
|
||||
Timestamp time.Time
|
||||
// Hostname for the event.
|
||||
Hostname string
|
||||
// AggregationKey groups this event with others of the same key.
|
||||
AggregationKey string
|
||||
// Priority of the event. Can be statsd.Low or statsd.Normal.
|
||||
Priority eventPriority
|
||||
// SourceTypeName is a source type for the event.
|
||||
SourceTypeName string
|
||||
// AlertType can be statsd.Info, statsd.Error, statsd.Warning, or statsd.Success.
|
||||
// If absent, the default value applied by the dogstatsd server is Info.
|
||||
AlertType eventAlertType
|
||||
// Tags for the event.
|
||||
Tags []string
|
||||
}
|
||||
|
||||
// NewEvent creates a new event with the given title and text. Error checking
|
||||
// against these values is done at send-time, or upon running e.Check.
|
||||
func NewEvent(title, text string) *Event {
|
||||
return &Event{
|
||||
Title: title,
|
||||
Text: text,
|
||||
}
|
||||
}
|
||||
|
||||
// Check verifies that an event is valid.
|
||||
func (e Event) Check() error {
|
||||
if len(e.Title) == 0 {
|
||||
return fmt.Errorf("statsd.Event title is required")
|
||||
}
|
||||
if len(e.Text) == 0 {
|
||||
return fmt.Errorf("statsd.Event text is required")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Encode returns the dogstatsd wire protocol representation for an event.
|
||||
// Tags may be passed which will be added to the encoded output but not to
|
||||
// the Event's list of tags, eg. for default tags.
|
||||
func (e Event) Encode(tags ...string) (string, error) {
|
||||
err := e.Check()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var buffer bytes.Buffer
|
||||
buffer.WriteString("_e{")
|
||||
buffer.WriteString(strconv.FormatInt(int64(len(e.Title)), 10))
|
||||
buffer.WriteRune(',')
|
||||
buffer.WriteString(strconv.FormatInt(int64(len(e.Text)), 10))
|
||||
buffer.WriteString("}:")
|
||||
buffer.WriteString(e.Title)
|
||||
buffer.WriteRune('|')
|
||||
buffer.WriteString(e.Text)
|
||||
|
||||
if !e.Timestamp.IsZero() {
|
||||
buffer.WriteString("|d:")
|
||||
buffer.WriteString(strconv.FormatInt(int64(e.Timestamp.Unix()), 10))
|
||||
}
|
||||
|
||||
if len(e.Hostname) != 0 {
|
||||
buffer.WriteString("|h:")
|
||||
buffer.WriteString(e.Hostname)
|
||||
}
|
||||
|
||||
if len(e.AggregationKey) != 0 {
|
||||
buffer.WriteString("|k:")
|
||||
buffer.WriteString(e.AggregationKey)
|
||||
|
||||
}
|
||||
|
||||
if len(e.Priority) != 0 {
|
||||
buffer.WriteString("|p:")
|
||||
buffer.WriteString(string(e.Priority))
|
||||
}
|
||||
|
||||
if len(e.SourceTypeName) != 0 {
|
||||
buffer.WriteString("|s:")
|
||||
buffer.WriteString(e.SourceTypeName)
|
||||
}
|
||||
|
||||
if len(e.AlertType) != 0 {
|
||||
buffer.WriteString("|t:")
|
||||
buffer.WriteString(string(e.AlertType))
|
||||
}
|
||||
|
||||
if len(tags)+len(e.Tags) > 0 {
|
||||
all := make([]string, 0, len(tags)+len(e.Tags))
|
||||
all = append(all, tags...)
|
||||
all = append(all, e.Tags...)
|
||||
buffer.WriteString("|#")
|
||||
buffer.WriteString(all[0])
|
||||
for _, tag := range all[1:] {
|
||||
buffer.WriteString(",")
|
||||
buffer.WriteString(tag)
|
||||
}
|
||||
}
|
||||
|
||||
return buffer.String(), nil
|
||||
}
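// Illustrative sketch of an encoded event (the title, text and tag are made-up
// examples): NewEvent("deploy", "build 1234").Encode("env:prod") is expected to
// produce
//
//	_e{6,10}:deploy|build 1234|#env:prod
//
// where 6 and 10 are the byte lengths of the title and text, and the optional
// |d:, |h:, |k:, |p:, |s: and |t: sections are only appended when the
// corresponding Event fields are set.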
|
|
@ -1,109 +0,0 @@
|
|||
package datadog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
)
|
||||
|
||||
// DogStatsdSink provides a MetricSink that can be used
|
||||
// with a dogstatsd server. It utilizes the Dogstatsd client at github.com/DataDog/datadog-go/statsd
|
||||
type DogStatsdSink struct {
|
||||
client *statsd.Client
|
||||
hostName string
|
||||
propagateHostname bool
|
||||
}
|
||||
|
||||
// NewDogStatsdSink is used to create a new DogStatsdSink with sane defaults
|
||||
func NewDogStatsdSink(addr string, hostName string) (*DogStatsdSink, error) {
|
||||
client, err := statsd.New(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sink := &DogStatsdSink{
|
||||
client: client,
|
||||
hostName: hostName,
|
||||
propagateHostname: false,
|
||||
}
|
||||
return sink, nil
|
||||
}
|
||||
|
||||
// SetTags sets common tags on the Dogstatsd Client that will be sent
|
||||
// along with all dogstatsd packets.
|
||||
// Ref: http://docs.datadoghq.com/guides/dogstatsd/#tags
|
||||
func (s *DogStatsdSink) SetTags(tags []string) {
|
||||
s.client.Tags = tags
|
||||
}
|
||||
|
||||
// EnableHostNamePropagation forces a Dogstatsd `host` tag with the value specified by `s.hostName`
|
||||
// Since the go-metrics package has its own mechanism for attaching a hostname to metrics,
|
||||
// setting the `propagateHostname` flag ensures that `s.HostName` overrides the host tag naively set by the DogStatsd server
|
||||
func (s *DogStatsdSink) EnableHostNamePropagation() {
|
||||
s.propagateHostname = true
|
||||
}
|
||||
|
||||
func (s *DogStatsdSink) flattenKey(parts []string) string {
|
||||
joined := strings.Join(parts, ".")
|
||||
return strings.Map(func(r rune) rune {
|
||||
switch r {
|
||||
case ':':
|
||||
fallthrough
|
||||
case ' ':
|
||||
return '_'
|
||||
default:
|
||||
return r
|
||||
}
|
||||
}, joined)
|
||||
}
|
||||
|
||||
func (s *DogStatsdSink) parseKey(key []string) ([]string, []string) {
|
||||
// Since DogStatsd supports dimensionality via tags on metric keys, this sink's approach is to splice the hostname out of the key in favor of a `host` tag
|
||||
// The `host` tag is either forced here, or set downstream by the DogStatsd server
|
||||
|
||||
var tags []string
|
||||
hostName := s.hostName
|
||||
|
||||
//Splice the hostname out of the key
|
||||
for i, el := range key {
|
||||
if el == hostName {
|
||||
key = append(key[:i], key[i+1:]...)
|
||||
}
|
||||
}
|
||||
|
||||
if s.propagateHostname {
|
||||
tags = append(tags, fmt.Sprintf("host:%s", hostName))
|
||||
}
|
||||
return key, tags
|
||||
}
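// Illustrative sketch of the key handling above (the key parts are made-up
// examples): with hostName "node1", a key like ["nomad", "node1", "allocs"] is
// spliced down to ["nomad", "allocs"], and when propagateHostname is enabled
// the returned tags additionally carry "host:node1".
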
|
||||
|
||||
// Implementation of methods in the MetricSink interface
|
||||
|
||||
func (s *DogStatsdSink) SetGauge(key []string, val float32) {
|
||||
key, tags := s.parseKey(key)
|
||||
flatKey := s.flattenKey(key)
|
||||
|
||||
rate := 1.0
|
||||
s.client.Gauge(flatKey, float64(val), tags, rate)
|
||||
}
|
||||
|
||||
func (s *DogStatsdSink) IncrCounter(key []string, val float32) {
|
||||
key, tags := s.parseKey(key)
|
||||
flatKey := s.flattenKey(key)
|
||||
|
||||
rate := 1.0
|
||||
s.client.Count(flatKey, int64(val), tags, rate)
|
||||
}
|
||||
|
||||
// EmitKey is not implemented since DogStatsd does not provide a metric type that holds an
|
||||
// arbitrary number of values
|
||||
func (s *DogStatsdSink) EmitKey(key []string, val float32) {
|
||||
}
|
||||
|
||||
func (s *DogStatsdSink) AddSample(key []string, val float32) {
|
||||
key, tags := s.parseKey(key)
|
||||
flatKey := s.flattenKey(key)
|
||||
|
||||
rate := 1.0
|
||||
s.client.TimeInMilliseconds(flatKey, float64(val), tags, rate)
|
||||
}
|
|
@ -1,88 +0,0 @@
|
|||
// +build go1.3
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
type PrometheusSink struct {
|
||||
mu sync.Mutex
|
||||
gauges map[string]prometheus.Gauge
|
||||
summaries map[string]prometheus.Summary
|
||||
counters map[string]prometheus.Counter
|
||||
}
|
||||
|
||||
func NewPrometheusSink() (*PrometheusSink, error) {
|
||||
return &PrometheusSink{
|
||||
gauges: make(map[string]prometheus.Gauge),
|
||||
summaries: make(map[string]prometheus.Summary),
|
||||
counters: make(map[string]prometheus.Counter),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *PrometheusSink) flattenKey(parts []string) string {
|
||||
joined := strings.Join(parts, "_")
|
||||
joined = strings.Replace(joined, " ", "_", -1)
|
||||
joined = strings.Replace(joined, ".", "_", -1)
|
||||
joined = strings.Replace(joined, "-", "_", -1)
|
||||
return joined
|
||||
}
|
||||
|
||||
func (p *PrometheusSink) SetGauge(parts []string, val float32) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
key := p.flattenKey(parts)
|
||||
g, ok := p.gauges[key]
|
||||
if !ok {
|
||||
g = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: key,
|
||||
Help: key,
|
||||
})
|
||||
prometheus.MustRegister(g)
|
||||
p.gauges[key] = g
|
||||
}
|
||||
g.Set(float64(val))
|
||||
}
|
||||
|
||||
func (p *PrometheusSink) AddSample(parts []string, val float32) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
key := p.flattenKey(parts)
|
||||
g, ok := p.summaries[key]
|
||||
if !ok {
|
||||
g = prometheus.NewSummary(prometheus.SummaryOpts{
|
||||
Name: key,
|
||||
Help: key,
|
||||
MaxAge: 10 * time.Second,
|
||||
})
|
||||
prometheus.MustRegister(g)
|
||||
p.summaries[key] = g
|
||||
}
|
||||
g.Observe(float64(val))
|
||||
}
|
||||
|
||||
// EmitKey is not implemented. Prometheus doesn’t offer a type for which an
|
||||
// arbitrary number of values is retained, as Prometheus works with a pull
|
||||
// model, rather than a push model.
|
||||
func (p *PrometheusSink) EmitKey(key []string, val float32) {
|
||||
}
|
||||
|
||||
func (p *PrometheusSink) IncrCounter(parts []string, val float32) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
key := p.flattenKey(parts)
|
||||
g, ok := p.counters[key]
|
||||
if !ok {
|
||||
g = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: key,
|
||||
Help: key,
|
||||
})
|
||||
prometheus.MustRegister(g)
|
||||
p.counters[key] = g
|
||||
}
|
||||
g.Add(float64(val))
|
||||
}
|
|
@ -1,246 +0,0 @@
|
|||
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
|
||||
|
||||
// Package s3iface provides an interface for the Amazon Simple Storage Service.
|
||||
package s3iface
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
)
|
||||
|
||||
// S3API is the interface type for s3.S3.
|
||||
type S3API interface {
|
||||
AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput)
|
||||
|
||||
AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error)
|
||||
|
||||
CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput)
|
||||
|
||||
CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
|
||||
|
||||
CopyObjectRequest(*s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput)
|
||||
|
||||
CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
|
||||
|
||||
CreateBucketRequest(*s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput)
|
||||
|
||||
CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error)
|
||||
|
||||
CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput)
|
||||
|
||||
CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
|
||||
|
||||
DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput)
|
||||
|
||||
DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error)
|
||||
|
||||
DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput)
|
||||
|
||||
DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error)
|
||||
|
||||
DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput)
|
||||
|
||||
DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error)
|
||||
|
||||
DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput)
|
||||
|
||||
DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error)
|
||||
|
||||
DeleteBucketReplicationRequest(*s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput)
|
||||
|
||||
DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error)
|
||||
|
||||
DeleteBucketTaggingRequest(*s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput)
|
||||
|
||||
DeleteBucketTagging(*s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error)
|
||||
|
||||
DeleteBucketWebsiteRequest(*s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput)
|
||||
|
||||
DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error)
|
||||
|
||||
DeleteObjectRequest(*s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput)
|
||||
|
||||
DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
|
||||
|
||||
DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput)
|
||||
|
||||
DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error)
|
||||
|
||||
GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput)
|
||||
|
||||
GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error)
|
||||
|
||||
GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput)
|
||||
|
||||
GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error)
|
||||
|
||||
GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput)
|
||||
|
||||
GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error)
|
||||
|
||||
GetBucketLifecycleConfigurationRequest(*s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput)
|
||||
|
||||
GetBucketLifecycleConfiguration(*s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error)
|
||||
|
||||
GetBucketLocationRequest(*s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput)
|
||||
|
||||
GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error)
|
||||
|
||||
GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput)
|
||||
|
||||
GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error)
|
||||
|
||||
GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated)
|
||||
|
||||
GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error)
|
||||
|
||||
GetBucketNotificationConfigurationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration)
|
||||
|
||||
GetBucketNotificationConfiguration(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error)
|
||||
|
||||
GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput)
|
||||
|
||||
GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error)
|
||||
|
||||
GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput)
|
||||
|
||||
GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error)
|
||||
|
||||
GetBucketRequestPaymentRequest(*s3.GetBucketRequestPaymentInput) (*request.Request, *s3.GetBucketRequestPaymentOutput)
|
||||
|
||||
GetBucketRequestPayment(*s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error)
|
||||
|
||||
GetBucketTaggingRequest(*s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput)
|
||||
|
||||
GetBucketTagging(*s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error)
|
||||
|
||||
GetBucketVersioningRequest(*s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput)
|
||||
|
||||
GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error)
|
||||
|
||||
GetBucketWebsiteRequest(*s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput)
|
||||
|
||||
GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error)
|
||||
|
||||
GetObjectRequest(*s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput)
|
||||
|
||||
GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error)
|
||||
|
||||
GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput)
|
||||
|
||||
GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
|
||||
|
||||
GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput)
|
||||
|
||||
GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error)
|
||||
|
||||
HeadBucketRequest(*s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput)
|
||||
|
||||
HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
|
||||
|
||||
HeadObjectRequest(*s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput)
|
||||
|
||||
HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
|
||||
|
||||
ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput)
|
||||
|
||||
ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error)
|
||||
|
||||
ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput)
|
||||
|
||||
ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
|
||||
|
||||
ListMultipartUploadsPages(*s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool) error
|
||||
|
||||
ListObjectVersionsRequest(*s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput)
|
||||
|
||||
ListObjectVersions(*s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error)
|
||||
|
||||
ListObjectVersionsPages(*s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool) error
|
||||
|
||||
ListObjectsRequest(*s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput)
|
||||
|
||||
ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error)
|
||||
|
||||
ListObjectsPages(*s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool) error
|
||||
|
||||
ListPartsRequest(*s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput)
|
||||
|
||||
ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error)
|
||||
|
||||
ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error
|
||||
|
||||
PutBucketAclRequest(*s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput)
|
||||
|
||||
PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error)
|
||||
|
||||
PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput)
|
||||
|
||||
PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error)
|
||||
|
||||
PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput)
|
||||
|
||||
PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error)
|
||||
|
||||
PutBucketLifecycleConfigurationRequest(*s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput)
|
||||
|
||||
PutBucketLifecycleConfiguration(*s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error)
|
||||
|
||||
PutBucketLoggingRequest(*s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput)
|
||||
|
||||
PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error)
|
||||
|
||||
PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput)
|
||||
|
||||
PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error)
|
||||
|
||||
PutBucketNotificationConfigurationRequest(*s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput)
|
||||
|
||||
PutBucketNotificationConfiguration(*s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error)
|
||||
|
||||
PutBucketPolicyRequest(*s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput)
|
||||
|
||||
PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error)
|
||||
|
||||
PutBucketReplicationRequest(*s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput)
|
||||
|
||||
PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error)
|
||||
|
||||
PutBucketRequestPaymentRequest(*s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput)
|
||||
|
||||
PutBucketRequestPayment(*s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error)
|
||||
|
||||
PutBucketTaggingRequest(*s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput)
|
||||
|
||||
PutBucketTagging(*s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error)
|
||||
|
||||
PutBucketVersioningRequest(*s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput)
|
||||
|
||||
PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error)
|
||||
|
||||
PutBucketWebsiteRequest(*s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput)
|
||||
|
||||
PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error)
|
||||
|
||||
PutObjectRequest(*s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput)
|
||||
|
||||
PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error)
|
||||
|
||||
PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput)
|
||||
|
||||
PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error)
|
||||
|
||||
RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput)
|
||||
|
||||
RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error)
|
||||
|
||||
UploadPartRequest(*s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput)
|
||||
|
||||
UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error)
|
||||
|
||||
UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput)
|
||||
|
||||
UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)
|
||||
}
|
||||
|
||||
var _ S3API = (*s3.S3)(nil)
|
|
@ -1,3 +0,0 @@
|
|||
// Package s3manager provides utilities to upload and download objects from
|
||||
// S3 concurrently. Helpful for when working with large objects.
|
||||
package s3manager
|
|
@ -1,321 +0,0 @@
|
|||
package s3manager
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3iface"
|
||||
)
|
||||
|
||||
// DefaultDownloadPartSize is the default range of bytes to get at a time when
|
||||
// using Download().
|
||||
const DefaultDownloadPartSize = 1024 * 1024 * 5
|
||||
|
||||
// DefaultDownloadConcurrency is the default number of goroutines to spin up
|
||||
// when using Download().
|
||||
const DefaultDownloadConcurrency = 5
|
||||
|
||||
// The Downloader structure that calls Download(). It is safe to call Download()
|
||||
// on this structure for multiple objects and across concurrent goroutines.
|
||||
// Mutating the Downloader's properties is not safe to be done concurrently.
|
||||
type Downloader struct {
|
||||
// The buffer size (in bytes) to use when buffering data into chunks and
|
||||
// sending them as parts to S3. The minimum allowed part size is 5MB, and
|
||||
// if this value is set to zero, the DefaultDownloadPartSize value will be used.
|
||||
PartSize int64
|
||||
|
||||
// The number of goroutines to spin up in parallel when sending parts.
|
||||
// If this is set to zero, the DefaultDownloadConcurrency value will be used.
|
||||
Concurrency int
|
||||
|
||||
// An S3 client to use when performing downloads.
|
||||
S3 s3iface.S3API
|
||||
}
|
||||
|
||||
// NewDownloader creates a new Downloader instance to download objects from
|
||||
// S3 in concurrent chunks. Pass in additional functional options to customize
|
||||
// the downloader behavior. Requires a client.ConfigProvider in order to create
|
||||
// a S3 service client. The session.Session satisfies the client.ConfigProvider
|
||||
// interface.
|
||||
//
|
||||
// Example:
|
||||
// // The session the S3 Downloader will use
|
||||
// sess := session.New()
|
||||
//
|
||||
// // Create a downloader with the session and default options
|
||||
// downloader := s3manager.NewDownloader(sess)
|
||||
//
|
||||
// // Create a downloader with the session and custom options
|
||||
// downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
|
||||
// d.PartSize = 64 * 1024 * 1024 // 64MB per part
|
||||
// })
|
||||
func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {
|
||||
d := &Downloader{
|
||||
S3: s3.New(c),
|
||||
PartSize: DefaultDownloadPartSize,
|
||||
Concurrency: DefaultDownloadConcurrency,
|
||||
}
|
||||
for _, option := range options {
|
||||
option(d)
|
||||
}
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
// NewDownloaderWithClient creates a new Downloader instance to download
|
||||
// objects from S3 in concurrent chunks. Pass in additional functional
|
||||
// options to customize the downloader behavior. Requires a S3 service client
|
||||
// to make S3 API calls.
|
||||
//
|
||||
// Example:
|
||||
// // The S3 client the S3 Downloader will use
|
||||
// s3Svc := s3.New(session.New())
|
||||
//
|
||||
// // Create a downloader with the s3 client and default options
|
||||
// downloader := s3manager.NewDownloaderWithClient(s3Svc)
|
||||
//
|
||||
// // Create a downloader with the s3 client and custom options
|
||||
// downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) {
|
||||
// d.PartSize = 64 * 1024 * 1024 // 64MB per part
|
||||
// })
|
||||
func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {
|
||||
d := &Downloader{
|
||||
S3: svc,
|
||||
PartSize: DefaultDownloadPartSize,
|
||||
Concurrency: DefaultDownloadConcurrency,
|
||||
}
|
||||
for _, option := range options {
|
||||
option(d)
|
||||
}
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
// Download downloads an object in S3 and writes the payload into w using
|
||||
// concurrent GET requests.
|
||||
//
|
||||
// Additional functional options can be provided to configure the individual
|
||||
// download. These options are copies of the Downloader instance Download is called from.
|
||||
// Modifying the options will not impact the original Uploader instance.
|
||||
//
|
||||
// It is safe to call this method concurrently across goroutines.
|
||||
//
|
||||
// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
|
||||
// downloads, or in memory []byte wrapper using aws.WriteAtBuffer.
|
||||
func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
|
||||
impl := downloader{w: w, in: input, ctx: d}
|
||||
|
||||
for _, option := range options {
|
||||
option(&impl.ctx)
|
||||
}
|
||||
|
||||
return impl.download()
|
||||
}
|
||||
|
||||
// downloader is the implementation structure used internally by Downloader.
|
||||
type downloader struct {
|
||||
ctx Downloader
|
||||
|
||||
in *s3.GetObjectInput
|
||||
w io.WriterAt
|
||||
|
||||
wg sync.WaitGroup
|
||||
m sync.Mutex
|
||||
|
||||
pos int64
|
||||
totalBytes int64
|
||||
written int64
|
||||
err error
|
||||
}
|
||||
|
||||
// init initializes the downloader with default options.
|
||||
func (d *downloader) init() {
|
||||
d.totalBytes = -1
|
||||
|
||||
if d.ctx.Concurrency == 0 {
|
||||
d.ctx.Concurrency = DefaultDownloadConcurrency
|
||||
}
|
||||
|
||||
if d.ctx.PartSize == 0 {
|
||||
d.ctx.PartSize = DefaultDownloadPartSize
|
||||
}
|
||||
}
|
||||
|
||||
// download performs the implementation of the object download across ranged
|
||||
// GETs.
|
||||
func (d *downloader) download() (n int64, err error) {
|
||||
d.init()
|
||||
|
||||
// Spin up workers
|
||||
ch := make(chan dlchunk, d.ctx.Concurrency)
|
||||
for i := 0; i < d.ctx.Concurrency; i++ {
|
||||
d.wg.Add(1)
|
||||
go d.downloadPart(ch)
|
||||
}
|
||||
|
||||
// Assign work
|
||||
for d.geterr() == nil {
|
||||
if d.pos != 0 {
|
||||
// This is not the first chunk, let's wait until we know the total
|
||||
// size of the payload so we can see if we have read the entire
|
||||
// object.
|
||||
total := d.getTotalBytes()
|
||||
|
||||
if total < 0 {
|
||||
// Total has not yet been set, so sleep and loop around while
|
||||
// waiting for our first worker to resolve this value.
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
continue
|
||||
} else if d.pos >= total {
|
||||
break // We're finished queueing chunks
|
||||
}
|
||||
}
|
||||
|
||||
// Queue the next range of bytes to read.
|
||||
ch <- dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize}
|
||||
d.pos += d.ctx.PartSize
|
||||
}
|
||||
|
||||
// Wait for completion
|
||||
close(ch)
|
||||
d.wg.Wait()
|
||||
|
||||
// Return error
|
||||
return d.written, d.err
|
||||
}
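// Illustrative sketch of the chunking above (the sizes are made-up examples):
// with the default 5MB PartSize and Concurrency of 5, a 12MB object is queued
// as three dlchunk ranges (roughly bytes=0-5242879, 5242880-10485759 and
// 10485760-15728639), with the final ranged GET simply returning fewer bytes
// than requested once the end of the object is reached.
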
|
||||
|
||||
// downloadPart is an individual goroutine worker reading from the ch channel
|
||||
// and performing a GetObject request on the data with a given byte range.
|
||||
//
|
||||
// If this is the first worker, this operation also resolves the total number
|
||||
// of bytes to be read so that the worker manager knows when it is finished.
|
||||
func (d *downloader) downloadPart(ch chan dlchunk) {
|
||||
defer d.wg.Done()
|
||||
|
||||
for {
|
||||
chunk, ok := <-ch
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
if d.geterr() == nil {
|
||||
// Get the next byte range of data
|
||||
in := &s3.GetObjectInput{}
|
||||
awsutil.Copy(in, d.in)
|
||||
rng := fmt.Sprintf("bytes=%d-%d",
|
||||
chunk.start, chunk.start+chunk.size-1)
|
||||
in.Range = &rng
|
||||
|
||||
req, resp := d.ctx.S3.GetObjectRequest(in)
|
||||
req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
|
||||
err := req.Send()
|
||||
|
||||
if err != nil {
|
||||
d.seterr(err)
|
||||
} else {
|
||||
d.setTotalBytes(resp) // Set total if not yet set.
|
||||
|
||||
n, err := io.Copy(&chunk, resp.Body)
|
||||
resp.Body.Close()
|
||||
|
||||
if err != nil {
|
||||
d.seterr(err)
|
||||
}
|
||||
d.incrwritten(n)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getTotalBytes is a thread-safe getter for retrieving the total byte status.
|
||||
func (d *downloader) getTotalBytes() int64 {
|
||||
d.m.Lock()
|
||||
defer d.m.Unlock()
|
||||
|
||||
return d.totalBytes
|
||||
}
|
||||
|
||||
// setTotalBytes is a thread-safe setter for the total byte status. It extracts
// the object's total size from the Content-Range header when the object is
// downloaded in chunks, and falls back to Content-Length when the response has
// no Content-Range, which happens when the full object fits within a single
// PartSize.
func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) {
|
||||
d.m.Lock()
|
||||
defer d.m.Unlock()
|
||||
|
||||
if d.totalBytes >= 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if resp.ContentRange == nil {
|
||||
// ContentRange is nil when the full file content is provided in a single,
// un-chunked response. Use ContentLength instead.
d.totalBytes = *resp.ContentLength
|
||||
return
|
||||
}
|
||||
|
||||
parts := strings.Split(*resp.ContentRange, "/")
|
||||
total, err := strconv.ParseInt(parts[len(parts)-1], 10, 64)
|
||||
if err != nil {
|
||||
d.err = err
|
||||
return
|
||||
}
|
||||
|
||||
d.totalBytes = total
|
||||
}
|
||||
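// Worked example (illustrative, not part of the original source): a ranged
// response carries a header such as "bytes 0-5242879/12582912"; splitting on
// "/" and parsing the last element gives the object's total size, exactly as
// setTotalBytes does above.
//
// parts := strings.Split("bytes 0-5242879/12582912", "/")
// total, _ := strconv.ParseInt(parts[len(parts)-1], 10, 64) // total == 12582912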
|
||||
func (d *downloader) incrwritten(n int64) {
|
||||
d.m.Lock()
|
||||
defer d.m.Unlock()
|
||||
|
||||
d.written += n
|
||||
}
|
||||
|
||||
// geterr is a thread-safe getter for the error object
|
||||
func (d *downloader) geterr() error {
|
||||
d.m.Lock()
|
||||
defer d.m.Unlock()
|
||||
|
||||
return d.err
|
||||
}
|
||||
|
||||
// seterr is a thread-safe setter for the error object
|
||||
func (d *downloader) seterr(e error) {
|
||||
d.m.Lock()
|
||||
defer d.m.Unlock()
|
||||
|
||||
d.err = e
|
||||
}
|
||||
|
||||
// dlchunk represents a single chunk of data to be written by a worker routine.
// This structure also implements an io.SectionReader style interface for
// io.WriterAt, effectively making it an io.SectionWriter (which does not
// exist in the standard library).
type dlchunk struct {
|
||||
w io.WriterAt
|
||||
start int64
|
||||
size int64
|
||||
cur int64
|
||||
}
|
||||
|
||||
// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start
|
||||
// position to its end (or EOF).
|
||||
func (c *dlchunk) Write(p []byte) (n int, err error) {
|
||||
if c.cur >= c.size {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
n, err = c.w.WriteAt(p, c.start+c.cur)
|
||||
c.cur += int64(n)
|
||||
|
||||
return
|
||||
}
|
|
@ -1,661 +0,0 @@
|
|||
package s3manager
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3iface"
|
||||
)
|
||||
|
||||
// MaxUploadParts is the maximum allowed number of parts in a multi-part upload
|
||||
// on Amazon S3.
|
||||
const MaxUploadParts = 10000
|
||||
|
||||
// MinUploadPartSize is the minimum allowed part size when uploading a part to
|
||||
// Amazon S3.
|
||||
const MinUploadPartSize int64 = 1024 * 1024 * 5
|
||||
|
||||
// DefaultUploadPartSize is the default part size to buffer chunks of a
|
||||
// payload into.
|
||||
const DefaultUploadPartSize = MinUploadPartSize
|
||||
|
||||
// DefaultUploadConcurrency is the default number of goroutines to spin up when
|
||||
// using Upload().
|
||||
const DefaultUploadConcurrency = 5
|
||||
|
||||
// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned
// will satisfy this interface when a multipart upload failed to upload all
// chunks to S3. In the case of a failure the UploadID is needed to operate on
// the chunks, if any, which were uploaded.
//
|
||||
// Example:
|
||||
//
|
||||
// u := s3manager.NewUploader(opts)
|
||||
// output, err := u.upload(input)
|
||||
// if err != nil {
|
||||
// if multierr, ok := err.(MultiUploadFailure); ok {
|
||||
// // Process error and its associated uploadID
|
||||
// fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
|
||||
// } else {
|
||||
// // Process error generically
|
||||
// fmt.Println("Error:", err.Error())
|
||||
// }
|
||||
// }
|
||||
//
|
||||
type MultiUploadFailure interface {
|
||||
awserr.Error
|
||||
|
||||
// Returns the upload id for the S3 multipart upload that failed.
|
||||
UploadID() string
|
||||
}
|
||||
|
||||
// So that the Error interface type can be included as an anonymous field
|
||||
// in the multiUploadError struct and not conflict with the error.Error() method.
|
||||
type awsError awserr.Error
|
||||
|
||||
// A multiUploadError wraps the upload ID of a failed S3 multipart upload.
// Composed of BaseError for code, message, and original error.
//
// Should be used for an error that occurred while an S3 multipart upload was
// failing and an upload ID is available; if no upload ID is available, a more
// relevant error should be returned instead.
type multiUploadError struct {
|
||||
awsError
|
||||
|
||||
// ID for multipart upload which failed.
|
||||
uploadID string
|
||||
}
|
||||
|
||||
// Error returns the string representation of the error.
|
||||
//
|
||||
// See apierr.BaseError ErrorWithExtra for output format
|
||||
//
|
||||
// Satisfies the error interface.
|
||||
func (m multiUploadError) Error() string {
|
||||
extra := fmt.Sprintf("upload id: %s", m.uploadID)
|
||||
return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr())
|
||||
}
|
||||
|
||||
// String returns the string representation of the error.
|
||||
// Alias for Error to satisfy the stringer interface.
|
||||
func (m multiUploadError) String() string {
|
||||
return m.Error()
|
||||
}
|
||||
|
||||
// UploadID returns the id of the S3 upload which failed.
|
||||
func (m multiUploadError) UploadID() string {
|
||||
return m.uploadID
|
||||
}
|
||||
|
||||
// UploadInput contains all input for upload requests to Amazon S3.
|
||||
type UploadInput struct {
|
||||
// The canned ACL to apply to the object.
|
||||
ACL *string `location:"header" locationName:"x-amz-acl" type:"string"`
|
||||
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
||||
// Specifies caching behavior along the request/reply chain.
|
||||
CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
|
||||
|
||||
// Specifies presentational information for the object.
|
||||
ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
|
||||
|
||||
// Specifies what content encodings have been applied to the object and thus
|
||||
// what decoding mechanisms must be applied to obtain the media-type referenced
|
||||
// by the Content-Type header field.
|
||||
ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
|
||||
|
||||
// The language the content is in.
|
||||
ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
|
||||
|
||||
// A standard MIME type describing the format of the object data.
|
||||
ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
|
||||
|
||||
// The date and time at which the object is no longer cacheable.
|
||||
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
|
||||
|
||||
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
|
||||
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
|
||||
|
||||
// Allows grantee to read the object data and its metadata.
|
||||
GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
|
||||
|
||||
// Allows grantee to read the object ACL.
|
||||
GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
|
||||
|
||||
// Allows grantee to write the ACL for the applicable object.
|
||||
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
|
||||
|
||||
Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
|
||||
|
||||
// A map of metadata to store with the object in S3.
|
||||
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
|
||||
|
||||
// Confirms that the requester knows that she or he will be charged for the
|
||||
// request. Bucket owners need not specify this parameter in their requests.
|
||||
// Documentation on downloading objects from requester pays buckets can be found
|
||||
// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
|
||||
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
|
||||
|
||||
// Specifies the algorithm to use to when encrypting the object (e.g., AES256,
|
||||
// aws:kms).
|
||||
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
|
||||
|
||||
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
|
||||
// data. This value is used to store the object and then it is discarded; Amazon
|
||||
// does not store the encryption key. The key must be appropriate for use with
|
||||
// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
|
||||
// header.
|
||||
SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
|
||||
|
||||
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
|
||||
// Amazon S3 uses this header for a message integrity check to ensure the encryption
|
||||
// key was transmitted without error.
|
||||
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
|
||||
|
||||
// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
|
||||
// requests for an object protected by AWS KMS will fail if not made via SSL
|
||||
// or using SigV4. Documentation on configuring any of the officially supported
|
||||
// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
|
||||
SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
|
||||
|
||||
// The Server-side encryption algorithm used when storing this object in S3
|
||||
// (e.g., AES256, aws:kms).
|
||||
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
|
||||
|
||||
// The type of storage to use for the object. Defaults to 'STANDARD'.
|
||||
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"`
|
||||
|
||||
// If the bucket is configured as a website, redirects requests for this object
|
||||
// to another object in the same bucket or to an external URL. Amazon S3 stores
|
||||
// the value of this header in the object metadata.
|
||||
WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
|
||||
|
||||
// The readable body payload to send to S3.
|
||||
Body io.Reader
|
||||
}
|
||||
|
||||
// UploadOutput represents a response from the Upload() call.
|
||||
type UploadOutput struct {
|
||||
// The URL where the object was uploaded to.
|
||||
Location string
|
||||
|
||||
// The version of the object that was uploaded. Will only be populated if
|
||||
// the S3 Bucket is versioned. If the bucket is not versioned this field
|
||||
// will not be set.
|
||||
VersionID *string
|
||||
|
||||
// The ID for a multipart upload to S3. In the case of an error the error
|
||||
// can be cast to the MultiUploadFailure interface to extract the upload ID.
|
||||
UploadID string
|
||||
}
|
||||
|
||||
// The Uploader structure that calls Upload(). It is safe to call Upload()
|
||||
// on this structure for multiple objects and across concurrent goroutines.
|
||||
// Mutating the Uploader's properties concurrently is not safe.
|
||||
type Uploader struct {
|
||||
// The buffer size (in bytes) to use when buffering data into chunks and
|
||||
// sending them as parts to S3. The minimum allowed part size is 5MB, and
|
||||
// if this value is set to zero, the DefaultPartSize value will be used.
|
||||
PartSize int64
|
||||
|
||||
// The number of goroutines to spin up in parallel when sending parts.
|
||||
// If this is set to zero, the DefaultConcurrency value will be used.
|
||||
Concurrency int
|
||||
|
||||
// Setting this value to true will cause the SDK to avoid calling
|
||||
// AbortMultipartUpload on a failure, leaving all successfully uploaded
|
||||
// parts on S3 for manual recovery.
|
||||
//
|
||||
// Note that storing parts of an incomplete multipart upload counts towards
|
||||
// space usage on S3 and will add additional costs if not cleaned up.
|
||||
LeavePartsOnError bool
|
||||
|
||||
// MaxUploadParts is the maximum number of parts that will be uploaded to S3,
// and is used to calculate the part size of the object being uploaded. For
// example, a 5GB file with MaxUploadParts set to 100 will be uploaded as
// 100 parts of 50MB each. The value is capped by the S3 limit of
// MaxUploadParts (10,000 parts).
MaxUploadParts int
|
||||
|
||||
// The client to use when uploading to S3.
|
||||
S3 s3iface.S3API
|
||||
}
|
||||
|
||||
// NewUploader creates a new Uploader instance to upload objects to S3. Pass in
|
||||
// additional functional options to customize the uploader's behavior. Requires a
|
||||
// client.ConfigProvider in order to create a S3 service client. The session.Session
|
||||
// satisfies the client.ConfigProvider interface.
|
||||
//
|
||||
// Example:
|
||||
// // The session the S3 Uploader will use
|
||||
// sess := session.New()
|
||||
//
|
||||
// // Create an uploader with the session and default options
|
||||
// uploader := s3manager.NewUploader(sess)
|
||||
//
|
||||
// // Create an uploader with the session and custom options
|
||||
// uploader := s3manager.NewUploader(session, func(u *s3manager.Uploader) {
|
||||
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
|
||||
// })
|
||||
func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
|
||||
u := &Uploader{
|
||||
S3: s3.New(c),
|
||||
PartSize: DefaultUploadPartSize,
|
||||
Concurrency: DefaultUploadConcurrency,
|
||||
LeavePartsOnError: false,
|
||||
MaxUploadParts: MaxUploadParts,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(u)
|
||||
}
|
||||
|
||||
return u
|
||||
}
|
||||
|
||||
// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in
|
||||
// additional functional options to customize the uploader's behavior. Requires
|
||||
// a S3 service client to make S3 API calls.
|
||||
//
|
||||
// Example:
|
||||
// // S3 service client the Upload manager will use.
|
||||
// s3Svc := s3.New(session.New())
|
||||
//
|
||||
// // Create an uploader with S3 client and default options
|
||||
// uploader := s3manager.NewUploaderWithClient(s3Svc)
|
||||
//
|
||||
// // Create an uploader with S3 client and custom options
|
||||
// uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) {
|
||||
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
|
||||
// })
|
||||
func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
|
||||
u := &Uploader{
|
||||
S3: svc,
|
||||
PartSize: DefaultUploadPartSize,
|
||||
Concurrency: DefaultUploadConcurrency,
|
||||
LeavePartsOnError: false,
|
||||
MaxUploadParts: MaxUploadParts,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(u)
|
||||
}
|
||||
|
||||
return u
|
||||
}
|
||||
|
||||
// Upload uploads an object to S3, intelligently buffering large files into
|
||||
// smaller chunks and sending them in parallel across multiple goroutines. You
|
||||
// can configure the buffer size and concurrency through the Uploader's parameters.
|
||||
//
|
||||
// Additional functional options can be provided to configure the individual
|
||||
// upload. These options are copies of the Uploader instance Upload is called from.
|
||||
// Modifying the options will not impact the original Uploader instance.
|
||||
//
|
||||
// It is safe to call this method concurrently across goroutines.
|
||||
//
|
||||
// Example:
|
||||
// // Upload input parameters
|
||||
// upParams := &s3manager.UploadInput{
|
||||
// Bucket: &bucketName,
|
||||
// Key: &keyName,
|
||||
// Body: file,
|
||||
// }
|
||||
//
|
||||
// // Perform an upload.
|
||||
// result, err := uploader.Upload(upParams)
|
||||
//
|
||||
// // Perform upload with options different than those in the Uploader.
|
||||
// result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) {
|
||||
// u.PartSize = 10 * 1024 * 1024 // 10MB part size
|
||||
// u.LeavePartsOnError = true // Don't delete the parts if the upload fails.
|
||||
// })
|
||||
func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) {
|
||||
i := uploader{in: input, ctx: u}
|
||||
|
||||
for _, option := range options {
|
||||
option(&i.ctx)
|
||||
}
|
||||
|
||||
return i.upload()
|
||||
}
|
||||
|
||||
// internal structure to manage an upload to S3.
|
||||
type uploader struct {
|
||||
ctx Uploader
|
||||
|
||||
in *UploadInput
|
||||
|
||||
readerPos int64 // current reader position
|
||||
totalSize int64 // set to -1 if the size is not known
|
||||
}
|
||||
|
||||
// internal logic for deciding whether to upload a single part or use a
|
||||
// multipart upload.
|
||||
func (u *uploader) upload() (*UploadOutput, error) {
|
||||
u.init()
|
||||
|
||||
if u.ctx.PartSize < MinUploadPartSize {
|
||||
msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize)
|
||||
return nil, awserr.New("ConfigError", msg, nil)
|
||||
}
|
||||
|
||||
// Do one read to determine if we have more than one part
|
||||
buf, err := u.nextReader()
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF { // single part
|
||||
return u.singlePart(buf)
|
||||
} else if err != nil {
|
||||
return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
|
||||
}
|
||||
|
||||
mu := multiuploader{uploader: u}
|
||||
return mu.upload(buf)
|
||||
}
|
||||
|
||||
// init will initialize all default options.
|
||||
func (u *uploader) init() {
|
||||
if u.ctx.Concurrency == 0 {
|
||||
u.ctx.Concurrency = DefaultUploadConcurrency
|
||||
}
|
||||
if u.ctx.PartSize == 0 {
|
||||
u.ctx.PartSize = DefaultUploadPartSize
|
||||
}
|
||||
|
||||
// Try to get the total size for some optimizations
|
||||
u.initSize()
|
||||
}
|
||||
|
||||
// initSize tries to detect the total stream size, setting u.totalSize. If
|
||||
// the size is not known, totalSize is set to -1.
|
||||
func (u *uploader) initSize() {
|
||||
u.totalSize = -1
|
||||
|
||||
switch r := u.in.Body.(type) {
|
||||
case io.Seeker:
|
||||
pos, _ := r.Seek(0, 1)
|
||||
defer r.Seek(pos, 0)
|
||||
|
||||
n, err := r.Seek(0, 2)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
u.totalSize = n
|
||||
|
||||
// Try to adjust partSize if it is too small and account for
|
||||
// integer division truncation.
|
||||
if u.totalSize/u.ctx.PartSize >= int64(u.ctx.MaxUploadParts) {
|
||||
// Add one to the part size to account for remainders
|
||||
// during the size calculation. e.g odd number of bytes.
|
||||
u.ctx.PartSize = (u.totalSize / int64(u.ctx.MaxUploadParts)) + 1
|
||||
}
|
||||
}
|
||||
}
|
||||
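// Worked example (illustrative, assumed values): a 6GiB seekable body with the
// default 5MB part size needs roughly 1228 parts, well under the default
// MaxUploadParts of 10,000, so PartSize is left unchanged. If MaxUploadParts
// were 100, then 6GiB/5MB >= 100 and PartSize would be raised to
// 6GiB/100 + 1 (about 61MB) so the upload still fits within 100 parts.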
|
||||
// nextReader returns a seekable reader representing the next packet of data.
|
||||
// This operation increases the shared u.readerPos counter, but note that it
|
||||
// does not need to be wrapped in a mutex because nextReader is only called
|
||||
// from the main thread.
|
||||
func (u *uploader) nextReader() (io.ReadSeeker, error) {
|
||||
switch r := u.in.Body.(type) {
|
||||
case io.ReaderAt:
|
||||
var err error
|
||||
|
||||
n := u.ctx.PartSize
|
||||
if u.totalSize >= 0 {
|
||||
bytesLeft := u.totalSize - u.readerPos
|
||||
|
||||
if bytesLeft == 0 {
|
||||
err = io.EOF
|
||||
n = bytesLeft
|
||||
} else if bytesLeft <= u.ctx.PartSize {
|
||||
err = io.ErrUnexpectedEOF
|
||||
n = bytesLeft
|
||||
}
|
||||
}
|
||||
|
||||
buf := io.NewSectionReader(r, u.readerPos, n)
|
||||
u.readerPos += n
|
||||
|
||||
return buf, err
|
||||
|
||||
default:
|
||||
packet := make([]byte, u.ctx.PartSize)
|
||||
n, err := io.ReadFull(u.in.Body, packet)
|
||||
u.readerPos += int64(n)
|
||||
|
||||
return bytes.NewReader(packet[0:n]), err
|
||||
}
|
||||
}
|
||||
|
||||
// singlePart contains upload logic for uploading a single chunk via
|
||||
// a regular PutObject request. Multipart requests require at least two
|
||||
// parts, or at least 5MB of data.
|
||||
func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) {
|
||||
params := &s3.PutObjectInput{}
|
||||
awsutil.Copy(params, u.in)
|
||||
params.Body = buf
|
||||
|
||||
req, out := u.ctx.S3.PutObjectRequest(params)
|
||||
req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
|
||||
if err := req.Send(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
url := req.HTTPRequest.URL.String()
|
||||
return &UploadOutput{
|
||||
Location: url,
|
||||
VersionID: out.VersionId,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// internal structure to manage a specific multipart upload to S3.
|
||||
type multiuploader struct {
|
||||
*uploader
|
||||
wg sync.WaitGroup
|
||||
m sync.Mutex
|
||||
err error
|
||||
uploadID string
|
||||
parts completedParts
|
||||
}
|
||||
|
||||
// keeps track of a single chunk of data being sent to S3.
|
||||
type chunk struct {
|
||||
buf io.ReadSeeker
|
||||
num int64
|
||||
}
|
||||
|
||||
// completedParts is a wrapper to make parts sortable by their part number,
|
||||
// since S3 requires this list to be sent in sorted order.
|
||||
type completedParts []*s3.CompletedPart
|
||||
|
||||
func (a completedParts) Len() int { return len(a) }
|
||||
func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }
|
||||
|
||||
// upload will perform a multipart upload using the firstBuf buffer containing
|
||||
// the first chunk of data.
|
||||
func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
|
||||
params := &s3.CreateMultipartUploadInput{}
|
||||
awsutil.Copy(params, u.in)
|
||||
|
||||
// Create the multipart
|
||||
req, resp := u.ctx.S3.CreateMultipartUploadRequest(params)
|
||||
req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
|
||||
if err := req.Send(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
u.uploadID = *resp.UploadId
|
||||
|
||||
// Create the workers
|
||||
ch := make(chan chunk, u.ctx.Concurrency)
|
||||
for i := 0; i < u.ctx.Concurrency; i++ {
|
||||
u.wg.Add(1)
|
||||
go u.readChunk(ch)
|
||||
}
|
||||
|
||||
// Send part 1 to the workers
|
||||
var num int64 = 1
|
||||
ch <- chunk{buf: firstBuf, num: num}
|
||||
|
||||
// Read and queue the rest of the parts
|
||||
for u.geterr() == nil {
|
||||
// This upload exceeded maximum number of supported parts, error now.
|
||||
if num > int64(u.ctx.MaxUploadParts) || num > int64(MaxUploadParts) {
|
||||
var msg string
|
||||
if num > int64(u.ctx.MaxUploadParts) {
|
||||
msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
|
||||
u.ctx.MaxUploadParts)
|
||||
} else {
|
||||
msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
|
||||
MaxUploadParts)
|
||||
}
|
||||
u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
|
||||
break
|
||||
}
|
||||
num++
|
||||
|
||||
buf, err := u.nextReader()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
|
||||
ch <- chunk{buf: buf, num: num}
|
||||
|
||||
if err != nil && err != io.ErrUnexpectedEOF {
|
||||
u.seterr(awserr.New(
|
||||
"ReadRequestBody",
|
||||
"read multipart upload data failed",
|
||||
err))
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Close the channel, wait for workers, and complete upload
|
||||
close(ch)
|
||||
u.wg.Wait()
|
||||
complete := u.complete()
|
||||
|
||||
if err := u.geterr(); err != nil {
|
||||
return nil, &multiUploadError{
|
||||
awsError: awserr.New(
|
||||
"MultipartUpload",
|
||||
"upload multipart failed",
|
||||
err),
|
||||
uploadID: u.uploadID,
|
||||
}
|
||||
}
|
||||
return &UploadOutput{
|
||||
Location: *complete.Location,
|
||||
VersionID: complete.VersionId,
|
||||
UploadID: u.uploadID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// readChunk runs in worker goroutines to pull chunks off of the ch channel
|
||||
// and send() them as UploadPart requests.
|
||||
func (u *multiuploader) readChunk(ch chan chunk) {
|
||||
defer u.wg.Done()
|
||||
for {
|
||||
data, ok := <-ch
|
||||
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
if u.geterr() == nil {
|
||||
if err := u.send(data); err != nil {
|
||||
u.seterr(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// send performs an UploadPart request and keeps track of the completed
|
||||
// part information.
|
||||
func (u *multiuploader) send(c chunk) error {
|
||||
req, resp := u.ctx.S3.UploadPartRequest(&s3.UploadPartInput{
|
||||
Bucket: u.in.Bucket,
|
||||
Key: u.in.Key,
|
||||
Body: c.buf,
|
||||
UploadId: &u.uploadID,
|
||||
PartNumber: &c.num,
|
||||
})
|
||||
req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
|
||||
if err := req.Send(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n := c.num
|
||||
completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n}
|
||||
|
||||
u.m.Lock()
|
||||
u.parts = append(u.parts, completed)
|
||||
u.m.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// geterr is a thread-safe getter for the error object
|
||||
func (u *multiuploader) geterr() error {
|
||||
u.m.Lock()
|
||||
defer u.m.Unlock()
|
||||
|
||||
return u.err
|
||||
}
|
||||
|
||||
// seterr is a thread-safe setter for the error object
|
||||
func (u *multiuploader) seterr(e error) {
|
||||
u.m.Lock()
|
||||
defer u.m.Unlock()
|
||||
|
||||
u.err = e
|
||||
}
|
||||
|
||||
// fail will abort the multipart unless LeavePartsOnError is set to true.
|
||||
func (u *multiuploader) fail() {
|
||||
if u.ctx.LeavePartsOnError {
|
||||
return
|
||||
}
|
||||
|
||||
req, _ := u.ctx.S3.AbortMultipartUploadRequest(&s3.AbortMultipartUploadInput{
|
||||
Bucket: u.in.Bucket,
|
||||
Key: u.in.Key,
|
||||
UploadId: &u.uploadID,
|
||||
})
|
||||
req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
|
||||
req.Send()
|
||||
}
|
||||
|
||||
// complete successfully completes a multipart upload and returns the response.
|
||||
func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
|
||||
if u.geterr() != nil {
|
||||
u.fail()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Parts must be sorted in PartNumber order.
|
||||
sort.Sort(u.parts)
|
||||
|
||||
req, resp := u.ctx.S3.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{
|
||||
Bucket: u.in.Bucket,
|
||||
Key: u.in.Key,
|
||||
UploadId: &u.uploadID,
|
||||
MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts},
|
||||
})
|
||||
req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
|
||||
if err := req.Send(); err != nil {
|
||||
u.seterr(err)
|
||||
u.fail()
|
||||
}
|
||||
|
||||
return resp
|
||||
}
|
File diff suppressed because it is too large
@ -1,292 +0,0 @@
|
|||
// Package quantile computes approximate quantiles over an unbounded data
|
||||
// stream within low memory and CPU bounds.
|
||||
//
|
||||
// A small amount of accuracy is traded to achieve the above properties.
|
||||
//
|
||||
// Multiple streams can be merged before calling Query to generate a single set
|
||||
// of results. This is meaningful when the streams represent the same type of
|
||||
// data. See Merge and Samples.
|
||||
//
|
||||
// For more detailed information about the algorithm used, see:
|
||||
//
|
||||
// Effective Computation of Biased Quantiles over Data Streams
|
||||
//
|
||||
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
|
||||
package quantile
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Sample holds an observed value and meta information for compression. JSON
|
||||
// tags have been added for convenience.
|
||||
type Sample struct {
|
||||
Value float64 `json:",string"`
|
||||
Width float64 `json:",string"`
|
||||
Delta float64 `json:",string"`
|
||||
}
|
||||
|
||||
// Samples represents a slice of samples. It implements sort.Interface.
|
||||
type Samples []Sample
|
||||
|
||||
func (a Samples) Len() int { return len(a) }
|
||||
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
|
||||
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
|
||||
type invariant func(s *stream, r float64) float64
|
||||
|
||||
// NewLowBiased returns an initialized Stream for low-biased quantiles
|
||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||
// error guarantees can still be given even for the lower ranks of the data
|
||||
// distribution.
|
||||
//
|
||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||
// properties.
|
||||
func NewLowBiased(epsilon float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
return 2 * epsilon * r
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// NewHighBiased returns an initialized Stream for high-biased quantiles
|
||||
// (e.g. 0.9, 0.99, 0.999) where the needed quantiles are not known a priori, but
|
||||
// error guarantees can still be given even for the higher ranks of the data
|
||||
// distribution.
|
||||
//
|
||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||
// properties.
|
||||
func NewHighBiased(epsilon float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
return 2 * epsilon * (s.n - r)
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// NewTargeted returns an initialized Stream concerned with a particular set of
|
||||
// quantile values that are supplied a priori. Knowing these a priori reduces
|
||||
// space and computation time. The targets map maps the desired quantiles to
|
||||
// their absolute errors, i.e. the true quantile of a value returned by a query
|
||||
// is guaranteed to be within (Quantile±Epsilon).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
|
||||
func NewTargeted(targets map[float64]float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
var m = math.MaxFloat64
|
||||
var f float64
|
||||
for quantile, epsilon := range targets {
|
||||
if quantile*s.n <= r {
|
||||
f = (2 * epsilon * r) / quantile
|
||||
} else {
|
||||
f = (2 * epsilon * (s.n - r)) / (1 - quantile)
|
||||
}
|
||||
if f < m {
|
||||
m = f
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
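// Usage sketch (illustrative, not part of the original source; observations is
// a hypothetical []float64 of measurements):
//
// q := quantile.NewTargeted(map[float64]float64{
// 0.50: 0.005, // median, ±0.5% absolute error
// 0.99: 0.001, // 99th percentile, ±0.1% absolute error
// })
// for _, v := range observations {
// q.Insert(v)
// }
// p50, p99 := q.Query(0.50), q.Query(0.99)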
|
||||
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
|
||||
// design. Take care when using across multiple goroutines.
|
||||
type Stream struct {
|
||||
*stream
|
||||
b Samples
|
||||
sorted bool
|
||||
}
|
||||
|
||||
func newStream(ƒ invariant) *Stream {
|
||||
x := &stream{ƒ: ƒ}
|
||||
return &Stream{x, make(Samples, 0, 500), true}
|
||||
}
|
||||
|
||||
// Insert inserts v into the stream.
|
||||
func (s *Stream) Insert(v float64) {
|
||||
s.insert(Sample{Value: v, Width: 1})
|
||||
}
|
||||
|
||||
func (s *Stream) insert(sample Sample) {
|
||||
s.b = append(s.b, sample)
|
||||
s.sorted = false
|
||||
if len(s.b) == cap(s.b) {
|
||||
s.flush()
|
||||
}
|
||||
}
|
||||
|
||||
// Query returns the computed q-th percentile value. If s was created with
|
||||
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
|
||||
// will return an unspecified result.
|
||||
func (s *Stream) Query(q float64) float64 {
|
||||
if !s.flushed() {
|
||||
// Fast path when there hasn't been enough data for a flush;
|
||||
// this also yields better accuracy for small sets of data.
|
||||
l := len(s.b)
|
||||
if l == 0 {
|
||||
return 0
|
||||
}
|
||||
i := int(float64(l) * q)
|
||||
if i > 0 {
|
||||
i -= 1
|
||||
}
|
||||
s.maybeSort()
|
||||
return s.b[i].Value
|
||||
}
|
||||
s.flush()
|
||||
return s.stream.query(q)
|
||||
}
|
||||
|
||||
// Merge merges samples into the underlying stream's samples. This is handy when
|
||||
// merging multiple streams from separate threads, database shards, etc.
|
||||
//
|
||||
// ATTENTION: This method is broken and does not yield correct results. The
|
||||
// underlying algorithm is not capable of merging streams correctly.
|
||||
func (s *Stream) Merge(samples Samples) {
|
||||
sort.Sort(samples)
|
||||
s.stream.merge(samples)
|
||||
}
|
||||
|
||||
// Reset reinitializes and clears the list reusing the samples buffer memory.
|
||||
func (s *Stream) Reset() {
|
||||
s.stream.reset()
|
||||
s.b = s.b[:0]
|
||||
}
|
||||
|
||||
// Samples returns stream samples held by s.
|
||||
func (s *Stream) Samples() Samples {
|
||||
if !s.flushed() {
|
||||
return s.b
|
||||
}
|
||||
s.flush()
|
||||
return s.stream.samples()
|
||||
}
|
||||
|
||||
// Count returns the total number of samples observed in the stream
|
||||
// since initialization.
|
||||
func (s *Stream) Count() int {
|
||||
return len(s.b) + s.stream.count()
|
||||
}
|
||||
|
||||
func (s *Stream) flush() {
|
||||
s.maybeSort()
|
||||
s.stream.merge(s.b)
|
||||
s.b = s.b[:0]
|
||||
}
|
||||
|
||||
func (s *Stream) maybeSort() {
|
||||
if !s.sorted {
|
||||
s.sorted = true
|
||||
sort.Sort(s.b)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Stream) flushed() bool {
|
||||
return len(s.stream.l) > 0
|
||||
}
|
||||
|
||||
type stream struct {
|
||||
n float64
|
||||
l []Sample
|
||||
ƒ invariant
|
||||
}
|
||||
|
||||
func (s *stream) reset() {
|
||||
s.l = s.l[:0]
|
||||
s.n = 0
|
||||
}
|
||||
|
||||
func (s *stream) insert(v float64) {
|
||||
s.merge(Samples{{v, 1, 0}})
|
||||
}
|
||||
|
||||
func (s *stream) merge(samples Samples) {
|
||||
// TODO(beorn7): This tries to merge not only individual samples, but
|
||||
// whole summaries. The paper doesn't mention merging summaries at
|
||||
// all. Unittests show that the merging is inaccurate. Find out how to
|
||||
// do merges properly.
|
||||
var r float64
|
||||
i := 0
|
||||
for _, sample := range samples {
|
||||
for ; i < len(s.l); i++ {
|
||||
c := s.l[i]
|
||||
if c.Value > sample.Value {
|
||||
// Insert at position i.
|
||||
s.l = append(s.l, Sample{})
|
||||
copy(s.l[i+1:], s.l[i:])
|
||||
s.l[i] = Sample{
|
||||
sample.Value,
|
||||
sample.Width,
|
||||
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
|
||||
// TODO(beorn7): How to calculate delta correctly?
|
||||
}
|
||||
i++
|
||||
goto inserted
|
||||
}
|
||||
r += c.Width
|
||||
}
|
||||
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
|
||||
i++
|
||||
inserted:
|
||||
s.n += sample.Width
|
||||
r += sample.Width
|
||||
}
|
||||
s.compress()
|
||||
}
|
||||
|
||||
func (s *stream) count() int {
|
||||
return int(s.n)
|
||||
}
|
||||
|
||||
func (s *stream) query(q float64) float64 {
|
||||
t := math.Ceil(q * s.n)
|
||||
t += math.Ceil(s.ƒ(s, t) / 2)
|
||||
p := s.l[0]
|
||||
var r float64
|
||||
for _, c := range s.l[1:] {
|
||||
r += p.Width
|
||||
if r+c.Width+c.Delta > t {
|
||||
return p.Value
|
||||
}
|
||||
p = c
|
||||
}
|
||||
return p.Value
|
||||
}
|
||||
|
||||
func (s *stream) compress() {
|
||||
if len(s.l) < 2 {
|
||||
return
|
||||
}
|
||||
x := s.l[len(s.l)-1]
|
||||
xi := len(s.l) - 1
|
||||
r := s.n - 1 - x.Width
|
||||
|
||||
for i := len(s.l) - 2; i >= 0; i-- {
|
||||
c := s.l[i]
|
||||
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
|
||||
x.Width += c.Width
|
||||
s.l[xi] = x
|
||||
// Remove element at i.
|
||||
copy(s.l[i:], s.l[i+1:])
|
||||
s.l = s.l[:len(s.l)-1]
|
||||
xi -= 1
|
||||
} else {
|
||||
x = c
|
||||
xi = i
|
||||
}
|
||||
r -= c.Width
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stream) samples() Samples {
|
||||
samples := make(Samples, len(s.l))
|
||||
copy(samples, s.l)
|
||||
return samples
|
||||
}
|
|
@ -1,191 +0,0 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, and
|
||||
distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||
owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||
that control, are controlled by, or are under common control with that entity.
|
||||
For the purposes of this definition, "control" means (i) the power, direct or
|
||||
indirect, to cause the direction or management of such entity, whether by
|
||||
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||
permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications, including
|
||||
but not limited to software source code, documentation source, and configuration
|
||||
files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical transformation or
|
||||
translation of a Source form, including but not limited to compiled object code,
|
||||
generated documentation, and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||
available under the License, as indicated by a copyright notice that is included
|
||||
in or attached to the work (an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||
is based on (or derived from) the Work and for which the editorial revisions,
|
||||
annotations, elaborations, or other modifications represent, as a whole, an
|
||||
original work of authorship. For the purposes of this License, Derivative Works
|
||||
shall not include works that remain separable from, or merely link (or bind by
|
||||
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including the original version
|
||||
of the Work and any modifications or additions to that Work or Derivative Works
|
||||
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||
on behalf of the copyright owner. For the purposes of this definition,
|
||||
"submitted" means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems, and
|
||||
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||
the purpose of discussing and improving the Work, but excluding communication
|
||||
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||
owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||
of whom a Contribution has been received by Licensor and subsequently
|
||||
incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||
Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable (except as stated in this section) patent license to make, have
|
||||
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||
such license applies only to those patent claims licensable by such Contributor
|
||||
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||
submitted. If You institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||
Contribution incorporated within the Work constitutes direct or contributory
|
||||
patent infringement, then any patent licenses granted to You under this License
|
||||
for that Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution.
|
||||
|
||||
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||
in any medium, with or without modifications, and in Source or Object form,
|
||||
provided that You meet the following conditions:
|
||||
|
||||
You must give any other recipients of the Work or Derivative Works a copy of
|
||||
this License; and
|
||||
You must cause any modified files to carry prominent notices stating that You
|
||||
changed the files; and
|
||||
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||
all copyright, patent, trademark, and attribution notices from the Source form
|
||||
of the Work, excluding those notices that do not pertain to any part of the
|
||||
Derivative Works; and
|
||||
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||
Derivative Works that You distribute must include a readable copy of the
|
||||
attribution notices contained within such NOTICE file, excluding those notices
|
||||
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||
following places: within a NOTICE text file distributed as part of the
|
||||
Derivative Works; within the Source form or documentation, if provided along
|
||||
with the Derivative Works; or, within a display generated by the Derivative
|
||||
Works, if and wherever such third-party notices normally appear. The contents of
|
||||
the NOTICE file are for informational purposes only and do not modify the
|
||||
License. You may add Your own attribution notices within Derivative Works that
|
||||
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||
provided that such additional attribution notices cannot be construed as
|
||||
modifying the License.
|
||||
You may add Your own copyright statement to Your modifications and may provide
|
||||
additional or different license terms and conditions for use, reproduction, or
|
||||
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||
with the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions.
|
||||
|
||||
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||
conditions of this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||
any separate license agreement you may have executed with Licensor regarding
|
||||
such Contributions.
|
||||
|
||||
6. Trademarks.
|
||||
|
||||
This License does not grant permission to use the trade names, trademarks,
|
||||
service marks, or product names of the Licensor, except as required for
|
||||
reasonable and customary use in describing the origin of the Work and
|
||||
reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty.
|
||||
|
||||
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||
including, without limitation, any warranties or conditions of TITLE,
|
||||
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||
solely responsible for determining the appropriateness of using or
|
||||
redistributing the Work and assume any risks associated with Your exercise of
|
||||
permissions under this License.
|
||||
|
||||
8. Limitation of Liability.
|
||||
|
||||
In no event and under no legal theory, whether in tort (including negligence),
|
||||
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special, incidental,
|
||||
or consequential damages of any character arising as a result of this License or
|
||||
out of the use or inability to use the Work (including but not limited to
|
||||
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||
any and all other commercial damages or losses), even if such Contributor has
|
||||
been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability.
|
||||
|
||||
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||
other liability obligations and/or rights consistent with this License. However,
|
||||
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason of your
|
||||
accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work
|
||||
|
||||
To apply the Apache License to your work, attach the following boilerplate
|
||||
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||
identifying information. (Don't include the brackets!) The text should be
|
||||
enclosed in the appropriate comment syntax for the file format. We also
|
||||
recommend that a file or class name and description of purpose be included on
|
||||
the same "printed page" as the copyright notice for easier identification within
|
||||
third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -1,198 +0,0 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/godbus/dbus"
|
||||
)
|
||||
|
||||
const (
|
||||
alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`
|
||||
num = `0123456789`
|
||||
alphanum = alpha + num
|
||||
signalBuffer = 100
|
||||
)
|
||||
|
||||
// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped
|
||||
func needsEscape(i int, b byte) bool {
|
||||
// Escape everything that is not a-z-A-Z-0-9
|
||||
// Also escape 0-9 if it's the first character
|
||||
return strings.IndexByte(alphanum, b) == -1 ||
|
||||
(i == 0 && strings.IndexByte(num, b) != -1)
|
||||
}
|
||||
|
||||
// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the
|
||||
// rules that systemd uses for serializing special characters.
|
||||
func PathBusEscape(path string) string {
|
||||
// Special case the empty string
|
||||
if len(path) == 0 {
|
||||
return "_"
|
||||
}
|
||||
n := []byte{}
|
||||
for i := 0; i < len(path); i++ {
|
||||
c := path[i]
|
||||
if needsEscape(i, c) {
|
||||
e := fmt.Sprintf("_%x", c)
|
||||
n = append(n, []byte(e)...)
|
||||
} else {
|
||||
n = append(n, c)
|
||||
}
|
||||
}
|
||||
return string(n)
|
||||
}
|
||||
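// Example (illustrative, not part of the original source): every byte outside
// [a-zA-Z0-9] is replaced with "_" followed by its hex value, so
//
// PathBusEscape("foo-bar.service") // "foo_2dbar_2eservice"
//
// because '-' is 0x2d and '.' is 0x2e.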
|
||||
// Conn is a connection to systemd's dbus endpoint.
|
||||
type Conn struct {
|
||||
// sysconn/sysobj are only used to call dbus methods
|
||||
sysconn *dbus.Conn
|
||||
sysobj dbus.BusObject
|
||||
|
||||
// sigconn/sigobj are only used to receive dbus signals
|
||||
sigconn *dbus.Conn
|
||||
sigobj dbus.BusObject
|
||||
|
||||
jobListener struct {
|
||||
jobs map[dbus.ObjectPath]chan<- string
|
||||
sync.Mutex
|
||||
}
|
||||
subscriber struct {
|
||||
updateCh chan<- *SubStateUpdate
|
||||
errCh chan<- error
|
||||
sync.Mutex
|
||||
ignore map[dbus.ObjectPath]int64
|
||||
cleanIgnore int64
|
||||
}
|
||||
}
|
||||
|
||||
// New establishes a connection to the system bus and authenticates.
|
||||
// Callers should call Close() when done with the connection.
|
||||
func New() (*Conn, error) {
|
||||
return newConnection(func() (*dbus.Conn, error) {
|
||||
return dbusAuthHelloConnection(dbus.SystemBusPrivate)
|
||||
})
|
||||
}
|
||||
|
||||
// NewUserConnection establishes a connection to the session bus and
|
||||
// authenticates. This can be used to connect to systemd user instances.
|
||||
// Callers should call Close() when done with the connection.
|
||||
func NewUserConnection() (*Conn, error) {
|
||||
return newConnection(func() (*dbus.Conn, error) {
|
||||
return dbusAuthHelloConnection(dbus.SessionBusPrivate)
|
||||
})
|
||||
}
|
||||
|
||||
// NewSystemdConnection establishes a private, direct connection to systemd.
|
||||
// This can be used for communicating with systemd without a dbus daemon.
|
||||
// Callers should call Close() when done with the connection.
|
||||
func NewSystemdConnection() (*Conn, error) {
|
||||
return newConnection(func() (*dbus.Conn, error) {
|
||||
// We skip Hello when talking directly to systemd.
|
||||
return dbusAuthConnection(func() (*dbus.Conn, error) {
|
||||
return dbus.Dial("unix:path=/run/systemd/private")
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// Close closes an established connection
|
||||
func (c *Conn) Close() {
|
||||
c.sysconn.Close()
|
||||
c.sigconn.Close()
|
||||
}
|
||||
|
||||
func newConnection(createBus func() (*dbus.Conn, error)) (*Conn, error) {
|
||||
sysconn, err := createBus()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sigconn, err := createBus()
|
||||
if err != nil {
|
||||
sysconn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c := &Conn{
|
||||
sysconn: sysconn,
|
||||
sysobj: systemdObject(sysconn),
|
||||
sigconn: sigconn,
|
||||
sigobj: systemdObject(sigconn),
|
||||
}
|
||||
|
||||
c.subscriber.ignore = make(map[dbus.ObjectPath]int64)
|
||||
c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string)
|
||||
|
||||
// Setup the listeners on jobs so that we can get completions
|
||||
c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
|
||||
"type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'")
|
||||
|
||||
c.dispatch()
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager
|
||||
// interface. The value is returned in its string representation, as defined at
|
||||
// https://developer.gnome.org/glib/unstable/gvariant-text.html
|
||||
func (c *Conn) GetManagerProperty(prop string) (string, error) {
|
||||
variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return variant.String(), nil
|
||||
}
|
||||
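// Usage sketch (illustrative, not part of the original source; assumes the
// Manager interface exposes a "Version" property, as current systemd releases
// do):
//
// version, err := conn.GetManagerProperty("Version")
// if err != nil {
// // handle error
// }
// // version holds the GVariant text form of the property, e.g. "\"219\""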
|
||||
func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
|
||||
conn, err := createBus()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Only use EXTERNAL method, and hardcode the uid (not username)
|
||||
// to avoid a username lookup (which requires a dynamically linked
|
||||
// libc)
|
||||
methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))}
|
||||
|
||||
err = conn.Auth(methods)
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
|
||||
conn, err := dbusAuthConnection(createBus)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = conn.Hello(); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
func systemdObject(conn *dbus.Conn) dbus.BusObject {
|
||||
return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1"))
|
||||
}
|
|
@ -1,442 +0,0 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"path"
|
||||
"strconv"
|
||||
|
||||
"github.com/godbus/dbus"
|
||||
)
|
||||
|
||||
func (c *Conn) jobComplete(signal *dbus.Signal) {
|
||||
var id uint32
|
||||
var job dbus.ObjectPath
|
||||
var unit string
|
||||
var result string
|
||||
dbus.Store(signal.Body, &id, &job, &unit, &result)
|
||||
c.jobListener.Lock()
|
||||
out, ok := c.jobListener.jobs[job]
|
||||
if ok {
|
||||
out <- result
|
||||
delete(c.jobListener.jobs, job)
|
||||
}
|
||||
c.jobListener.Unlock()
|
||||
}
|
||||
|
||||
func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) {
|
||||
if ch != nil {
|
||||
c.jobListener.Lock()
|
||||
defer c.jobListener.Unlock()
|
||||
}
|
||||
|
||||
var p dbus.ObjectPath
|
||||
err := c.sysobj.Call(job, 0, args...).Store(&p)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if ch != nil {
|
||||
c.jobListener.jobs[p] = ch
|
||||
}
|
||||
|
||||
// ignore error since 0 is fine if conversion fails
|
||||
jobID, _ := strconv.Atoi(path.Base(string(p)))
|
||||
|
||||
return jobID, nil
|
||||
}
|
||||
|
||||
// StartUnit enqueues a start job and depending jobs, if any (unless otherwise
|
||||
// specified by the mode string).
|
||||
//
|
||||
// Takes the unit to activate, plus a mode string. The mode needs to be one of
|
||||
// replace, fail, isolate, ignore-dependencies, ignore-requirements. If
|
||||
// "replace" the call will start the unit and its dependencies, possibly
|
||||
// replacing already queued jobs that conflict with this. If "fail" the call
|
||||
// will start the unit and its dependencies, but will fail if this would change
|
||||
// an already queued job. If "isolate" the call will start the unit in question
|
||||
// and terminate all units that aren't dependencies of it. If
|
||||
// "ignore-dependencies" it will start a unit but ignore all its dependencies.
|
||||
// If "ignore-requirements" it will start a unit but only ignore the
|
||||
// requirement dependencies. It is not recommended to make use of the latter
|
||||
// two options.
|
||||
//
|
||||
// If the provided channel is non-nil, a result string will be sent to it upon
|
||||
// job completion: one of done, canceled, timeout, failed, dependency, skipped.
|
||||
// done indicates successful execution of a job. canceled indicates that a job
|
||||
// has been canceled before it finished execution. timeout indicates that the
|
||||
// job timeout was reached. failed indicates that the job failed. dependency
|
||||
// indicates that a job this job has been depending on failed and the job hence
|
||||
// has been removed too. skipped indicates that a job was skipped because it
|
||||
// didn't apply to the unit's current state.
|
||||
//
|
||||
// If no error occurs, the ID of the underlying systemd job will be returned. There
|
||||
// does exist the possibility for no error to be returned, but for the returned job
|
||||
// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint
|
||||
// should not be considered authoritative.
|
||||
//
|
||||
// If an error does occur, it will be returned to the user alongside a job ID of 0.
|
||||
func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
|
||||
}
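// Illustrative usage sketch (assumptions: the package is imported as
// "github.com/coreos/go-systemd/dbus" and "myapp.service" is a placeholder
// unit name). It starts a unit in "replace" mode and blocks until the job
// result ("done", "failed", ...) arrives on the completion channel.
package main

import (
	"fmt"
	"log"

	systemd "github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := systemd.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	done := make(chan string, 1) // buffered so the signal dispatcher never blocks
	jobID, err := conn.StartUnit("myapp.service", "replace", done)
	if err != nil {
		log.Fatalf("StartUnit failed: %v", err)
	}
	fmt.Printf("queued job %d, result: %s\n", jobID, <-done)
}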
|
||||
|
||||
// StopUnit is similar to StartUnit but stops the specified unit rather
|
||||
// than starting it.
|
||||
func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
|
||||
}
|
||||
|
||||
// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise.
|
||||
func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
|
||||
}
|
||||
|
||||
// RestartUnit restarts a service. If a service is restarted that isn't
|
||||
// running it will be started.
|
||||
func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
|
||||
}
|
||||
|
||||
// TryRestartUnit is like RestartUnit, except that a service that isn't running
|
||||
// is not affected by the restart.
|
||||
func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
|
||||
}
|
||||
|
||||
// ReloadOrRestartUnit attempts a reload if the unit supports it and uses a restart
|
||||
// otherwise.
|
||||
func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
|
||||
}
|
||||
|
||||
// ReloadOrTryRestartUnit attempts a reload if the unit supports it and uses a "Try"
|
||||
// flavored restart otherwise.
|
||||
func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
|
||||
}
|
||||
|
||||
// StartTransientUnit() may be used to create and start a transient unit, which
|
||||
// will be released as soon as it is not running or referenced anymore or the
|
||||
// system is rebooted. name is the unit name including suffix, and must be
|
||||
// unique. mode is the same as in StartUnit(), properties contains properties
|
||||
// of the unit.
|
||||
func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
|
||||
}
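// Illustrative usage sketch (assumptions: the package is imported as
// "github.com/coreos/go-systemd/dbus"; the unit name and command path are
// placeholders). It builds the unit properties with the Prop* helpers defined
// later in this diff and starts a transient service.
package main

import (
	"fmt"
	"log"

	systemd "github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := systemd.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	props := []systemd.Property{
		systemd.PropDescription("short-lived sleep, managed transiently"),
		systemd.PropExecStart([]string{"/bin/sleep", "30"}, false),
		systemd.PropRemainAfterExit(false),
	}

	done := make(chan string, 1)
	if _, err := conn.StartTransientUnit("transient-sleep.service", "replace", props, done); err != nil {
		log.Fatalf("StartTransientUnit failed: %v", err)
	}
	// The channel reports the result of the start job, not of the sleep itself.
	fmt.Println("start job result:", <-done)
}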
|
||||
|
||||
// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's
|
||||
// processes are killed.
|
||||
func (c *Conn) KillUnit(name string, signal int32) {
|
||||
c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
|
||||
}
|
||||
|
||||
// ResetFailedUnit resets the "failed" state of a specific unit.
|
||||
func (c *Conn) ResetFailedUnit(name string) error {
|
||||
return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store()
|
||||
}
|
||||
|
||||
// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface
|
||||
func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) {
|
||||
var err error
|
||||
var props map[string]dbus.Variant
|
||||
|
||||
path := unitPath(unit)
|
||||
if !path.IsValid() {
|
||||
return nil, errors.New("invalid unit name: " + unit)
|
||||
}
|
||||
|
||||
obj := c.sysconn.Object("org.freedesktop.systemd1", path)
|
||||
err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out := make(map[string]interface{}, len(props))
|
||||
for k, v := range props {
|
||||
out[k] = v.Value()
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// GetUnitProperties takes the unit name and returns all of its dbus object properties.
|
||||
func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
|
||||
return c.getProperties(unit, "org.freedesktop.systemd1.Unit")
|
||||
}
|
||||
|
||||
func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) {
|
||||
var err error
|
||||
var prop dbus.Variant
|
||||
|
||||
path := unitPath(unit)
|
||||
if !path.IsValid() {
|
||||
return nil, errors.New("invalid unit name: " + unit)
|
||||
}
|
||||
|
||||
obj := c.sysconn.Object("org.freedesktop.systemd1", path)
|
||||
err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Property{Name: propertyName, Value: prop}, nil
|
||||
}
|
||||
|
||||
func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
|
||||
return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName)
|
||||
}
|
||||
|
||||
// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
|
||||
// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope
|
||||
// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit
|
||||
func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
|
||||
return c.getProperties(unit, "org.freedesktop.systemd1."+unitType)
|
||||
}
|
||||
|
||||
// SetUnitProperties() may be used to modify certain unit properties at runtime.
|
||||
// Not all properties may be changed at runtime, but many resource management
|
||||
// settings (primarily those in systemd.cgroup(5)) may. The changes are applied
|
||||
// instantly, and stored on disk for future boots, unless runtime is true, in which
|
||||
// case the settings only apply until the next reboot. name is the name of the unit
|
||||
// to modify. properties are the settings to set, encoded as an array of property
|
||||
// name and value pairs.
|
||||
func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
|
||||
return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store()
|
||||
}
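// Illustrative usage sketch (assumptions: the vendored packages are imported
// as "github.com/coreos/go-systemd/dbus" and "github.com/godbus/dbus"; the
// unit name is a placeholder). Both packages are called "dbus", so the godbus
// import is aliased and MakeVariant is used to build a raw Property for a
// cgroup setting that has no Prop* helper.
package main

import (
	"log"

	systemd "github.com/coreos/go-systemd/dbus"
	godbus "github.com/godbus/dbus"
)

func main() {
	conn, err := systemd.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	cpuShares := systemd.Property{
		Name:  "CPUShares",
		Value: godbus.MakeVariant(uint64(512)),
	}

	// runtime=true: applied immediately but not persisted across reboots.
	if err := conn.SetUnitProperties("myapp.service", true, cpuShares); err != nil {
		log.Fatalf("SetUnitProperties failed: %v", err)
	}
}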
|
||||
|
||||
func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
|
||||
return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
|
||||
}
|
||||
|
||||
type UnitStatus struct {
|
||||
Name string // The primary unit name as string
|
||||
Description string // The human readable description string
|
||||
LoadState string // The load state (i.e. whether the unit file has been loaded successfully)
|
||||
ActiveState string // The active state (i.e. whether the unit is currently started or not)
|
||||
SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not)
|
||||
Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string.
|
||||
Path dbus.ObjectPath // The unit object path
|
||||
JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise
|
||||
JobType string // The job type as string
|
||||
JobPath dbus.ObjectPath // The job object path
|
||||
}
|
||||
|
||||
// ListUnits returns an array with all currently loaded units. Note that
|
||||
// units may be known by multiple names at the same time, and hence there might
|
||||
// be more unit names loaded than actual units behind them.
|
||||
func (c *Conn) ListUnits() ([]UnitStatus, error) {
|
||||
result := make([][]interface{}, 0)
|
||||
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resultInterface := make([]interface{}, len(result))
|
||||
for i := range result {
|
||||
resultInterface[i] = result[i]
|
||||
}
|
||||
|
||||
status := make([]UnitStatus, len(result))
|
||||
statusInterface := make([]interface{}, len(status))
|
||||
for i := range status {
|
||||
statusInterface[i] = &status[i]
|
||||
}
|
||||
|
||||
err = dbus.Store(resultInterface, statusInterface...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return status, nil
|
||||
}
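// Illustrative usage sketch (assumption: the package is imported as
// "github.com/coreos/go-systemd/dbus"). It lists all loaded units and prints
// the name and sub-state of the ones that are currently active.
package main

import (
	"fmt"
	"log"

	systemd "github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := systemd.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	units, err := conn.ListUnits()
	if err != nil {
		log.Fatalf("ListUnits failed: %v", err)
	}
	for _, u := range units {
		if u.ActiveState == "active" {
			fmt.Printf("%-40s %s\n", u.Name, u.SubState)
		}
	}
}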
|
||||
|
||||
type UnitFile struct {
|
||||
Path string
|
||||
Type string
|
||||
}
|
||||
|
||||
// ListUnitFiles returns an array of all available units on disk.
|
||||
func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
|
||||
result := make([][]interface{}, 0)
|
||||
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resultInterface := make([]interface{}, len(result))
|
||||
for i := range result {
|
||||
resultInterface[i] = result[i]
|
||||
}
|
||||
|
||||
files := make([]UnitFile, len(result))
|
||||
fileInterface := make([]interface{}, len(files))
|
||||
for i := range files {
|
||||
fileInterface[i] = &files[i]
|
||||
}
|
||||
|
||||
err = dbus.Store(resultInterface, fileInterface...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
type LinkUnitFileChange EnableUnitFileChange
|
||||
|
||||
// LinkUnitFiles() links unit files (that are located outside of the
|
||||
// usual unit search paths) into the unit search path.
|
||||
//
|
||||
// It takes a list of absolute paths to unit files to link and two
|
||||
// booleans. The first boolean controls whether the unit shall be
|
||||
// enabled for runtime only (true, /run), or persistently (false,
|
||||
// /etc).
|
||||
// The second controls whether symlinks pointing to other units shall
|
||||
// be replaced if necessary.
|
||||
//
|
||||
// This call returns a list of the changes made. The list consists of
|
||||
// structures with three strings: the type of the change (one of symlink
|
||||
// or unlink), the file name of the symlink and the destination of the
|
||||
// symlink.
|
||||
func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
|
||||
result := make([][]interface{}, 0)
|
||||
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resultInterface := make([]interface{}, len(result))
|
||||
for i := range result {
|
||||
resultInterface[i] = result[i]
|
||||
}
|
||||
|
||||
changes := make([]LinkUnitFileChange, len(result))
|
||||
changesInterface := make([]interface{}, len(changes))
|
||||
for i := range changes {
|
||||
changesInterface[i] = &changes[i]
|
||||
}
|
||||
|
||||
err = dbus.Store(resultInterface, changesInterface...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
// EnableUnitFiles() may be used to enable one or more units in the system (by
|
||||
// creating symlinks to them in /etc or /run).
|
||||
//
|
||||
// It takes a list of unit files to enable (either just file names or full
|
||||
// absolute paths if the unit files are residing outside the usual unit
|
||||
// search paths), and two booleans: the first controls whether the unit shall
|
||||
// be enabled for runtime only (true, /run), or persistently (false, /etc).
|
||||
// The second one controls whether symlinks pointing to other units shall
|
||||
// be replaced if necessary.
|
||||
//
|
||||
// This call returns one boolean and an array with the changes made. The
|
||||
// boolean signals whether the unit files contained any enablement
|
||||
// information (i.e. an [Install] section). The changes list consists of
|
||||
// structures with three strings: the type of the change (one of symlink
|
||||
// or unlink), the file name of the symlink and the destination of the
|
||||
// symlink.
|
||||
func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
|
||||
var carries_install_info bool
|
||||
|
||||
result := make([][]interface{}, 0)
|
||||
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
resultInterface := make([]interface{}, len(result))
|
||||
for i := range result {
|
||||
resultInterface[i] = result[i]
|
||||
}
|
||||
|
||||
changes := make([]EnableUnitFileChange, len(result))
|
||||
changesInterface := make([]interface{}, len(changes))
|
||||
for i := range changes {
|
||||
changesInterface[i] = &changes[i]
|
||||
}
|
||||
|
||||
err = dbus.Store(resultInterface, changesInterface...)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
return carries_install_info, changes, nil
|
||||
}
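// Illustrative usage sketch (assumptions: the package is imported as
// "github.com/coreos/go-systemd/dbus"; the unit file path is a placeholder).
// It enables a unit file persistently (/etc), prints the symlink changes, and
// asks systemd to re-read its configuration, mirroring
// "systemctl enable myapp && systemctl daemon-reload".
package main

import (
	"fmt"
	"log"

	systemd "github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := systemd.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	hasInstall, changes, err := conn.EnableUnitFiles(
		[]string{"/opt/myapp/myapp.service"}, false /* persistent */, true /* force */)
	if err != nil {
		log.Fatalf("EnableUnitFiles failed: %v", err)
	}
	if !hasInstall {
		log.Println("unit file carries no [Install] section")
	}
	for _, c := range changes {
		fmt.Printf("%s: %s -> %s\n", c.Type, c.Filename, c.Destination)
	}
	if err := conn.Reload(); err != nil {
		log.Fatalf("daemon-reload failed: %v", err)
	}
}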
|
||||
|
||||
type EnableUnitFileChange struct {
|
||||
Type string // Type of the change (one of symlink or unlink)
|
||||
Filename string // File name of the symlink
|
||||
Destination string // Destination of the symlink
|
||||
}
|
||||
|
||||
// DisableUnitFiles() may be used to disable one or more units in the system (by
|
||||
// removing symlinks to them from /etc or /run).
|
||||
//
|
||||
// It takes a list of unit files to disable (either just file names or full
|
||||
// absolute paths if the unit files are residing outside the usual unit
|
||||
// search paths), and one boolean: whether the unit was enabled for runtime
|
||||
// only (true, /run), or persistently (false, /etc).
|
||||
//
|
||||
// This call returns an array with the changes made. The changes list
|
||||
// consists of structures with three strings: the type of the change (one of
|
||||
// symlink or unlink), the file name of the symlink and the destination of the
|
||||
// symlink.
|
||||
func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
|
||||
result := make([][]interface{}, 0)
|
||||
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resultInterface := make([]interface{}, len(result))
|
||||
for i := range result {
|
||||
resultInterface[i] = result[i]
|
||||
}
|
||||
|
||||
changes := make([]DisableUnitFileChange, len(result))
|
||||
changesInterface := make([]interface{}, len(changes))
|
||||
for i := range changes {
|
||||
changesInterface[i] = &changes[i]
|
||||
}
|
||||
|
||||
err = dbus.Store(resultInterface, changesInterface...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
type DisableUnitFileChange struct {
|
||||
Type string // Type of the change (one of symlink or unlink)
|
||||
Filename string // File name of the symlink
|
||||
Destination string // Destination of the symlink
|
||||
}
|
||||
|
||||
// Reload instructs systemd to scan for and reload unit files. This is
|
||||
// equivalent to a 'systemctl daemon-reload'.
|
||||
func (c *Conn) Reload() error {
|
||||
return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store()
|
||||
}
|
||||
|
||||
func unitPath(name string) dbus.ObjectPath {
|
||||
return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name))
|
||||
}
|
|
@ -1,218 +0,0 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"github.com/godbus/dbus"
|
||||
)
|
||||
|
||||
// From the systemd docs:
|
||||
//
|
||||
// The properties array of StartTransientUnit() may take many of the settings
|
||||
// that may also be configured in unit files. Not all parameters are currently
|
||||
// accepted though, but we plan to cover more properties with future release.
|
||||
// Currently you may set the Description, Slice and all dependency types of
|
||||
// units, as well as RemainAfterExit, ExecStart for service units,
|
||||
// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares,
|
||||
// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth,
|
||||
// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit,
|
||||
// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map
|
||||
// directly to their counterparts in unit files and as normal D-Bus object
|
||||
// properties. The exception here is the PIDs field of scope units which is
|
||||
// used for construction of the scope only and specifies the initial PIDs to
|
||||
// add to the scope object.
|
||||
|
||||
type Property struct {
|
||||
Name string
|
||||
Value dbus.Variant
|
||||
}
|
||||
|
||||
type PropertyCollection struct {
|
||||
Name string
|
||||
Properties []Property
|
||||
}
|
||||
|
||||
type execStart struct {
|
||||
Path string // the binary path to execute
|
||||
Args []string // an array with all arguments to pass to the executed command, starting with argument 0
|
||||
UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly
|
||||
}
|
||||
|
||||
// PropExecStart sets the ExecStart service property. The first argument is a
|
||||
// slice with the binary path to execute followed by the arguments to pass to
|
||||
// the executed command. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
|
||||
func PropExecStart(command []string, uncleanIsFailure bool) Property {
|
||||
execStarts := []execStart{
|
||||
execStart{
|
||||
Path: command[0],
|
||||
Args: command,
|
||||
UncleanIsFailure: uncleanIsFailure,
|
||||
},
|
||||
}
|
||||
|
||||
return Property{
|
||||
Name: "ExecStart",
|
||||
Value: dbus.MakeVariant(execStarts),
|
||||
}
|
||||
}
|
||||
|
||||
// PropRemainAfterExit sets the RemainAfterExit service property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
|
||||
func PropRemainAfterExit(b bool) Property {
|
||||
return Property{
|
||||
Name: "RemainAfterExit",
|
||||
Value: dbus.MakeVariant(b),
|
||||
}
|
||||
}
|
||||
|
||||
// PropDescription sets the Description unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description=
|
||||
func PropDescription(desc string) Property {
|
||||
return Property{
|
||||
Name: "Description",
|
||||
Value: dbus.MakeVariant(desc),
|
||||
}
|
||||
}
|
||||
|
||||
func propDependency(name string, units []string) Property {
|
||||
return Property{
|
||||
Name: name,
|
||||
Value: dbus.MakeVariant(units),
|
||||
}
|
||||
}
|
||||
|
||||
// PropRequires sets the Requires unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
|
||||
func PropRequires(units ...string) Property {
|
||||
return propDependency("Requires", units)
|
||||
}
|
||||
|
||||
// PropRequiresOverridable sets the RequiresOverridable unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
|
||||
func PropRequiresOverridable(units ...string) Property {
|
||||
return propDependency("RequiresOverridable", units)
|
||||
}
|
||||
|
||||
// PropRequisite sets the Requisite unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
|
||||
func PropRequisite(units ...string) Property {
|
||||
return propDependency("Requisite", units)
|
||||
}
|
||||
|
||||
// PropRequisiteOverridable sets the RequisiteOverridable unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
|
||||
func PropRequisiteOverridable(units ...string) Property {
|
||||
return propDependency("RequisiteOverridable", units)
|
||||
}
|
||||
|
||||
// PropWants sets the Wants unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
|
||||
func PropWants(units ...string) Property {
|
||||
return propDependency("Wants", units)
|
||||
}
|
||||
|
||||
// PropBindsTo sets the BindsTo unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
|
||||
func PropBindsTo(units ...string) Property {
|
||||
return propDependency("BindsTo", units)
|
||||
}
|
||||
|
||||
// PropRequiredBy sets the RequiredBy unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
|
||||
func PropRequiredBy(units ...string) Property {
|
||||
return propDependency("RequiredBy", units)
|
||||
}
|
||||
|
||||
// PropRequiredByOverridable sets the RequiredByOverridable unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
|
||||
func PropRequiredByOverridable(units ...string) Property {
|
||||
return propDependency("RequiredByOverridable", units)
|
||||
}
|
||||
|
||||
// PropWantedBy sets the WantedBy unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
|
||||
func PropWantedBy(units ...string) Property {
|
||||
return propDependency("WantedBy", units)
|
||||
}
|
||||
|
||||
// PropBoundBy sets the BoundBy unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy=
|
||||
func PropBoundBy(units ...string) Property {
|
||||
return propDependency("BoundBy", units)
|
||||
}
|
||||
|
||||
// PropConflicts sets the Conflicts unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts=
|
||||
func PropConflicts(units ...string) Property {
|
||||
return propDependency("Conflicts", units)
|
||||
}
|
||||
|
||||
// PropConflictedBy sets the ConflictedBy unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy=
|
||||
func PropConflictedBy(units ...string) Property {
|
||||
return propDependency("ConflictedBy", units)
|
||||
}
|
||||
|
||||
// PropBefore sets the Before unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=
|
||||
func PropBefore(units ...string) Property {
|
||||
return propDependency("Before", units)
|
||||
}
|
||||
|
||||
// PropAfter sets the After unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After=
|
||||
func PropAfter(units ...string) Property {
|
||||
return propDependency("After", units)
|
||||
}
|
||||
|
||||
// PropOnFailure sets the OnFailure unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure=
|
||||
func PropOnFailure(units ...string) Property {
|
||||
return propDependency("OnFailure", units)
|
||||
}
|
||||
|
||||
// PropTriggers sets the Triggers unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers=
|
||||
func PropTriggers(units ...string) Property {
|
||||
return propDependency("Triggers", units)
|
||||
}
|
||||
|
||||
// PropTriggeredBy sets the TriggeredBy unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy=
|
||||
func PropTriggeredBy(units ...string) Property {
|
||||
return propDependency("TriggeredBy", units)
|
||||
}
|
||||
|
||||
// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo=
|
||||
func PropPropagatesReloadTo(units ...string) Property {
|
||||
return propDependency("PropagatesReloadTo", units)
|
||||
}
|
||||
|
||||
// PropRequiresMountsFor sets the RequiresMountsFor unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor=
|
||||
func PropRequiresMountsFor(units ...string) Property {
|
||||
return propDependency("RequiresMountsFor", units)
|
||||
}
|
||||
|
||||
// PropSlice sets the Slice unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice=
|
||||
func PropSlice(slice string) Property {
|
||||
return Property{
|
||||
Name: "Slice",
|
||||
Value: dbus.MakeVariant(slice),
|
||||
}
|
||||
}
|
|
@ -1,47 +0,0 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dbus
|
||||
|
||||
type set struct {
|
||||
data map[string]bool
|
||||
}
|
||||
|
||||
func (s *set) Add(value string) {
|
||||
s.data[value] = true
|
||||
}
|
||||
|
||||
func (s *set) Remove(value string) {
|
||||
delete(s.data, value)
|
||||
}
|
||||
|
||||
func (s *set) Contains(value string) (exists bool) {
|
||||
_, exists = s.data[value]
|
||||
return
|
||||
}
|
||||
|
||||
func (s *set) Length() int {
|
||||
return len(s.data)
|
||||
}
|
||||
|
||||
func (s *set) Values() (values []string) {
|
||||
for val, _ := range s.data {
|
||||
values = append(values, val)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func newSet() *set {
|
||||
return &set{make(map[string]bool)}
|
||||
}
|
|
@ -1,250 +0,0 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/godbus/dbus"
|
||||
)
|
||||
|
||||
const (
|
||||
cleanIgnoreInterval = int64(10 * time.Second)
|
||||
ignoreInterval = int64(30 * time.Millisecond)
|
||||
)
|
||||
|
||||
// Subscribe sets up this connection to subscribe to all systemd dbus events.
|
||||
// This is required before calling SubscribeUnits. When the connection closes
|
||||
// systemd will automatically stop sending signals so there is no need to
|
||||
// explicitly call Unsubscribe().
|
||||
func (c *Conn) Subscribe() error {
|
||||
c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
|
||||
"type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'")
|
||||
c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
|
||||
"type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'")
|
||||
|
||||
err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unsubscribe this connection from systemd dbus events.
|
||||
func (c *Conn) Unsubscribe() error {
|
||||
err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Conn) dispatch() {
|
||||
ch := make(chan *dbus.Signal, signalBuffer)
|
||||
|
||||
c.sigconn.Signal(ch)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
signal, ok := <-ch
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" {
|
||||
c.jobComplete(signal)
|
||||
}
|
||||
|
||||
if c.subscriber.updateCh == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var unitPath dbus.ObjectPath
|
||||
switch signal.Name {
|
||||
case "org.freedesktop.systemd1.Manager.JobRemoved":
|
||||
unitName := signal.Body[2].(string)
|
||||
c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath)
|
||||
case "org.freedesktop.systemd1.Manager.UnitNew":
|
||||
unitPath = signal.Body[1].(dbus.ObjectPath)
|
||||
case "org.freedesktop.DBus.Properties.PropertiesChanged":
|
||||
if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" {
|
||||
unitPath = signal.Path
|
||||
}
|
||||
}
|
||||
|
||||
if unitPath == dbus.ObjectPath("") {
|
||||
continue
|
||||
}
|
||||
|
||||
c.sendSubStateUpdate(unitPath)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// SubscribeUnits returns two unbuffered channels which will receive all changed units every
|
||||
// interval. Deleted units are sent as nil.
|
||||
func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) {
|
||||
return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil)
|
||||
}
|
||||
|
||||
// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
|
||||
// size of the channels, the comparison function for detecting changes and a filter
|
||||
// function for cutting down on the noise that your channel receives.
|
||||
func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
|
||||
old := make(map[string]*UnitStatus)
|
||||
statusChan := make(chan map[string]*UnitStatus, buffer)
|
||||
errChan := make(chan error, buffer)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
timerChan := time.After(interval)
|
||||
|
||||
units, err := c.ListUnits()
|
||||
if err == nil {
|
||||
cur := make(map[string]*UnitStatus)
|
||||
for i := range units {
|
||||
if filterUnit != nil && filterUnit(units[i].Name) {
|
||||
continue
|
||||
}
|
||||
cur[units[i].Name] = &units[i]
|
||||
}
|
||||
|
||||
// add all new or changed units
|
||||
changed := make(map[string]*UnitStatus)
|
||||
for n, u := range cur {
|
||||
if oldU, ok := old[n]; !ok || isChanged(oldU, u) {
|
||||
changed[n] = u
|
||||
}
|
||||
delete(old, n)
|
||||
}
|
||||
|
||||
// add all deleted units
|
||||
for oldN := range old {
|
||||
changed[oldN] = nil
|
||||
}
|
||||
|
||||
old = cur
|
||||
|
||||
if len(changed) != 0 {
|
||||
statusChan <- changed
|
||||
}
|
||||
} else {
|
||||
errChan <- err
|
||||
}
|
||||
|
||||
<-timerChan
|
||||
}
|
||||
}()
|
||||
|
||||
return statusChan, errChan
|
||||
}
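// Illustrative usage sketch (assumption: the package is imported as
// "github.com/coreos/go-systemd/dbus"). It subscribes to systemd events and
// then polls for unit changes every five seconds via SubscribeUnits; deleted
// units arrive as nil entries in the map.
package main

import (
	"fmt"
	"log"
	"time"

	systemd "github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := systemd.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.Subscribe(); err != nil {
		log.Fatalf("Subscribe failed: %v", err)
	}

	updates, errs := conn.SubscribeUnits(5 * time.Second)
	for {
		select {
		case changed := <-updates:
			for name, status := range changed {
				if status == nil {
					fmt.Println(name, "was removed")
					continue
				}
				fmt.Println(name, "is now", status.ActiveState)
			}
		case err := <-errs:
			log.Println("subscription error:", err)
		}
	}
}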
|
||||
|
||||
type SubStateUpdate struct {
|
||||
UnitName string
|
||||
SubState string
|
||||
}
|
||||
|
||||
// SetSubStateSubscriber writes to updateCh when any unit's substate changes.
|
||||
// Although this writes to updateCh on every state change, the reported state
|
||||
// may be more recent than the change that generated it (due to an unavoidable
|
||||
// race in the systemd dbus interface). That is, this method provides a good
|
||||
// way to keep a current view of all units' states, but is not guaranteed to
|
||||
// show every state transition they go through. Furthermore, state changes
|
||||
// will only be written to the channel with non-blocking writes. If updateCh
|
||||
// is full, it attempts to write an error to errCh; if errCh is full, the error
|
||||
// passes silently.
|
||||
func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
|
||||
c.subscriber.Lock()
|
||||
defer c.subscriber.Unlock()
|
||||
c.subscriber.updateCh = updateCh
|
||||
c.subscriber.errCh = errCh
|
||||
}
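// Illustrative usage sketch (assumption: the package is imported as
// "github.com/coreos/go-systemd/dbus"). It registers buffered channels with
// SetSubStateSubscriber and prints sub-state transitions as signals arrive;
// since writes are non-blocking, generously sized buffers reduce the chance
// of dropped updates.
package main

import (
	"fmt"
	"log"

	systemd "github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := systemd.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.Subscribe(); err != nil {
		log.Fatalf("Subscribe failed: %v", err)
	}

	updates := make(chan *systemd.SubStateUpdate, 256)
	errs := make(chan error, 256)
	conn.SetSubStateSubscriber(updates, errs)

	for {
		select {
		case u := <-updates:
			fmt.Printf("%s -> %s\n", u.UnitName, u.SubState)
		case err := <-errs:
			log.Println("subscriber error:", err)
		}
	}
}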
|
||||
|
||||
func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) {
|
||||
c.subscriber.Lock()
|
||||
defer c.subscriber.Unlock()
|
||||
|
||||
if c.shouldIgnore(path) {
|
||||
return
|
||||
}
|
||||
|
||||
info, err := c.GetUnitProperties(string(path))
|
||||
if err != nil {
|
||||
select {
|
||||
case c.subscriber.errCh <- err:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
name := info["Id"].(string)
|
||||
substate := info["SubState"].(string)
|
||||
|
||||
update := &SubStateUpdate{name, substate}
|
||||
select {
|
||||
case c.subscriber.updateCh <- update:
|
||||
default:
|
||||
select {
|
||||
case c.subscriber.errCh <- errors.New("update channel full!"):
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
c.updateIgnore(path, info)
|
||||
}
|
||||
|
||||
// The ignore functions work around a wart in the systemd dbus interface.
|
||||
// Requesting the properties of an unloaded unit will cause systemd to send a
|
||||
// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
|
||||
// properties on UnitNew (as that's the only indication of a new unit coming up
|
||||
// for the first time), we would enter an infinite loop if we did not attempt
|
||||
// to detect and ignore these spurious signals. The signal themselves are
|
||||
// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
|
||||
// unloaded unit's signals for a short time after requesting its properties.
|
||||
// This means that we will miss e.g. a transient unit being restarted
|
||||
// *immediately* upon failure and also a transient unit being started
|
||||
// immediately after requesting its status (with systemctl status, for example,
|
||||
// because this causes a UnitNew signal to be sent which then causes us to fetch
|
||||
// the properties).
|
||||
|
||||
func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
|
||||
t, ok := c.subscriber.ignore[path]
|
||||
return ok && t >= time.Now().UnixNano()
|
||||
}
|
||||
|
||||
func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
|
||||
c.cleanIgnore()
|
||||
|
||||
// unit is unloaded - it will trigger bad systemd dbus behavior
|
||||
if info["LoadState"].(string) == "not-found" {
|
||||
c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
|
||||
}
|
||||
}
|
||||
|
||||
// without this, ignore would grow unboundedly over time
|
||||
func (c *Conn) cleanIgnore() {
|
||||
now := time.Now().UnixNano()
|
||||
if c.subscriber.cleanIgnore < now {
|
||||
c.subscriber.cleanIgnore = now + cleanIgnoreInterval
|
||||
|
||||
for p, t := range c.subscriber.ignore {
|
||||
if t < now {
|
||||
delete(c.subscriber.ignore, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,57 +0,0 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// SubscriptionSet is a subscription set which is like conn.Subscribe but
|
||||
// can filter to only return events for a set of units.
|
||||
type SubscriptionSet struct {
|
||||
*set
|
||||
conn *Conn
|
||||
}
|
||||
|
||||
func (s *SubscriptionSet) filter(unit string) bool {
|
||||
return !s.Contains(unit)
|
||||
}
|
||||
|
||||
// Subscribe starts listening for dbus events for all of the units in the set.
|
||||
// Returns channels identical to conn.SubscribeUnits.
|
||||
func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) {
|
||||
// TODO: Make fully evented by using systemd 209 with properties changed values
|
||||
return s.conn.SubscribeUnitsCustom(time.Second, 0,
|
||||
mismatchUnitStatus,
|
||||
func(unit string) bool { return s.filter(unit) },
|
||||
)
|
||||
}
|
||||
|
||||
// NewSubscriptionSet returns a new subscription set.
|
||||
func (conn *Conn) NewSubscriptionSet() *SubscriptionSet {
|
||||
return &SubscriptionSet{newSet(), conn}
|
||||
}
|
||||
|
||||
// mismatchUnitStatus returns true if the provided UnitStatus objects
|
||||
// are not equivalent. false is returned if the objects are equivalent.
|
||||
// Only the Name, Description and state-related fields are used in
|
||||
// the comparison.
|
||||
func mismatchUnitStatus(u1, u2 *UnitStatus) bool {
|
||||
return u1.Name != u2.Name ||
|
||||
u1.Description != u2.Description ||
|
||||
u1.LoadState != u2.LoadState ||
|
||||
u1.ActiveState != u2.ActiveState ||
|
||||
u1.SubState != u2.SubState
|
||||
}
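// Illustrative usage sketch (assumptions: the package is imported as
// "github.com/coreos/go-systemd/dbus"; "sshd.service" is a placeholder). A
// SubscriptionSet narrows the event stream to a chosen set of units, compared
// with mismatchUnitStatus above.
package main

import (
	"fmt"
	"log"

	systemd "github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := systemd.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.Subscribe(); err != nil {
		log.Fatalf("Subscribe failed: %v", err)
	}

	set := conn.NewSubscriptionSet()
	set.Add("sshd.service")

	updates, errs := set.Subscribe()
	for {
		select {
		case changed := <-updates:
			for name, status := range changed {
				if status == nil {
					fmt.Println(name, "was removed")
					continue
				}
				fmt.Println(name, status.ActiveState, status.SubState)
			}
		case err := <-errs:
			log.Println("subscription error:", err)
		}
	}
}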
|
|
@ -1,270 +0,0 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package util contains utility functions related to systemd that applications
|
||||
// can use to check things like whether systemd is running. Note that some of
|
||||
// these functions attempt to manually load systemd libraries at runtime rather
|
||||
// than linking against them.
|
||||
package util
|
||||
|
||||
// #cgo LDFLAGS: -ldl
|
||||
// #include <stdlib.h>
|
||||
// #include <dlfcn.h>
|
||||
// #include <sys/types.h>
|
||||
// #include <unistd.h>
|
||||
//
|
||||
// int
|
||||
// my_sd_pid_get_owner_uid(void *f, pid_t pid, uid_t *uid)
|
||||
// {
|
||||
// int (*sd_pid_get_owner_uid)(pid_t, uid_t *);
|
||||
//
|
||||
// sd_pid_get_owner_uid = (int (*)(pid_t, uid_t *))f;
|
||||
// return sd_pid_get_owner_uid(pid, uid);
|
||||
// }
|
||||
//
|
||||
// int
|
||||
// my_sd_pid_get_unit(void *f, pid_t pid, char **unit)
|
||||
// {
|
||||
// int (*sd_pid_get_unit)(pid_t, char **);
|
||||
//
|
||||
// sd_pid_get_unit = (int (*)(pid_t, char **))f;
|
||||
// return sd_pid_get_unit(pid, unit);
|
||||
// }
|
||||
//
|
||||
// int
|
||||
// my_sd_pid_get_slice(void *f, pid_t pid, char **slice)
|
||||
// {
|
||||
// int (*sd_pid_get_slice)(pid_t, char **);
|
||||
//
|
||||
// sd_pid_get_slice = (int (*)(pid_t, char **))f;
|
||||
// return sd_pid_get_slice(pid, slice);
|
||||
// }
|
||||
//
|
||||
// int
|
||||
// am_session_leader()
|
||||
// {
|
||||
// return (getsid(0) == getpid());
|
||||
// }
|
||||
import "C"
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var ErrSoNotFound = errors.New("unable to open a handle to libsystemd")
|
||||
|
||||
// libHandle represents an open handle to the systemd C library
|
||||
type libHandle struct {
|
||||
handle unsafe.Pointer
|
||||
libname string
|
||||
}
|
||||
|
||||
func (h *libHandle) Close() error {
|
||||
if r := C.dlclose(h.handle); r != 0 {
|
||||
return fmt.Errorf("error closing %v: %d", h.libname, r)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getHandle tries to get a handle to a systemd library (.so), attempting to
|
||||
// access it by several different names and returning the first that is
|
||||
// successfully opened. Callers are responsible for closing the handle.
|
||||
// If no library can be successfully opened, an error is returned.
|
||||
func getHandle() (*libHandle, error) {
|
||||
for _, name := range []string{
|
||||
// systemd < 209
|
||||
"libsystemd-login.so",
|
||||
"libsystemd-login.so.0",
|
||||
|
||||
// systemd >= 209 merged libsystemd-login into libsystemd proper
|
||||
"libsystemd.so",
|
||||
"libsystemd.so.0",
|
||||
} {
|
||||
libname := C.CString(name)
|
||||
defer C.free(unsafe.Pointer(libname))
|
||||
handle := C.dlopen(libname, C.RTLD_LAZY)
|
||||
if handle != nil {
|
||||
h := &libHandle{
|
||||
handle: handle,
|
||||
libname: name,
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
}
|
||||
return nil, ErrSoNotFound
|
||||
}
|
||||
|
||||
// GetRunningSlice attempts to retrieve the name of the systemd slice in which
|
||||
// the current process is running.
|
||||
// This function is a wrapper around the libsystemd C library; if it cannot be
|
||||
// opened, an error is returned.
|
||||
func GetRunningSlice() (slice string, err error) {
|
||||
var h *libHandle
|
||||
h, err = getHandle()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if err1 := h.Close(); err1 != nil {
|
||||
err = err1
|
||||
}
|
||||
}()
|
||||
|
||||
sym := C.CString("sd_pid_get_slice")
|
||||
defer C.free(unsafe.Pointer(sym))
|
||||
sd_pid_get_slice := C.dlsym(h.handle, sym)
|
||||
if sd_pid_get_slice == nil {
|
||||
err = fmt.Errorf("error resolving sd_pid_get_slice function")
|
||||
return
|
||||
}
|
||||
|
||||
var s string
|
||||
sl := C.CString(s)
|
||||
defer C.free(unsafe.Pointer(sl))
|
||||
|
||||
ret := C.my_sd_pid_get_slice(sd_pid_get_slice, 0, &sl)
|
||||
if ret < 0 {
|
||||
err = fmt.Errorf("error calling sd_pid_get_slice: %v", syscall.Errno(-ret))
|
||||
return
|
||||
}
|
||||
|
||||
return C.GoString(sl), nil
|
||||
}
|
||||
|
||||
// RunningFromSystemService tries to detect whether the current process has
|
||||
// been invoked from a system service. The condition for this is whether the
|
||||
// process is _not_ a user process. User processes are those running in session
|
||||
// scopes or under per-user `systemd --user` instances.
|
||||
//
|
||||
// To avoid false positives on systems without `pam_systemd` (which is
|
||||
// responsible for creating user sessions), this function also uses a heuristic
|
||||
// to detect whether it's being invoked from a session leader process. This is
|
||||
// the case if the current process is executed directly from a service file
|
||||
// (e.g. with `ExecStart=/this/cmd`). Note that this heuristic will fail if the
|
||||
// command is instead launched in a subshell or similar so that it is not the
|
||||
// session leader (e.g. `ExecStart=/bin/bash -c "/this/cmd"`)
|
||||
//
|
||||
// This function is a wrapper around the libsystemd C library; if this is
|
||||
// unable to successfully open a handle to the library for any reason (e.g. it
|
||||
// cannot be found), an error will be returned.
|
||||
func RunningFromSystemService() (ret bool, err error) {
|
||||
var h *libHandle
|
||||
h, err = getHandle()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if err1 := h.Close(); err1 != nil {
|
||||
err = err1
|
||||
}
|
||||
}()
|
||||
|
||||
sym := C.CString("sd_pid_get_owner_uid")
|
||||
defer C.free(unsafe.Pointer(sym))
|
||||
sd_pid_get_owner_uid := C.dlsym(h.handle, sym)
|
||||
if sd_pid_get_owner_uid == nil {
|
||||
err = fmt.Errorf("error resolving sd_pid_get_owner_uid function")
|
||||
return
|
||||
}
|
||||
|
||||
var uid C.uid_t
|
||||
errno := C.my_sd_pid_get_owner_uid(sd_pid_get_owner_uid, 0, &uid)
|
||||
serrno := syscall.Errno(-errno)
|
||||
// when we're running from a unit file, sd_pid_get_owner_uid returns
|
||||
// ENOENT (systemd <220) or ENXIO (systemd >=220)
|
||||
switch {
|
||||
case errno >= 0:
|
||||
ret = false
|
||||
case serrno == syscall.ENOENT, serrno == syscall.ENXIO:
|
||||
// Since the implementation of sessions in systemd relies on
|
||||
// the `pam_systemd` module, using the sd_pid_get_owner_uid
|
||||
// heuristic alone can result in false positives if that module
|
||||
// (or PAM itself) is not present or properly configured on the
|
||||
// system. As such, we also check if we're the session leader,
|
||||
// which should be the case if we're invoked from a unit file,
|
||||
// but not if e.g. we're invoked from the command line from a
|
||||
// user's login session
|
||||
ret = C.am_session_leader() == 1
|
||||
default:
|
||||
err = fmt.Errorf("error calling sd_pid_get_owner_uid: %v", syscall.Errno(-errno))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// CurrentUnitName attempts to retrieve the name of the systemd system unit
|
||||
// from which the calling process has been invoked. It wraps the systemd
|
||||
// `sd_pid_get_unit` call, with the same caveat: for processes not part of a
|
||||
// systemd system unit, this function will return an error.
|
||||
func CurrentUnitName() (unit string, err error) {
|
||||
var h *libHandle
|
||||
h, err = getHandle()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if err1 := h.Close(); err1 != nil {
|
||||
err = err1
|
||||
}
|
||||
}()
|
||||
|
||||
sym := C.CString("sd_pid_get_unit")
|
||||
defer C.free(unsafe.Pointer(sym))
|
||||
sd_pid_get_unit := C.dlsym(h.handle, sym)
|
||||
if sd_pid_get_unit == nil {
|
||||
err = fmt.Errorf("error resolving sd_pid_get_unit function")
|
||||
return
|
||||
}
|
||||
|
||||
var s string
|
||||
u := C.CString(s)
|
||||
defer C.free(unsafe.Pointer(u))
|
||||
|
||||
ret := C.my_sd_pid_get_unit(sd_pid_get_unit, 0, &u)
|
||||
if ret < 0 {
|
||||
err = fmt.Errorf("error calling sd_pid_get_unit: %v", syscall.Errno(-ret))
|
||||
return
|
||||
}
|
||||
|
||||
unit = C.GoString(u)
|
||||
return
|
||||
}
|
||||
|
||||
// IsRunningSystemd checks whether the host was booted with systemd as its init
|
||||
// system. This functions similarly to systemd's `sd_booted(3)`: internally, it
|
||||
// checks whether /run/systemd/system/ exists and is a directory.
|
||||
// http://www.freedesktop.org/software/systemd/man/sd_booted.html
|
||||
func IsRunningSystemd() bool {
|
||||
fi, err := os.Lstat("/run/systemd/system")
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return fi.IsDir()
|
||||
}
|
||||
|
||||
// GetMachineID returns a host's 128-bit machine ID as a string. This functions
|
||||
// similarly to systemd's `sd_id128_get_machine`: internally, it simply reads
|
||||
// the contents of /etc/machine-id
|
||||
// http://www.freedesktop.org/software/systemd/man/sd_id128_get_machine.html
|
||||
func GetMachineID() (string, error) {
|
||||
machineID, err := ioutil.ReadFile("/etc/machine-id")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read /etc/machine-id: %v", err)
|
||||
}
|
||||
return strings.TrimSpace(string(machineID)), nil
|
||||
}
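// Illustrative usage sketch (assumption: the vendored package is imported as
// "github.com/coreos/go-systemd/util"; building it requires Linux and cgo, as
// the file above notes). It checks whether the host booted with systemd and
// reports the machine ID and, if the process was launched from a unit, that
// unit's name.
package main

import (
	"fmt"
	"log"

	sdutil "github.com/coreos/go-systemd/util"
)

func main() {
	if !sdutil.IsRunningSystemd() {
		log.Fatal("this host was not booted with systemd")
	}

	if id, err := sdutil.GetMachineID(); err == nil {
		fmt.Println("machine-id:", id)
	}

	// CurrentUnitName fails for processes that are not part of a system unit,
	// e.g. when run from an interactive shell.
	if unit, err := sdutil.CurrentUnitName(); err == nil {
		fmt.Println("running from unit:", unit)
	} else {
		fmt.Println("not running from a systemd unit:", err)
	}
}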
|
|
@ -1,92 +0,0 @@
|
|||
package mount
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Parse fstab type mount options into mount() flags
|
||||
// and device-specific data
|
||||
func parseOptions(options string) (int, string) {
|
||||
var (
|
||||
flag int
|
||||
data []string
|
||||
)
|
||||
|
||||
flags := map[string]struct {
|
||||
clear bool
|
||||
flag int
|
||||
}{
|
||||
"defaults": {false, 0},
|
||||
"ro": {false, RDONLY},
|
||||
"rw": {true, RDONLY},
|
||||
"suid": {true, NOSUID},
|
||||
"nosuid": {false, NOSUID},
|
||||
"dev": {true, NODEV},
|
||||
"nodev": {false, NODEV},
|
||||
"exec": {true, NOEXEC},
|
||||
"noexec": {false, NOEXEC},
|
||||
"sync": {false, SYNCHRONOUS},
|
||||
"async": {true, SYNCHRONOUS},
|
||||
"dirsync": {false, DIRSYNC},
|
||||
"remount": {false, REMOUNT},
|
||||
"mand": {false, MANDLOCK},
|
||||
"nomand": {true, MANDLOCK},
|
||||
"atime": {true, NOATIME},
|
||||
"noatime": {false, NOATIME},
|
||||
"diratime": {true, NODIRATIME},
|
||||
"nodiratime": {false, NODIRATIME},
|
||||
"bind": {false, BIND},
|
||||
"rbind": {false, RBIND},
|
||||
"unbindable": {false, UNBINDABLE},
|
||||
"runbindable": {false, RUNBINDABLE},
|
||||
"private": {false, PRIVATE},
|
||||
"rprivate": {false, RPRIVATE},
|
||||
"shared": {false, SHARED},
|
||||
"rshared": {false, RSHARED},
|
||||
"slave": {false, SLAVE},
|
||||
"rslave": {false, RSLAVE},
|
||||
"relatime": {false, RELATIME},
|
||||
"norelatime": {true, RELATIME},
|
||||
"strictatime": {false, STRICTATIME},
|
||||
"nostrictatime": {true, STRICTATIME},
|
||||
}
|
||||
|
||||
for _, o := range strings.Split(options, ",") {
|
||||
// If the option does not exist in the flags table or the flag
|
||||
// is not supported on the platform,
|
||||
// then it is a data value for a specific fs type
|
||||
if f, exists := flags[o]; exists && f.flag != 0 {
|
||||
if f.clear {
|
||||
flag &= ^f.flag
|
||||
} else {
|
||||
flag |= f.flag
|
||||
}
|
||||
} else {
|
||||
data = append(data, o)
|
||||
}
|
||||
}
|
||||
return flag, strings.Join(data, ",")
|
||||
}
|
||||
|
||||
// ParseTmpfsOptions parses fstab-style mount options into flags and data
|
||||
func ParseTmpfsOptions(options string) (int, string, error) {
|
||||
flags, data := parseOptions(options)
|
||||
validFlags := map[string]bool{
|
||||
"": true,
|
||||
"size": true,
|
||||
"mode": true,
|
||||
"uid": true,
|
||||
"gid": true,
|
||||
"nr_inodes": true,
|
||||
"nr_blocks": true,
|
||||
"mpol": true,
|
||||
}
|
||||
for _, o := range strings.Split(data, ",") {
|
||||
opt := strings.SplitN(o, "=", 2)
|
||||
if !validFlags[opt[0]] {
|
||||
return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt)
|
||||
}
|
||||
}
|
||||
return flags, data, nil
|
||||
}
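// Illustrative usage sketch (assumption: the vendored package is imported as
// "github.com/docker/docker/pkg/mount"). ParseTmpfsOptions splits an fstab
// style option string into mount(2) flag bits and leftover data, rejecting
// data keys that tmpfs does not understand.
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	flags, data, err := mount.ParseTmpfsOptions("noexec,nosuid,size=64m,mode=1777")
	if err != nil {
		log.Fatalf("invalid tmpfs options: %v", err)
	}
	fmt.Printf("flags=%#x data=%q\n", flags, data)

	// An unknown key such as "lowerdir" is reported as an error.
	if _, _, err := mount.ParseTmpfsOptions("size=64m,lowerdir=/tmp"); err != nil {
		fmt.Println("rejected:", err)
	}
}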
|
|
@ -1,48 +0,0 @@
|
|||
// +build freebsd,cgo
|
||||
|
||||
package mount
|
||||
|
||||
/*
|
||||
#include <sys/mount.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
const (
|
||||
// RDONLY will mount the filesystem as read-only.
|
||||
RDONLY = C.MNT_RDONLY
|
||||
|
||||
// NOSUID will not allow set-user-identifier or set-group-identifier bits to
|
||||
// take effect.
|
||||
NOSUID = C.MNT_NOSUID
|
||||
|
||||
// NOEXEC will not allow execution of any binaries on the mounted file system.
|
||||
NOEXEC = C.MNT_NOEXEC
|
||||
|
||||
// SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
|
||||
SYNCHRONOUS = C.MNT_SYNCHRONOUS
|
||||
|
||||
// NOATIME will not update the file access time when reading from a file.
|
||||
NOATIME = C.MNT_NOATIME
|
||||
)
|
||||
|
||||
// These flags are unsupported.
|
||||
const (
|
||||
BIND = 0
|
||||
DIRSYNC = 0
|
||||
MANDLOCK = 0
|
||||
NODEV = 0
|
||||
NODIRATIME = 0
|
||||
UNBINDABLE = 0
|
||||
RUNBINDABLE = 0
|
||||
PRIVATE = 0
|
||||
RPRIVATE = 0
|
||||
SHARED = 0
|
||||
RSHARED = 0
|
||||
SLAVE = 0
|
||||
RSLAVE = 0
|
||||
RBIND = 0
|
||||
RELATIVE = 0
|
||||
RELATIME = 0
|
||||
REMOUNT = 0
|
||||
STRICTATIME = 0
|
||||
)
|
|
@ -1,85 +0,0 @@
|
|||
package mount
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
// RDONLY will mount the file system read-only.
|
||||
RDONLY = syscall.MS_RDONLY
|
||||
|
||||
// NOSUID will not allow set-user-identifier or set-group-identifier bits to
|
||||
// take effect.
|
||||
NOSUID = syscall.MS_NOSUID
|
||||
|
||||
// NODEV will not interpret character or block special devices on the file
|
||||
// system.
|
||||
NODEV = syscall.MS_NODEV
|
||||
|
||||
// NOEXEC will not allow execution of any binaries on the mounted file system.
|
||||
NOEXEC = syscall.MS_NOEXEC
|
||||
|
||||
// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
|
||||
SYNCHRONOUS = syscall.MS_SYNCHRONOUS
|
||||
|
||||
// DIRSYNC will force all directory updates within the file system to be done
|
||||
// synchronously. This affects the following system calls: create, link,
|
||||
// unlink, symlink, mkdir, rmdir, mknod and rename.
|
||||
DIRSYNC = syscall.MS_DIRSYNC
|
||||
|
||||
// REMOUNT will attempt to remount an already-mounted file system. This is
|
||||
// commonly used to change the mount flags for a file system, especially to
|
||||
// make a readonly file system writeable. It does not change device or mount
|
||||
// point.
|
||||
REMOUNT = syscall.MS_REMOUNT
|
||||
|
||||
// MANDLOCK will force mandatory locks on a filesystem.
|
||||
MANDLOCK = syscall.MS_MANDLOCK
|
||||
|
||||
// NOATIME will not update the file access time when reading from a file.
|
||||
NOATIME = syscall.MS_NOATIME
|
||||
|
||||
// NODIRATIME will not update the directory access time.
|
||||
NODIRATIME = syscall.MS_NODIRATIME
|
||||
|
||||
// BIND remounts a subtree somewhere else.
|
||||
BIND = syscall.MS_BIND
|
||||
|
||||
// RBIND remounts a subtree and all possible submounts somewhere else.
|
||||
RBIND = syscall.MS_BIND | syscall.MS_REC
|
||||
|
||||
// UNBINDABLE creates a mount which cannot be cloned through a bind operation.
|
||||
UNBINDABLE = syscall.MS_UNBINDABLE
|
||||
|
||||
// RUNBINDABLE marks the entire mount tree as UNBINDABLE.
|
||||
RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC
|
||||
|
||||
// PRIVATE creates a mount which carries no propagation abilities.
|
||||
PRIVATE = syscall.MS_PRIVATE
|
||||
|
||||
// RPRIVATE marks the entire mount tree as PRIVATE.
|
||||
RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC
|
||||
|
||||
// SLAVE creates a mount which receives propagation from its master, but not
|
||||
// vice versa.
|
||||
SLAVE = syscall.MS_SLAVE
|
||||
|
||||
// RSLAVE marks the entire mount tree as SLAVE.
|
||||
RSLAVE = syscall.MS_SLAVE | syscall.MS_REC
|
||||
|
||||
// SHARED creates a mount which provides the ability to create mirrors of
|
||||
// that mount such that mounts and unmounts within any of the mirrors
|
||||
// propagate to the other mirrors.
|
||||
SHARED = syscall.MS_SHARED
|
||||
|
||||
// RSHARED marks the entire mount tree as SHARED.
|
||||
RSHARED = syscall.MS_SHARED | syscall.MS_REC
|
||||
|
||||
// RELATIME updates inode access times relative to modify or change time.
|
||||
RELATIME = syscall.MS_RELATIME
|
||||
|
||||
// STRICTATIME allows to explicitly request full atime updates. This makes
|
||||
// it possible for the kernel to default to relatime or noatime but still
|
||||
// allow userspace to override it.
|
||||
STRICTATIME = syscall.MS_STRICTATIME
|
||||
)
|
|
@ -1,30 +0,0 @@
|
|||
// +build !linux,!freebsd freebsd,!cgo
|
||||
|
||||
package mount
|
||||
|
||||
// These flags are unsupported.
|
||||
const (
|
||||
BIND = 0
|
||||
DIRSYNC = 0
|
||||
MANDLOCK = 0
|
||||
NOATIME = 0
|
||||
NODEV = 0
|
||||
NODIRATIME = 0
|
||||
NOEXEC = 0
|
||||
NOSUID = 0
|
||||
UNBINDABLE = 0
|
||||
RUNBINDABLE = 0
|
||||
PRIVATE = 0
|
||||
RPRIVATE = 0
|
||||
SHARED = 0
|
||||
RSHARED = 0
|
||||
SLAVE = 0
|
||||
RSLAVE = 0
|
||||
RBIND = 0
|
||||
RELATIME = 0
|
||||
RELATIVE = 0
|
||||
REMOUNT = 0
|
||||
STRICTATIME = 0
|
||||
SYNCHRONOUS = 0
|
||||
RDONLY = 0
|
||||
)
|
|
@ -1,74 +0,0 @@
|
|||
package mount
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// GetMounts retrieves a list of mounts for the current running process.
|
||||
func GetMounts() ([]*Info, error) {
|
||||
return parseMountTable()
|
||||
}
|
||||
|
||||
// Mounted looks at /proc/self/mountinfo to determine if the specified
|
||||
// mountpoint has been mounted
|
||||
func Mounted(mountpoint string) (bool, error) {
|
||||
entries, err := parseMountTable()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Search the table for the mountpoint
|
||||
for _, e := range entries {
|
||||
if e.Mountpoint == mountpoint {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Mount will mount the filesystem according to the specified configuration, on the
|
||||
// condition that the target path is *not* already mounted. Options must be
|
||||
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
|
||||
// flags.go for supported option flags.
|
||||
func Mount(device, target, mType, options string) error {
|
||||
flag, _ := parseOptions(options)
|
||||
if flag&REMOUNT != REMOUNT {
|
||||
if mounted, err := Mounted(target); err != nil || mounted {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return ForceMount(device, target, mType, options)
|
||||
}
|
||||
|
||||
// ForceMount will mount a filesystem according to the specified configuration,
|
||||
// *regardless* of whether the target path is already mounted. Options must be
|
||||
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
|
||||
// flags.go for supported option flags.
|
||||
func ForceMount(device, target, mType, options string) error {
|
||||
flag, data := parseOptions(options)
|
||||
if err := mount(device, target, mType, uintptr(flag), data); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unmount will unmount the target filesystem, so long as it is mounted.
|
||||
func Unmount(target string) error {
|
||||
if mounted, err := Mounted(target); err != nil || !mounted {
|
||||
return err
|
||||
}
|
||||
return ForceUnmount(target)
|
||||
}
|
||||
|
||||
// ForceUnmount will force an unmount of the target filesystem, regardless of
|
||||
// whether it is mounted.
|
||||
func ForceUnmount(target string) (err error) {
|
||||
// Simple retry logic for unmount
|
||||
for i := 0; i < 10; i++ {
|
||||
if err = unmount(target, 0); err == nil {
|
||||
return nil
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
return
|
||||
}
|
|
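A minimal usage sketch of the Mount/Mounted/Unmount helpers deleted above. The import path is assumed to be the vendored docker `pkg/mount` package, and the option string follows the "opt1=val1,opt2=val2" form described in the Mount doc comment (the "bind,ro" options mirror the "bind,rw" call used by ensureMountedAs later in this diff); this is an illustration, not part of the change itself.

```go
package main

import (
	"log"

	"github.com/docker/docker/pkg/mount" // assumed vendored import path
)

func main() {
	// Bind-mount a directory read-only. Mount is a no-op when the target is
	// already mounted, unless REMOUNT is requested in the options.
	if err := mount.Mount("/data", "/mnt/data", "none", "bind,ro"); err != nil {
		log.Fatalf("mount failed: %v", err)
	}

	// Mounted consults the mount table (/proc/self/mountinfo on Linux).
	if ok, err := mount.Mounted("/mnt/data"); err == nil && ok {
		// Unmount only acts if the target is actually mounted.
		if err := mount.Unmount("/mnt/data"); err != nil {
			log.Fatalf("unmount failed: %v", err)
		}
	}
}
```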
@ -1,59 +0,0 @@
|
|||
package mount
|
||||
|
||||
/*
|
||||
#include <errno.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/_iovec.h>
|
||||
#include <sys/mount.h>
|
||||
#include <sys/param.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func allocateIOVecs(options []string) []C.struct_iovec {
|
||||
out := make([]C.struct_iovec, len(options))
|
||||
for i, option := range options {
|
||||
out[i].iov_base = unsafe.Pointer(C.CString(option))
|
||||
out[i].iov_len = C.size_t(len(option) + 1)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func mount(device, target, mType string, flag uintptr, data string) error {
|
||||
isNullFS := false
|
||||
|
||||
xs := strings.Split(data, ",")
|
||||
for _, x := range xs {
|
||||
if x == "bind" {
|
||||
isNullFS = true
|
||||
}
|
||||
}
|
||||
|
||||
options := []string{"fspath", target}
|
||||
if isNullFS {
|
||||
options = append(options, "fstype", "nullfs", "target", device)
|
||||
} else {
|
||||
options = append(options, "fstype", mType, "from", device)
|
||||
}
|
||||
rawOptions := allocateIOVecs(options)
|
||||
for _, rawOption := range rawOptions {
|
||||
defer C.free(rawOption.iov_base)
|
||||
}
|
||||
|
||||
if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
|
||||
reason := C.GoString(C.strerror(*C.__error()))
|
||||
return fmt.Errorf("Failed to call nmount: %s", reason)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func unmount(target string, flag int) error {
|
||||
return syscall.Unmount(target, flag)
|
||||
}
|
|
@ -1,21 +0,0 @@
|
|||
package mount
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func mount(device, target, mType string, flag uintptr, data string) error {
|
||||
if err := syscall.Mount(device, target, mType, flag, data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If we have a read-only bind mount, remount it so the read-only flag takes effect.
|
||||
if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY {
|
||||
return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func unmount(target string, flag int) error {
|
||||
return syscall.Unmount(target, flag)
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
// +build !linux,!freebsd freebsd,!cgo
|
||||
|
||||
package mount
|
||||
|
||||
func mount(device, target, mType string, flag uintptr, data string) error {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func unmount(target string, flag int) error {
|
||||
panic("Not implemented")
|
||||
}
|
|
@ -1,40 +0,0 @@
|
|||
package mount
|
||||
|
||||
// Info reveals information about a particular mounted filesystem. This
|
||||
// struct is populated from the content in the /proc/<pid>/mountinfo file.
|
||||
type Info struct {
|
||||
// ID is a unique identifier of the mount (may be reused after umount).
|
||||
ID int
|
||||
|
||||
// Parent indicates the ID of the mount parent (or of self for the top of the
|
||||
// mount tree).
|
||||
Parent int
|
||||
|
||||
// Major indicates one half of the device ID which identifies the device class.
|
||||
Major int
|
||||
|
||||
// Minor indicates one half of the device ID which identifies a specific
|
||||
// instance of device.
|
||||
Minor int
|
||||
|
||||
// Root of the mount within the filesystem.
|
||||
Root string
|
||||
|
||||
// Mountpoint indicates the mount point relative to the process's root.
|
||||
Mountpoint string
|
||||
|
||||
// Opts represents mount-specific options.
|
||||
Opts string
|
||||
|
||||
// Optional represents optional fields.
|
||||
Optional string
|
||||
|
||||
// Fstype indicates the type of filesystem, such as EXT3.
|
||||
Fstype string
|
||||
|
||||
// Source indicates filesystem specific information or "none".
|
||||
Source string
|
||||
|
||||
// VfsOpts represents per super block options.
|
||||
VfsOpts string
|
||||
}
|
|
@ -1,41 +0,0 @@
|
|||
package mount
|
||||
|
||||
/*
|
||||
#include <sys/param.h>
|
||||
#include <sys/ucred.h>
|
||||
#include <sys/mount.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
|
||||
// bind mounts.
|
||||
func parseMountTable() ([]*Info, error) {
|
||||
var rawEntries *C.struct_statfs
|
||||
|
||||
count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
|
||||
if count == 0 {
|
||||
return nil, fmt.Errorf("Failed to call getmntinfo")
|
||||
}
|
||||
|
||||
var entries []C.struct_statfs
|
||||
header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
|
||||
header.Cap = count
|
||||
header.Len = count
|
||||
header.Data = uintptr(unsafe.Pointer(rawEntries))
|
||||
|
||||
var out []*Info
|
||||
for _, entry := range entries {
|
||||
var mountinfo Info
|
||||
mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
|
||||
mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
|
||||
mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
|
||||
out = append(out, &mountinfo)
|
||||
}
|
||||
return out, nil
|
||||
}
|
|
@ -1,95 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
/* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
|
||||
(1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
|
||||
|
||||
(1) mount ID: unique identifier of the mount (may be reused after umount)
|
||||
(2) parent ID: ID of parent (or of self for the top of the mount tree)
|
||||
(3) major:minor: value of st_dev for files on filesystem
|
||||
(4) root: root of the mount within the filesystem
|
||||
(5) mount point: mount point relative to the process's root
|
||||
(6) mount options: per mount options
|
||||
(7) optional fields: zero or more fields of the form "tag[:value]"
|
||||
(8) separator: marks the end of the optional fields
|
||||
(9) filesystem type: name of filesystem of the form "type[.subtype]"
|
||||
(10) mount source: filesystem specific information or "none"
|
||||
(11) super options: per super block options*/
|
||||
mountinfoFormat = "%d %d %d:%d %s %s %s %s"
|
||||
)
|
||||
|
||||
// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
|
||||
// bind mounts
|
||||
func parseMountTable() ([]*Info, error) {
|
||||
f, err := os.Open("/proc/self/mountinfo")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return parseInfoFile(f)
|
||||
}
|
||||
|
||||
func parseInfoFile(r io.Reader) ([]*Info, error) {
|
||||
var (
|
||||
s = bufio.NewScanner(r)
|
||||
out = []*Info{}
|
||||
)
|
||||
|
||||
for s.Scan() {
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
p = &Info{}
|
||||
text = s.Text()
|
||||
optionalFields string
|
||||
)
|
||||
|
||||
if _, err := fmt.Sscanf(text, mountinfoFormat,
|
||||
&p.ID, &p.Parent, &p.Major, &p.Minor,
|
||||
&p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil {
|
||||
return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
|
||||
}
|
||||
// Safe as mountinfo encodes mountpoints with spaces as \040.
|
||||
index := strings.Index(text, " - ")
|
||||
postSeparatorFields := strings.Fields(text[index+3:])
|
||||
if len(postSeparatorFields) < 3 {
|
||||
return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
|
||||
}
|
||||
|
||||
if optionalFields != "-" {
|
||||
p.Optional = optionalFields
|
||||
}
|
||||
|
||||
p.Fstype = postSeparatorFields[0]
|
||||
p.Source = postSeparatorFields[1]
|
||||
p.VfsOpts = strings.Join(postSeparatorFields[2:], " ")
|
||||
out = append(out, p)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// PidMountInfo collects the mounts for a specific process ID. If the process
|
||||
// ID is unknown, it is better to use `GetMounts` which will inspect
|
||||
// "/proc/self/mountinfo" instead.
|
||||
func PidMountInfo(pid int) ([]*Info, error) {
|
||||
f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return parseInfoFile(f)
|
||||
}
|
|
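As a hedged illustration of the Info struct and parseInfoFile code above, this sketch lists mounts by filesystem type via GetMounts; the import path is the same assumed vendored docker `pkg/mount` path as in the earlier example.

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/mount" // assumed vendored import path
)

func main() {
	// GetMounts parses /proc/self/mountinfo on Linux (see parseInfoFile above).
	infos, err := mount.GetMounts()
	if err != nil {
		log.Fatal(err)
	}
	for _, info := range infos {
		// Each Info carries the fields scanned from one mountinfo line.
		fmt.Printf("%-10s %-30s %s\n", info.Fstype, info.Mountpoint, info.Source)
	}
}
```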
@ -1,12 +0,0 @@
|
|||
// +build !linux,!freebsd freebsd,!cgo
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
func parseMountTable() ([]*Info, error) {
|
||||
return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
|
||||
}
|
|
@ -1,70 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package mount
|
||||
|
||||
// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
|
||||
// See the supported options in flags.go for further reference.
|
||||
func MakeShared(mountPoint string) error {
|
||||
return ensureMountedAs(mountPoint, "shared")
|
||||
}
|
||||
|
||||
// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
|
||||
// See the supported options in flags.go for further reference.
|
||||
func MakeRShared(mountPoint string) error {
|
||||
return ensureMountedAs(mountPoint, "rshared")
|
||||
}
|
||||
|
||||
// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
|
||||
// See the supported options in flags.go for further reference.
|
||||
func MakePrivate(mountPoint string) error {
|
||||
return ensureMountedAs(mountPoint, "private")
|
||||
}
|
||||
|
||||
// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
|
||||
// enabled. See the supported options in flags.go for further reference.
|
||||
func MakeRPrivate(mountPoint string) error {
|
||||
return ensureMountedAs(mountPoint, "rprivate")
|
||||
}
|
||||
|
||||
// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
|
||||
// See the supported options in flags.go for further reference.
|
||||
func MakeSlave(mountPoint string) error {
|
||||
return ensureMountedAs(mountPoint, "slave")
|
||||
}
|
||||
|
||||
// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
|
||||
// See the supported options in flags.go for further reference.
|
||||
func MakeRSlave(mountPoint string) error {
|
||||
return ensureMountedAs(mountPoint, "rslave")
|
||||
}
|
||||
|
||||
// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
|
||||
// enabled. See the supported options in flags.go for further reference.
|
||||
func MakeUnbindable(mountPoint string) error {
|
||||
return ensureMountedAs(mountPoint, "unbindable")
|
||||
}
|
||||
|
||||
// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
|
||||
// option enabled. See the supported options in flags.go for further reference.
|
||||
func MakeRUnbindable(mountPoint string) error {
|
||||
return ensureMountedAs(mountPoint, "runbindable")
|
||||
}
|
||||
|
||||
func ensureMountedAs(mountPoint, options string) error {
|
||||
mounted, err := Mounted(mountPoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !mounted {
|
||||
if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
mounted, err = Mounted(mountPoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ForceMount("", mountPoint, "none", options)
|
||||
}
|
|
@ -1,50 +0,0 @@
|
|||
# How to Contribute
|
||||
|
||||
## Getting Started
|
||||
|
||||
- Fork the repository on GitHub
|
||||
- Read the [README](README.markdown) for build and test instructions
|
||||
- Play with the project, submit bugs, submit patches!
|
||||
|
||||
## Contribution Flow
|
||||
|
||||
This is a rough outline of what a contributor's workflow looks like:
|
||||
|
||||
- Create a topic branch from where you want to base your work (usually master).
|
||||
- Make commits of logical units.
|
||||
- Make sure your commit messages are in the proper format (see below).
|
||||
- Push your changes to a topic branch in your fork of the repository.
|
||||
- Make sure the tests pass, and add any new tests as appropriate.
|
||||
- Submit a pull request to the original repository.
|
||||
|
||||
Thanks for your contributions!
|
||||
|
||||
### Format of the Commit Message
|
||||
|
||||
We follow a rough convention for commit messages that is designed to answer two
|
||||
questions: what changed and why. The subject line should feature the what and
|
||||
the body of the commit should describe the why.
|
||||
|
||||
```
|
||||
scripts: add the test-cluster command
|
||||
|
||||
this uses tmux to setup a test cluster that you can easily kill and
|
||||
start for debugging.
|
||||
|
||||
Fixes #38
|
||||
```
|
||||
|
||||
The format can be described more formally as follows:
|
||||
|
||||
```
|
||||
<subsystem>: <what changed>
|
||||
<BLANK LINE>
|
||||
<why this change was made>
|
||||
<BLANK LINE>
|
||||
<footer>
|
||||
```
|
||||
|
||||
The first line is the subject and should be no longer than 70 characters, the
|
||||
second line is always blank, and other lines should be wrapped at 80 characters.
|
||||
This allows the message to be easier to read on GitHub as well as in various
|
||||
git tools.
|
|
@ -1,25 +0,0 @@
|
|||
Copyright (c) 2013, Georg Reinke (<guelfey at gmail dot com>), Google
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -1,2 +0,0 @@
|
|||
Brandon Philips <brandon@ifup.org> (@philips)
|
||||
Brian Waldon <brian@waldon.cc> (@bcwaldon)
|
|
@ -1,41 +0,0 @@
|
|||
dbus
|
||||
----
|
||||
|
||||
dbus is a simple library that implements native Go client bindings for the
|
||||
D-Bus message bus system.
|
||||
|
||||
### Features
|
||||
|
||||
* Complete native implementation of the D-Bus message protocol
|
||||
* Go-like API (channels for signals / asynchronous method calls, Goroutine-safe connections)
|
||||
* Subpackages that help with the introspection / property interfaces
|
||||
|
||||
### Installation
|
||||
|
||||
This package requires Go 1.1. If you have installed it and set up your GOPATH, just run:
|
||||
|
||||
```
|
||||
go get github.com/godbus/dbus
|
||||
```
|
||||
|
||||
If you want to use the subpackages, you can install them the same way.
|
||||
|
||||
### Usage
|
||||
|
||||
The complete package documentation and some simple examples are available at
|
||||
[godoc.org](http://godoc.org/github.com/godbus/dbus). Also, the
|
||||
[_examples](https://github.com/godbus/dbus/tree/master/_examples) directory
|
||||
gives a short overview of basic usage.
|
||||
|
||||
#### Projects using godbus
|
||||
- [notify](https://github.com/esiqveland/notify) provides a library for desktop notifications over dbus.
|
||||
|
||||
Please note that the API is considered unstable for now and may change without
|
||||
further notice.
|
||||
|
||||
### License
|
||||
|
||||
go.dbus is available under the Simplified BSD License; see LICENSE for the full
|
||||
text.
|
||||
|
||||
Nearly all of the credit for this library goes to github.com/guelfey/go.dbus.
|
|
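A minimal, hedged sketch of the godbus API removed in this change, based on the Conn, Object, Call, and Store code in the hunks below; ListNames is a standard org.freedesktop.DBus bus-daemon method and is used here purely as an illustration.

```go
package main

import (
	"fmt"
	"log"

	"github.com/godbus/dbus"
)

func main() {
	// SessionBus returns a shared connection; Auth and Hello are done for you.
	conn, err := dbus.SessionBus()
	if err != nil {
		log.Fatal(err)
	}

	// Object returns a proxy for the bus daemon itself.
	obj := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")

	// Call issues a method call; Store copies the reply body into pointers.
	var names []string
	if err := obj.Call("org.freedesktop.DBus.ListNames", 0).Store(&names); err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(names), "names on the session bus")
}
```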
@ -1,253 +0,0 @@
|
|||
package dbus
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// AuthStatus represents the Status of an authentication mechanism.
|
||||
type AuthStatus byte
|
||||
|
||||
const (
|
||||
// AuthOk signals that authentication is finished; the next command
|
||||
// from the server should be an OK.
|
||||
AuthOk AuthStatus = iota
|
||||
|
||||
// AuthContinue signals that additional data is needed; the next command
|
||||
// from the server should be a DATA.
|
||||
AuthContinue
|
||||
|
||||
// AuthError signals an error; the server sent invalid data or some
|
||||
// other unexpected thing happened and the current authentication
|
||||
// process should be aborted.
|
||||
AuthError
|
||||
)
|
||||
|
||||
type authState byte
|
||||
|
||||
const (
|
||||
waitingForData authState = iota
|
||||
waitingForOk
|
||||
waitingForReject
|
||||
)
|
||||
|
||||
// Auth defines the behaviour of an authentication mechanism.
|
||||
type Auth interface {
|
||||
// Return the name of the mechanism, the argument to the first AUTH command
|
||||
// and the next status.
|
||||
FirstData() (name, resp []byte, status AuthStatus)
|
||||
|
||||
// Process the given DATA command, and return the argument to the DATA
|
||||
// command and the next status. If len(resp) == 0, no DATA command is sent.
|
||||
HandleData(data []byte) (resp []byte, status AuthStatus)
|
||||
}
|
||||
|
||||
// Auth authenticates the connection, trying the given list of authentication
|
||||
// mechanisms (in that order). If nil is passed, the EXTERNAL and
|
||||
// DBUS_COOKIE_SHA1 mechanisms are tried for the current user. For private
|
||||
// connections, this method must be called before sending any messages to the
|
||||
// bus. Auth must not be called on shared connections.
|
||||
func (conn *Conn) Auth(methods []Auth) error {
|
||||
if methods == nil {
|
||||
uid := strconv.Itoa(os.Getuid())
|
||||
methods = []Auth{AuthExternal(uid), AuthCookieSha1(uid, getHomeDir())}
|
||||
}
|
||||
in := bufio.NewReader(conn.transport)
|
||||
err := conn.transport.SendNullByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = authWriteLine(conn.transport, []byte("AUTH"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s, err := authReadLine(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(s) < 2 || !bytes.Equal(s[0], []byte("REJECTED")) {
|
||||
return errors.New("dbus: authentication protocol error")
|
||||
}
|
||||
s = s[1:]
|
||||
for _, v := range s {
|
||||
for _, m := range methods {
|
||||
if name, data, status := m.FirstData(); bytes.Equal(v, name) {
|
||||
var ok bool
|
||||
err = authWriteLine(conn.transport, []byte("AUTH"), []byte(v), data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch status {
|
||||
case AuthOk:
|
||||
err, ok = conn.tryAuth(m, waitingForOk, in)
|
||||
case AuthContinue:
|
||||
err, ok = conn.tryAuth(m, waitingForData, in)
|
||||
default:
|
||||
panic("dbus: invalid authentication status")
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ok {
|
||||
if conn.transport.SupportsUnixFDs() {
|
||||
err = authWriteLine(conn, []byte("NEGOTIATE_UNIX_FD"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
line, err := authReadLine(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch {
|
||||
case bytes.Equal(line[0], []byte("AGREE_UNIX_FD")):
|
||||
conn.EnableUnixFDs()
|
||||
conn.unixFD = true
|
||||
case bytes.Equal(line[0], []byte("ERROR")):
|
||||
default:
|
||||
return errors.New("dbus: authentication protocol error")
|
||||
}
|
||||
}
|
||||
err = authWriteLine(conn.transport, []byte("BEGIN"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go conn.inWorker()
|
||||
go conn.outWorker()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return errors.New("dbus: authentication failed")
|
||||
}
|
||||
|
||||
// tryAuth tries to authenticate with m as the mechanism, using state as the
|
||||
// initial authState and in for reading input. It returns (nil, true) on
|
||||
// success, (nil, false) on a REJECTED and (someErr, false) if some other
|
||||
// error occurred.
|
||||
func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, bool) {
|
||||
for {
|
||||
s, err := authReadLine(in)
|
||||
if err != nil {
|
||||
return err, false
|
||||
}
|
||||
switch {
|
||||
case state == waitingForData && string(s[0]) == "DATA":
|
||||
if len(s) != 2 {
|
||||
err = authWriteLine(conn.transport, []byte("ERROR"))
|
||||
if err != nil {
|
||||
return err, false
|
||||
}
|
||||
continue
|
||||
}
|
||||
data, status := m.HandleData(s[1])
|
||||
switch status {
|
||||
case AuthOk, AuthContinue:
|
||||
if len(data) != 0 {
|
||||
err = authWriteLine(conn.transport, []byte("DATA"), data)
|
||||
if err != nil {
|
||||
return err, false
|
||||
}
|
||||
}
|
||||
if status == AuthOk {
|
||||
state = waitingForOk
|
||||
}
|
||||
case AuthError:
|
||||
err = authWriteLine(conn.transport, []byte("ERROR"))
|
||||
if err != nil {
|
||||
return err, false
|
||||
}
|
||||
}
|
||||
case state == waitingForData && string(s[0]) == "REJECTED":
|
||||
return nil, false
|
||||
case state == waitingForData && string(s[0]) == "ERROR":
|
||||
err = authWriteLine(conn.transport, []byte("CANCEL"))
|
||||
if err != nil {
|
||||
return err, false
|
||||
}
|
||||
state = waitingForReject
|
||||
case state == waitingForData && string(s[0]) == "OK":
|
||||
if len(s) != 2 {
|
||||
err = authWriteLine(conn.transport, []byte("CANCEL"))
|
||||
if err != nil {
|
||||
return err, false
|
||||
}
|
||||
state = waitingForReject
|
||||
}
|
||||
conn.uuid = string(s[1])
|
||||
return nil, true
|
||||
case state == waitingForData:
|
||||
err = authWriteLine(conn.transport, []byte("ERROR"))
|
||||
if err != nil {
|
||||
return err, false
|
||||
}
|
||||
case state == waitingForOk && string(s[0]) == "OK":
|
||||
if len(s) != 2 {
|
||||
err = authWriteLine(conn.transport, []byte("CANCEL"))
|
||||
if err != nil {
|
||||
return err, false
|
||||
}
|
||||
state = waitingForReject
|
||||
}
|
||||
conn.uuid = string(s[1])
|
||||
return nil, true
|
||||
case state == waitingForOk && string(s[0]) == "REJECTED":
|
||||
return nil, false
|
||||
case state == waitingForOk && (string(s[0]) == "DATA" ||
|
||||
string(s[0]) == "ERROR"):
|
||||
|
||||
err = authWriteLine(conn.transport, []byte("CANCEL"))
|
||||
if err != nil {
|
||||
return err, false
|
||||
}
|
||||
state = waitingForReject
|
||||
case state == waitingForOk:
|
||||
err = authWriteLine(conn.transport, []byte("ERROR"))
|
||||
if err != nil {
|
||||
return err, false
|
||||
}
|
||||
case state == waitingForReject && string(s[0]) == "REJECTED":
|
||||
return nil, false
|
||||
case state == waitingForReject:
|
||||
return errors.New("dbus: authentication protocol error"), false
|
||||
default:
|
||||
panic("dbus: invalid auth state")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// authReadLine reads a line and separates it into its fields.
|
||||
func authReadLine(in *bufio.Reader) ([][]byte, error) {
|
||||
data, err := in.ReadBytes('\n')
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
data = bytes.TrimSuffix(data, []byte("\r\n"))
|
||||
return bytes.Split(data, []byte{' '}), nil
|
||||
}
|
||||
|
||||
// authWriteLine writes the given line in the authentication protocol format
|
||||
// (elements of data separated by a " " and terminated by "\r\n").
|
||||
func authWriteLine(out io.Writer, data ...[]byte) error {
|
||||
buf := make([]byte, 0)
|
||||
for i, v := range data {
|
||||
buf = append(buf, v...)
|
||||
if i != len(data)-1 {
|
||||
buf = append(buf, ' ')
|
||||
}
|
||||
}
|
||||
buf = append(buf, '\r')
|
||||
buf = append(buf, '\n')
|
||||
n, err := out.Write(buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n != len(buf) {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,26 +0,0 @@
|
|||
package dbus
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
)
|
||||
|
||||
// AuthExternal returns an Auth that authenticates as the given user with the
|
||||
// EXTERNAL mechanism.
|
||||
func AuthExternal(user string) Auth {
|
||||
return authExternal{user}
|
||||
}
|
||||
|
||||
// AuthExternal implements the EXTERNAL authentication mechanism.
|
||||
type authExternal struct {
|
||||
user string
|
||||
}
|
||||
|
||||
func (a authExternal) FirstData() ([]byte, []byte, AuthStatus) {
|
||||
b := make([]byte, 2*len(a.user))
|
||||
hex.Encode(b, []byte(a.user))
|
||||
return []byte("EXTERNAL"), b, AuthOk
|
||||
}
|
||||
|
||||
func (a authExternal) HandleData(b []byte) ([]byte, AuthStatus) {
|
||||
return nil, AuthError
|
||||
}
|
|
@ -1,102 +0,0 @@
|
|||
package dbus
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"os"
|
||||
)
|
||||
|
||||
// AuthCookieSha1 returns an Auth that authenticates as the given user with the
|
||||
// DBUS_COOKIE_SHA1 mechanism. The home parameter should specify the home
|
||||
// directory of the user.
|
||||
func AuthCookieSha1(user, home string) Auth {
|
||||
return authCookieSha1{user, home}
|
||||
}
|
||||
|
||||
type authCookieSha1 struct {
|
||||
user, home string
|
||||
}
|
||||
|
||||
func (a authCookieSha1) FirstData() ([]byte, []byte, AuthStatus) {
|
||||
b := make([]byte, 2*len(a.user))
|
||||
hex.Encode(b, []byte(a.user))
|
||||
return []byte("DBUS_COOKIE_SHA1"), b, AuthContinue
|
||||
}
|
||||
|
||||
func (a authCookieSha1) HandleData(data []byte) ([]byte, AuthStatus) {
|
||||
challenge := make([]byte, len(data)/2)
|
||||
_, err := hex.Decode(challenge, data)
|
||||
if err != nil {
|
||||
return nil, AuthError
|
||||
}
|
||||
b := bytes.Split(challenge, []byte{' '})
|
||||
if len(b) != 3 {
|
||||
return nil, AuthError
|
||||
}
|
||||
context := b[0]
|
||||
id := b[1]
|
||||
svchallenge := b[2]
|
||||
cookie := a.getCookie(context, id)
|
||||
if cookie == nil {
|
||||
return nil, AuthError
|
||||
}
|
||||
clchallenge := a.generateChallenge()
|
||||
if clchallenge == nil {
|
||||
return nil, AuthError
|
||||
}
|
||||
hash := sha1.New()
|
||||
hash.Write(bytes.Join([][]byte{svchallenge, clchallenge, cookie}, []byte{':'}))
|
||||
hexhash := make([]byte, 2*hash.Size())
|
||||
hex.Encode(hexhash, hash.Sum(nil))
|
||||
data = append(clchallenge, ' ')
|
||||
data = append(data, hexhash...)
|
||||
resp := make([]byte, 2*len(data))
|
||||
hex.Encode(resp, data)
|
||||
return resp, AuthOk
|
||||
}
|
||||
|
||||
// getCookie searches for the cookie identified by id in context and returns
|
||||
// the cookie content or nil. (Since HandleData can't return a specific error,
|
||||
// but only whether an error occurred, this function also doesn't bother to
|
||||
// return an error.)
|
||||
func (a authCookieSha1) getCookie(context, id []byte) []byte {
|
||||
file, err := os.Open(a.home + "/.dbus-keyrings/" + string(context))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
defer file.Close()
|
||||
rd := bufio.NewReader(file)
|
||||
for {
|
||||
line, err := rd.ReadBytes('\n')
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
line = line[:len(line)-1]
|
||||
b := bytes.Split(line, []byte{' '})
|
||||
if len(b) != 3 {
|
||||
return nil
|
||||
}
|
||||
if bytes.Equal(b[0], id) {
|
||||
return b[2]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// generateChallenge returns a random, hex-encoded challenge, or nil on error
|
||||
// (see above).
|
||||
func (a authCookieSha1) generateChallenge() []byte {
|
||||
b := make([]byte, 16)
|
||||
n, err := rand.Read(b)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if n != 16 {
|
||||
return nil
|
||||
}
|
||||
enc := make([]byte, 32)
|
||||
hex.Encode(enc, b)
|
||||
return enc
|
||||
}
|
|
@ -1,36 +0,0 @@
|
|||
package dbus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Call represents a pending or completed method call.
|
||||
type Call struct {
|
||||
Destination string
|
||||
Path ObjectPath
|
||||
Method string
|
||||
Args []interface{}
|
||||
|
||||
// Strobes when the call is complete.
|
||||
Done chan *Call
|
||||
|
||||
// After completion, the error status. If this is non-nil, it may be an
|
||||
// error message from the peer (with Error as its type) or some other error.
|
||||
Err error
|
||||
|
||||
// Holds the response once the call is done.
|
||||
Body []interface{}
|
||||
}
|
||||
|
||||
var errSignature = errors.New("dbus: mismatched signature")
|
||||
|
||||
// Store stores the body of the reply into the provided pointers. It returns
|
||||
// an error if the signatures of the body and retvalues don't match, or if
|
||||
// the error status is not nil.
|
||||
func (c *Call) Store(retvalues ...interface{}) error {
|
||||
if c.Err != nil {
|
||||
return c.Err
|
||||
}
|
||||
|
||||
return Store(c.Body, retvalues...)
|
||||
}
|
|
@ -1,636 +0,0 @@
|
|||
package dbus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket"
|
||||
|
||||
var (
|
||||
systemBus *Conn
|
||||
systemBusLck sync.Mutex
|
||||
sessionBus *Conn
|
||||
sessionBusLck sync.Mutex
|
||||
)
|
||||
|
||||
// ErrClosed is the error returned by calls on a closed connection.
|
||||
var ErrClosed = errors.New("dbus: connection closed by user")
|
||||
|
||||
// Conn represents a connection to a message bus (usually, the system or
|
||||
// session bus).
|
||||
//
|
||||
// Connections are either shared or private. Shared connections
|
||||
// are shared between calls to the functions that return them. As a result,
|
||||
// the methods Close, Auth and Hello must not be called on them.
|
||||
//
|
||||
// Multiple goroutines may invoke methods on a connection simultaneously.
|
||||
type Conn struct {
|
||||
transport
|
||||
|
||||
busObj BusObject
|
||||
unixFD bool
|
||||
uuid string
|
||||
|
||||
names []string
|
||||
namesLck sync.RWMutex
|
||||
|
||||
serialLck sync.Mutex
|
||||
nextSerial uint32
|
||||
serialUsed map[uint32]bool
|
||||
|
||||
calls map[uint32]*Call
|
||||
callsLck sync.RWMutex
|
||||
|
||||
handlers map[ObjectPath]map[string]exportWithMapping
|
||||
handlersLck sync.RWMutex
|
||||
|
||||
out chan *Message
|
||||
closed bool
|
||||
outLck sync.RWMutex
|
||||
|
||||
signals []chan<- *Signal
|
||||
signalsLck sync.Mutex
|
||||
|
||||
eavesdropped chan<- *Message
|
||||
eavesdroppedLck sync.Mutex
|
||||
}
|
||||
|
||||
// SessionBus returns a shared connection to the session bus, connecting to it
|
||||
// if not already done.
|
||||
func SessionBus() (conn *Conn, err error) {
|
||||
sessionBusLck.Lock()
|
||||
defer sessionBusLck.Unlock()
|
||||
if sessionBus != nil {
|
||||
return sessionBus, nil
|
||||
}
|
||||
defer func() {
|
||||
if conn != nil {
|
||||
sessionBus = conn
|
||||
}
|
||||
}()
|
||||
conn, err = SessionBusPrivate()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = conn.Auth(nil); err != nil {
|
||||
conn.Close()
|
||||
conn = nil
|
||||
return
|
||||
}
|
||||
if err = conn.Hello(); err != nil {
|
||||
conn.Close()
|
||||
conn = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SessionBusPrivate returns a new private connection to the session bus.
|
||||
func SessionBusPrivate() (*Conn, error) {
|
||||
address := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
|
||||
if address != "" && address != "autolaunch:" {
|
||||
return Dial(address)
|
||||
}
|
||||
|
||||
return sessionBusPlatform()
|
||||
}
|
||||
|
||||
// SystemBus returns a shared connection to the system bus, connecting to it if
|
||||
// not already done.
|
||||
func SystemBus() (conn *Conn, err error) {
|
||||
systemBusLck.Lock()
|
||||
defer systemBusLck.Unlock()
|
||||
if systemBus != nil {
|
||||
return systemBus, nil
|
||||
}
|
||||
defer func() {
|
||||
if conn != nil {
|
||||
systemBus = conn
|
||||
}
|
||||
}()
|
||||
conn, err = SystemBusPrivate()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = conn.Auth(nil); err != nil {
|
||||
conn.Close()
|
||||
conn = nil
|
||||
return
|
||||
}
|
||||
if err = conn.Hello(); err != nil {
|
||||
conn.Close()
|
||||
conn = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SystemBusPrivate returns a new private connection to the system bus.
|
||||
func SystemBusPrivate() (*Conn, error) {
|
||||
address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
|
||||
if address != "" {
|
||||
return Dial(address)
|
||||
}
|
||||
return Dial(defaultSystemBusAddress)
|
||||
}
|
||||
|
||||
// Dial establishes a new private connection to the message bus specified by address.
|
||||
func Dial(address string) (*Conn, error) {
|
||||
tr, err := getTransport(address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newConn(tr)
|
||||
}
|
||||
|
||||
// NewConn creates a new private *Conn from an already established connection.
|
||||
func NewConn(conn io.ReadWriteCloser) (*Conn, error) {
|
||||
return newConn(genericTransport{conn})
|
||||
}
|
||||
|
||||
// newConn creates a new *Conn from a transport.
|
||||
func newConn(tr transport) (*Conn, error) {
|
||||
conn := new(Conn)
|
||||
conn.transport = tr
|
||||
conn.calls = make(map[uint32]*Call)
|
||||
conn.out = make(chan *Message, 10)
|
||||
conn.handlers = make(map[ObjectPath]map[string]exportWithMapping)
|
||||
conn.nextSerial = 1
|
||||
conn.serialUsed = map[uint32]bool{0: true}
|
||||
conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// BusObject returns the object owned by the bus daemon which handles
|
||||
// administrative requests.
|
||||
func (conn *Conn) BusObject() BusObject {
|
||||
return conn.busObj
|
||||
}
|
||||
|
||||
// Close closes the connection. Any blocked operations will return with errors
|
||||
// and the channels passed to Eavesdrop and Signal are closed. This method must
|
||||
// not be called on shared connections.
|
||||
func (conn *Conn) Close() error {
|
||||
conn.outLck.Lock()
|
||||
if conn.closed {
|
||||
// inWorker calls Close on read error; the read error may
|
||||
// be caused by another caller calling Close to shut down the
|
||||
// dbus connection, a double-close scenario we prevent here.
|
||||
conn.outLck.Unlock()
|
||||
return nil
|
||||
}
|
||||
close(conn.out)
|
||||
conn.closed = true
|
||||
conn.outLck.Unlock()
|
||||
conn.signalsLck.Lock()
|
||||
for _, ch := range conn.signals {
|
||||
close(ch)
|
||||
}
|
||||
conn.signalsLck.Unlock()
|
||||
conn.eavesdroppedLck.Lock()
|
||||
if conn.eavesdropped != nil {
|
||||
close(conn.eavesdropped)
|
||||
}
|
||||
conn.eavesdroppedLck.Unlock()
|
||||
return conn.transport.Close()
|
||||
}
|
||||
|
||||
// Eavesdrop causes conn to send all incoming messages to the given channel
|
||||
// without further processing. Method replies, errors and signals will not be
|
||||
// sent to the appropriate channels and method calls will not be handled. If nil
|
||||
// is passed, the normal behaviour is restored.
|
||||
//
|
||||
// The caller has to make sure that ch is sufficiently buffered;
|
||||
// if a message arrives when a write to ch is not possible, the message is
|
||||
// discarded.
|
||||
func (conn *Conn) Eavesdrop(ch chan<- *Message) {
|
||||
conn.eavesdroppedLck.Lock()
|
||||
conn.eavesdropped = ch
|
||||
conn.eavesdroppedLck.Unlock()
|
||||
}
|
||||
|
||||
// getSerial returns an unused serial.
|
||||
func (conn *Conn) getSerial() uint32 {
|
||||
conn.serialLck.Lock()
|
||||
defer conn.serialLck.Unlock()
|
||||
n := conn.nextSerial
|
||||
for conn.serialUsed[n] {
|
||||
n++
|
||||
}
|
||||
conn.serialUsed[n] = true
|
||||
conn.nextSerial = n + 1
|
||||
return n
|
||||
}
|
||||
|
||||
// Hello sends the initial org.freedesktop.DBus.Hello call. This method must be
|
||||
// called after authentication, but before sending any other messages to the
|
||||
// bus. Hello must not be called for shared connections.
|
||||
func (conn *Conn) Hello() error {
|
||||
var s string
|
||||
err := conn.busObj.Call("org.freedesktop.DBus.Hello", 0).Store(&s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
conn.namesLck.Lock()
|
||||
conn.names = make([]string, 1)
|
||||
conn.names[0] = s
|
||||
conn.namesLck.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// inWorker runs in its own goroutine, reading incoming messages from the
|
||||
// transport and dispatching them appropriately.
|
||||
func (conn *Conn) inWorker() {
|
||||
for {
|
||||
msg, err := conn.ReadMessage()
|
||||
if err == nil {
|
||||
conn.eavesdroppedLck.Lock()
|
||||
if conn.eavesdropped != nil {
|
||||
select {
|
||||
case conn.eavesdropped <- msg:
|
||||
default:
|
||||
}
|
||||
conn.eavesdroppedLck.Unlock()
|
||||
continue
|
||||
}
|
||||
conn.eavesdroppedLck.Unlock()
|
||||
dest, _ := msg.Headers[FieldDestination].value.(string)
|
||||
found := false
|
||||
if dest == "" {
|
||||
found = true
|
||||
} else {
|
||||
conn.namesLck.RLock()
|
||||
if len(conn.names) == 0 {
|
||||
found = true
|
||||
}
|
||||
for _, v := range conn.names {
|
||||
if dest == v {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
conn.namesLck.RUnlock()
|
||||
}
|
||||
if !found {
|
||||
// Eavesdropped a message, but no channel for it is registered.
|
||||
// Ignore it.
|
||||
continue
|
||||
}
|
||||
switch msg.Type {
|
||||
case TypeMethodReply, TypeError:
|
||||
serial := msg.Headers[FieldReplySerial].value.(uint32)
|
||||
conn.callsLck.Lock()
|
||||
if c, ok := conn.calls[serial]; ok {
|
||||
if msg.Type == TypeError {
|
||||
name, _ := msg.Headers[FieldErrorName].value.(string)
|
||||
c.Err = Error{name, msg.Body}
|
||||
} else {
|
||||
c.Body = msg.Body
|
||||
}
|
||||
c.Done <- c
|
||||
conn.serialLck.Lock()
|
||||
delete(conn.serialUsed, serial)
|
||||
conn.serialLck.Unlock()
|
||||
delete(conn.calls, serial)
|
||||
}
|
||||
conn.callsLck.Unlock()
|
||||
case TypeSignal:
|
||||
iface := msg.Headers[FieldInterface].value.(string)
|
||||
member := msg.Headers[FieldMember].value.(string)
|
||||
// as per http://dbus.freedesktop.org/doc/dbus-specification.html ,
|
||||
// sender is optional for signals.
|
||||
sender, _ := msg.Headers[FieldSender].value.(string)
|
||||
if iface == "org.freedesktop.DBus" && sender == "org.freedesktop.DBus" {
|
||||
if member == "NameLost" {
|
||||
// If we lost the name on the bus, remove it from our
|
||||
// tracking list.
|
||||
name, ok := msg.Body[0].(string)
|
||||
if !ok {
|
||||
panic("Unable to read the lost name")
|
||||
}
|
||||
conn.namesLck.Lock()
|
||||
for i, v := range conn.names {
|
||||
if v == name {
|
||||
conn.names = append(conn.names[:i],
|
||||
conn.names[i+1:]...)
|
||||
}
|
||||
}
|
||||
conn.namesLck.Unlock()
|
||||
} else if member == "NameAcquired" {
|
||||
// If we acquired the name on the bus, add it to our
|
||||
// tracking list.
|
||||
name, ok := msg.Body[0].(string)
|
||||
if !ok {
|
||||
panic("Unable to read the acquired name")
|
||||
}
|
||||
conn.namesLck.Lock()
|
||||
conn.names = append(conn.names, name)
|
||||
conn.namesLck.Unlock()
|
||||
}
|
||||
}
|
||||
signal := &Signal{
|
||||
Sender: sender,
|
||||
Path: msg.Headers[FieldPath].value.(ObjectPath),
|
||||
Name: iface + "." + member,
|
||||
Body: msg.Body,
|
||||
}
|
||||
conn.signalsLck.Lock()
|
||||
for _, ch := range conn.signals {
|
||||
ch <- signal
|
||||
}
|
||||
conn.signalsLck.Unlock()
|
||||
case TypeMethodCall:
|
||||
go conn.handleCall(msg)
|
||||
}
|
||||
} else if _, ok := err.(InvalidMessageError); !ok {
|
||||
// Some read error occurred (usually EOF); we can't really do
|
||||
// anything but shut everything down and return errors to all
|
||||
// pending replies.
|
||||
conn.Close()
|
||||
conn.callsLck.RLock()
|
||||
for _, v := range conn.calls {
|
||||
v.Err = err
|
||||
v.Done <- v
|
||||
}
|
||||
conn.callsLck.RUnlock()
|
||||
return
|
||||
}
|
||||
// invalid messages are ignored
|
||||
}
|
||||
}
|
||||
|
||||
// Names returns the list of all names that are currently owned by this
|
||||
// connection. The slice is always at least one element long, the first element
|
||||
// being the unique name of the connection.
|
||||
func (conn *Conn) Names() []string {
|
||||
conn.namesLck.RLock()
|
||||
// copy the slice so it can't be modified
|
||||
s := make([]string, len(conn.names))
|
||||
copy(s, conn.names)
|
||||
conn.namesLck.RUnlock()
|
||||
return s
|
||||
}
|
||||
|
||||
// Object returns the object identified by the given destination name and path.
|
||||
func (conn *Conn) Object(dest string, path ObjectPath) BusObject {
|
||||
return &Object{conn, dest, path}
|
||||
}
|
||||
|
||||
// outWorker runs in its own goroutine, encoding and sending messages that are
|
||||
// sent to conn.out.
|
||||
func (conn *Conn) outWorker() {
|
||||
for msg := range conn.out {
|
||||
err := conn.SendMessage(msg)
|
||||
conn.callsLck.RLock()
|
||||
if err != nil {
|
||||
if c := conn.calls[msg.serial]; c != nil {
|
||||
c.Err = err
|
||||
c.Done <- c
|
||||
}
|
||||
conn.serialLck.Lock()
|
||||
delete(conn.serialUsed, msg.serial)
|
||||
conn.serialLck.Unlock()
|
||||
} else if msg.Type != TypeMethodCall {
|
||||
conn.serialLck.Lock()
|
||||
delete(conn.serialUsed, msg.serial)
|
||||
conn.serialLck.Unlock()
|
||||
}
|
||||
conn.callsLck.RUnlock()
|
||||
}
|
||||
}
|
||||
|
||||
// Send sends the given message to the message bus. You usually don't need to
|
||||
// use this; use the higher-level equivalents (Call / Go, Emit and Export)
|
||||
// instead. If msg is a method call and NoReplyExpected is not set, a non-nil
|
||||
// call is returned and the same value is sent to ch (which must be buffered)
|
||||
// once the call is complete. Otherwise, ch is ignored and a Call structure is
|
||||
// returned of which only the Err member is valid.
|
||||
func (conn *Conn) Send(msg *Message, ch chan *Call) *Call {
|
||||
var call *Call
|
||||
|
||||
msg.serial = conn.getSerial()
|
||||
if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 {
|
||||
if ch == nil {
|
||||
ch = make(chan *Call, 5)
|
||||
} else if cap(ch) == 0 {
|
||||
panic("dbus: unbuffered channel passed to (*Conn).Send")
|
||||
}
|
||||
call = new(Call)
|
||||
call.Destination, _ = msg.Headers[FieldDestination].value.(string)
|
||||
call.Path, _ = msg.Headers[FieldPath].value.(ObjectPath)
|
||||
iface, _ := msg.Headers[FieldInterface].value.(string)
|
||||
member, _ := msg.Headers[FieldMember].value.(string)
|
||||
call.Method = iface + "." + member
|
||||
call.Args = msg.Body
|
||||
call.Done = ch
|
||||
conn.callsLck.Lock()
|
||||
conn.calls[msg.serial] = call
|
||||
conn.callsLck.Unlock()
|
||||
conn.outLck.RLock()
|
||||
if conn.closed {
|
||||
call.Err = ErrClosed
|
||||
call.Done <- call
|
||||
} else {
|
||||
conn.out <- msg
|
||||
}
|
||||
conn.outLck.RUnlock()
|
||||
} else {
|
||||
conn.outLck.RLock()
|
||||
if conn.closed {
|
||||
call = &Call{Err: ErrClosed}
|
||||
} else {
|
||||
conn.out <- msg
|
||||
call = &Call{Err: nil}
|
||||
}
|
||||
conn.outLck.RUnlock()
|
||||
}
|
||||
return call
|
||||
}
|
||||
|
||||
// sendError creates an error message corresponding to the parameters and sends
|
||||
// it to conn.out.
|
||||
func (conn *Conn) sendError(e Error, dest string, serial uint32) {
|
||||
msg := new(Message)
|
||||
msg.Type = TypeError
|
||||
msg.serial = conn.getSerial()
|
||||
msg.Headers = make(map[HeaderField]Variant)
|
||||
if dest != "" {
|
||||
msg.Headers[FieldDestination] = MakeVariant(dest)
|
||||
}
|
||||
msg.Headers[FieldErrorName] = MakeVariant(e.Name)
|
||||
msg.Headers[FieldReplySerial] = MakeVariant(serial)
|
||||
msg.Body = e.Body
|
||||
if len(e.Body) > 0 {
|
||||
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...))
|
||||
}
|
||||
conn.outLck.RLock()
|
||||
if !conn.closed {
|
||||
conn.out <- msg
|
||||
}
|
||||
conn.outLck.RUnlock()
|
||||
}
|
||||
|
||||
// sendReply creates a method reply message corresponding to the parameters and
|
||||
// sends it to conn.out.
|
||||
func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) {
|
||||
msg := new(Message)
|
||||
msg.Type = TypeMethodReply
|
||||
msg.serial = conn.getSerial()
|
||||
msg.Headers = make(map[HeaderField]Variant)
|
||||
if dest != "" {
|
||||
msg.Headers[FieldDestination] = MakeVariant(dest)
|
||||
}
|
||||
msg.Headers[FieldReplySerial] = MakeVariant(serial)
|
||||
msg.Body = values
|
||||
if len(values) > 0 {
|
||||
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
|
||||
}
|
||||
conn.outLck.RLock()
|
||||
if !conn.closed {
|
||||
conn.out <- msg
|
||||
}
|
||||
conn.outLck.RUnlock()
|
||||
}
|
||||
|
||||
// Signal registers the given channel to be passed all received signal messages.
|
||||
// The caller has to make sure that ch is sufficiently buffered; if a message
|
||||
// arrives when a write to ch is not possible, it is discarded.
|
||||
//
|
||||
// Multiple of these channels can be registered at the same time.
|
||||
//
|
||||
// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a
|
||||
// channel for eavesdropped messages, this channel receives all signals, and
|
||||
// none of the channels passed to Signal will receive any signals.
|
||||
func (conn *Conn) Signal(ch chan<- *Signal) {
|
||||
conn.signalsLck.Lock()
|
||||
conn.signals = append(conn.signals, ch)
|
||||
conn.signalsLck.Unlock()
|
||||
}
|
||||
|
||||
// RemoveSignal removes the given channel from the list of the registered channels.
|
||||
func (conn *Conn) RemoveSignal(ch chan<- *Signal) {
|
||||
conn.signalsLck.Lock()
|
||||
for i := len(conn.signals) - 1; i >= 0; i-- {
|
||||
if ch == conn.signals[i] {
|
||||
copy(conn.signals[i:], conn.signals[i+1:])
|
||||
conn.signals[len(conn.signals)-1] = nil
|
||||
conn.signals = conn.signals[:len(conn.signals)-1]
|
||||
}
|
||||
}
|
||||
conn.signalsLck.Unlock()
|
||||
}
|
||||
|
||||
// SupportsUnixFDs returns whether the underlying transport supports passing of
|
||||
// unix file descriptors. If this is false, method calls containing unix file
|
||||
// descriptors will return an error and emitted signals containing them will
|
||||
// not be sent.
|
||||
func (conn *Conn) SupportsUnixFDs() bool {
|
||||
return conn.unixFD
|
||||
}
|
||||
|
||||
// Error represents a D-Bus message of type Error.
|
||||
type Error struct {
|
||||
Name string
|
||||
Body []interface{}
|
||||
}
|
||||
|
||||
func NewError(name string, body []interface{}) *Error {
|
||||
return &Error{name, body}
|
||||
}
|
||||
|
||||
func (e Error) Error() string {
|
||||
if len(e.Body) >= 1 {
|
||||
s, ok := e.Body[0].(string)
|
||||
if ok {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return e.Name
|
||||
}
|
||||
|
||||
// Signal represents a D-Bus message of type Signal. The name member is given in
|
||||
// "interface.member" notation, e.g. org.freedesktop.D-Bus.NameLost.
|
||||
type Signal struct {
|
||||
Sender string
|
||||
Path ObjectPath
|
||||
Name string
|
||||
Body []interface{}
|
||||
}
|
||||
|
||||
// transport is a D-Bus transport.
|
||||
type transport interface {
|
||||
// Read and Write raw data (for example, for the authentication protocol).
|
||||
io.ReadWriteCloser
|
||||
|
||||
// Send the initial null byte used for the EXTERNAL mechanism.
|
||||
SendNullByte() error
|
||||
|
||||
// Returns whether this transport supports passing Unix FDs.
|
||||
SupportsUnixFDs() bool
|
||||
|
||||
// Signal the transport that Unix FD passing is enabled for this connection.
|
||||
EnableUnixFDs()
|
||||
|
||||
// Read / send a message, handling things like Unix FDs.
|
||||
ReadMessage() (*Message, error)
|
||||
SendMessage(*Message) error
|
||||
}
|
||||
|
||||
var (
|
||||
transports = make(map[string]func(string) (transport, error))
|
||||
)
|
||||
|
||||
func getTransport(address string) (transport, error) {
|
||||
var err error
|
||||
var t transport
|
||||
|
||||
addresses := strings.Split(address, ";")
|
||||
for _, v := range addresses {
|
||||
i := strings.IndexRune(v, ':')
|
||||
if i == -1 {
|
||||
err = errors.New("dbus: invalid bus address (no transport)")
|
||||
continue
|
||||
}
|
||||
f := transports[v[:i]]
|
||||
if f == nil {
|
||||
err = errors.New("dbus: invalid bus address (invalid or unsupported transport)")
|
||||
continue
|
||||
}
|
||||
t, err = f(v[i+1:])
|
||||
if err == nil {
|
||||
return t, nil
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// dereferenceAll returns a slice that, assuming that vs is a slice of pointers
|
||||
// of arbitrary types, contains the values that are obtained from dereferencing
|
||||
// all elements in vs.
|
||||
func dereferenceAll(vs []interface{}) []interface{} {
|
||||
for i := range vs {
|
||||
v := reflect.ValueOf(vs[i])
|
||||
v = v.Elem()
|
||||
vs[i] = v.Interface()
|
||||
}
|
||||
return vs
|
||||
}
|
||||
|
||||
// getKey gets a key from the list of keys. Returns "" on error or if the key is not found.
|
||||
func getKey(s, key string) string {
|
||||
i := strings.Index(s, key)
|
||||
if i == -1 {
|
||||
return ""
|
||||
}
|
||||
if i+len(key)+1 >= len(s) || s[i+len(key)] != '=' {
|
||||
return ""
|
||||
}
|
||||
j := strings.Index(s, ",")
|
||||
if j == -1 {
|
||||
j = len(s)
|
||||
}
|
||||
return s[i+len(key)+1 : j]
|
||||
}
|
|
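Building on the Signal registration and BusObject pieces in the conn.go hunk above, a hedged sketch of subscribing to bus signals. AddMatch is a standard org.freedesktop.DBus method rather than something defined in this package, and the match rule string is purely illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		log.Fatal(err)
	}

	// Ask the bus daemon to route matching signals to this connection.
	// AddMatch is a standard bus-daemon method; the rule here is illustrative.
	call := conn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
		"type='signal',interface='org.freedesktop.DBus',member='NameOwnerChanged'")
	if call.Err != nil {
		log.Fatal(call.Err)
	}

	// Signal registers a channel for all received signals; writes that cannot
	// proceed are dropped, so buffer it (see the Signal doc comment above).
	ch := make(chan *dbus.Signal, 16)
	conn.Signal(ch)

	for sig := range ch {
		fmt.Println(sig.Name, sig.Body)
	}
}
```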
@ -1,21 +0,0 @@
|
|||
package dbus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func sessionBusPlatform() (*Conn, error) {
|
||||
cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET")
|
||||
b, err := cmd.CombinedOutput()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(b) == 0 {
|
||||
return nil, errors.New("dbus: couldn't determine address of session bus")
|
||||
}
|
||||
|
||||
return Dial("unix:path=" + string(b[:len(b)-1]))
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
// +build !darwin
|
||||
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func sessionBusPlatform() (*Conn, error) {
|
||||
cmd := exec.Command("dbus-launch")
|
||||
b, err := cmd.CombinedOutput()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
i := bytes.IndexByte(b, '=')
|
||||
j := bytes.IndexByte(b, '\n')
|
||||
|
||||
if i == -1 || j == -1 {
|
||||
return nil, errors.New("dbus: couldn't determine address of session bus")
|
||||
}
|
||||
|
||||
return Dial(string(b[i+1 : j]))
|
||||
}
|
|
@ -1,258 +0,0 @@
|
|||
package dbus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
byteType = reflect.TypeOf(byte(0))
|
||||
boolType = reflect.TypeOf(false)
|
||||
uint8Type = reflect.TypeOf(uint8(0))
|
||||
int16Type = reflect.TypeOf(int16(0))
|
||||
uint16Type = reflect.TypeOf(uint16(0))
|
||||
int32Type = reflect.TypeOf(int32(0))
|
||||
uint32Type = reflect.TypeOf(uint32(0))
|
||||
int64Type = reflect.TypeOf(int64(0))
|
||||
uint64Type = reflect.TypeOf(uint64(0))
|
||||
float64Type = reflect.TypeOf(float64(0))
|
||||
stringType = reflect.TypeOf("")
|
||||
signatureType = reflect.TypeOf(Signature{""})
|
||||
objectPathType = reflect.TypeOf(ObjectPath(""))
|
||||
variantType = reflect.TypeOf(Variant{Signature{""}, nil})
|
||||
interfacesType = reflect.TypeOf([]interface{}{})
|
||||
unixFDType = reflect.TypeOf(UnixFD(0))
|
||||
unixFDIndexType = reflect.TypeOf(UnixFDIndex(0))
|
||||
)
|
||||
|
||||
// An InvalidTypeError signals that a value which cannot be represented in the
|
||||
// D-Bus wire format was passed to a function.
|
||||
type InvalidTypeError struct {
|
||||
Type reflect.Type
|
||||
}
|
||||
|
||||
func (e InvalidTypeError) Error() string {
|
||||
return "dbus: invalid type " + e.Type.String()
|
||||
}
|
||||
|
||||
// Store copies the values contained in src to dest, which must be a slice of
|
||||
// pointers. It converts slices of interfaces from src to corresponding structs
|
||||
// in dest. An error is returned if the lengths of src and dest or the types of
|
||||
// their elements don't match.
|
||||
func Store(src []interface{}, dest ...interface{}) error {
|
||||
if len(src) != len(dest) {
|
||||
return errors.New("dbus.Store: length mismatch")
|
||||
}
|
||||
|
||||
for i := range src {
|
||||
if err := store(src[i], dest[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
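
For reference, a minimal sketch of how Store is typically used from client code: an incoming D-Bus STRUCT arrives as a []interface{} and is copied into a Go struct whose exported fields match in order and type. The version type and its values below are invented for illustration.

package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

// version mirrors a hypothetical "(suu)" D-Bus struct.
type version struct {
	Name  string
	Major uint32
	Minor uint32
}

func main() {
	// src is the shape a decoded message body takes for a single "(suu)" value.
	src := []interface{}{[]interface{}{"example", uint32(1), uint32(2)}}

	var v version
	if err := dbus.Store(src, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", v) // {Name:example Major:1 Minor:2}
}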
|
||||
|
||||
func store(src, dest interface{}) error {
|
||||
if reflect.TypeOf(dest).Elem() == reflect.TypeOf(src) {
|
||||
reflect.ValueOf(dest).Elem().Set(reflect.ValueOf(src))
|
||||
return nil
|
||||
} else if hasStruct(dest) {
|
||||
rv := reflect.ValueOf(dest).Elem()
|
||||
switch rv.Kind() {
|
||||
case reflect.Struct:
|
||||
vs, ok := src.([]interface{})
|
||||
if !ok {
|
||||
return errors.New("dbus.Store: type mismatch")
|
||||
}
|
||||
t := rv.Type()
|
||||
ndest := make([]interface{}, 0, rv.NumField())
|
||||
for i := 0; i < rv.NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
|
||||
ndest = append(ndest, rv.Field(i).Addr().Interface())
|
||||
}
|
||||
}
|
||||
if len(vs) != len(ndest) {
|
||||
return errors.New("dbus.Store: type mismatch")
|
||||
}
|
||||
err := Store(vs, ndest...)
|
||||
if err != nil {
|
||||
return errors.New("dbus.Store: type mismatch")
|
||||
}
|
||||
case reflect.Slice:
|
||||
sv := reflect.ValueOf(src)
|
||||
if sv.Kind() != reflect.Slice {
|
||||
return errors.New("dbus.Store: type mismatch")
|
||||
}
|
||||
rv.Set(reflect.MakeSlice(rv.Type(), sv.Len(), sv.Len()))
|
||||
for i := 0; i < sv.Len(); i++ {
|
||||
if err := store(sv.Index(i).Interface(), rv.Index(i).Addr().Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
sv := reflect.ValueOf(src)
|
||||
if sv.Kind() != reflect.Map {
|
||||
return errors.New("dbus.Store: type mismatch")
|
||||
}
|
||||
keys := sv.MapKeys()
|
||||
rv.Set(reflect.MakeMap(sv.Type()))
|
||||
for _, key := range keys {
|
||||
v := reflect.New(sv.Type().Elem())
|
||||
if err := store(v, sv.MapIndex(key).Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
rv.SetMapIndex(key, v.Elem())
|
||||
}
|
||||
default:
|
||||
return errors.New("dbus.Store: type mismatch")
|
||||
}
|
||||
return nil
|
||||
} else {
|
||||
return errors.New("dbus.Store: type mismatch")
|
||||
}
|
||||
}
|
||||
|
||||
func hasStruct(v interface{}) bool {
|
||||
t := reflect.TypeOf(v)
|
||||
for {
|
||||
switch t.Kind() {
|
||||
case reflect.Struct:
|
||||
return true
|
||||
case reflect.Slice, reflect.Ptr, reflect.Map:
|
||||
t = t.Elem()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// An ObjectPath is an object path as defined by the D-Bus spec.
|
||||
type ObjectPath string
|
||||
|
||||
// IsValid returns whether the object path is valid.
|
||||
func (o ObjectPath) IsValid() bool {
|
||||
s := string(o)
|
||||
if len(s) == 0 {
|
||||
return false
|
||||
}
|
||||
if s[0] != '/' {
|
||||
return false
|
||||
}
|
||||
if s[len(s)-1] == '/' && len(s) != 1 {
|
||||
return false
|
||||
}
|
||||
// probably not used, but technically possible
|
||||
if s == "/" {
|
||||
return true
|
||||
}
|
||||
split := strings.Split(s[1:], "/")
|
||||
for _, v := range split {
|
||||
if len(v) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, c := range v {
|
||||
if !isMemberChar(c) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
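
A tiny illustrative check of the validity rules above; the paths themselves are made up for the example.

package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	fmt.Println(dbus.ObjectPath("/com/example/App").IsValid()) // true
	fmt.Println(dbus.ObjectPath("/").IsValid())                // true (root path)
	fmt.Println(dbus.ObjectPath("/com/example/").IsValid())    // false: trailing slash
	fmt.Println(dbus.ObjectPath("com/example").IsValid())      // false: must start with '/'
	fmt.Println(dbus.ObjectPath("/com/ex-ample").IsValid())    // false: '-' is not a member char
}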
|
||||
|
||||
// A UnixFD is a Unix file descriptor sent over the wire. See the package-level
// documentation for more information about Unix file descriptor passing.
|
||||
type UnixFD int32
|
||||
|
||||
// A UnixFDIndex is the representation of a Unix file descriptor in a message.
|
||||
type UnixFDIndex uint32
|
||||
|
||||
// alignment returns the alignment of values of type t.
|
||||
func alignment(t reflect.Type) int {
|
||||
switch t {
|
||||
case variantType:
|
||||
return 1
|
||||
case objectPathType:
|
||||
return 4
|
||||
case signatureType:
|
||||
return 1
|
||||
case interfacesType: // sometimes used for structs
|
||||
return 8
|
||||
}
|
||||
switch t.Kind() {
|
||||
case reflect.Uint8:
|
||||
return 1
|
||||
case reflect.Uint16, reflect.Int16:
|
||||
return 2
|
||||
case reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map:
|
||||
return 4
|
||||
case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct:
|
||||
return 8
|
||||
case reflect.Ptr:
|
||||
return alignment(t.Elem())
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
// isKeyType returns whether t is a valid type for a D-Bus dict.
|
||||
func isKeyType(t reflect.Type) bool {
|
||||
switch t.Kind() {
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
|
||||
reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64,
|
||||
reflect.String:
|
||||
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isValidInterface returns whether s is a valid name for an interface.
|
||||
func isValidInterface(s string) bool {
|
||||
if len(s) == 0 || len(s) > 255 || s[0] == '.' {
|
||||
return false
|
||||
}
|
||||
elem := strings.Split(s, ".")
|
||||
if len(elem) < 2 {
|
||||
return false
|
||||
}
|
||||
for _, v := range elem {
|
||||
if len(v) == 0 {
|
||||
return false
|
||||
}
|
||||
if v[0] >= '0' && v[0] <= '9' {
|
||||
return false
|
||||
}
|
||||
for _, c := range v {
|
||||
if !isMemberChar(c) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// isValidMember returns whether s is a valid name for a member.
|
||||
func isValidMember(s string) bool {
|
||||
if len(s) == 0 || len(s) > 255 {
|
||||
return false
|
||||
}
|
||||
i := strings.Index(s, ".")
|
||||
if i != -1 {
|
||||
return false
|
||||
}
|
||||
if s[0] >= '0' && s[0] <= '9' {
|
||||
return false
|
||||
}
|
||||
for _, c := range s {
|
||||
if !isMemberChar(c) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func isMemberChar(c rune) bool {
|
||||
return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') ||
|
||||
(c >= 'a' && c <= 'z') || c == '_'
|
||||
}
|
|
@ -1,228 +0,0 @@
|
|||
package dbus
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type decoder struct {
|
||||
in io.Reader
|
||||
order binary.ByteOrder
|
||||
pos int
|
||||
}
|
||||
|
||||
// newDecoder returns a new decoder that reads values from in. The input is
|
||||
// expected to be in the given byte order.
|
||||
func newDecoder(in io.Reader, order binary.ByteOrder) *decoder {
|
||||
dec := new(decoder)
|
||||
dec.in = in
|
||||
dec.order = order
|
||||
return dec
|
||||
}
|
||||
|
||||
// align aligns the input to the given boundary and panics on error.
|
||||
func (dec *decoder) align(n int) {
|
||||
if dec.pos%n != 0 {
|
||||
newpos := (dec.pos + n - 1) & ^(n - 1)
|
||||
empty := make([]byte, newpos-dec.pos)
|
||||
if _, err := io.ReadFull(dec.in, empty); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
dec.pos = newpos
|
||||
}
|
||||
}
|
||||
|
||||
// Calls binary.Read(dec.in, dec.order, v) and panics on read errors.
|
||||
func (dec *decoder) binread(v interface{}) {
|
||||
if err := binary.Read(dec.in, dec.order, v); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) {
|
||||
defer func() {
|
||||
var ok bool
|
||||
v := recover()
|
||||
if err, ok = v.(error); ok {
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
err = FormatError("unexpected EOF")
|
||||
}
|
||||
}
|
||||
}()
|
||||
vs = make([]interface{}, 0)
|
||||
s := sig.str
|
||||
for s != "" {
|
||||
err, rem := validSingle(s, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v := dec.decode(s[:len(s)-len(rem)], 0)
|
||||
vs = append(vs, v)
|
||||
s = rem
|
||||
}
|
||||
return vs, nil
|
||||
}
|
||||
|
||||
func (dec *decoder) decode(s string, depth int) interface{} {
|
||||
dec.align(alignment(typeFor(s)))
|
||||
switch s[0] {
|
||||
case 'y':
|
||||
var b [1]byte
|
||||
if _, err := dec.in.Read(b[:]); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
dec.pos++
|
||||
return b[0]
|
||||
case 'b':
|
||||
i := dec.decode("u", depth).(uint32)
|
||||
switch {
|
||||
case i == 0:
|
||||
return false
|
||||
case i == 1:
|
||||
return true
|
||||
default:
|
||||
panic(FormatError("invalid value for boolean"))
|
||||
}
|
||||
case 'n':
|
||||
var i int16
|
||||
dec.binread(&i)
|
||||
dec.pos += 2
|
||||
return i
|
||||
case 'i':
|
||||
var i int32
|
||||
dec.binread(&i)
|
||||
dec.pos += 4
|
||||
return i
|
||||
case 'x':
|
||||
var i int64
|
||||
dec.binread(&i)
|
||||
dec.pos += 8
|
||||
return i
|
||||
case 'q':
|
||||
var i uint16
|
||||
dec.binread(&i)
|
||||
dec.pos += 2
|
||||
return i
|
||||
case 'u':
|
||||
var i uint32
|
||||
dec.binread(&i)
|
||||
dec.pos += 4
|
||||
return i
|
||||
case 't':
|
||||
var i uint64
|
||||
dec.binread(&i)
|
||||
dec.pos += 8
|
||||
return i
|
||||
case 'd':
|
||||
var f float64
|
||||
dec.binread(&f)
|
||||
dec.pos += 8
|
||||
return f
|
||||
case 's':
|
||||
length := dec.decode("u", depth).(uint32)
|
||||
b := make([]byte, int(length)+1)
|
||||
if _, err := io.ReadFull(dec.in, b); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
dec.pos += int(length) + 1
|
||||
return string(b[:len(b)-1])
|
||||
case 'o':
|
||||
return ObjectPath(dec.decode("s", depth).(string))
|
||||
case 'g':
|
||||
length := dec.decode("y", depth).(byte)
|
||||
b := make([]byte, int(length)+1)
|
||||
if _, err := io.ReadFull(dec.in, b); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
dec.pos += int(length) + 1
|
||||
sig, err := ParseSignature(string(b[:len(b)-1]))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return sig
|
||||
case 'v':
|
||||
if depth >= 64 {
|
||||
panic(FormatError("input exceeds container depth limit"))
|
||||
}
|
||||
var variant Variant
|
||||
sig := dec.decode("g", depth).(Signature)
|
||||
if len(sig.str) == 0 {
|
||||
panic(FormatError("variant signature is empty"))
|
||||
}
|
||||
err, rem := validSingle(sig.str, 0)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if rem != "" {
|
||||
panic(FormatError("variant signature has multiple types"))
|
||||
}
|
||||
variant.sig = sig
|
||||
variant.value = dec.decode(sig.str, depth+1)
|
||||
return variant
|
||||
case 'h':
|
||||
return UnixFDIndex(dec.decode("u", depth).(uint32))
|
||||
case 'a':
|
||||
if len(s) > 1 && s[1] == '{' {
|
||||
ksig := s[2:3]
|
||||
vsig := s[3 : len(s)-1]
|
||||
v := reflect.MakeMap(reflect.MapOf(typeFor(ksig), typeFor(vsig)))
|
||||
if depth >= 63 {
|
||||
panic(FormatError("input exceeds container depth limit"))
|
||||
}
|
||||
length := dec.decode("u", depth).(uint32)
|
||||
// Even for empty maps, the correct padding must be included
|
||||
dec.align(8)
|
||||
spos := dec.pos
|
||||
for dec.pos < spos+int(length) {
|
||||
dec.align(8)
|
||||
if !isKeyType(v.Type().Key()) {
|
||||
panic(InvalidTypeError{v.Type()})
|
||||
}
|
||||
kv := dec.decode(ksig, depth+2)
|
||||
vv := dec.decode(vsig, depth+2)
|
||||
v.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv))
|
||||
}
|
||||
return v.Interface()
|
||||
}
|
||||
if depth >= 64 {
|
||||
panic(FormatError("input exceeds container depth limit"))
|
||||
}
|
||||
length := dec.decode("u", depth).(uint32)
|
||||
v := reflect.MakeSlice(reflect.SliceOf(typeFor(s[1:])), 0, int(length))
|
||||
// Even for empty arrays, the correct padding must be included
|
||||
dec.align(alignment(typeFor(s[1:])))
|
||||
spos := dec.pos
|
||||
for dec.pos < spos+int(length) {
|
||||
ev := dec.decode(s[1:], depth+1)
|
||||
v = reflect.Append(v, reflect.ValueOf(ev))
|
||||
}
|
||||
return v.Interface()
|
||||
case '(':
|
||||
if depth >= 64 {
|
||||
panic(FormatError("input exceeds container depth limit"))
|
||||
}
|
||||
dec.align(8)
|
||||
v := make([]interface{}, 0)
|
||||
s = s[1 : len(s)-1]
|
||||
for s != "" {
|
||||
err, rem := validSingle(s, 0)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
ev := dec.decode(s[:len(s)-len(rem)], depth+1)
|
||||
v = append(v, ev)
|
||||
s = rem
|
||||
}
|
||||
return v
|
||||
default:
|
||||
panic(SignatureError{Sig: s})
|
||||
}
|
||||
}
|
||||
|
||||
// A FormatError is an error in the wire format.
|
||||
type FormatError string
|
||||
|
||||
func (e FormatError) Error() string {
|
||||
return "dbus: wire format error: " + string(e)
|
||||
}
|
|
@ -1,63 +0,0 @@
|
|||
/*
|
||||
Package dbus implements bindings to the D-Bus message bus system.
|
||||
|
||||
To use the message bus API, you first need to connect to a bus (usually the
|
||||
session or system bus). The acquired connection can then be used to call methods
on remote objects and emit or receive signals. Using the Export method, you can
arrange D-Bus method calls to be directly translated to method calls on a Go
|
||||
value.
|
||||
|
||||
Conversion Rules
|
||||
|
||||
For outgoing messages, Go types are automatically converted to the
|
||||
corresponding D-Bus types. The following types are directly encoded as their
|
||||
respective D-Bus equivalents:
|
||||
|
||||
Go type | D-Bus type
|
||||
------------+-----------
|
||||
byte | BYTE
|
||||
bool | BOOLEAN
|
||||
int16 | INT16
|
||||
uint16 | UINT16
|
||||
int32 | INT32
|
||||
uint32 | UINT32
|
||||
int64 | INT64
|
||||
uint64 | UINT64
|
||||
float64 | DOUBLE
|
||||
string | STRING
|
||||
ObjectPath | OBJECT_PATH
|
||||
Signature | SIGNATURE
|
||||
Variant | VARIANT
|
||||
UnixFDIndex | UNIX_FD
|
||||
|
||||
Slices and arrays encode as ARRAYs of their element type.
|
||||
|
||||
Maps encode as DICTs, provided that their key type can be used as a key for
|
||||
a DICT.
|
||||
|
||||
Structs other than Variant and Signature encode as a STRUCT containing their
|
||||
exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will
|
||||
be skipped.
|
||||
|
||||
Pointers encode as the value they're pointed to.
|
||||
|
||||
Trying to encode any other type or a slice, map or struct containing an
|
||||
unsupported type will result in an InvalidTypeError.
|
||||
|
||||
For incoming messages, the inverse of these rules is used, with the exception
of STRUCTs. Incoming STRUCTs are represented as a slice of empty interfaces
|
||||
containing the struct fields in the correct order. The Store function can be
|
||||
used to convert such values to Go structs.
|
||||
|
||||
Unix FD passing
|
||||
|
||||
Handling Unix file descriptors deserves special mention. To use them, you should
first check that they are supported on a connection by calling SupportsUnixFDs.
If it returns true, all methods of Connection will translate messages containing
UnixFDs to messages that are accompanied by the given file descriptors, with the
UnixFD values being substituted by the correct indices. Similarly, the indices
of incoming messages are automatically resolved. It shouldn't be necessary to use
UnixFDIndex.
|
||||
|
||||
*/
|
||||
package dbus
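
As an illustration of the conversion table above, a small hedged sketch that prints the D-Bus signatures corresponding to a few Go values via SignatureOf (exact print formatting aside, the signature strings in the comments follow the table).

package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	fmt.Println(dbus.SignatureOf(uint32(7)))                   // "u"
	fmt.Println(dbus.SignatureOf("hello"))                     // "s"
	fmt.Println(dbus.SignatureOf([]int32{1, 2, 3}))            // "ai"
	fmt.Println(dbus.SignatureOf(map[string]bool{"on": true})) // "a{sb}"
	fmt.Println(dbus.SignatureOf(dbus.ObjectPath("/"), true))  // "ob"
}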
|
|
@ -1,208 +0,0 @@
|
|||
package dbus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// An encoder encodes values to the D-Bus wire format.
|
||||
type encoder struct {
|
||||
out io.Writer
|
||||
order binary.ByteOrder
|
||||
pos int
|
||||
}
|
||||
|
||||
// newEncoder returns a new encoder that writes to out in the given byte order.
|
||||
func newEncoder(out io.Writer, order binary.ByteOrder) *encoder {
|
||||
return newEncoderAtOffset(out, 0, order)
|
||||
}
|
||||
|
||||
// newEncoderAtOffset returns a new encoder that writes to out in the given
|
||||
// byte order. Specify the offset to initialize pos for proper alignment
|
||||
// computation.
|
||||
func newEncoderAtOffset(out io.Writer, offset int, order binary.ByteOrder) *encoder {
|
||||
enc := new(encoder)
|
||||
enc.out = out
|
||||
enc.order = order
|
||||
enc.pos = offset
|
||||
return enc
|
||||
}
|
||||
|
||||
// Aligns the next output to be on a multiple of n. Panics on write errors.
|
||||
func (enc *encoder) align(n int) {
|
||||
pad := enc.padding(0, n)
|
||||
if pad > 0 {
|
||||
empty := make([]byte, pad)
|
||||
if _, err := enc.out.Write(empty); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
enc.pos += pad
|
||||
}
|
||||
}
|
||||
|
||||
// padding returns the number of bytes of padding required, given the current
// position, an additional offset, and the requested alignment.
|
||||
func (enc *encoder) padding(offset, algn int) int {
|
||||
abs := enc.pos + offset
|
||||
if abs%algn != 0 {
|
||||
newabs := (abs + algn - 1) & ^(algn - 1)
|
||||
return newabs - abs
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Calls binary.Write(enc.out, enc.order, v) and panics on write errors.
|
||||
func (enc *encoder) binwrite(v interface{}) {
|
||||
if err := binary.Write(enc.out, enc.order, v); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Encode encodes the given values to the underlying writer. All written values
// are aligned properly as required by the D-Bus spec.
|
||||
func (enc *encoder) Encode(vs ...interface{}) (err error) {
|
||||
defer func() {
|
||||
err, _ = recover().(error)
|
||||
}()
|
||||
for _, v := range vs {
|
||||
enc.encode(reflect.ValueOf(v), 0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// encode encodes the given value to the writer and panics on error. depth holds
|
||||
// the depth of the container nesting.
|
||||
func (enc *encoder) encode(v reflect.Value, depth int) {
|
||||
enc.align(alignment(v.Type()))
|
||||
switch v.Kind() {
|
||||
case reflect.Uint8:
|
||||
var b [1]byte
|
||||
b[0] = byte(v.Uint())
|
||||
if _, err := enc.out.Write(b[:]); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
enc.pos++
|
||||
case reflect.Bool:
|
||||
if v.Bool() {
|
||||
enc.encode(reflect.ValueOf(uint32(1)), depth)
|
||||
} else {
|
||||
enc.encode(reflect.ValueOf(uint32(0)), depth)
|
||||
}
|
||||
case reflect.Int16:
|
||||
enc.binwrite(int16(v.Int()))
|
||||
enc.pos += 2
|
||||
case reflect.Uint16:
|
||||
enc.binwrite(uint16(v.Uint()))
|
||||
enc.pos += 2
|
||||
case reflect.Int32:
|
||||
enc.binwrite(int32(v.Int()))
|
||||
enc.pos += 4
|
||||
case reflect.Uint32:
|
||||
enc.binwrite(uint32(v.Uint()))
|
||||
enc.pos += 4
|
||||
case reflect.Int64:
|
||||
enc.binwrite(v.Int())
|
||||
enc.pos += 8
|
||||
case reflect.Uint64:
|
||||
enc.binwrite(v.Uint())
|
||||
enc.pos += 8
|
||||
case reflect.Float64:
|
||||
enc.binwrite(v.Float())
|
||||
enc.pos += 8
|
||||
case reflect.String:
|
||||
enc.encode(reflect.ValueOf(uint32(len(v.String()))), depth)
|
||||
b := make([]byte, v.Len()+1)
|
||||
copy(b, v.String())
|
||||
b[len(b)-1] = 0
|
||||
n, err := enc.out.Write(b)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
enc.pos += n
|
||||
case reflect.Ptr:
|
||||
enc.encode(v.Elem(), depth)
|
||||
case reflect.Slice, reflect.Array:
|
||||
if depth >= 64 {
|
||||
panic(FormatError("input exceeds container depth limit"))
|
||||
}
|
||||
// Lookahead offset: 4 bytes for uint32 length (with alignment),
|
||||
// plus alignment for elements.
|
||||
n := enc.padding(0, 4) + 4
|
||||
offset := enc.pos + n + enc.padding(n, alignment(v.Type().Elem()))
|
||||
|
||||
var buf bytes.Buffer
|
||||
bufenc := newEncoderAtOffset(&buf, offset, enc.order)
|
||||
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
bufenc.encode(v.Index(i), depth+1)
|
||||
}
|
||||
enc.encode(reflect.ValueOf(uint32(buf.Len())), depth)
|
||||
length := buf.Len()
|
||||
enc.align(alignment(v.Type().Elem()))
|
||||
if _, err := buf.WriteTo(enc.out); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
enc.pos += length
|
||||
case reflect.Struct:
|
||||
if depth >= 64 && v.Type() != signatureType {
|
||||
panic(FormatError("input exceeds container depth limit"))
|
||||
}
|
||||
switch t := v.Type(); t {
|
||||
case signatureType:
|
||||
str := v.Field(0)
|
||||
enc.encode(reflect.ValueOf(byte(str.Len())), depth+1)
|
||||
b := make([]byte, str.Len()+1)
|
||||
copy(b, str.String())
|
||||
b[len(b)-1] = 0
|
||||
n, err := enc.out.Write(b)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
enc.pos += n
|
||||
case variantType:
|
||||
variant := v.Interface().(Variant)
|
||||
enc.encode(reflect.ValueOf(variant.sig), depth+1)
|
||||
enc.encode(reflect.ValueOf(variant.value), depth+1)
|
||||
default:
|
||||
for i := 0; i < v.Type().NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
|
||||
enc.encode(v.Field(i), depth+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
// Maps are arrays of structures, so they actually increase the depth by
|
||||
// 2.
|
||||
if depth >= 63 {
|
||||
panic(FormatError("input exceeds container depth limit"))
|
||||
}
|
||||
if !isKeyType(v.Type().Key()) {
|
||||
panic(InvalidTypeError{v.Type()})
|
||||
}
|
||||
keys := v.MapKeys()
|
||||
// Lookahead offset: 4 bytes for uint32 length (with alignment),
|
||||
// plus 8-byte alignment
|
||||
n := enc.padding(0, 4) + 4
|
||||
offset := enc.pos + n + enc.padding(n, 8)
|
||||
|
||||
var buf bytes.Buffer
|
||||
bufenc := newEncoderAtOffset(&buf, offset, enc.order)
|
||||
for _, k := range keys {
|
||||
bufenc.align(8)
|
||||
bufenc.encode(k, depth+2)
|
||||
bufenc.encode(v.MapIndex(k), depth+2)
|
||||
}
|
||||
enc.encode(reflect.ValueOf(uint32(buf.Len())), depth)
|
||||
length := buf.Len()
|
||||
enc.align(8)
|
||||
if _, err := buf.WriteTo(enc.out); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
enc.pos += length
|
||||
default:
|
||||
panic(InvalidTypeError{v.Type()})
|
||||
}
|
||||
}
|
|
@ -1,411 +0,0 @@
|
|||
package dbus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
errmsgInvalidArg = Error{
|
||||
"org.freedesktop.DBus.Error.InvalidArgs",
|
||||
[]interface{}{"Invalid type / number of args"},
|
||||
}
|
||||
errmsgNoObject = Error{
|
||||
"org.freedesktop.DBus.Error.NoSuchObject",
|
||||
[]interface{}{"No such object"},
|
||||
}
|
||||
errmsgUnknownMethod = Error{
|
||||
"org.freedesktop.DBus.Error.UnknownMethod",
|
||||
[]interface{}{"Unknown / invalid method"},
|
||||
}
|
||||
)
|
||||
|
||||
// exportWithMapping represents an exported struct along with a method name
|
||||
// mapping to allow for exporting lower-case methods, etc.
|
||||
type exportWithMapping struct {
|
||||
export interface{}
|
||||
|
||||
// Method name mapping; key -> struct method, value -> dbus method.
|
||||
mapping map[string]string
|
||||
|
||||
// Whether or not this export is for the entire subtree
|
||||
includeSubtree bool
|
||||
}
|
||||
|
||||
// Sender is a type which can be used in exported methods to receive the message
|
||||
// sender.
|
||||
type Sender string
|
||||
|
||||
func exportedMethod(export exportWithMapping, name string) reflect.Value {
|
||||
if export.export == nil {
|
||||
return reflect.Value{}
|
||||
}
|
||||
|
||||
// If a mapping was included in the export, check the map to see if we
|
||||
// should be looking for a different method in the export.
|
||||
if export.mapping != nil {
|
||||
for key, value := range export.mapping {
|
||||
if value == name {
|
||||
name = key
|
||||
break
|
||||
}
|
||||
|
||||
// Catch the case where a method is aliased but the client is calling
// the original, e.g. the "Foo" method was exported and mapped to
// "foo", and the dbus client called the original "Foo".
|
||||
if key == name {
|
||||
return reflect.Value{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
value := reflect.ValueOf(export.export)
|
||||
m := value.MethodByName(name)
|
||||
|
||||
// Catch the case of attempting to call an unexported method
|
||||
method, ok := value.Type().MethodByName(name)
|
||||
|
||||
if !m.IsValid() || !ok || method.PkgPath != "" {
|
||||
return reflect.Value{}
|
||||
}
|
||||
t := m.Type()
|
||||
if t.NumOut() == 0 ||
|
||||
t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) {
|
||||
|
||||
return reflect.Value{}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// searchHandlers will look through all registered handlers looking for one
|
||||
// to handle the given path. If a verbatim one isn't found, it will check for
|
||||
// a subtree registration for the path as well.
|
||||
func (conn *Conn) searchHandlers(path ObjectPath) (map[string]exportWithMapping, bool) {
|
||||
conn.handlersLck.RLock()
|
||||
defer conn.handlersLck.RUnlock()
|
||||
|
||||
handlers, ok := conn.handlers[path]
|
||||
if ok {
|
||||
return handlers, ok
|
||||
}
|
||||
|
||||
// If handlers weren't found for this exact path, look for a matching subtree
|
||||
// registration
|
||||
handlers = make(map[string]exportWithMapping)
|
||||
path = path[:strings.LastIndex(string(path), "/")]
|
||||
for len(path) > 0 {
|
||||
var subtreeHandlers map[string]exportWithMapping
|
||||
subtreeHandlers, ok = conn.handlers[path]
|
||||
if ok {
|
||||
for iface, handler := range subtreeHandlers {
|
||||
// Only include this handler if it registered for the subtree
|
||||
if handler.includeSubtree {
|
||||
handlers[iface] = handler
|
||||
}
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
path = path[:strings.LastIndex(string(path), "/")]
|
||||
}
|
||||
|
||||
return handlers, ok
|
||||
}
|
||||
|
||||
// handleCall handles the given method call (i.e. checks whether it's one of the
// pre-implemented ones and searches for a corresponding handler if not).
|
||||
func (conn *Conn) handleCall(msg *Message) {
|
||||
name := msg.Headers[FieldMember].value.(string)
|
||||
path := msg.Headers[FieldPath].value.(ObjectPath)
|
||||
ifaceName, hasIface := msg.Headers[FieldInterface].value.(string)
|
||||
sender, hasSender := msg.Headers[FieldSender].value.(string)
|
||||
serial := msg.serial
|
||||
if ifaceName == "org.freedesktop.DBus.Peer" {
|
||||
switch name {
|
||||
case "Ping":
|
||||
conn.sendReply(sender, serial)
|
||||
case "GetMachineId":
|
||||
conn.sendReply(sender, serial, conn.uuid)
|
||||
default:
|
||||
conn.sendError(errmsgUnknownMethod, sender, serial)
|
||||
}
|
||||
return
|
||||
}
|
||||
if len(name) == 0 {
|
||||
conn.sendError(errmsgUnknownMethod, sender, serial)
|
||||
}
|
||||
|
||||
// Find the exported handler (if any) for this path
|
||||
handlers, ok := conn.searchHandlers(path)
|
||||
if !ok {
|
||||
conn.sendError(errmsgNoObject, sender, serial)
|
||||
return
|
||||
}
|
||||
|
||||
var m reflect.Value
|
||||
if hasIface {
|
||||
iface := handlers[ifaceName]
|
||||
m = exportedMethod(iface, name)
|
||||
} else {
|
||||
for _, v := range handlers {
|
||||
m = exportedMethod(v, name)
|
||||
if m.IsValid() {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !m.IsValid() {
|
||||
conn.sendError(errmsgUnknownMethod, sender, serial)
|
||||
return
|
||||
}
|
||||
|
||||
t := m.Type()
|
||||
vs := msg.Body
|
||||
pointers := make([]interface{}, t.NumIn())
|
||||
decode := make([]interface{}, 0, len(vs))
|
||||
for i := 0; i < t.NumIn(); i++ {
|
||||
tp := t.In(i)
|
||||
val := reflect.New(tp)
|
||||
pointers[i] = val.Interface()
|
||||
if tp == reflect.TypeOf((*Sender)(nil)).Elem() {
|
||||
val.Elem().SetString(sender)
|
||||
} else if tp == reflect.TypeOf((*Message)(nil)).Elem() {
|
||||
val.Elem().Set(reflect.ValueOf(*msg))
|
||||
} else {
|
||||
decode = append(decode, pointers[i])
|
||||
}
|
||||
}
|
||||
|
||||
if len(decode) != len(vs) {
|
||||
conn.sendError(errmsgInvalidArg, sender, serial)
|
||||
return
|
||||
}
|
||||
|
||||
if err := Store(vs, decode...); err != nil {
|
||||
conn.sendError(errmsgInvalidArg, sender, serial)
|
||||
return
|
||||
}
|
||||
|
||||
// Extract parameters
|
||||
params := make([]reflect.Value, len(pointers))
|
||||
for i := 0; i < len(pointers); i++ {
|
||||
params[i] = reflect.ValueOf(pointers[i]).Elem()
|
||||
}
|
||||
|
||||
// Call method
|
||||
ret := m.Call(params)
|
||||
if em := ret[t.NumOut()-1].Interface().(*Error); em != nil {
|
||||
conn.sendError(*em, sender, serial)
|
||||
return
|
||||
}
|
||||
|
||||
if msg.Flags&FlagNoReplyExpected == 0 {
|
||||
reply := new(Message)
|
||||
reply.Type = TypeMethodReply
|
||||
reply.serial = conn.getSerial()
|
||||
reply.Headers = make(map[HeaderField]Variant)
|
||||
if hasSender {
|
||||
reply.Headers[FieldDestination] = msg.Headers[FieldSender]
|
||||
}
|
||||
reply.Headers[FieldReplySerial] = MakeVariant(msg.serial)
|
||||
reply.Body = make([]interface{}, len(ret)-1)
|
||||
for i := 0; i < len(ret)-1; i++ {
|
||||
reply.Body[i] = ret[i].Interface()
|
||||
}
|
||||
if len(ret) != 1 {
|
||||
reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...))
|
||||
}
|
||||
conn.outLck.RLock()
|
||||
if !conn.closed {
|
||||
conn.out <- reply
|
||||
}
|
||||
conn.outLck.RUnlock()
|
||||
}
|
||||
}
|
||||
|
||||
// Emit emits the given signal on the message bus. The name parameter must be
|
||||
// formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost".
|
||||
func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error {
|
||||
if !path.IsValid() {
|
||||
return errors.New("dbus: invalid object path")
|
||||
}
|
||||
i := strings.LastIndex(name, ".")
|
||||
if i == -1 {
|
||||
return errors.New("dbus: invalid method name")
|
||||
}
|
||||
iface := name[:i]
|
||||
member := name[i+1:]
|
||||
if !isValidMember(member) {
|
||||
return errors.New("dbus: invalid method name")
|
||||
}
|
||||
if !isValidInterface(iface) {
|
||||
return errors.New("dbus: invalid interface name")
|
||||
}
|
||||
msg := new(Message)
|
||||
msg.Type = TypeSignal
|
||||
msg.serial = conn.getSerial()
|
||||
msg.Headers = make(map[HeaderField]Variant)
|
||||
msg.Headers[FieldInterface] = MakeVariant(iface)
|
||||
msg.Headers[FieldMember] = MakeVariant(member)
|
||||
msg.Headers[FieldPath] = MakeVariant(path)
|
||||
msg.Body = values
|
||||
if len(values) > 0 {
|
||||
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
|
||||
}
|
||||
conn.outLck.RLock()
|
||||
defer conn.outLck.RUnlock()
|
||||
if conn.closed {
|
||||
return ErrClosed
|
||||
}
|
||||
conn.out <- msg
|
||||
return nil
|
||||
}
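
A hedged usage sketch for Emit, assuming a running session bus; the object path, interface, and member names below are invented for illustration.

package main

import "github.com/godbus/dbus"

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	// "com.example.App.TaskDone" splits into interface "com.example.App"
	// and member "TaskDone", as described in the Emit documentation.
	err = conn.Emit("/com/example/App", "com.example.App.TaskDone", "task-42", uint32(0))
	if err != nil {
		panic(err)
	}
}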
|
||||
|
||||
// Export registers the given value to be exported as an object on the
|
||||
// message bus.
|
||||
//
|
||||
// If a method call on the given path and interface is received, an exported
|
||||
// method with the same name is called with v as the receiver if the
|
||||
// parameters match and the last return value is of type *Error. If this
|
||||
// *Error is not nil, it is sent back to the caller as an error.
|
||||
// Otherwise, a method reply is sent with the other return values as its body.
|
||||
//
|
||||
// Any parameters with the special type Sender are set to the sender of the
|
||||
// dbus message when the method is called. Parameters of this type do not
|
||||
// contribute to the dbus signature of the method (i.e. the method is exposed
|
||||
// as if the parameters of type Sender were not there).
|
||||
//
|
||||
// Similarly, any parameters with the type Message are set to the raw message
|
||||
// received on the bus. Again, parameters of this type do not contribute to the
|
||||
// dbus signature of the method.
|
||||
//
|
||||
// Every method call is executed in a new goroutine, so the method may be called
|
||||
// in multiple goroutines at once.
|
||||
//
|
||||
// Method calls on the interface org.freedesktop.DBus.Peer will be automatically
|
||||
// handled for every object.
|
||||
//
|
||||
// Passing nil as the first parameter will cause conn to cease handling calls on
|
||||
// the given combination of path and interface.
|
||||
//
|
||||
// Export returns an error if path is not a valid path name.
|
||||
func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error {
|
||||
return conn.ExportWithMap(v, nil, path, iface)
|
||||
}
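
A minimal export sketch, assuming a running session bus; the demo type and the com.example names are invented. Note how the final return value of the exported method is *dbus.Error, matching the convention documented above.

package main

import "github.com/godbus/dbus"

type demo struct{}

// Ping is callable over the bus because its final return value is *dbus.Error.
func (demo) Ping(s string) (string, *dbus.Error) {
	return "pong: " + s, nil
}

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	if _, err := conn.RequestName("com.example.Demo", dbus.NameFlagDoNotQueue); err != nil {
		panic(err)
	}
	if err := conn.Export(demo{}, "/com/example/Demo", "com.example.Demo"); err != nil {
		panic(err)
	}
	select {} // keep serving method calls
}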
|
||||
|
||||
// ExportWithMap works exactly like Export but provides the ability to remap
|
||||
// method names (e.g. export a lower-case method).
|
||||
//
|
||||
// The keys in the map are the real method names (exported on the struct), and
|
||||
// the values are the method names to be exported on DBus.
|
||||
func (conn *Conn) ExportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error {
|
||||
return conn.exportWithMap(v, mapping, path, iface, false)
|
||||
}
|
||||
|
||||
// ExportSubtree works exactly like Export but registers the given value for
// an entire subtree rather than just the root path provided.
|
||||
//
|
||||
// In order to make this useful, one parameter in each of the value's exported
|
||||
// methods should be a Message, in which case it will contain the raw message
|
||||
// (allowing one to get access to the path that caused the method to be called).
|
||||
//
|
||||
// Note that more specific export paths take precedence over less specific. For
|
||||
// example, a method call using the ObjectPath /foo/bar/baz will call a method
|
||||
// exported on /foo/bar before a method exported on /foo.
|
||||
func (conn *Conn) ExportSubtree(v interface{}, path ObjectPath, iface string) error {
|
||||
return conn.ExportSubtreeWithMap(v, nil, path, iface)
|
||||
}
|
||||
|
||||
// ExportSubtreeWithMap works exactly like ExportSubtree but provides the
|
||||
// ability to remap method names (e.g. export a lower-case method).
|
||||
//
|
||||
// The keys in the map are the real method names (exported on the struct), and
|
||||
// the values are the method names to be exported on DBus.
|
||||
func (conn *Conn) ExportSubtreeWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error {
|
||||
return conn.exportWithMap(v, mapping, path, iface, true)
|
||||
}
|
||||
|
||||
// exportWithMap is the worker function for all exports/registrations.
|
||||
func (conn *Conn) exportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string, includeSubtree bool) error {
|
||||
if !path.IsValid() {
|
||||
return fmt.Errorf(`dbus: Invalid path name: "%s"`, path)
|
||||
}
|
||||
|
||||
conn.handlersLck.Lock()
|
||||
defer conn.handlersLck.Unlock()
|
||||
|
||||
// Remove a previous export if the interface is nil
|
||||
if v == nil {
|
||||
if _, ok := conn.handlers[path]; ok {
|
||||
delete(conn.handlers[path], iface)
|
||||
if len(conn.handlers[path]) == 0 {
|
||||
delete(conn.handlers, path)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// If this is the first handler for this path, make a new map to hold all
|
||||
// handlers for this path.
|
||||
if _, ok := conn.handlers[path]; !ok {
|
||||
conn.handlers[path] = make(map[string]exportWithMapping)
|
||||
}
|
||||
|
||||
// Finally, save this handler
|
||||
conn.handlers[path][iface] = exportWithMapping{export: v, mapping: mapping, includeSubtree: includeSubtree}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReleaseName calls org.freedesktop.DBus.ReleaseName and awaits a response.
|
||||
func (conn *Conn) ReleaseName(name string) (ReleaseNameReply, error) {
|
||||
var r uint32
|
||||
err := conn.busObj.Call("org.freedesktop.DBus.ReleaseName", 0, name).Store(&r)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return ReleaseNameReply(r), nil
|
||||
}
|
||||
|
||||
// RequestName calls org.freedesktop.DBus.RequestName and awaits a response.
|
||||
func (conn *Conn) RequestName(name string, flags RequestNameFlags) (RequestNameReply, error) {
|
||||
var r uint32
|
||||
err := conn.busObj.Call("org.freedesktop.DBus.RequestName", 0, name, flags).Store(&r)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return RequestNameReply(r), nil
|
||||
}
|
||||
|
||||
// ReleaseNameReply is the reply to a ReleaseName call.
|
||||
type ReleaseNameReply uint32
|
||||
|
||||
const (
|
||||
ReleaseNameReplyReleased ReleaseNameReply = 1 + iota
|
||||
ReleaseNameReplyNonExistent
|
||||
ReleaseNameReplyNotOwner
|
||||
)
|
||||
|
||||
// RequestNameFlags represents the possible flags for a RequestName call.
|
||||
type RequestNameFlags uint32
|
||||
|
||||
const (
|
||||
NameFlagAllowReplacement RequestNameFlags = 1 << iota
|
||||
NameFlagReplaceExisting
|
||||
NameFlagDoNotQueue
|
||||
)
|
||||
|
||||
// RequestNameReply is the reply to a RequestName call.
|
||||
type RequestNameReply uint32
|
||||
|
||||
const (
|
||||
RequestNameReplyPrimaryOwner RequestNameReply = 1 + iota
|
||||
RequestNameReplyInQueue
|
||||
RequestNameReplyExists
|
||||
RequestNameReplyAlreadyOwner
|
||||
)
@ -1,28 +0,0 @@
package dbus

import (
	"os"
	"sync"
)

var (
	homeDir     string
	homeDirLock sync.Mutex
)

func getHomeDir() string {
	homeDirLock.Lock()
	defer homeDirLock.Unlock()

	if homeDir != "" {
		return homeDir
	}

	homeDir = os.Getenv("HOME")
	if homeDir != "" {
		return homeDir
	}

	homeDir = lookupHomeDir()
	return homeDir
}
@ -1,15 +0,0 @@
// +build !static_build

package dbus

import (
	"os/user"
)

func lookupHomeDir() string {
	u, err := user.Current()
	if err != nil {
		return "/"
	}
	return u.HomeDir
}
@ -1,45 +0,0 @@
// +build static_build

package dbus

import (
	"bufio"
	"os"
	"strconv"
	"strings"
)

func lookupHomeDir() string {
	myUid := os.Getuid()

	f, err := os.Open("/etc/passwd")
	if err != nil {
		return "/"
	}
	defer f.Close()

	s := bufio.NewScanner(f)

	for s.Scan() {
		if err := s.Err(); err != nil {
			break
		}

		line := strings.TrimSpace(s.Text())
		if line == "" {
			continue
		}

		parts := strings.Split(line, ":")

		if len(parts) >= 6 {
			uid, err := strconv.Atoi(parts[2])
			if err == nil && uid == myUid {
				return parts[5]
			}
		}
	}

	// Default to / if we can't get a better value
	return "/"
}
@ -1,27 +0,0 @@
package introspect

import (
	"encoding/xml"
	"github.com/godbus/dbus"
	"strings"
)

// Call calls org.freedesktop.DBus.Introspectable.Introspect on a remote object
// and returns the introspection data.
func Call(o dbus.BusObject) (*Node, error) {
	var xmldata string
	var node Node

	err := o.Call("org.freedesktop.DBus.Introspectable.Introspect", 0).Store(&xmldata)
	if err != nil {
		return nil, err
	}
	err = xml.NewDecoder(strings.NewReader(xmldata)).Decode(&node)
	if err != nil {
		return nil, err
	}
	if node.Name == "" {
		node.Name = string(o.Path())
	}
	return &node, nil
}
|
|
@ -1,86 +0,0 @@
|
|||
// Package introspect provides some utilities for dealing with the DBus
|
||||
// introspection format.
|
||||
package introspect
|
||||
|
||||
import "encoding/xml"
|
||||
|
||||
// The introspection data for the org.freedesktop.DBus.Introspectable interface.
|
||||
var IntrospectData = Interface{
|
||||
Name: "org.freedesktop.DBus.Introspectable",
|
||||
Methods: []Method{
|
||||
{
|
||||
Name: "Introspect",
|
||||
Args: []Arg{
|
||||
{"out", "s", "out"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// XML document type declaration of the introspection format version 1.0
|
||||
const IntrospectDeclarationString = `
|
||||
<!DOCTYPE node PUBLIC "-//freedesktop//DTD D-BUS Object Introspection 1.0//EN"
|
||||
"http://www.freedesktop.org/standards/dbus/1.0/introspect.dtd">
|
||||
`
|
||||
|
||||
// The introspection data for the org.freedesktop.DBus.Introspectable interface,
|
||||
// as a string.
|
||||
const IntrospectDataString = `
|
||||
<interface name="org.freedesktop.DBus.Introspectable">
|
||||
<method name="Introspect">
|
||||
<arg name="out" direction="out" type="s"/>
|
||||
</method>
|
||||
</interface>
|
||||
`
|
||||
|
||||
// Node is the root element of an introspection.
|
||||
type Node struct {
|
||||
XMLName xml.Name `xml:"node"`
|
||||
Name string `xml:"name,attr,omitempty"`
|
||||
Interfaces []Interface `xml:"interface"`
|
||||
Children []Node `xml:"node,omitempty"`
|
||||
}
|
||||
|
||||
// Interface describes a DBus interface that is available on the message bus.
|
||||
type Interface struct {
|
||||
Name string `xml:"name,attr"`
|
||||
Methods []Method `xml:"method"`
|
||||
Signals []Signal `xml:"signal"`
|
||||
Properties []Property `xml:"property"`
|
||||
Annotations []Annotation `xml:"annotation"`
|
||||
}
|
||||
|
||||
// Method describes a Method on an Interface as returned by an introspection.
|
||||
type Method struct {
|
||||
Name string `xml:"name,attr"`
|
||||
Args []Arg `xml:"arg"`
|
||||
Annotations []Annotation `xml:"annotation"`
|
||||
}
|
||||
|
||||
// Signal describes a Signal emitted on an Interface.
|
||||
type Signal struct {
|
||||
Name string `xml:"name,attr"`
|
||||
Args []Arg `xml:"arg"`
|
||||
Annotations []Annotation `xml:"annotation"`
|
||||
}
|
||||
|
||||
// Property describes a property of an Interface.
|
||||
type Property struct {
|
||||
Name string `xml:"name,attr"`
|
||||
Type string `xml:"type,attr"`
|
||||
Access string `xml:"access,attr"`
|
||||
Annotations []Annotation `xml:"annotation"`
|
||||
}
|
||||
|
||||
// Arg represents an argument of a method or a signal.
|
||||
type Arg struct {
|
||||
Name string `xml:"name,attr,omitempty"`
|
||||
Type string `xml:"type,attr"`
|
||||
Direction string `xml:"direction,attr,omitempty"`
|
||||
}
|
||||
|
||||
// Annotation is an annotation in the introspection format.
|
||||
type Annotation struct {
|
||||
Name string `xml:"name,attr"`
|
||||
Value string `xml:"value,attr"`
|
||||
}
|
|
@ -1,76 +0,0 @@
|
|||
package introspect
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"github.com/godbus/dbus"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Introspectable implements org.freedesktop.DBus.Introspectable.
//
// You can create it by converting the XML-formatted introspection data from a
// string to an Introspectable or call NewIntrospectable with a Node. Then,
// export it as org.freedesktop.DBus.Introspectable on your object.
|
||||
type Introspectable string
|
||||
|
||||
// NewIntrospectable returns an Introspectable that returns the introspection
|
||||
// data that corresponds to the given Node. If n.Interfaces doesn't contain the
|
||||
// data for org.freedesktop.DBus.Introspectable, it is added automatically.
|
||||
func NewIntrospectable(n *Node) Introspectable {
|
||||
found := false
|
||||
for _, v := range n.Interfaces {
|
||||
if v.Name == "org.freedesktop.DBus.Introspectable" {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
n.Interfaces = append(n.Interfaces, IntrospectData)
|
||||
}
|
||||
b, err := xml.Marshal(n)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return Introspectable(strings.TrimSpace(IntrospectDeclarationString) + string(b))
|
||||
}
|
||||
|
||||
// Introspect implements org.freedesktop.DBus.Introspectable.Introspect.
|
||||
func (i Introspectable) Introspect() (string, *dbus.Error) {
|
||||
return string(i), nil
|
||||
}
|
||||
|
||||
// Methods returns the description of the methods of v. This can be used to
|
||||
// create a Node which can be passed to NewIntrospectable.
|
||||
func Methods(v interface{}) []Method {
|
||||
t := reflect.TypeOf(v)
|
||||
ms := make([]Method, 0, t.NumMethod())
|
||||
for i := 0; i < t.NumMethod(); i++ {
|
||||
if t.Method(i).PkgPath != "" {
|
||||
continue
|
||||
}
|
||||
mt := t.Method(i).Type
|
||||
if mt.NumOut() == 0 ||
|
||||
mt.Out(mt.NumOut()-1) != reflect.TypeOf(&dbus.Error{}) {
|
||||
|
||||
continue
|
||||
}
|
||||
var m Method
|
||||
m.Name = t.Method(i).Name
|
||||
m.Args = make([]Arg, 0, mt.NumIn()+mt.NumOut()-2)
|
||||
for j := 1; j < mt.NumIn(); j++ {
|
||||
if mt.In(j) != reflect.TypeOf((*dbus.Sender)(nil)).Elem() &&
|
||||
mt.In(j) != reflect.TypeOf((*dbus.Message)(nil)).Elem() {
|
||||
arg := Arg{"", dbus.SignatureOfType(mt.In(j)).String(), "in"}
|
||||
m.Args = append(m.Args, arg)
|
||||
}
|
||||
}
|
||||
for j := 0; j < mt.NumOut()-1; j++ {
|
||||
arg := Arg{"", dbus.SignatureOfType(mt.Out(j)).String(), "out"}
|
||||
m.Args = append(m.Args, arg)
|
||||
}
|
||||
m.Annotations = make([]Annotation, 0)
|
||||
ms = append(ms, m)
|
||||
}
|
||||
return ms
|
||||
}
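
A hedged sketch tying the pieces together: Methods builds the method descriptions for a value, NewIntrospectable wraps them, and the result is exported as org.freedesktop.DBus.Introspectable alongside the object itself. The demo type and the com.example names are assumptions made for the example, and a running session bus is assumed.

package main

import (
	"github.com/godbus/dbus"
	"github.com/godbus/dbus/introspect"
)

type demo struct{}

func (demo) Ping(s string) (string, *dbus.Error) { return "pong: " + s, nil }

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	node := &introspect.Node{
		Name: "/com/example/Demo",
		Interfaces: []introspect.Interface{
			{Name: "com.example.Demo", Methods: introspect.Methods(demo{})},
		},
	}
	if err := conn.Export(demo{}, "/com/example/Demo", "com.example.Demo"); err != nil {
		panic(err)
	}
	// NewIntrospectable adds the Introspectable interface description itself
	// if it is missing from node.Interfaces.
	if err := conn.Export(introspect.NewIntrospectable(node), "/com/example/Demo",
		"org.freedesktop.DBus.Introspectable"); err != nil {
		panic(err)
	}
	select {}
}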
|
|
@ -1,346 +0,0 @@
|
|||
package dbus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
const protoVersion byte = 1
|
||||
|
||||
// Flags represents the possible flags of a D-Bus message.
|
||||
type Flags byte
|
||||
|
||||
const (
|
||||
// FlagNoReplyExpected signals that the message is not expected to generate
|
||||
// a reply. If this flag is set on outgoing messages, any possible reply
|
||||
// will be discarded.
|
||||
FlagNoReplyExpected Flags = 1 << iota
|
||||
// FlagNoAutoStart signals that the message bus should not automatically
|
||||
// start an application when handling this message.
|
||||
FlagNoAutoStart
|
||||
)
|
||||
|
||||
// Type represents the possible types of a D-Bus message.
|
||||
type Type byte
|
||||
|
||||
const (
|
||||
TypeMethodCall Type = 1 + iota
|
||||
TypeMethodReply
|
||||
TypeError
|
||||
TypeSignal
|
||||
typeMax
|
||||
)
|
||||
|
||||
func (t Type) String() string {
|
||||
switch t {
|
||||
case TypeMethodCall:
|
||||
return "method call"
|
||||
case TypeMethodReply:
|
||||
return "reply"
|
||||
case TypeError:
|
||||
return "error"
|
||||
case TypeSignal:
|
||||
return "signal"
|
||||
}
|
||||
return "invalid"
|
||||
}
|
||||
|
||||
// HeaderField represents the possible byte codes for the headers
|
||||
// of a D-Bus message.
|
||||
type HeaderField byte
|
||||
|
||||
const (
|
||||
FieldPath HeaderField = 1 + iota
|
||||
FieldInterface
|
||||
FieldMember
|
||||
FieldErrorName
|
||||
FieldReplySerial
|
||||
FieldDestination
|
||||
FieldSender
|
||||
FieldSignature
|
||||
FieldUnixFDs
|
||||
fieldMax
|
||||
)
|
||||
|
||||
// An InvalidMessageError describes the reason why a D-Bus message is regarded as
|
||||
// invalid.
|
||||
type InvalidMessageError string
|
||||
|
||||
func (e InvalidMessageError) Error() string {
|
||||
return "dbus: invalid message: " + string(e)
|
||||
}
|
||||
|
||||
// fieldTypes contains the types of the various header fields.
|
||||
var fieldTypes = [fieldMax]reflect.Type{
|
||||
FieldPath: objectPathType,
|
||||
FieldInterface: stringType,
|
||||
FieldMember: stringType,
|
||||
FieldErrorName: stringType,
|
||||
FieldReplySerial: uint32Type,
|
||||
FieldDestination: stringType,
|
||||
FieldSender: stringType,
|
||||
FieldSignature: signatureType,
|
||||
FieldUnixFDs: uint32Type,
|
||||
}
|
||||
|
||||
// requiredFields lists the header fields that are required by the different
|
||||
// message types.
|
||||
var requiredFields = [typeMax][]HeaderField{
|
||||
TypeMethodCall: {FieldPath, FieldMember},
|
||||
TypeMethodReply: {FieldReplySerial},
|
||||
TypeError: {FieldErrorName, FieldReplySerial},
|
||||
TypeSignal: {FieldPath, FieldInterface, FieldMember},
|
||||
}
|
||||
|
||||
// Message represents a single D-Bus message.
|
||||
type Message struct {
|
||||
Type
|
||||
Flags
|
||||
Headers map[HeaderField]Variant
|
||||
Body []interface{}
|
||||
|
||||
serial uint32
|
||||
}
|
||||
|
||||
type header struct {
|
||||
Field byte
|
||||
Variant
|
||||
}
|
||||
|
||||
// DecodeMessage tries to decode a single message in the D-Bus wire format
|
||||
// from the given reader. The byte order is figured out from the first byte.
|
||||
// The possibly returned error can be an error of the underlying reader, an
|
||||
// InvalidMessageError or a FormatError.
|
||||
func DecodeMessage(rd io.Reader) (msg *Message, err error) {
|
||||
var order binary.ByteOrder
|
||||
var hlength, length uint32
|
||||
var typ, flags, proto byte
|
||||
var headers []header
|
||||
|
||||
b := make([]byte, 1)
|
||||
_, err = rd.Read(b)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
switch b[0] {
|
||||
case 'l':
|
||||
order = binary.LittleEndian
|
||||
case 'B':
|
||||
order = binary.BigEndian
|
||||
default:
|
||||
return nil, InvalidMessageError("invalid byte order")
|
||||
}
|
||||
|
||||
dec := newDecoder(rd, order)
|
||||
dec.pos = 1
|
||||
|
||||
msg = new(Message)
|
||||
vs, err := dec.Decode(Signature{"yyyuu"})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = Store(vs, &typ, &flags, &proto, &length, &msg.serial); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
msg.Type = Type(typ)
|
||||
msg.Flags = Flags(flags)
|
||||
|
||||
// get the header length separately because we need it later
|
||||
b = make([]byte, 4)
|
||||
_, err = io.ReadFull(rd, b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
binary.Read(bytes.NewBuffer(b), order, &hlength)
|
||||
if hlength+length+16 > 1<<27 {
|
||||
return nil, InvalidMessageError("message is too long")
|
||||
}
|
||||
dec = newDecoder(io.MultiReader(bytes.NewBuffer(b), rd), order)
|
||||
dec.pos = 12
|
||||
vs, err = dec.Decode(Signature{"a(yv)"})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = Store(vs, &headers); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
msg.Headers = make(map[HeaderField]Variant)
|
||||
for _, v := range headers {
|
||||
msg.Headers[HeaderField(v.Field)] = v.Variant
|
||||
}
|
||||
|
||||
dec.align(8)
|
||||
body := make([]byte, int(length))
|
||||
if length != 0 {
|
||||
_, err := io.ReadFull(rd, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err = msg.IsValid(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sig, _ := msg.Headers[FieldSignature].value.(Signature)
|
||||
if sig.str != "" {
|
||||
buf := bytes.NewBuffer(body)
|
||||
dec = newDecoder(buf, order)
|
||||
vs, err := dec.Decode(sig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
msg.Body = vs
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// EncodeTo encodes and sends a message to the given writer. The byte order must
|
||||
// be either binary.LittleEndian or binary.BigEndian. If the message is not
|
||||
// valid or an error occurs when writing, an error is returned.
|
||||
func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error {
|
||||
if err := msg.IsValid(); err != nil {
|
||||
return err
|
||||
}
|
||||
var vs [7]interface{}
|
||||
switch order {
|
||||
case binary.LittleEndian:
|
||||
vs[0] = byte('l')
|
||||
case binary.BigEndian:
|
||||
vs[0] = byte('B')
|
||||
default:
|
||||
return errors.New("dbus: invalid byte order")
|
||||
}
|
||||
body := new(bytes.Buffer)
|
||||
enc := newEncoder(body, order)
|
||||
if len(msg.Body) != 0 {
|
||||
enc.Encode(msg.Body...)
|
||||
}
|
||||
vs[1] = msg.Type
|
||||
vs[2] = msg.Flags
|
||||
vs[3] = protoVersion
|
||||
vs[4] = uint32(len(body.Bytes()))
|
||||
vs[5] = msg.serial
|
||||
headers := make([]header, 0, len(msg.Headers))
|
||||
for k, v := range msg.Headers {
|
||||
headers = append(headers, header{byte(k), v})
|
||||
}
|
||||
vs[6] = headers
|
||||
var buf bytes.Buffer
|
||||
enc = newEncoder(&buf, order)
|
||||
enc.Encode(vs[:]...)
|
||||
enc.align(8)
|
||||
body.WriteTo(&buf)
|
||||
if buf.Len() > 1<<27 {
|
||||
return InvalidMessageError("message is too long")
|
||||
}
|
||||
if _, err := buf.WriteTo(out); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsValid checks whether msg is a valid message and returns an
|
||||
// InvalidMessageError if it is not.
|
||||
func (msg *Message) IsValid() error {
|
||||
if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected) != 0 {
|
||||
return InvalidMessageError("invalid flags")
|
||||
}
|
||||
if msg.Type == 0 || msg.Type >= typeMax {
|
||||
return InvalidMessageError("invalid message type")
|
||||
}
|
||||
for k, v := range msg.Headers {
|
||||
if k == 0 || k >= fieldMax {
|
||||
return InvalidMessageError("invalid header")
|
||||
}
|
||||
if reflect.TypeOf(v.value) != fieldTypes[k] {
|
||||
return InvalidMessageError("invalid type of header field")
|
||||
}
|
||||
}
|
||||
for _, v := range requiredFields[msg.Type] {
|
||||
if _, ok := msg.Headers[v]; !ok {
|
||||
return InvalidMessageError("missing required header")
|
||||
}
|
||||
}
|
||||
if path, ok := msg.Headers[FieldPath]; ok {
|
||||
if !path.value.(ObjectPath).IsValid() {
|
||||
return InvalidMessageError("invalid path name")
|
||||
}
|
||||
}
|
||||
if iface, ok := msg.Headers[FieldInterface]; ok {
|
||||
if !isValidInterface(iface.value.(string)) {
|
||||
return InvalidMessageError("invalid interface name")
|
||||
}
|
||||
}
|
||||
if member, ok := msg.Headers[FieldMember]; ok {
|
||||
if !isValidMember(member.value.(string)) {
|
||||
return InvalidMessageError("invalid member name")
|
||||
}
|
||||
}
|
||||
if errname, ok := msg.Headers[FieldErrorName]; ok {
|
||||
if !isValidInterface(errname.value.(string)) {
|
||||
return InvalidMessageError("invalid error name")
|
||||
}
|
||||
}
|
||||
if len(msg.Body) != 0 {
|
||||
if _, ok := msg.Headers[FieldSignature]; !ok {
|
||||
return InvalidMessageError("missing signature")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Serial returns the message's serial number. The returned value is only valid
|
||||
// for messages received by eavesdropping.
|
||||
func (msg *Message) Serial() uint32 {
|
||||
return msg.serial
|
||||
}
|
||||
|
||||
// String returns a string representation of a message similar to the format of
|
||||
// dbus-monitor.
|
||||
func (msg *Message) String() string {
|
||||
if err := msg.IsValid(); err != nil {
|
||||
return "<invalid>"
|
||||
}
|
||||
s := msg.Type.String()
|
||||
if v, ok := msg.Headers[FieldSender]; ok {
|
||||
s += " from " + v.value.(string)
|
||||
}
|
||||
if v, ok := msg.Headers[FieldDestination]; ok {
|
||||
s += " to " + v.value.(string)
|
||||
}
|
||||
s += " serial " + strconv.FormatUint(uint64(msg.serial), 10)
|
||||
if v, ok := msg.Headers[FieldReplySerial]; ok {
|
||||
s += " reply_serial " + strconv.FormatUint(uint64(v.value.(uint32)), 10)
|
||||
}
|
||||
if v, ok := msg.Headers[FieldUnixFDs]; ok {
|
||||
s += " unixfds " + strconv.FormatUint(uint64(v.value.(uint32)), 10)
|
||||
}
|
||||
if v, ok := msg.Headers[FieldPath]; ok {
|
||||
s += " path " + string(v.value.(ObjectPath))
|
||||
}
|
||||
if v, ok := msg.Headers[FieldInterface]; ok {
|
||||
s += " interface " + v.value.(string)
|
||||
}
|
||||
if v, ok := msg.Headers[FieldErrorName]; ok {
|
||||
s += " error " + v.value.(string)
|
||||
}
|
||||
if v, ok := msg.Headers[FieldMember]; ok {
|
||||
s += " member " + v.value.(string)
|
||||
}
|
||||
if len(msg.Body) != 0 {
|
||||
s += "\n"
|
||||
}
|
||||
for i, v := range msg.Body {
|
||||
s += " " + MakeVariant(v).String()
|
||||
if i != len(msg.Body)-1 {
|
||||
s += "\n"
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
|
@ -1,136 +0,0 @@
|
|||
package dbus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// BusObject is the interface of a remote object on which methods can be
|
||||
// invoked.
|
||||
type BusObject interface {
|
||||
Call(method string, flags Flags, args ...interface{}) *Call
|
||||
Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call
|
||||
GetProperty(p string) (Variant, error)
|
||||
Destination() string
|
||||
Path() ObjectPath
|
||||
}
|
||||
|
||||
// Object represents a remote object on which methods can be invoked.
|
||||
type Object struct {
|
||||
conn *Conn
|
||||
dest string
|
||||
path ObjectPath
|
||||
}
|
||||
|
||||
// Call calls a method with (*Object).Go and waits for its reply.
|
||||
func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call {
|
||||
return <-o.Go(method, flags, make(chan *Call, 1), args...).Done
|
||||
}
|
||||
|
||||
// AddMatchSignal subscribes BusObject to signals from the specified interface
// and member (method name).
|
||||
func (o *Object) AddMatchSignal(iface, member string) *Call {
|
||||
return o.Call(
|
||||
"org.freedesktop.DBus.AddMatch",
|
||||
0,
|
||||
"type='signal',interface='"+iface+"',member='"+member+"'",
|
||||
)
|
||||
}
|
||||
|
||||
// Go calls a method with the given arguments asynchronously. It returns a
|
||||
// Call structure representing this method call. The passed channel will
|
||||
// return the same value once the call is done. If ch is nil, a new channel
|
||||
// will be allocated. Otherwise, ch has to be buffered or Go will panic.
|
||||
//
|
||||
// If the flags include FlagNoReplyExpected, ch is ignored and a Call structure
|
||||
// is returned of which only the Err member is valid.
|
||||
//
|
||||
// If the method parameter contains a dot ('.'), the part before the last dot
|
||||
// specifies the interface on which the method is called.
|
||||
func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call {
|
||||
iface := ""
|
||||
i := strings.LastIndex(method, ".")
|
||||
if i != -1 {
|
||||
iface = method[:i]
|
||||
}
|
||||
method = method[i+1:]
|
||||
msg := new(Message)
|
||||
msg.Type = TypeMethodCall
|
||||
msg.serial = o.conn.getSerial()
|
||||
msg.Flags = flags & (FlagNoAutoStart | FlagNoReplyExpected)
|
||||
msg.Headers = make(map[HeaderField]Variant)
|
||||
msg.Headers[FieldPath] = MakeVariant(o.path)
|
||||
msg.Headers[FieldDestination] = MakeVariant(o.dest)
|
||||
msg.Headers[FieldMember] = MakeVariant(method)
|
||||
if iface != "" {
|
||||
msg.Headers[FieldInterface] = MakeVariant(iface)
|
||||
}
|
||||
msg.Body = args
|
||||
if len(args) > 0 {
|
||||
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(args...))
|
||||
}
|
||||
if msg.Flags&FlagNoReplyExpected == 0 {
|
||||
if ch == nil {
|
||||
ch = make(chan *Call, 10)
|
||||
} else if cap(ch) == 0 {
|
||||
panic("dbus: unbuffered channel passed to (*Object).Go")
|
||||
}
|
||||
call := &Call{
|
||||
Destination: o.dest,
|
||||
Path: o.path,
|
||||
Method: method,
|
||||
Args: args,
|
||||
Done: ch,
|
||||
}
|
||||
o.conn.callsLck.Lock()
|
||||
o.conn.calls[msg.serial] = call
|
||||
o.conn.callsLck.Unlock()
|
||||
o.conn.outLck.RLock()
|
||||
if o.conn.closed {
|
||||
call.Err = ErrClosed
|
||||
call.Done <- call
|
||||
} else {
|
||||
o.conn.out <- msg
|
||||
}
|
||||
o.conn.outLck.RUnlock()
|
||||
return call
|
||||
}
|
||||
o.conn.outLck.RLock()
|
||||
defer o.conn.outLck.RUnlock()
|
||||
if o.conn.closed {
|
||||
return &Call{Err: ErrClosed}
|
||||
}
|
||||
o.conn.out <- msg
|
||||
return &Call{Err: nil}
|
||||
}
|
||||
|
||||
// GetProperty calls org.freedesktop.DBus.Properties.Get on the given
|
||||
// object. The property name must be given in interface.member notation.
|
||||
func (o *Object) GetProperty(p string) (Variant, error) {
|
||||
idx := strings.LastIndex(p, ".")
|
||||
if idx == -1 || idx+1 == len(p) {
|
||||
return Variant{}, errors.New("dbus: invalid property " + p)
|
||||
}
|
||||
|
||||
iface := p[:idx]
|
||||
prop := p[idx+1:]
|
||||
|
||||
result := Variant{}
|
||||
err := o.Call("org.freedesktop.DBus.Properties.Get", 0, iface, prop).Store(&result)
|
||||
|
||||
if err != nil {
|
||||
return Variant{}, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Destination returns the destination that calls on o are sent to.
|
||||
func (o *Object) Destination() string {
|
||||
return o.dest
|
||||
}
|
||||
|
||||
// Path returns the path that calls on o are sent to.
|
||||
func (o *Object) Path() ObjectPath {
|
||||
return o.path
|
||||
}
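// Illustrative only: a minimal usage sketch (not part of this file) of the
// Call/Go API defined above, written as a standalone program importing
// "github.com/godbus/dbus". It assumes a reachable session bus.
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	obj := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")

	// Synchronous: Call wraps Go and waits for the reply.
	var names []string
	if err := obj.Call("org.freedesktop.DBus.ListNames", 0).Store(&names); err != nil {
		panic(err)
	}
	fmt.Println(len(names), "names on the bus")

	// Asynchronous: Go returns immediately; the buffered channel receives the
	// finished *Call (an unbuffered channel would make Go panic, see above).
	ch := make(chan *dbus.Call, 1)
	obj.Go("org.freedesktop.DBus.ListNames", 0, ch)
	if call := <-ch; call.Err == nil {
		call.Store(&names)
	}
}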
|
|
@ -1,264 +0,0 @@
|
|||
// Package prop provides the Properties struct which can be used to implement
|
||||
// org.freedesktop.DBus.Properties.
|
||||
package prop
|
||||
|
||||
import (
|
||||
"github.com/godbus/dbus"
|
||||
"github.com/godbus/dbus/introspect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// EmitType controls how org.freedesktop.DBus.Properties.PropertiesChanged is
|
||||
// emitted for a property. If it is EmitTrue, the signal is emitted. If it is
|
||||
// EmitInvalidates, the signal is also emitted, but the new value of the property
|
||||
// is not disclosed. If it is EmitFalse, no signal is emitted.
|
||||
type EmitType byte
|
||||
|
||||
const (
|
||||
EmitFalse EmitType = iota
|
||||
EmitTrue
|
||||
EmitInvalidates
|
||||
)
|
||||
|
||||
// ErrIfaceNotFound is the error returned to peers who try to access properties
|
||||
// on interfaces that aren't found.
|
||||
var ErrIfaceNotFound = dbus.NewError("org.freedesktop.DBus.Properties.Error.InterfaceNotFound", nil)
|
||||
|
||||
// ErrPropNotFound is the error returned to peers trying to access properties
|
||||
// that aren't found.
|
||||
var ErrPropNotFound = dbus.NewError("org.freedesktop.DBus.Properties.Error.PropertyNotFound", nil)
|
||||
|
||||
// ErrReadOnly is the error returned to peers trying to set a read-only
|
||||
// property.
|
||||
var ErrReadOnly = dbus.NewError("org.freedesktop.DBus.Properties.Error.ReadOnly", nil)
|
||||
|
||||
// ErrInvalidArg is returned to peers if the type of the property that is being
|
||||
// changed and the argument don't match.
|
||||
var ErrInvalidArg = dbus.NewError("org.freedesktop.DBus.Properties.Error.InvalidArg", nil)
|
||||
|
||||
// The introspection data for the org.freedesktop.DBus.Properties interface.
|
||||
var IntrospectData = introspect.Interface{
|
||||
Name: "org.freedesktop.DBus.Properties",
|
||||
Methods: []introspect.Method{
|
||||
{
|
||||
Name: "Get",
|
||||
Args: []introspect.Arg{
|
||||
{"interface", "s", "in"},
|
||||
{"property", "s", "in"},
|
||||
{"value", "v", "out"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "GetAll",
|
||||
Args: []introspect.Arg{
|
||||
{"interface", "s", "in"},
|
||||
{"props", "a{sv}", "out"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "Set",
|
||||
Args: []introspect.Arg{
|
||||
{"interface", "s", "in"},
|
||||
{"property", "s", "in"},
|
||||
{"value", "v", "in"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Signals: []introspect.Signal{
|
||||
{
|
||||
Name: "PropertiesChanged",
|
||||
Args: []introspect.Arg{
|
||||
{"interface", "s", "out"},
|
||||
{"changed_properties", "a{sv}", "out"},
|
||||
{"invalidates_properties", "as", "out"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// The introspection data for the org.freedesktop.DBus.Properties interface, as
|
||||
// a string.
|
||||
const IntrospectDataString = `
|
||||
<interface name="org.freedesktop.DBus.Properties">
|
||||
<method name="Get">
|
||||
<arg name="interface" direction="in" type="s"/>
|
||||
<arg name="property" direction="in" type="s"/>
|
||||
<arg name="value" direction="out" type="v"/>
|
||||
</method>
|
||||
<method name="GetAll">
|
||||
<arg name="interface" direction="in" type="s"/>
|
||||
<arg name="props" direction="out" type="a{sv}"/>
|
||||
</method>
|
||||
<method name="Set">
|
||||
<arg name="interface" direction="in" type="s"/>
|
||||
<arg name="property" direction="in" type="s"/>
|
||||
<arg name="value" direction="in" type="v"/>
|
||||
</method>
|
||||
<signal name="PropertiesChanged">
|
||||
<arg name="interface" type="s"/>
|
||||
<arg name="changed_properties" type="a{sv}"/>
|
||||
<arg name="invalidates_properties" type="as"/>
|
||||
</signal>
|
||||
</interface>
|
||||
`
|
||||
|
||||
// Prop represents a single property. It is used for creating a Properties
|
||||
// value.
|
||||
type Prop struct {
|
||||
// Initial value. Must be a DBus-representable type.
|
||||
Value interface{}
|
||||
|
||||
// If true, the value can be modified by calls to Set.
|
||||
Writable bool
|
||||
|
||||
// Controls how org.freedesktop.DBus.Properties.PropertiesChanged is
|
||||
// emitted if this property changes.
|
||||
Emit EmitType
|
||||
|
||||
// If not nil, anytime this property is changed by Set, this function is
|
||||
// called with an appropriate Change as its argument. If the returned error
|
||||
// is not nil, it is sent back to the caller of Set and the property is not
|
||||
// changed.
|
||||
Callback func(*Change) *dbus.Error
|
||||
}
|
||||
|
||||
// Change represents a change of a property by a call to Set.
|
||||
type Change struct {
|
||||
Props *Properties
|
||||
Iface string
|
||||
Name string
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
// Properties is a set of values that can be made available to the message bus
|
||||
// using the org.freedesktop.DBus.Properties interface. It is safe for
|
||||
// concurrent use by multiple goroutines.
|
||||
type Properties struct {
|
||||
m map[string]map[string]*Prop
|
||||
mut sync.RWMutex
|
||||
conn *dbus.Conn
|
||||
path dbus.ObjectPath
|
||||
}
|
||||
|
||||
// New returns a new Properties structure that manages the given properties.
|
||||
// The key for the first-level map of props is the name of the interface; the
|
||||
// second-level key is the name of the property. The returned structure will be
|
||||
// exported as org.freedesktop.DBus.Properties on path.
|
||||
func New(conn *dbus.Conn, path dbus.ObjectPath, props map[string]map[string]*Prop) *Properties {
|
||||
p := &Properties{m: props, conn: conn, path: path}
|
||||
conn.Export(p, path, "org.freedesktop.DBus.Properties")
|
||||
return p
|
||||
}
|
||||
|
||||
// Get implements org.freedesktop.DBus.Properties.Get.
|
||||
func (p *Properties) Get(iface, property string) (dbus.Variant, *dbus.Error) {
|
||||
p.mut.RLock()
|
||||
defer p.mut.RUnlock()
|
||||
m, ok := p.m[iface]
|
||||
if !ok {
|
||||
return dbus.Variant{}, ErrIfaceNotFound
|
||||
}
|
||||
prop, ok := m[property]
|
||||
if !ok {
|
||||
return dbus.Variant{}, ErrPropNotFound
|
||||
}
|
||||
return dbus.MakeVariant(prop.Value), nil
|
||||
}
|
||||
|
||||
// GetAll implements org.freedesktop.DBus.Properties.GetAll.
|
||||
func (p *Properties) GetAll(iface string) (map[string]dbus.Variant, *dbus.Error) {
|
||||
p.mut.RLock()
|
||||
defer p.mut.RUnlock()
|
||||
m, ok := p.m[iface]
|
||||
if !ok {
|
||||
return nil, ErrIfaceNotFound
|
||||
}
|
||||
rm := make(map[string]dbus.Variant, len(m))
|
||||
for k, v := range m {
|
||||
rm[k] = dbus.MakeVariant(v.Value)
|
||||
}
|
||||
return rm, nil
|
||||
}
|
||||
|
||||
// GetMust returns the value of the given property and panics if either the
|
||||
// interface or the property name is invalid.
|
||||
func (p *Properties) GetMust(iface, property string) interface{} {
|
||||
p.mut.RLock()
|
||||
defer p.mut.RUnlock()
|
||||
return p.m[iface][property].Value
|
||||
}
|
||||
|
||||
// Introspection returns the introspection data that represents the properties
|
||||
// of iface.
|
||||
func (p *Properties) Introspection(iface string) []introspect.Property {
|
||||
p.mut.RLock()
|
||||
defer p.mut.RUnlock()
|
||||
m := p.m[iface]
|
||||
s := make([]introspect.Property, 0, len(m))
|
||||
for k, v := range m {
|
||||
p := introspect.Property{Name: k, Type: dbus.SignatureOf(v.Value).String()}
|
||||
if v.Writable {
|
||||
p.Access = "readwrite"
|
||||
} else {
|
||||
p.Access = "read"
|
||||
}
|
||||
s = append(s, p)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// set sets the given property and emits PropertiesChanged if appropriate. p.mut
|
||||
// must already be locked.
|
||||
func (p *Properties) set(iface, property string, v interface{}) {
|
||||
prop := p.m[iface][property]
|
||||
prop.Value = v
|
||||
switch prop.Emit {
|
||||
case EmitFalse:
|
||||
// do nothing
|
||||
case EmitInvalidates:
|
||||
p.conn.Emit(p.path, "org.freedesktop.DBus.Properties.PropertiesChanged",
|
||||
iface, map[string]dbus.Variant{}, []string{property})
|
||||
case EmitTrue:
|
||||
p.conn.Emit(p.path, "org.freedesktop.DBus.Properties.PropertiesChanged",
|
||||
iface, map[string]dbus.Variant{property: dbus.MakeVariant(v)},
|
||||
[]string{})
|
||||
default:
|
||||
panic("invalid value for EmitType")
|
||||
}
|
||||
}
|
||||
|
||||
// Set implements org.freedesktop.DBus.Properties.Set.
|
||||
func (p *Properties) Set(iface, property string, newv dbus.Variant) *dbus.Error {
|
||||
p.mut.Lock()
|
||||
defer p.mut.Unlock()
|
||||
m, ok := p.m[iface]
|
||||
if !ok {
|
||||
return ErrIfaceNotFound
|
||||
}
|
||||
prop, ok := m[property]
|
||||
if !ok {
|
||||
return ErrPropNotFound
|
||||
}
|
||||
if !prop.Writable {
|
||||
return ErrReadOnly
|
||||
}
|
||||
if newv.Signature() != dbus.SignatureOf(prop.Value) {
|
||||
return ErrInvalidArg
|
||||
}
|
||||
if prop.Callback != nil {
|
||||
err := prop.Callback(&Change{p, iface, property, newv.Value()})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
p.set(iface, property, newv.Value())
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetMust sets the value of the given property and panics if the interface or
|
||||
// the property name is invalid.
|
||||
func (p *Properties) SetMust(iface, property string, v interface{}) {
|
||||
p.mut.Lock()
|
||||
p.set(iface, property, v)
|
||||
p.mut.Unlock()
|
||||
}
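// A minimal sketch (not part of this file) of how a service might use the
// Properties type defined above. It assumes the import paths
// "github.com/godbus/dbus" and "github.com/godbus/dbus/prop", a reachable
// session bus, and made-up interface/property names for illustration.
package main

import (
	"github.com/godbus/dbus"
	"github.com/godbus/dbus/prop"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}

	// One interface ("com.example.Demo") with a single writable property.
	propsSpec := map[string]map[string]*prop.Prop{
		"com.example.Demo": {
			"Counter": {
				Value:    int32(0),
				Writable: true,
				Emit:     prop.EmitTrue, // emit PropertiesChanged with the new value
				Callback: nil,
			},
		},
	}
	props := prop.New(conn, "/com/example/Demo", propsSpec)

	// Changing the value through the struct emits PropertiesChanged as
	// configured by Emit.
	props.SetMust("com.example.Demo", "Counter", int32(1))

	select {} // keep serving
}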
|
|
@ -1,257 +0,0 @@
|
|||
package dbus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var sigToType = map[byte]reflect.Type{
|
||||
'y': byteType,
|
||||
'b': boolType,
|
||||
'n': int16Type,
|
||||
'q': uint16Type,
|
||||
'i': int32Type,
|
||||
'u': uint32Type,
|
||||
'x': int64Type,
|
||||
't': uint64Type,
|
||||
'd': float64Type,
|
||||
's': stringType,
|
||||
'g': signatureType,
|
||||
'o': objectPathType,
|
||||
'v': variantType,
|
||||
'h': unixFDIndexType,
|
||||
}
|
||||
|
||||
// Signature represents a correct type signature as specified by the D-Bus
|
||||
// specification. The zero value represents the empty signature, "".
|
||||
type Signature struct {
|
||||
str string
|
||||
}
|
||||
|
||||
// SignatureOf returns the concatenation of all the signatures of the given
|
||||
// values. It panics if one of them is not representable in D-Bus.
|
||||
func SignatureOf(vs ...interface{}) Signature {
|
||||
var s string
|
||||
for _, v := range vs {
|
||||
s += getSignature(reflect.TypeOf(v))
|
||||
}
|
||||
return Signature{s}
|
||||
}
|
||||
|
||||
// SignatureOfType returns the signature of the given type. It panics if the
|
||||
// type is not representable in D-Bus.
|
||||
func SignatureOfType(t reflect.Type) Signature {
|
||||
return Signature{getSignature(t)}
|
||||
}
|
||||
|
||||
// getSignature returns the signature of the given type and panics on unknown types.
|
||||
func getSignature(t reflect.Type) string {
|
||||
// handle simple types first
|
||||
switch t.Kind() {
|
||||
case reflect.Uint8:
|
||||
return "y"
|
||||
case reflect.Bool:
|
||||
return "b"
|
||||
case reflect.Int16:
|
||||
return "n"
|
||||
case reflect.Uint16:
|
||||
return "q"
|
||||
case reflect.Int32:
|
||||
if t == unixFDType {
|
||||
return "h"
|
||||
}
|
||||
return "i"
|
||||
case reflect.Uint32:
|
||||
if t == unixFDIndexType {
|
||||
return "h"
|
||||
}
|
||||
return "u"
|
||||
case reflect.Int64:
|
||||
return "x"
|
||||
case reflect.Uint64:
|
||||
return "t"
|
||||
case reflect.Float64:
|
||||
return "d"
|
||||
case reflect.Ptr:
|
||||
return getSignature(t.Elem())
|
||||
case reflect.String:
|
||||
if t == objectPathType {
|
||||
return "o"
|
||||
}
|
||||
return "s"
|
||||
case reflect.Struct:
|
||||
if t == variantType {
|
||||
return "v"
|
||||
} else if t == signatureType {
|
||||
return "g"
|
||||
}
|
||||
var s string
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
|
||||
s += getSignature(t.Field(i).Type)
|
||||
}
|
||||
}
|
||||
return "(" + s + ")"
|
||||
case reflect.Array, reflect.Slice:
|
||||
return "a" + getSignature(t.Elem())
|
||||
case reflect.Map:
|
||||
if !isKeyType(t.Key()) {
|
||||
panic(InvalidTypeError{t})
|
||||
}
|
||||
return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}"
|
||||
}
|
||||
panic(InvalidTypeError{t})
|
||||
}
|
||||
|
||||
// ParseSignature returns the signature represented by this string, or a
|
||||
// SignatureError if the string is not a valid signature.
|
||||
func ParseSignature(s string) (sig Signature, err error) {
|
||||
if len(s) == 0 {
|
||||
return
|
||||
}
|
||||
if len(s) > 255 {
|
||||
return Signature{""}, SignatureError{s, "too long"}
|
||||
}
|
||||
sig.str = s
|
||||
for err == nil && len(s) != 0 {
|
||||
err, s = validSingle(s, 0)
|
||||
}
|
||||
if err != nil {
|
||||
sig = Signature{""}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ParseSignatureMust behaves like ParseSignature, except that it panics if s
|
||||
// is not valid.
|
||||
func ParseSignatureMust(s string) Signature {
|
||||
sig, err := ParseSignature(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return sig
|
||||
}
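// A minimal sketch (not part of this file) of the signature entry points
// above, runnable as a standalone program importing "github.com/godbus/dbus".
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	// SignatureOf derives the signature from Go values:
	// int32 -> "i", string -> "s", map[string]dbus.Variant -> "a{sv}".
	sig := dbus.SignatureOf(int32(0), "hello", map[string]dbus.Variant{})
	fmt.Println(sig.String()) // "isa{sv}"

	// ParseSignature validates a signature string.
	if _, err := dbus.ParseSignature("a{s"); err != nil {
		fmt.Println("invalid:", err) // unmatched '{'
	}
}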
|
||||
|
||||
// Empty returns whether the signature is the empty signature.
|
||||
func (s Signature) Empty() bool {
|
||||
return s.str == ""
|
||||
}
|
||||
|
||||
// Single returns whether the signature represents a single, complete type.
|
||||
func (s Signature) Single() bool {
|
||||
err, r := validSingle(s.str, 0)
|
||||
return err == nil && r == ""
|
||||
}
|
||||
|
||||
// String returns the signature's string representation.
|
||||
func (s Signature) String() string {
|
||||
return s.str
|
||||
}
|
||||
|
||||
// A SignatureError indicates that a signature passed to a function or received
|
||||
// on a connection is not a valid signature.
|
||||
type SignatureError struct {
|
||||
Sig string
|
||||
Reason string
|
||||
}
|
||||
|
||||
func (e SignatureError) Error() string {
|
||||
return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason)
|
||||
}
|
||||
|
||||
// Try to read a single type from this string. If it was successful, err is nil
|
||||
// and rem is the remaining unparsed part. Otherwise, err is a non-nil
|
||||
// SignatureError and rem is "". depth is the current recursion depth which may
|
||||
// not be greater than 64 and should be given as 0 on the first call.
|
||||
func validSingle(s string, depth int) (err error, rem string) {
|
||||
if s == "" {
|
||||
return SignatureError{Sig: s, Reason: "empty signature"}, ""
|
||||
}
|
||||
if depth > 64 {
|
||||
return SignatureError{Sig: s, Reason: "container nesting too deep"}, ""
|
||||
}
|
||||
switch s[0] {
|
||||
case 'y', 'b', 'n', 'q', 'i', 'u', 'x', 't', 'd', 's', 'g', 'o', 'v', 'h':
|
||||
return nil, s[1:]
|
||||
case 'a':
|
||||
if len(s) > 1 && s[1] == '{' {
|
||||
i := findMatching(s[1:], '{', '}')
|
||||
if i == -1 {
|
||||
return SignatureError{Sig: s, Reason: "unmatched '{'"}, ""
|
||||
}
|
||||
i++
|
||||
rem = s[i+1:]
|
||||
s = s[2:i]
|
||||
if err, _ = validSingle(s[:1], depth+1); err != nil {
|
||||
return err, ""
|
||||
}
|
||||
err, nr := validSingle(s[1:], depth+1)
|
||||
if err != nil {
|
||||
return err, ""
|
||||
}
|
||||
if nr != "" {
|
||||
return SignatureError{Sig: s, Reason: "too many types in dict"}, ""
|
||||
}
|
||||
return nil, rem
|
||||
}
|
||||
return validSingle(s[1:], depth+1)
|
||||
case '(':
|
||||
i := findMatching(s, '(', ')')
|
||||
if i == -1 {
|
||||
return SignatureError{Sig: s, Reason: "unmatched ')'"}, ""
|
||||
}
|
||||
rem = s[i+1:]
|
||||
s = s[1:i]
|
||||
for err == nil && s != "" {
|
||||
err, s = validSingle(s, depth+1)
|
||||
}
|
||||
if err != nil {
|
||||
rem = ""
|
||||
}
|
||||
return
|
||||
}
|
||||
return SignatureError{Sig: s, Reason: "invalid type character"}, ""
|
||||
}
|
||||
|
||||
func findMatching(s string, left, right rune) int {
|
||||
n := 0
|
||||
for i, v := range s {
|
||||
if v == left {
|
||||
n++
|
||||
} else if v == right {
|
||||
n--
|
||||
}
|
||||
if n == 0 {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// typeFor returns the type of the given signature. It ignores any left over
|
||||
// characters and panics if s doesn't start with a valid type signature.
|
||||
func typeFor(s string) (t reflect.Type) {
|
||||
err, _ := validSingle(s, 0)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if t, ok := sigToType[s[0]]; ok {
|
||||
return t
|
||||
}
|
||||
switch s[0] {
|
||||
case 'a':
|
||||
if s[1] == '{' {
|
||||
i := strings.LastIndex(s, "}")
|
||||
t = reflect.MapOf(sigToType[s[2]], typeFor(s[3:i]))
|
||||
} else {
|
||||
t = reflect.SliceOf(typeFor(s[1:]))
|
||||
}
|
||||
case '(':
|
||||
t = interfacesType
|
||||
}
|
||||
return
|
||||
}
|
|
@ -1,6 +0,0 @@
|
|||
package dbus
|
||||
|
||||
func (t *unixTransport) SendNullByte() error {
|
||||
_, err := t.Write([]byte{0})
|
||||
return err
|
||||
}
|
|
@ -1,35 +0,0 @@
|
|||
package dbus
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
type genericTransport struct {
|
||||
io.ReadWriteCloser
|
||||
}
|
||||
|
||||
func (t genericTransport) SendNullByte() error {
|
||||
_, err := t.Write([]byte{0})
|
||||
return err
|
||||
}
|
||||
|
||||
func (t genericTransport) SupportsUnixFDs() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (t genericTransport) EnableUnixFDs() {}
|
||||
|
||||
func (t genericTransport) ReadMessage() (*Message, error) {
|
||||
return DecodeMessage(t)
|
||||
}
|
||||
|
||||
func (t genericTransport) SendMessage(msg *Message) error {
|
||||
for _, v := range msg.Body {
|
||||
if _, ok := v.(UnixFD); ok {
|
||||
return errors.New("dbus: unix fd passing not enabled")
|
||||
}
|
||||
}
|
||||
return msg.EncodeTo(t, binary.LittleEndian)
|
||||
}
|
|
@ -1,196 +0,0 @@
|
|||
//+build !windows
|
||||
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
type oobReader struct {
|
||||
conn *net.UnixConn
|
||||
oob []byte
|
||||
buf [4096]byte
|
||||
}
|
||||
|
||||
func (o *oobReader) Read(b []byte) (n int, err error) {
|
||||
n, oobn, flags, _, err := o.conn.ReadMsgUnix(b, o.buf[:])
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
if flags&syscall.MSG_CTRUNC != 0 {
|
||||
return n, errors.New("dbus: control data truncated (too many fds received)")
|
||||
}
|
||||
o.oob = append(o.oob, o.buf[:oobn]...)
|
||||
return n, nil
|
||||
}
|
||||
|
||||
type unixTransport struct {
|
||||
*net.UnixConn
|
||||
hasUnixFDs bool
|
||||
}
|
||||
|
||||
func newUnixTransport(keys string) (transport, error) {
|
||||
var err error
|
||||
|
||||
t := new(unixTransport)
|
||||
abstract := getKey(keys, "abstract")
|
||||
path := getKey(keys, "path")
|
||||
switch {
|
||||
case abstract == "" && path == "":
|
||||
return nil, errors.New("dbus: invalid address (neither path nor abstract set)")
|
||||
case abstract != "" && path == "":
|
||||
t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: "@" + abstract, Net: "unix"})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return t, nil
|
||||
case abstract == "" && path != "":
|
||||
t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: path, Net: "unix"})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return t, nil
|
||||
default:
|
||||
return nil, errors.New("dbus: invalid address (both path and abstract set)")
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
transports["unix"] = newUnixTransport
|
||||
}
|
||||
|
||||
func (t *unixTransport) EnableUnixFDs() {
|
||||
t.hasUnixFDs = true
|
||||
}
|
||||
|
||||
func (t *unixTransport) ReadMessage() (*Message, error) {
|
||||
var (
|
||||
blen, hlen uint32
|
||||
csheader [16]byte
|
||||
headers []header
|
||||
order binary.ByteOrder
|
||||
unixfds uint32
|
||||
)
|
||||
// To be sure that all bytes of out-of-band data are read, we use a special
|
||||
// reader that uses ReadUnix on the underlying connection instead of Read
|
||||
// and gathers the out-of-band data in a buffer.
|
||||
rd := &oobReader{conn: t.UnixConn}
|
||||
// read the first 16 bytes (the part of the header that has a constant size),
|
||||
// from which we can figure out the length of the rest of the message
|
||||
if _, err := io.ReadFull(rd, csheader[:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch csheader[0] {
|
||||
case 'l':
|
||||
order = binary.LittleEndian
|
||||
case 'B':
|
||||
order = binary.BigEndian
|
||||
default:
|
||||
return nil, InvalidMessageError("invalid byte order")
|
||||
}
|
||||
// csheader[4:8] -> length of message body, csheader[12:16] -> length of
|
||||
// header fields (without alignment)
|
||||
binary.Read(bytes.NewBuffer(csheader[4:8]), order, &blen)
|
||||
binary.Read(bytes.NewBuffer(csheader[12:]), order, &hlen)
|
||||
if hlen%8 != 0 {
|
||||
hlen += 8 - (hlen % 8)
|
||||
}
|
||||
|
||||
// decode headers and look for unix fds
|
||||
headerdata := make([]byte, hlen+4)
|
||||
copy(headerdata, csheader[12:])
|
||||
if _, err := io.ReadFull(t, headerdata[4:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dec := newDecoder(bytes.NewBuffer(headerdata), order)
|
||||
dec.pos = 12
|
||||
vs, err := dec.Decode(Signature{"a(yv)"})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
Store(vs, &headers)
|
||||
for _, v := range headers {
|
||||
if v.Field == byte(FieldUnixFDs) {
|
||||
unixfds, _ = v.Variant.value.(uint32)
|
||||
}
|
||||
}
|
||||
all := make([]byte, 16+hlen+blen)
|
||||
copy(all, csheader[:])
|
||||
copy(all[16:], headerdata[4:])
|
||||
if _, err := io.ReadFull(rd, all[16+hlen:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if unixfds != 0 {
|
||||
if !t.hasUnixFDs {
|
||||
return nil, errors.New("dbus: got unix fds on unsupported transport")
|
||||
}
|
||||
// read the fds from the OOB data
|
||||
scms, err := syscall.ParseSocketControlMessage(rd.oob)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(scms) != 1 {
|
||||
return nil, errors.New("dbus: received more than one socket control message")
|
||||
}
|
||||
fds, err := syscall.ParseUnixRights(&scms[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
msg, err := DecodeMessage(bytes.NewBuffer(all))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// substitute the values in the message body (which are indices for the
|
||||
// array of file descriptors received via the OOB data) with the actual values
|
||||
for i, v := range msg.Body {
|
||||
if j, ok := v.(UnixFDIndex); ok {
|
||||
if uint32(j) >= unixfds {
|
||||
return nil, InvalidMessageError("invalid index for unix fd")
|
||||
}
|
||||
msg.Body[i] = UnixFD(fds[j])
|
||||
}
|
||||
}
|
||||
return msg, nil
|
||||
}
|
||||
return DecodeMessage(bytes.NewBuffer(all))
|
||||
}
|
||||
|
||||
func (t *unixTransport) SendMessage(msg *Message) error {
|
||||
fds := make([]int, 0)
|
||||
for i, v := range msg.Body {
|
||||
if fd, ok := v.(UnixFD); ok {
|
||||
msg.Body[i] = UnixFDIndex(len(fds))
|
||||
fds = append(fds, int(fd))
|
||||
}
|
||||
}
|
||||
if len(fds) != 0 {
|
||||
if !t.hasUnixFDs {
|
||||
return errors.New("dbus: unix fd passing not enabled")
|
||||
}
|
||||
msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds)))
|
||||
oob := syscall.UnixRights(fds...)
|
||||
buf := new(bytes.Buffer)
|
||||
msg.EncodeTo(buf, binary.LittleEndian)
|
||||
n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n != buf.Len() || oobn != len(oob) {
|
||||
return io.ErrShortWrite
|
||||
}
|
||||
} else {
|
||||
if err := msg.EncodeTo(t, binary.LittleEndian); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *unixTransport) SupportsUnixFDs() bool {
|
||||
return true
|
||||
}
|
|
@ -1,95 +0,0 @@
|
|||
// The UnixCredentials system call is currently only implemented on Linux
|
||||
// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
|
||||
// https://golang.org/s/go1.4-syscall
|
||||
// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
|
||||
|
||||
// Local implementation of the UnixCredentials system call for DragonFly BSD
|
||||
|
||||
package dbus
|
||||
|
||||
/*
|
||||
#include <sys/ucred.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// http://golang.org/src/pkg/syscall/ztypes_linux_amd64.go
|
||||
// http://golang.org/src/pkg/syscall/ztypes_dragonfly_amd64.go
|
||||
type Ucred struct {
|
||||
Pid int32
|
||||
Uid uint32
|
||||
Gid uint32
|
||||
}
|
||||
|
||||
// http://golang.org/src/pkg/syscall/types_linux.go
|
||||
// http://golang.org/src/pkg/syscall/types_dragonfly.go
|
||||
// https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/sys/sys/ucred.h
|
||||
const (
|
||||
SizeofUcred = C.sizeof_struct_ucred
|
||||
)
|
||||
|
||||
// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
|
||||
func cmsgAlignOf(salen int) int {
|
||||
// From http://golang.org/src/pkg/syscall/sockcmsg_unix.go
|
||||
//salign := sizeofPtr
|
||||
// NOTE: It seems like 64-bit Darwin and DragonFly BSD kernels
|
||||
// still require 32-bit aligned access to network subsystem.
|
||||
//if darwin64Bit || dragonfly64Bit {
|
||||
// salign = 4
|
||||
//}
|
||||
salign := 4
|
||||
return (salen + salign - 1) & ^(salign - 1)
|
||||
}
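// Worked example (not part of this file) of the alignment formula above: with
// salign = 4, (salen + 3) & ^3 rounds salen up to the next multiple of 4,
// e.g. 5 -> 8, 8 -> 8, 9 -> 12. A standalone sketch:
package main

import "fmt"

func roundUp4(n int) int {
	const salign = 4
	// same expression as cmsgAlignOf above
	return (n + salign - 1) & ^(salign - 1)
}

func main() {
	for _, n := range []int{5, 8, 9, 12} {
		fmt.Println(n, "->", roundUp4(n)) // 5 -> 8, 8 -> 8, 9 -> 12, 12 -> 12
	}
}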
|
||||
|
||||
// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
|
||||
func cmsgData(h *syscall.Cmsghdr) unsafe.Pointer {
|
||||
return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(syscall.SizeofCmsghdr)))
|
||||
}
|
||||
|
||||
// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
|
||||
// UnixCredentials encodes credentials into a socket control message
|
||||
// for sending to another process. This can be used for
|
||||
// authentication.
|
||||
func UnixCredentials(ucred *Ucred) []byte {
|
||||
b := make([]byte, syscall.CmsgSpace(SizeofUcred))
|
||||
h := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
|
||||
h.Level = syscall.SOL_SOCKET
|
||||
h.Type = syscall.SCM_CREDS
|
||||
h.SetLen(syscall.CmsgLen(SizeofUcred))
|
||||
*((*Ucred)(cmsgData(h))) = *ucred
|
||||
return b
|
||||
}
|
||||
|
||||
// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
|
||||
// ParseUnixCredentials decodes a socket control message that contains
|
||||
// credentials in a Ucred structure. To receive such a message, the
|
||||
// SO_PASSCRED option must be enabled on the socket.
|
||||
func ParseUnixCredentials(m *syscall.SocketControlMessage) (*Ucred, error) {
|
||||
if m.Header.Level != syscall.SOL_SOCKET {
|
||||
return nil, syscall.EINVAL
|
||||
}
|
||||
if m.Header.Type != syscall.SCM_CREDS {
|
||||
return nil, syscall.EINVAL
|
||||
}
|
||||
ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0]))
|
||||
return &ucred, nil
|
||||
}
|
||||
|
||||
func (t *unixTransport) SendNullByte() error {
|
||||
ucred := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
|
||||
b := UnixCredentials(ucred)
|
||||
_, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if oobn != len(b) {
|
||||
return io.ErrShortWrite
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,25 +0,0 @@
|
|||
// The UnixCredentials system call is currently only implemented on Linux
|
||||
// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
|
||||
// https://golang.org/s/go1.4-syscall
|
||||
// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
|
||||
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func (t *unixTransport) SendNullByte() error {
|
||||
ucred := &syscall.Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
|
||||
b := syscall.UnixCredentials(ucred)
|
||||
_, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if oobn != len(b) {
|
||||
return io.ErrShortWrite
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,139 +0,0 @@
|
|||
package dbus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Variant represents the D-Bus variant type.
|
||||
type Variant struct {
|
||||
sig Signature
|
||||
value interface{}
|
||||
}
|
||||
|
||||
// MakeVariant converts the given value to a Variant. It panics if v cannot be
|
||||
// represented as a D-Bus type.
|
||||
func MakeVariant(v interface{}) Variant {
|
||||
return Variant{SignatureOf(v), v}
|
||||
}
|
||||
|
||||
// ParseVariant parses the given string as a variant as described at
|
||||
// https://developer.gnome.org/glib/unstable/gvariant-text.html. If sig is not
|
||||
// empty, it is taken to be the expected signature for the variant.
|
||||
func ParseVariant(s string, sig Signature) (Variant, error) {
|
||||
tokens := varLex(s)
|
||||
p := &varParser{tokens: tokens}
|
||||
n, err := varMakeNode(p)
|
||||
if err != nil {
|
||||
return Variant{}, err
|
||||
}
|
||||
if sig.str == "" {
|
||||
sig, err = varInfer(n)
|
||||
if err != nil {
|
||||
return Variant{}, err
|
||||
}
|
||||
}
|
||||
v, err := n.Value(sig)
|
||||
if err != nil {
|
||||
return Variant{}, err
|
||||
}
|
||||
return MakeVariant(v), nil
|
||||
}
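// A minimal sketch (not part of this file) of MakeVariant and ParseVariant,
// runnable as a standalone program importing "github.com/godbus/dbus".
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	// MakeVariant wraps a Go value together with its D-Bus signature.
	v := dbus.MakeVariant([]string{"a", "b"})
	fmt.Println(v.Signature().String()) // "as"
	fmt.Println(v.String())             // ["a", "b"]

	// ParseVariant goes the other way: from GVariant-style text to a value.
	// With an empty signature the type is inferred from the text.
	parsed, err := dbus.ParseVariant(`{"answer": 42}`, dbus.Signature{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", parsed.Value()) // map[string]int32
}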
|
||||
|
||||
// format returns a formatted version of v and whether this string can be parsed
|
||||
// unambiguously.
|
||||
func (v Variant) format() (string, bool) {
|
||||
switch v.sig.str[0] {
|
||||
case 'b', 'i':
|
||||
return fmt.Sprint(v.value), true
|
||||
case 'n', 'q', 'u', 'x', 't', 'd', 'h':
|
||||
return fmt.Sprint(v.value), false
|
||||
case 's':
|
||||
return strconv.Quote(v.value.(string)), true
|
||||
case 'o':
|
||||
return strconv.Quote(string(v.value.(ObjectPath))), false
|
||||
case 'g':
|
||||
return strconv.Quote(v.value.(Signature).str), false
|
||||
case 'v':
|
||||
s, unamb := v.value.(Variant).format()
|
||||
if !unamb {
|
||||
return "<@" + v.value.(Variant).sig.str + " " + s + ">", true
|
||||
}
|
||||
return "<" + s + ">", true
|
||||
case 'y':
|
||||
return fmt.Sprintf("%#x", v.value.(byte)), false
|
||||
}
|
||||
rv := reflect.ValueOf(v.value)
|
||||
switch rv.Kind() {
|
||||
case reflect.Slice:
|
||||
if rv.Len() == 0 {
|
||||
return "[]", false
|
||||
}
|
||||
unamb := true
|
||||
buf := bytes.NewBuffer([]byte("["))
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
// TODO: slooow
|
||||
s, b := MakeVariant(rv.Index(i).Interface()).format()
|
||||
unamb = unamb && b
|
||||
buf.WriteString(s)
|
||||
if i != rv.Len()-1 {
|
||||
buf.WriteString(", ")
|
||||
}
|
||||
}
|
||||
buf.WriteByte(']')
|
||||
return buf.String(), unamb
|
||||
case reflect.Map:
|
||||
if rv.Len() == 0 {
|
||||
return "{}", false
|
||||
}
|
||||
unamb := true
|
||||
var buf bytes.Buffer
|
||||
kvs := make([]string, rv.Len())
|
||||
for i, k := range rv.MapKeys() {
|
||||
s, b := MakeVariant(k.Interface()).format()
|
||||
unamb = unamb && b
|
||||
buf.Reset()
|
||||
buf.WriteString(s)
|
||||
buf.WriteString(": ")
|
||||
s, b = MakeVariant(rv.MapIndex(k).Interface()).format()
|
||||
unamb = unamb && b
|
||||
buf.WriteString(s)
|
||||
kvs[i] = buf.String()
|
||||
}
|
||||
buf.Reset()
|
||||
buf.WriteByte('{')
|
||||
sort.Strings(kvs)
|
||||
for i, kv := range kvs {
|
||||
if i > 0 {
|
||||
buf.WriteString(", ")
|
||||
}
|
||||
buf.WriteString(kv)
|
||||
}
|
||||
buf.WriteByte('}')
|
||||
return buf.String(), unamb
|
||||
}
|
||||
return `"INVALID"`, true
|
||||
}
|
||||
|
||||
// Signature returns the D-Bus signature of the underlying value of v.
|
||||
func (v Variant) Signature() Signature {
|
||||
return v.sig
|
||||
}
|
||||
|
||||
// String returns the string representation of the underlying value of v as
|
||||
// described at https://developer.gnome.org/glib/unstable/gvariant-text.html.
|
||||
func (v Variant) String() string {
|
||||
s, unamb := v.format()
|
||||
if !unamb {
|
||||
return "@" + v.sig.str + " " + s
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Value returns the underlying value of v.
|
||||
func (v Variant) Value() interface{} {
|
||||
return v.value
|
||||
}
|
|
@ -1,284 +0,0 @@
|
|||
package dbus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Heavily inspired by the lexer from text/template.
|
||||
|
||||
type varToken struct {
|
||||
typ varTokenType
|
||||
val string
|
||||
}
|
||||
|
||||
type varTokenType byte
|
||||
|
||||
const (
|
||||
tokEOF varTokenType = iota
|
||||
tokError
|
||||
tokNumber
|
||||
tokString
|
||||
tokBool
|
||||
tokArrayStart
|
||||
tokArrayEnd
|
||||
tokDictStart
|
||||
tokDictEnd
|
||||
tokVariantStart
|
||||
tokVariantEnd
|
||||
tokComma
|
||||
tokColon
|
||||
tokType
|
||||
tokByteString
|
||||
)
|
||||
|
||||
type varLexer struct {
|
||||
input string
|
||||
start int
|
||||
pos int
|
||||
width int
|
||||
tokens []varToken
|
||||
}
|
||||
|
||||
type lexState func(*varLexer) lexState
|
||||
|
||||
func varLex(s string) []varToken {
|
||||
l := &varLexer{input: s}
|
||||
l.run()
|
||||
return l.tokens
|
||||
}
|
||||
|
||||
func (l *varLexer) accept(valid string) bool {
|
||||
if strings.IndexRune(valid, l.next()) >= 0 {
|
||||
return true
|
||||
}
|
||||
l.backup()
|
||||
return false
|
||||
}
|
||||
|
||||
func (l *varLexer) backup() {
|
||||
l.pos -= l.width
|
||||
}
|
||||
|
||||
func (l *varLexer) emit(t varTokenType) {
|
||||
l.tokens = append(l.tokens, varToken{t, l.input[l.start:l.pos]})
|
||||
l.start = l.pos
|
||||
}
|
||||
|
||||
func (l *varLexer) errorf(format string, v ...interface{}) lexState {
|
||||
l.tokens = append(l.tokens, varToken{
|
||||
tokError,
|
||||
fmt.Sprintf(format, v...),
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *varLexer) ignore() {
|
||||
l.start = l.pos
|
||||
}
|
||||
|
||||
func (l *varLexer) next() rune {
|
||||
var r rune
|
||||
|
||||
if l.pos >= len(l.input) {
|
||||
l.width = 0
|
||||
return -1
|
||||
}
|
||||
r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
|
||||
l.pos += l.width
|
||||
return r
|
||||
}
|
||||
|
||||
func (l *varLexer) run() {
|
||||
for state := varLexNormal; state != nil; {
|
||||
state = state(l)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *varLexer) peek() rune {
|
||||
r := l.next()
|
||||
l.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
func varLexNormal(l *varLexer) lexState {
|
||||
for {
|
||||
r := l.next()
|
||||
switch {
|
||||
case r == -1:
|
||||
l.emit(tokEOF)
|
||||
return nil
|
||||
case r == '[':
|
||||
l.emit(tokArrayStart)
|
||||
case r == ']':
|
||||
l.emit(tokArrayEnd)
|
||||
case r == '{':
|
||||
l.emit(tokDictStart)
|
||||
case r == '}':
|
||||
l.emit(tokDictEnd)
|
||||
case r == '<':
|
||||
l.emit(tokVariantStart)
|
||||
case r == '>':
|
||||
l.emit(tokVariantEnd)
|
||||
case r == ':':
|
||||
l.emit(tokColon)
|
||||
case r == ',':
|
||||
l.emit(tokComma)
|
||||
case r == '\'' || r == '"':
|
||||
l.backup()
|
||||
return varLexString
|
||||
case r == '@':
|
||||
l.backup()
|
||||
return varLexType
|
||||
case unicode.IsSpace(r):
|
||||
l.ignore()
|
||||
case unicode.IsNumber(r) || r == '+' || r == '-':
|
||||
l.backup()
|
||||
return varLexNumber
|
||||
case r == 'b':
|
||||
pos := l.start
|
||||
if n := l.peek(); n == '"' || n == '\'' {
|
||||
return varLexByteString
|
||||
}
|
||||
// not a byte string; try to parse it as a type or bool below
|
||||
l.pos = pos + 1
|
||||
l.width = 1
|
||||
fallthrough
|
||||
default:
|
||||
// either a bool or a type. Try bools first.
|
||||
l.backup()
|
||||
if l.pos+4 <= len(l.input) {
|
||||
if l.input[l.pos:l.pos+4] == "true" {
|
||||
l.pos += 4
|
||||
l.emit(tokBool)
|
||||
continue
|
||||
}
|
||||
}
|
||||
if l.pos+5 <= len(l.input) {
|
||||
if l.input[l.pos:l.pos+5] == "false" {
|
||||
l.pos += 5
|
||||
l.emit(tokBool)
|
||||
continue
|
||||
}
|
||||
}
|
||||
// must be a type.
|
||||
return varLexType
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var varTypeMap = map[string]string{
|
||||
"boolean": "b",
|
||||
"byte": "y",
|
||||
"int16": "n",
|
||||
"uint16": "q",
|
||||
"int32": "i",
|
||||
"uint32": "u",
|
||||
"int64": "x",
|
||||
"uint64": "t",
|
||||
"double": "f",
|
||||
"string": "s",
|
||||
"objectpath": "o",
|
||||
"signature": "g",
|
||||
}
|
||||
|
||||
func varLexByteString(l *varLexer) lexState {
|
||||
q := l.next()
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case '\\':
|
||||
if r := l.next(); r != -1 {
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
case -1:
|
||||
return l.errorf("unterminated bytestring")
|
||||
case q:
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
l.emit(tokByteString)
|
||||
return varLexNormal
|
||||
}
|
||||
|
||||
func varLexNumber(l *varLexer) lexState {
|
||||
l.accept("+-")
|
||||
digits := "0123456789"
|
||||
if l.accept("0") {
|
||||
if l.accept("x") {
|
||||
digits = "0123456789abcdefABCDEF"
|
||||
} else {
|
||||
digits = "01234567"
|
||||
}
|
||||
}
|
||||
for strings.IndexRune(digits, l.next()) >= 0 {
|
||||
}
|
||||
l.backup()
|
||||
if l.accept(".") {
|
||||
for strings.IndexRune(digits, l.next()) >= 0 {
|
||||
}
|
||||
l.backup()
|
||||
}
|
||||
if l.accept("eE") {
|
||||
l.accept("+-")
|
||||
for strings.IndexRune("0123456789", l.next()) >= 0 {
|
||||
}
|
||||
l.backup()
|
||||
}
|
||||
if r := l.peek(); unicode.IsLetter(r) {
|
||||
l.next()
|
||||
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
|
||||
}
|
||||
l.emit(tokNumber)
|
||||
return varLexNormal
|
||||
}
|
||||
|
||||
func varLexString(l *varLexer) lexState {
|
||||
q := l.next()
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case '\\':
|
||||
if r := l.next(); r != -1 {
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
case -1:
|
||||
return l.errorf("unterminated string")
|
||||
case q:
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
l.emit(tokString)
|
||||
return varLexNormal
|
||||
}
|
||||
|
||||
func varLexType(l *varLexer) lexState {
|
||||
at := l.accept("@")
|
||||
for {
|
||||
r := l.next()
|
||||
if r == -1 {
|
||||
break
|
||||
}
|
||||
if unicode.IsSpace(r) {
|
||||
l.backup()
|
||||
break
|
||||
}
|
||||
}
|
||||
if at {
|
||||
if _, err := ParseSignature(l.input[l.start+1 : l.pos]); err != nil {
|
||||
return l.errorf("%s", err)
|
||||
}
|
||||
} else {
|
||||
if _, ok := varTypeMap[l.input[l.start:l.pos]]; ok {
|
||||
l.emit(tokType)
|
||||
return varLexNormal
|
||||
}
|
||||
return l.errorf("unrecognized type %q", l.input[l.start:l.pos])
|
||||
}
|
||||
l.emit(tokType)
|
||||
return varLexNormal
|
||||
}
|
|
@ -1,817 +0,0 @@
|
|||
package dbus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type varParser struct {
|
||||
tokens []varToken
|
||||
i int
|
||||
}
|
||||
|
||||
func (p *varParser) backup() {
|
||||
p.i--
|
||||
}
|
||||
|
||||
func (p *varParser) next() varToken {
|
||||
if p.i < len(p.tokens) {
|
||||
t := p.tokens[p.i]
|
||||
p.i++
|
||||
return t
|
||||
}
|
||||
return varToken{typ: tokEOF}
|
||||
}
|
||||
|
||||
type varNode interface {
|
||||
Infer() (Signature, error)
|
||||
String() string
|
||||
Sigs() sigSet
|
||||
Value(Signature) (interface{}, error)
|
||||
}
|
||||
|
||||
func varMakeNode(p *varParser) (varNode, error) {
|
||||
var sig Signature
|
||||
|
||||
for {
|
||||
t := p.next()
|
||||
switch t.typ {
|
||||
case tokEOF:
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
case tokError:
|
||||
return nil, errors.New(t.val)
|
||||
case tokNumber:
|
||||
return varMakeNumNode(t, sig)
|
||||
case tokString:
|
||||
return varMakeStringNode(t, sig)
|
||||
case tokBool:
|
||||
if sig.str != "" && sig.str != "b" {
|
||||
return nil, varTypeError{t.val, sig}
|
||||
}
|
||||
b, err := strconv.ParseBool(t.val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return boolNode(b), nil
|
||||
case tokArrayStart:
|
||||
return varMakeArrayNode(p, sig)
|
||||
case tokVariantStart:
|
||||
return varMakeVariantNode(p, sig)
|
||||
case tokDictStart:
|
||||
return varMakeDictNode(p, sig)
|
||||
case tokType:
|
||||
if sig.str != "" {
|
||||
return nil, errors.New("unexpected type annotation")
|
||||
}
|
||||
if t.val[0] == '@' {
|
||||
sig.str = t.val[1:]
|
||||
} else {
|
||||
sig.str = varTypeMap[t.val]
|
||||
}
|
||||
case tokByteString:
|
||||
if sig.str != "" && sig.str != "ay" {
|
||||
return nil, varTypeError{t.val, sig}
|
||||
}
|
||||
b, err := varParseByteString(t.val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return byteStringNode(b), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected %q", t.val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type varTypeError struct {
|
||||
val string
|
||||
sig Signature
|
||||
}
|
||||
|
||||
func (e varTypeError) Error() string {
|
||||
return fmt.Sprintf("dbus: can't parse %q as type %q", e.val, e.sig.str)
|
||||
}
|
||||
|
||||
type sigSet map[Signature]bool
|
||||
|
||||
func (s sigSet) Empty() bool {
|
||||
return len(s) == 0
|
||||
}
|
||||
|
||||
func (s sigSet) Intersect(s2 sigSet) sigSet {
|
||||
r := make(sigSet)
|
||||
for k := range s {
|
||||
if s2[k] {
|
||||
r[k] = true
|
||||
}
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func (s sigSet) Single() (Signature, bool) {
|
||||
if len(s) == 1 {
|
||||
for k := range s {
|
||||
return k, true
|
||||
}
|
||||
}
|
||||
return Signature{}, false
|
||||
}
|
||||
|
||||
func (s sigSet) ToArray() sigSet {
|
||||
r := make(sigSet, len(s))
|
||||
for k := range s {
|
||||
r[Signature{"a" + k.str}] = true
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
type numNode struct {
|
||||
sig Signature
|
||||
str string
|
||||
val interface{}
|
||||
}
|
||||
|
||||
var numSigSet = sigSet{
|
||||
Signature{"y"}: true,
|
||||
Signature{"n"}: true,
|
||||
Signature{"q"}: true,
|
||||
Signature{"i"}: true,
|
||||
Signature{"u"}: true,
|
||||
Signature{"x"}: true,
|
||||
Signature{"t"}: true,
|
||||
Signature{"d"}: true,
|
||||
}
|
||||
|
||||
func (n numNode) Infer() (Signature, error) {
|
||||
if strings.ContainsAny(n.str, ".e") {
|
||||
return Signature{"d"}, nil
|
||||
}
|
||||
return Signature{"i"}, nil
|
||||
}
|
||||
|
||||
func (n numNode) String() string {
|
||||
return n.str
|
||||
}
|
||||
|
||||
func (n numNode) Sigs() sigSet {
|
||||
if n.sig.str != "" {
|
||||
return sigSet{n.sig: true}
|
||||
}
|
||||
if strings.ContainsAny(n.str, ".e") {
|
||||
return sigSet{Signature{"d"}: true}
|
||||
}
|
||||
return numSigSet
|
||||
}
|
||||
|
||||
func (n numNode) Value(sig Signature) (interface{}, error) {
|
||||
if n.sig.str != "" && n.sig != sig {
|
||||
return nil, varTypeError{n.str, sig}
|
||||
}
|
||||
if n.val != nil {
|
||||
return n.val, nil
|
||||
}
|
||||
return varNumAs(n.str, sig)
|
||||
}
|
||||
|
||||
func varMakeNumNode(tok varToken, sig Signature) (varNode, error) {
|
||||
if sig.str == "" {
|
||||
return numNode{str: tok.val}, nil
|
||||
}
|
||||
num, err := varNumAs(tok.val, sig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return numNode{sig: sig, val: num}, nil
|
||||
}
|
||||
|
||||
func varNumAs(s string, sig Signature) (interface{}, error) {
|
||||
isUnsigned := false
|
||||
size := 32
|
||||
switch sig.str {
|
||||
case "n":
|
||||
size = 16
|
||||
case "i":
|
||||
case "x":
|
||||
size = 64
|
||||
case "y":
|
||||
size = 8
|
||||
isUnsigned = true
|
||||
case "q":
|
||||
size = 16
|
||||
isUnsigned = true
|
||||
case "u":
|
||||
isUnsigned = true
|
||||
case "t":
|
||||
size = 64
|
||||
isUnsigned = true
|
||||
case "d":
|
||||
d, err := strconv.ParseFloat(s, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d, nil
|
||||
default:
|
||||
return nil, varTypeError{s, sig}
|
||||
}
|
||||
base := 10
|
||||
if strings.HasPrefix(s, "0x") {
|
||||
base = 16
|
||||
s = s[2:]
|
||||
}
|
||||
if strings.HasPrefix(s, "0") && len(s) != 1 {
|
||||
base = 8
|
||||
s = s[1:]
|
||||
}
|
||||
if isUnsigned {
|
||||
i, err := strconv.ParseUint(s, base, size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var v interface{} = i
|
||||
switch sig.str {
|
||||
case "y":
|
||||
v = byte(i)
|
||||
case "q":
|
||||
v = uint16(i)
|
||||
case "u":
|
||||
v = uint32(i)
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
i, err := strconv.ParseInt(s, base, size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var v interface{} = i
|
||||
switch sig.str {
|
||||
case "n":
|
||||
v = int16(i)
|
||||
case "i":
|
||||
v = int32(i)
|
||||
}
|
||||
return v, nil
|
||||
}
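// Standalone illustration (not part of this file) of the literal-prefix rule
// implemented in varNumAs above: "0x" selects base 16, a leading "0" selects
// base 8, and everything else is base 10.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func parseLikeVarNumAs(s string) (int64, error) {
	base := 10
	if strings.HasPrefix(s, "0x") {
		base = 16
		s = s[2:]
	}
	if strings.HasPrefix(s, "0") && len(s) != 1 {
		base = 8
		s = s[1:]
	}
	return strconv.ParseInt(s, base, 32)
}

func main() {
	for _, s := range []string{"42", "0x2a", "052", "0"} {
		n, _ := parseLikeVarNumAs(s)
		fmt.Println(s, "->", n) // 42 -> 42, 0x2a -> 42, 052 -> 42, 0 -> 0
	}
}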
|
||||
|
||||
type stringNode struct {
|
||||
sig Signature
|
||||
str string // parsed
|
||||
val interface{} // has correct type
|
||||
}
|
||||
|
||||
var stringSigSet = sigSet{
|
||||
Signature{"s"}: true,
|
||||
Signature{"g"}: true,
|
||||
Signature{"o"}: true,
|
||||
}
|
||||
|
||||
func (n stringNode) Infer() (Signature, error) {
|
||||
return Signature{"s"}, nil
|
||||
}
|
||||
|
||||
func (n stringNode) String() string {
|
||||
return n.str
|
||||
}
|
||||
|
||||
func (n stringNode) Sigs() sigSet {
|
||||
if n.sig.str != "" {
|
||||
return sigSet{n.sig: true}
|
||||
}
|
||||
return stringSigSet
|
||||
}
|
||||
|
||||
func (n stringNode) Value(sig Signature) (interface{}, error) {
|
||||
if n.sig.str != "" && n.sig != sig {
|
||||
return nil, varTypeError{n.str, sig}
|
||||
}
|
||||
if n.val != nil {
|
||||
return n.val, nil
|
||||
}
|
||||
switch {
|
||||
case sig.str == "g":
|
||||
return Signature{n.str}, nil
|
||||
case sig.str == "o":
|
||||
return ObjectPath(n.str), nil
|
||||
case sig.str == "s":
|
||||
return n.str, nil
|
||||
default:
|
||||
return nil, varTypeError{n.str, sig}
|
||||
}
|
||||
}
|
||||
|
||||
func varMakeStringNode(tok varToken, sig Signature) (varNode, error) {
|
||||
if sig.str != "" && sig.str != "s" && sig.str != "g" && sig.str != "o" {
|
||||
return nil, fmt.Errorf("invalid type %q for string", sig.str)
|
||||
}
|
||||
s, err := varParseString(tok.val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
n := stringNode{str: s}
|
||||
if sig.str == "" {
|
||||
return stringNode{str: s}, nil
|
||||
}
|
||||
n.sig = sig
|
||||
switch sig.str {
|
||||
case "o":
|
||||
n.val = ObjectPath(s)
|
||||
case "g":
|
||||
n.val = Signature{s}
|
||||
case "s":
|
||||
n.val = s
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func varParseString(s string) (string, error) {
|
||||
// quotes are guaranteed to be there
|
||||
s = s[1 : len(s)-1]
|
||||
buf := new(bytes.Buffer)
|
||||
for len(s) != 0 {
|
||||
r, size := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && size == 1 {
|
||||
return "", errors.New("invalid UTF-8")
|
||||
}
|
||||
s = s[size:]
|
||||
if r != '\\' {
|
||||
buf.WriteRune(r)
|
||||
continue
|
||||
}
|
||||
r, size = utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && size == 1 {
|
||||
return "", errors.New("invalid UTF-8")
|
||||
}
|
||||
s = s[size:]
|
||||
switch r {
|
||||
case 'a':
|
||||
buf.WriteRune(0x7)
|
||||
case 'b':
|
||||
buf.WriteRune(0x8)
|
||||
case 'f':
|
||||
buf.WriteRune(0xc)
|
||||
case 'n':
|
||||
buf.WriteRune('\n')
|
||||
case 'r':
|
||||
buf.WriteRune('\r')
|
||||
case 't':
|
||||
buf.WriteRune('\t')
|
||||
case '\n':
|
||||
case 'u':
|
||||
if len(s) < 4 {
|
||||
return "", errors.New("short unicode escape")
|
||||
}
|
||||
r, err := strconv.ParseUint(s[:4], 16, 32)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf.WriteRune(rune(r))
|
||||
s = s[4:]
|
||||
case 'U':
|
||||
if len(s) < 8 {
|
||||
return "", errors.New("short unicode escape")
|
||||
}
|
||||
r, err := strconv.ParseUint(s[:8], 16, 32)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf.WriteRune(rune(r))
|
||||
s = s[8:]
|
||||
default:
|
||||
buf.WriteRune(r)
|
||||
}
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
var boolSigSet = sigSet{Signature{"b"}: true}
|
||||
|
||||
type boolNode bool
|
||||
|
||||
func (boolNode) Infer() (Signature, error) {
|
||||
return Signature{"b"}, nil
|
||||
}
|
||||
|
||||
func (b boolNode) String() string {
|
||||
if b {
|
||||
return "true"
|
||||
}
|
||||
return "false"
|
||||
}
|
||||
|
||||
func (boolNode) Sigs() sigSet {
|
||||
return boolSigSet
|
||||
}
|
||||
|
||||
func (b boolNode) Value(sig Signature) (interface{}, error) {
|
||||
if sig.str != "b" {
|
||||
return nil, varTypeError{b.String(), sig}
|
||||
}
|
||||
return bool(b), nil
|
||||
}
|
||||
|
||||
type arrayNode struct {
|
||||
set sigSet
|
||||
children []varNode
|
||||
val interface{}
|
||||
}
|
||||
|
||||
func (n arrayNode) Infer() (Signature, error) {
|
||||
for _, v := range n.children {
|
||||
csig, err := varInfer(v)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
return Signature{"a" + csig.str}, nil
|
||||
}
|
||||
return Signature{}, fmt.Errorf("can't infer type for %q", n.String())
|
||||
}
|
||||
|
||||
func (n arrayNode) String() string {
|
||||
s := "["
|
||||
for i, v := range n.children {
|
||||
s += v.String()
|
||||
if i != len(n.children)-1 {
|
||||
s += ", "
|
||||
}
|
||||
}
|
||||
return s + "]"
|
||||
}
|
||||
|
||||
func (n arrayNode) Sigs() sigSet {
|
||||
return n.set
|
||||
}
|
||||
|
||||
func (n arrayNode) Value(sig Signature) (interface{}, error) {
|
||||
if n.set.Empty() {
|
||||
// no type information whatsoever, so this must be an empty slice
|
||||
return reflect.MakeSlice(typeFor(sig.str), 0, 0).Interface(), nil
|
||||
}
|
||||
if !n.set[sig] {
|
||||
return nil, varTypeError{n.String(), sig}
|
||||
}
|
||||
s := reflect.MakeSlice(typeFor(sig.str), len(n.children), len(n.children))
|
||||
for i, v := range n.children {
|
||||
rv, err := v.Value(Signature{sig.str[1:]})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.Index(i).Set(reflect.ValueOf(rv))
|
||||
}
|
||||
return s.Interface(), nil
|
||||
}
|
||||
|
||||
func varMakeArrayNode(p *varParser, sig Signature) (varNode, error) {
|
||||
var n arrayNode
|
||||
if sig.str != "" {
|
||||
n.set = sigSet{sig: true}
|
||||
}
|
||||
if t := p.next(); t.typ == tokArrayEnd {
|
||||
return n, nil
|
||||
} else {
|
||||
p.backup()
|
||||
}
|
||||
Loop:
|
||||
for {
|
||||
t := p.next()
|
||||
switch t.typ {
|
||||
case tokEOF:
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
case tokError:
|
||||
return nil, errors.New(t.val)
|
||||
}
|
||||
p.backup()
|
||||
cn, err := varMakeNode(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if cset := cn.Sigs(); !cset.Empty() {
|
||||
if n.set.Empty() {
|
||||
n.set = cset.ToArray()
|
||||
} else {
|
||||
nset := cset.ToArray().Intersect(n.set)
|
||||
if nset.Empty() {
|
||||
return nil, fmt.Errorf("can't parse %q with given type information", cn.String())
|
||||
}
|
||||
n.set = nset
|
||||
}
|
||||
}
|
||||
n.children = append(n.children, cn)
|
||||
switch t := p.next(); t.typ {
|
||||
case tokEOF:
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
case tokError:
|
||||
return nil, errors.New(t.val)
|
||||
case tokArrayEnd:
|
||||
break Loop
|
||||
case tokComma:
|
||||
continue
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected %q", t.val)
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
type variantNode struct {
|
||||
n varNode
|
||||
}
|
||||
|
||||
var variantSet = sigSet{
|
||||
Signature{"v"}: true,
|
||||
}
|
||||
|
||||
func (variantNode) Infer() (Signature, error) {
|
||||
return Signature{"v"}, nil
|
||||
}
|
||||
|
||||
func (n variantNode) String() string {
|
||||
return "<" + n.n.String() + ">"
|
||||
}
|
||||
|
||||
func (variantNode) Sigs() sigSet {
|
||||
return variantSet
|
||||
}
|
||||
|
||||
func (n variantNode) Value(sig Signature) (interface{}, error) {
|
||||
if sig.str != "v" {
|
||||
return nil, varTypeError{n.String(), sig}
|
||||
}
|
||||
sig, err := varInfer(n.n)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v, err := n.n.Value(sig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return MakeVariant(v), nil
|
||||
}
|
||||
|
||||
func varMakeVariantNode(p *varParser, sig Signature) (varNode, error) {
|
||||
n, err := varMakeNode(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if t := p.next(); t.typ != tokVariantEnd {
|
||||
return nil, fmt.Errorf("unexpected %q", t.val)
|
||||
}
|
||||
vn := variantNode{n}
|
||||
if sig.str != "" && sig.str != "v" {
|
||||
return nil, varTypeError{vn.String(), sig}
|
||||
}
|
||||
return variantNode{n}, nil
|
||||
}
|
||||
|
||||
type dictEntry struct {
|
||||
key, val varNode
|
||||
}
|
||||
|
||||
type dictNode struct {
|
||||
kset, vset sigSet
|
||||
children []dictEntry
|
||||
val interface{}
|
||||
}
|
||||
|
||||
func (n dictNode) Infer() (Signature, error) {
|
||||
for _, v := range n.children {
|
||||
ksig, err := varInfer(v.key)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
vsig, err := varInfer(v.val)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
return Signature{"a{" + ksig.str + vsig.str + "}"}, nil
|
||||
}
|
||||
return Signature{}, fmt.Errorf("can't infer type for %q", n.String())
|
||||
}
|
||||
|
||||
func (n dictNode) String() string {
|
||||
s := "{"
|
||||
for i, v := range n.children {
|
||||
s += v.key.String() + ": " + v.val.String()
|
||||
if i != len(n.children)-1 {
|
||||
s += ", "
|
||||
}
|
||||
}
|
||||
return s + "}"
|
||||
}
|
||||
|
||||
func (n dictNode) Sigs() sigSet {
|
||||
r := sigSet{}
|
||||
for k := range n.kset {
|
||||
for v := range n.vset {
|
||||
sig := "a{" + k.str + v.str + "}"
|
||||
r[Signature{sig}] = true
|
||||
}
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func (n dictNode) Value(sig Signature) (interface{}, error) {
|
||||
set := n.Sigs()
|
||||
if set.Empty() {
|
||||
// no type information -> empty dict
|
||||
return reflect.MakeMap(typeFor(sig.str)).Interface(), nil
|
||||
}
|
||||
if !set[sig] {
|
||||
return nil, varTypeError{n.String(), sig}
|
||||
}
|
||||
m := reflect.MakeMap(typeFor(sig.str))
|
||||
ksig := Signature{sig.str[2:3]}
|
||||
vsig := Signature{sig.str[3 : len(sig.str)-1]}
|
||||
for _, v := range n.children {
|
||||
kv, err := v.key.Value(ksig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vv, err := v.val.Value(vsig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv))
|
||||
}
|
||||
return m.Interface(), nil
|
||||
}
|
||||
|
||||
func varMakeDictNode(p *varParser, sig Signature) (varNode, error) {
|
||||
var n dictNode
|
||||
|
||||
if sig.str != "" {
|
||||
if len(sig.str) < 5 {
|
||||
return nil, fmt.Errorf("invalid signature %q for dict type", sig)
|
||||
}
|
||||
ksig := Signature{string(sig.str[2])}
|
||||
vsig := Signature{sig.str[3 : len(sig.str)-1]}
|
||||
n.kset = sigSet{ksig: true}
|
||||
n.vset = sigSet{vsig: true}
|
||||
}
|
||||
if t := p.next(); t.typ == tokDictEnd {
|
||||
return n, nil
|
||||
} else {
|
||||
p.backup()
|
||||
}
|
||||
Loop:
|
||||
for {
|
||||
t := p.next()
|
||||
switch t.typ {
|
||||
case tokEOF:
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
case tokError:
|
||||
return nil, errors.New(t.val)
|
||||
}
|
||||
p.backup()
|
||||
kn, err := varMakeNode(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if kset := kn.Sigs(); !kset.Empty() {
|
||||
if n.kset.Empty() {
|
||||
n.kset = kset
|
||||
} else {
|
||||
n.kset = kset.Intersect(n.kset)
|
||||
if n.kset.Empty() {
|
||||
return nil, fmt.Errorf("can't parse %q with given type information", kn.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
t = p.next()
|
||||
switch t.typ {
|
||||
case tokEOF:
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
case tokError:
|
||||
return nil, errors.New(t.val)
|
||||
case tokColon:
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected %q", t.val)
|
||||
}
|
||||
t = p.next()
|
||||
switch t.typ {
|
||||
case tokEOF:
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
case tokError:
|
||||
return nil, errors.New(t.val)
|
||||
}
|
||||
p.backup()
|
||||
vn, err := varMakeNode(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if vset := vn.Sigs(); !vset.Empty() {
|
||||
if n.vset.Empty() {
|
||||
n.vset = vset
|
||||
} else {
|
||||
n.vset = n.vset.Intersect(vset)
|
||||
if n.vset.Empty() {
|
||||
return nil, fmt.Errorf("can't parse %q with given type information", vn.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
n.children = append(n.children, dictEntry{kn, vn})
|
||||
t = p.next()
|
||||
switch t.typ {
|
||||
case tokEOF:
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
case tokError:
|
||||
return nil, errors.New(t.val)
|
||||
case tokDictEnd:
|
||||
break Loop
|
||||
case tokComma:
|
||||
continue
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected %q", t.val)
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
type byteStringNode []byte

var byteStringSet = sigSet{
	Signature{"ay"}: true,
}

func (byteStringNode) Infer() (Signature, error) {
	return Signature{"ay"}, nil
}

func (b byteStringNode) String() string {
	return string(b)
}

func (b byteStringNode) Sigs() sigSet {
	return byteStringSet
}

func (b byteStringNode) Value(sig Signature) (interface{}, error) {
	if sig.str != "ay" {
		return nil, varTypeError{b.String(), sig}
	}
	return []byte(b), nil
}
|
||||
|
||||
func varParseByteString(s string) ([]byte, error) {
	// quotes and b at start are guaranteed to be there
	b := make([]byte, 0, 1)
	s = s[2 : len(s)-1]
	for len(s) != 0 {
		c := s[0]
		s = s[1:]
		if c != '\\' {
			b = append(b, c)
			continue
		}
		c = s[0]
		s = s[1:]
		switch c {
		case 'a':
			b = append(b, 0x7)
		case 'b':
			b = append(b, 0x8)
		case 'f':
			b = append(b, 0xc)
		case 'n':
			b = append(b, '\n')
		case 'r':
			b = append(b, '\r')
		case 't':
			b = append(b, '\t')
		case 'x':
			if len(s) < 2 {
				return nil, errors.New("short escape")
			}
			n, err := strconv.ParseUint(s[:2], 16, 8)
			if err != nil {
				return nil, err
			}
			b = append(b, byte(n))
			s = s[2:]
		case '0':
			if len(s) < 3 {
				return nil, errors.New("short escape")
			}
			n, err := strconv.ParseUint(s[:3], 8, 8)
			if err != nil {
				return nil, err
			}
			b = append(b, byte(n))
			s = s[3:]
		default:
			b = append(b, c)
		}
	}
	return append(b, 0), nil
}

func varInfer(n varNode) (Signature, error) {
	if sig, ok := n.Sigs().Single(); ok {
		return sig, nil
	}
	return n.Infer()
}
|
|
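The parser deleted above is the machinery behind godbus/dbus's textual variant support. A rough usage sketch, assuming the github.com/godbus/dbus import path of that era and its exported ParseVariant/MakeVariant/Variant API; the literal and the inferred a{sv} signature are illustrative only:

package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	// An empty signature asks the parser to infer one; here it should infer a{sv}.
	v, err := dbus.ParseVariant("{'answer': <42>, 'name': <'nomad'>}", dbus.Signature{})
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Signature().String(), v.Value())
}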
@ -1,31 +0,0 @@
|
|||
Go support for Protocol Buffers - Google's data interchange format
|
||||
|
||||
Copyright 2010 The Go Authors. All rights reserved.
|
||||
https://github.com/golang/protobuf
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
@ -1,43 +0,0 @@
|
|||
# Go support for Protocol Buffers - Google's data interchange format
|
||||
#
|
||||
# Copyright 2010 The Go Authors. All rights reserved.
|
||||
# https://github.com/golang/protobuf
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
install:
	go install

test: install generate-test-pbs
	go test

generate-test-pbs:
	make install
	make -C testdata
	protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
	make
|
|
@ -1,223 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer deep copy and merge.
|
||||
// TODO: RawMessage.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Clone returns a deep copy of a protocol buffer.
|
||||
func Clone(pb Message) Message {
|
||||
in := reflect.ValueOf(pb)
|
||||
if in.IsNil() {
|
||||
return pb
|
||||
}
|
||||
|
||||
out := reflect.New(in.Type().Elem())
|
||||
// out is empty so a merge is a deep copy.
|
||||
mergeStruct(out.Elem(), in.Elem())
|
||||
return out.Interface().(Message)
|
||||
}
|
||||
|
||||
// Merge merges src into dst.
|
||||
// Required and optional fields that are set in src will be set to that value in dst.
|
||||
// Elements of repeated fields will be appended.
|
||||
// Merge panics if src and dst are not the same type, or if dst is nil.
|
||||
func Merge(dst, src Message) {
|
||||
in := reflect.ValueOf(src)
|
||||
out := reflect.ValueOf(dst)
|
||||
if out.IsNil() {
|
||||
panic("proto: nil destination")
|
||||
}
|
||||
if in.Type() != out.Type() {
|
||||
// Explicit test prior to mergeStruct so that mistyped nils will fail
|
||||
panic("proto: type mismatch")
|
||||
}
|
||||
if in.IsNil() {
|
||||
// Merging nil into non-nil is a quiet no-op
|
||||
return
|
||||
}
|
||||
mergeStruct(out.Elem(), in.Elem())
|
||||
}
|
||||
|
||||
func mergeStruct(out, in reflect.Value) {
|
||||
sprop := GetProperties(in.Type())
|
||||
for i := 0; i < in.NumField(); i++ {
|
||||
f := in.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
|
||||
}
|
||||
|
||||
if emIn, ok := in.Addr().Interface().(extendableProto); ok {
|
||||
emOut := out.Addr().Interface().(extendableProto)
|
||||
mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
|
||||
}
|
||||
|
||||
uf := in.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return
|
||||
}
|
||||
uin := uf.Bytes()
|
||||
if len(uin) > 0 {
|
||||
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
|
||||
}
|
||||
}
|
||||
|
||||
// mergeAny performs a merge between two values of the same type.
|
||||
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
|
||||
// prop is set if this is a struct field (it may be nil).
|
||||
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
|
||||
if in.Type() == protoMessageType {
|
||||
if !in.IsNil() {
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
|
||||
} else {
|
||||
Merge(out.Interface().(Message), in.Interface().(Message))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
switch in.Kind() {
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
||||
if !viaPtr && isProto3Zero(in) {
|
||||
return
|
||||
}
|
||||
out.Set(in)
|
||||
case reflect.Interface:
|
||||
// Probably a oneof field; copy non-nil values.
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
// Allocate destination if it is not set, or set to a different type.
|
||||
// Otherwise we will merge as normal.
|
||||
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
|
||||
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
|
||||
}
|
||||
mergeAny(out.Elem(), in.Elem(), false, nil)
|
||||
case reflect.Map:
|
||||
if in.Len() == 0 {
|
||||
return
|
||||
}
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeMap(in.Type()))
|
||||
}
|
||||
// For maps with value types of *T or []byte we need to deep copy each value.
|
||||
elemKind := in.Type().Elem().Kind()
|
||||
for _, key := range in.MapKeys() {
|
||||
var val reflect.Value
|
||||
switch elemKind {
|
||||
case reflect.Ptr:
|
||||
val = reflect.New(in.Type().Elem().Elem())
|
||||
mergeAny(val, in.MapIndex(key), false, nil)
|
||||
case reflect.Slice:
|
||||
val = in.MapIndex(key)
|
||||
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
|
||||
default:
|
||||
val = in.MapIndex(key)
|
||||
}
|
||||
out.SetMapIndex(key, val)
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.New(in.Elem().Type()))
|
||||
}
|
||||
mergeAny(out.Elem(), in.Elem(), true, nil)
|
||||
case reflect.Slice:
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
if in.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// []byte is a scalar bytes field, not a repeated field.
|
||||
|
||||
// Edge case: if this is in a proto3 message, a zero length
|
||||
// bytes field is considered the zero value, and should not
|
||||
// be merged.
|
||||
if prop != nil && prop.proto3 && in.Len() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Make a deep copy.
|
||||
// Append to []byte{} instead of []byte(nil) so that we never end up
|
||||
// with a nil result.
|
||||
out.SetBytes(append([]byte{}, in.Bytes()...))
|
||||
return
|
||||
}
|
||||
n := in.Len()
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeSlice(in.Type(), 0, n))
|
||||
}
|
||||
switch in.Type().Elem().Kind() {
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
||||
out.Set(reflect.AppendSlice(out, in))
|
||||
default:
|
||||
for i := 0; i < n; i++ {
|
||||
x := reflect.Indirect(reflect.New(in.Type().Elem()))
|
||||
mergeAny(x, in.Index(i), false, nil)
|
||||
out.Set(reflect.Append(out, x))
|
||||
}
|
||||
}
|
||||
case reflect.Struct:
|
||||
mergeStruct(out, in)
|
||||
default:
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to copy %v", in)
|
||||
}
|
||||
}
|
||||
|
||||
func mergeExtension(out, in map[int32]Extension) {
|
||||
for extNum, eIn := range in {
|
||||
eOut := Extension{desc: eIn.desc}
|
||||
if eIn.value != nil {
|
||||
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
|
||||
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
|
||||
eOut.value = v.Interface()
|
||||
}
|
||||
if eIn.enc != nil {
|
||||
eOut.enc = make([]byte, len(eIn.enc))
|
||||
copy(eOut.enc, eIn.enc)
|
||||
}
|
||||
|
||||
out[extNum] = eOut
|
||||
}
|
||||
}
|
|
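Taken together, Clone and Merge above implement deep copy by merging into a zero value. A minimal sketch of how they are typically called; pb.Task and its import path are hypothetical stand-ins for any generated message type, while the proto calls themselves are the ones defined in the file deleted here:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "example.com/demo/proto" // hypothetical generated package
)

func main() {
	src := &pb.Task{Name: proto.String("redis")} // pb.Task: stand-in generated type
	dst := proto.Clone(src).(*pb.Task)           // deep copy: merge into a fresh value
	proto.Merge(dst, src)                        // set fields copied over, repeated fields appended
	fmt.Println(proto.Equal(src, dst))           // true for this scalar-only message
}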
@ -1,867 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for decoding protocol buffer data to construct in-memory representations.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// errOverflow is returned when an integer is too large to be represented.
|
||||
var errOverflow = errors.New("proto: integer overflow")
|
||||
|
||||
// ErrInternalBadWireType is returned by generated code when an incorrect
|
||||
// wire type is encountered. It does not get returned to user code.
|
||||
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
|
||||
|
||||
// The fundamental decoders that interpret bytes on the wire.
|
||||
// Those that take integer types all return uint64 and are
|
||||
// therefore of type valueDecoder.
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the slice.
|
||||
// It returns the integer and the number of bytes consumed, or
|
||||
// zero if there is not enough.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func DecodeVarint(buf []byte) (x uint64, n int) {
|
||||
// x, n already 0
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if n >= len(buf) {
|
||||
return 0, 0
|
||||
}
|
||||
b := uint64(buf[n])
|
||||
n++
|
||||
x |= (b & 0x7F) << shift
|
||||
if (b & 0x80) == 0 {
|
||||
return x, n
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
|
||||
i := p.index
|
||||
l := len(p.buf)
|
||||
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if i >= l {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
b := p.buf[i]
|
||||
i++
|
||||
x |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
p.index = i
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
err = errOverflow
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed64, sfixed64, and double protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 8
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-8])
|
||||
x |= uint64(p.buf[i-7]) << 8
|
||||
x |= uint64(p.buf[i-6]) << 16
|
||||
x |= uint64(p.buf[i-5]) << 24
|
||||
x |= uint64(p.buf[i-4]) << 32
|
||||
x |= uint64(p.buf[i-3]) << 40
|
||||
x |= uint64(p.buf[i-2]) << 48
|
||||
x |= uint64(p.buf[i-1]) << 56
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeFixed32 reads a 32-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed32, sfixed32, and float protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 4
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-4])
|
||||
x |= uint64(p.buf[i-3]) << 8
|
||||
x |= uint64(p.buf[i-2]) << 16
|
||||
x |= uint64(p.buf[i-1]) << 24
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint64 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint32 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
|
||||
return
|
||||
}
|
||||
|
||||
// These are not ValueDecoders: they produce an array of bytes or a string.
|
||||
// bytes, embedded messages
|
||||
|
||||
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
|
||||
// This is the format used for the bytes protocol buffer
|
||||
// type and for embedded messages.
|
||||
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
|
||||
n, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nb := int(n)
|
||||
if nb < 0 {
|
||||
return nil, fmt.Errorf("proto: bad byte length %d", nb)
|
||||
}
|
||||
end := p.index + nb
|
||||
if end < p.index || end > len(p.buf) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
if !alloc {
|
||||
// todo: check if can get more uses of alloc=false
|
||||
buf = p.buf[p.index:end]
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
buf = make([]byte, nb)
|
||||
copy(buf, p.buf[p.index:])
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeStringBytes reads an encoded string from the Buffer.
|
||||
// This is the format used for the proto2 string type.
|
||||
func (p *Buffer) DecodeStringBytes() (s string, err error) {
|
||||
buf, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
|
||||
// If the protocol buffer has extensions, and the field matches, add it as an extension.
|
||||
// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
|
||||
func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
|
||||
oi := o.index
|
||||
|
||||
err := o.skip(t, tag, wire)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !unrecField.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
ptr := structPointer_Bytes(base, unrecField)
|
||||
|
||||
// Add the skipped field to struct field
|
||||
obuf := o.buf
|
||||
|
||||
o.buf = *ptr
|
||||
o.EncodeVarint(uint64(tag<<3 | wire))
|
||||
*ptr = append(o.buf, obuf[oi:o.index]...)
|
||||
|
||||
o.buf = obuf
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
|
||||
func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
|
||||
|
||||
var u uint64
|
||||
var err error
|
||||
|
||||
switch wire {
|
||||
case WireVarint:
|
||||
_, err = o.DecodeVarint()
|
||||
case WireFixed64:
|
||||
_, err = o.DecodeFixed64()
|
||||
case WireBytes:
|
||||
_, err = o.DecodeRawBytes(false)
|
||||
case WireFixed32:
|
||||
_, err = o.DecodeFixed32()
|
||||
case WireStartGroup:
|
||||
for {
|
||||
u, err = o.DecodeVarint()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
fwire := int(u & 0x7)
|
||||
if fwire == WireEndGroup {
|
||||
break
|
||||
}
|
||||
ftag := int(u >> 3)
|
||||
err = o.skip(t, ftag, fwire)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The method should reset the receiver before
|
||||
// decoding starts. The argument points to data that may be
|
||||
// overwritten, so implementations should not keep references to the
|
||||
// buffer.
|
||||
type Unmarshaler interface {
|
||||
Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in buf and places the
|
||||
// decoded result in pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// Unmarshal resets pb before starting to unmarshal, so any
|
||||
// existing data in pb is always removed. Use UnmarshalMerge
|
||||
// to preserve and append to existing data.
|
||||
func Unmarshal(buf []byte, pb Message) error {
|
||||
pb.Reset()
|
||||
return UnmarshalMerge(buf, pb)
|
||||
}
|
||||
|
||||
// UnmarshalMerge parses the protocol buffer representation in buf and
|
||||
// writes the decoded result to pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// UnmarshalMerge merges into existing data in pb.
|
||||
// Most code should use Unmarshal instead.
|
||||
func UnmarshalMerge(buf []byte, pb Message) error {
|
||||
// If the object can unmarshal itself, let it.
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// DecodeMessage reads a count-delimited message from the Buffer.
|
||||
func (p *Buffer) DecodeMessage(pb Message) error {
|
||||
enc, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return NewBuffer(enc).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// DecodeGroup reads a tag-delimited group from the Buffer.
|
||||
func (p *Buffer) DecodeGroup(pb Message) error {
|
||||
typ, base, err := getbase(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in the
|
||||
// Buffer and places the decoded result in pb. If the struct
|
||||
// underlying pb does not match the data in the buffer, the results can be
|
||||
// unpredictable.
|
||||
func (p *Buffer) Unmarshal(pb Message) error {
|
||||
// If the object can unmarshal itself, let it.
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
err := u.Unmarshal(p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
|
||||
typ, base, err := getbase(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
|
||||
|
||||
if collectStats {
|
||||
stats.Decode++
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// unmarshalType does the work of unmarshaling a structure.
|
||||
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
|
||||
var state errorState
|
||||
required, reqFields := prop.reqCount, uint64(0)
|
||||
|
||||
var err error
|
||||
for err == nil && o.index < len(o.buf) {
|
||||
oi := o.index
|
||||
var u uint64
|
||||
u, err = o.DecodeVarint()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
wire := int(u & 0x7)
|
||||
if wire == WireEndGroup {
|
||||
if is_group {
|
||||
return nil // input is satisfied
|
||||
}
|
||||
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
|
||||
}
|
||||
tag := int(u >> 3)
|
||||
if tag <= 0 {
|
||||
return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
|
||||
}
|
||||
fieldnum, ok := prop.decoderTags.get(tag)
|
||||
if !ok {
|
||||
// Maybe it's an extension?
|
||||
if prop.extendable {
|
||||
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
|
||||
if err = o.skip(st, tag, wire); err == nil {
|
||||
ext := e.ExtensionMap()[int32(tag)] // may be missing
|
||||
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
|
||||
e.ExtensionMap()[int32(tag)] = ext
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Maybe it's a oneof?
|
||||
if prop.oneofUnmarshaler != nil {
|
||||
m := structPointer_Interface(base, st).(Message)
|
||||
// First return value indicates whether tag is a oneof field.
|
||||
ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
|
||||
if err == ErrInternalBadWireType {
|
||||
// Map the error to something more descriptive.
|
||||
// Do the formatting here to save generated code space.
|
||||
err = fmt.Errorf("bad wiretype for oneof field in %T", m)
|
||||
}
|
||||
if ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
|
||||
continue
|
||||
}
|
||||
p := prop.Prop[fieldnum]
|
||||
|
||||
if p.dec == nil {
|
||||
fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
|
||||
continue
|
||||
}
|
||||
dec := p.dec
|
||||
if wire != WireStartGroup && wire != p.WireType {
|
||||
if wire == WireBytes && p.packedDec != nil {
|
||||
// a packable field
|
||||
dec = p.packedDec
|
||||
} else {
|
||||
err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
|
||||
continue
|
||||
}
|
||||
}
|
||||
decErr := dec(o, p, base)
|
||||
if decErr != nil && !state.shouldContinue(decErr, p) {
|
||||
err = decErr
|
||||
}
|
||||
if err == nil && p.Required {
|
||||
// Successfully decoded a required field.
|
||||
if tag <= 64 {
|
||||
// use bitmap for fields 1-64 to catch field reuse.
|
||||
var mask uint64 = 1 << uint64(tag-1)
|
||||
if reqFields&mask == 0 {
|
||||
// new required field
|
||||
reqFields |= mask
|
||||
required--
|
||||
}
|
||||
} else {
|
||||
// This is imprecise. It can be fooled by a required field
|
||||
// with a tag > 64 that is encoded twice; that's very rare.
|
||||
// A fully correct implementation would require allocating
|
||||
// a data structure, which we would like to avoid.
|
||||
required--
|
||||
}
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
if is_group {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if state.err != nil {
|
||||
return state.err
|
||||
}
|
||||
if required > 0 {
|
||||
// Not enough information to determine the exact field. If we use extra
|
||||
// CPU, we could determine the field only if the missing required field
|
||||
// has a tag <= 64 and we check reqFields.
|
||||
return &RequiredNotSetError{"{Unknown}"}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Individual type decoders
|
||||
// For each,
|
||||
// u is the decoded value,
|
||||
// v is a pointer to the field (pointer) in the struct
|
||||
|
||||
// Sizes of the pools to allocate inside the Buffer.
|
||||
// The goal is modest amortization and allocation
|
||||
// on at least 16-byte boundaries.
|
||||
const (
|
||||
boolPoolSize = 16
|
||||
uint32PoolSize = 8
|
||||
uint64PoolSize = 4
|
||||
)
|
||||
|
||||
// Decode a bool.
|
||||
func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(o.bools) == 0 {
|
||||
o.bools = make([]bool, boolPoolSize)
|
||||
}
|
||||
o.bools[0] = u != 0
|
||||
*structPointer_Bool(base, p.field) = &o.bools[0]
|
||||
o.bools = o.bools[1:]
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*structPointer_BoolVal(base, p.field) = u != 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode an int32.
|
||||
func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode an int64.
|
||||
func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word64_Set(structPointer_Word64(base, p.field), o, u)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a string.
|
||||
func (o *Buffer) dec_string(p *Properties, base structPointer) error {
|
||||
s, err := o.DecodeStringBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*structPointer_String(base, p.field) = &s
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
|
||||
s, err := o.DecodeStringBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*structPointer_StringVal(base, p.field) = s
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of bytes ([]byte).
|
||||
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
|
||||
b, err := o.DecodeRawBytes(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*structPointer_Bytes(base, p.field) = b
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of bools ([]bool).
|
||||
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v := structPointer_BoolSlice(base, p.field)
|
||||
*v = append(*v, u != 0)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of bools ([]bool) in packed format.
|
||||
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
|
||||
v := structPointer_BoolSlice(base, p.field)
|
||||
|
||||
nn, err := o.DecodeVarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nb := int(nn) // number of bytes of encoded bools
|
||||
fin := o.index + nb
|
||||
if fin < o.index {
|
||||
return errOverflow
|
||||
}
|
||||
|
||||
y := *v
|
||||
for o.index < fin {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
y = append(y, u != 0)
|
||||
}
|
||||
|
||||
*v = y
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int32s ([]int32).
|
||||
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
structPointer_Word32Slice(base, p.field).Append(uint32(u))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int32s ([]int32) in packed format.
|
||||
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
|
||||
v := structPointer_Word32Slice(base, p.field)
|
||||
|
||||
nn, err := o.DecodeVarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nb := int(nn) // number of bytes of encoded int32s
|
||||
|
||||
fin := o.index + nb
|
||||
if fin < o.index {
|
||||
return errOverflow
|
||||
}
|
||||
for o.index < fin {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Append(uint32(u))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int64s ([]int64).
|
||||
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
structPointer_Word64Slice(base, p.field).Append(u)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int64s ([]int64) in packed format.
|
||||
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
|
||||
v := structPointer_Word64Slice(base, p.field)
|
||||
|
||||
nn, err := o.DecodeVarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nb := int(nn) // number of bytes of encoded int64s
|
||||
|
||||
fin := o.index + nb
|
||||
if fin < o.index {
|
||||
return errOverflow
|
||||
}
|
||||
for o.index < fin {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Append(u)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of strings ([]string).
|
||||
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
|
||||
s, err := o.DecodeStringBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v := structPointer_StringSlice(base, p.field)
|
||||
*v = append(*v, s)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of slice of bytes ([][]byte).
|
||||
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
|
||||
b, err := o.DecodeRawBytes(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v := structPointer_BytesSlice(base, p.field)
|
||||
*v = append(*v, b)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a map field.
|
||||
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
|
||||
raw, err := o.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
oi := o.index // index at the end of this map entry
|
||||
o.index -= len(raw) // move buffer back to start of map entry
|
||||
|
||||
mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
|
||||
if mptr.Elem().IsNil() {
|
||||
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
|
||||
}
|
||||
v := mptr.Elem() // map[K]V
|
||||
|
||||
// Prepare addressable doubly-indirect placeholders for the key and value types.
|
||||
// See enc_new_map for why.
|
||||
keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
|
||||
keybase := toStructPointer(keyptr.Addr()) // **K
|
||||
|
||||
var valbase structPointer
|
||||
var valptr reflect.Value
|
||||
switch p.mtype.Elem().Kind() {
|
||||
case reflect.Slice:
|
||||
// []byte
|
||||
var dummy []byte
|
||||
valptr = reflect.ValueOf(&dummy) // *[]byte
|
||||
valbase = toStructPointer(valptr) // *[]byte
|
||||
case reflect.Ptr:
|
||||
// message; valptr is **Msg; need to allocate the intermediate pointer
|
||||
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
|
||||
valptr.Set(reflect.New(valptr.Type().Elem()))
|
||||
valbase = toStructPointer(valptr)
|
||||
default:
|
||||
// everything else
|
||||
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
|
||||
valbase = toStructPointer(valptr.Addr()) // **V
|
||||
}
|
||||
|
||||
// Decode.
|
||||
// This parses a restricted wire format, namely the encoding of a message
|
||||
// with two fields. See enc_new_map for the format.
|
||||
for o.index < oi {
|
||||
// tagcode for key and value properties are always a single byte
|
||||
// because they have tags 1 and 2.
|
||||
tagcode := o.buf[o.index]
|
||||
o.index++
|
||||
switch tagcode {
|
||||
case p.mkeyprop.tagcode[0]:
|
||||
if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
|
||||
return err
|
||||
}
|
||||
case p.mvalprop.tagcode[0]:
|
||||
if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
// TODO: Should we silently skip this instead?
|
||||
return fmt.Errorf("proto: bad map data tag %d", raw[0])
|
||||
}
|
||||
}
|
||||
keyelem, valelem := keyptr.Elem(), valptr.Elem()
|
||||
if !keyelem.IsValid() || !valelem.IsValid() {
|
||||
// We did not decode the key or the value in the map entry.
|
||||
// Either way, it's an invalid map entry.
|
||||
return fmt.Errorf("proto: bad map data: missing key/val")
|
||||
}
|
||||
|
||||
v.SetMapIndex(keyelem, valelem)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a group.
|
||||
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
|
||||
bas := structPointer_GetStructPointer(base, p.field)
|
||||
if structPointer_IsNil(bas) {
|
||||
// allocate new nested message
|
||||
bas = toStructPointer(reflect.New(p.stype))
|
||||
structPointer_SetStructPointer(base, p.field, bas)
|
||||
}
|
||||
return o.unmarshalType(p.stype, p.sprop, true, bas)
|
||||
}
|
||||
|
||||
// Decode an embedded message.
|
||||
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
|
||||
raw, e := o.DecodeRawBytes(false)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
|
||||
bas := structPointer_GetStructPointer(base, p.field)
|
||||
if structPointer_IsNil(bas) {
|
||||
// allocate new nested message
|
||||
bas = toStructPointer(reflect.New(p.stype))
|
||||
structPointer_SetStructPointer(base, p.field, bas)
|
||||
}
|
||||
|
||||
// If the object can unmarshal itself, let it.
|
||||
if p.isUnmarshaler {
|
||||
iv := structPointer_Interface(bas, p.stype)
|
||||
return iv.(Unmarshaler).Unmarshal(raw)
|
||||
}
|
||||
|
||||
obuf := o.buf
|
||||
oi := o.index
|
||||
o.buf = raw
|
||||
o.index = 0
|
||||
|
||||
err = o.unmarshalType(p.stype, p.sprop, false, bas)
|
||||
o.buf = obuf
|
||||
o.index = oi
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode a slice of embedded messages.
|
||||
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
|
||||
return o.dec_slice_struct(p, false, base)
|
||||
}
|
||||
|
||||
// Decode a slice of embedded groups.
|
||||
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
|
||||
return o.dec_slice_struct(p, true, base)
|
||||
}
|
||||
|
||||
// Decode a slice of structs ([]*struct).
|
||||
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
|
||||
v := reflect.New(p.stype)
|
||||
bas := toStructPointer(v)
|
||||
structPointer_StructPointerSlice(base, p.field).Append(bas)
|
||||
|
||||
if is_group {
|
||||
err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
|
||||
return err
|
||||
}
|
||||
|
||||
raw, err := o.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the object can unmarshal itself, let it.
|
||||
if p.isUnmarshaler {
|
||||
iv := v.Interface()
|
||||
return iv.(Unmarshaler).Unmarshal(raw)
|
||||
}
|
||||
|
||||
obuf := o.buf
|
||||
oi := o.index
|
||||
o.buf = raw
|
||||
o.index = 0
|
||||
|
||||
err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
|
||||
|
||||
o.buf = obuf
|
||||
o.index = oi
|
||||
|
||||
return err
|
||||
}
|
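For reference, the low-level decoders above are also exported; a small sketch of DecodeVarint and of the zigzag mapping used by DecodeZigzag64 (the input bytes are illustrative):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	buf := []byte{0xAC, 0x02} // varint encoding of 300: 0x2C with continuation bit, then 0x02
	x, n := proto.DecodeVarint(buf)
	fmt.Println(x, n) // 300 2

	// Zigzag mapping as in DecodeZigzag64 above: encoded 1 decodes to -1.
	var u uint64 = 1
	fmt.Println(int64((u >> 1) ^ uint64((int64(u&1)<<63)>>63))) // -1
}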
File diff suppressed because it is too large
@ -1,276 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer comparison.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
/*
|
||||
Equal returns true iff protocol buffers a and b are equal.
|
||||
The arguments must both be pointers to protocol buffer structs.
|
||||
|
||||
Equality is defined in this way:
|
||||
- Two messages are equal iff they are the same type,
|
||||
corresponding fields are equal, unknown field sets
|
||||
are equal, and extension sets are equal.
|
||||
- Two set scalar fields are equal iff their values are equal.
|
||||
If the fields are of a floating-point type, remember that
|
||||
NaN != x for all x, including NaN. If the message is defined
|
||||
in a proto3 .proto file, fields are not "set"; specifically,
|
||||
zero length proto3 "bytes" fields are equal (nil == {}).
|
||||
- Two repeated fields are equal iff their lengths are the same,
|
||||
and their corresponding elements are equal (a "bytes" field,
|
||||
although represented by []byte, is not a repeated field)
|
||||
- Two unset fields are equal.
|
||||
- Two unknown field sets are equal if their current
|
||||
encoded state is equal.
|
||||
- Two extension sets are equal iff they have corresponding
|
||||
elements that are pairwise equal.
|
||||
- Every other combination of things is not equal.
|
||||
|
||||
The return value is undefined if a and b are not protocol buffers.
|
||||
*/
|
||||
func Equal(a, b Message) bool {
|
||||
if a == nil || b == nil {
|
||||
return a == b
|
||||
}
|
||||
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
|
||||
if v1.Type() != v2.Type() {
|
||||
return false
|
||||
}
|
||||
if v1.Kind() == reflect.Ptr {
|
||||
if v1.IsNil() {
|
||||
return v2.IsNil()
|
||||
}
|
||||
if v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
v1, v2 = v1.Elem(), v2.Elem()
|
||||
}
|
||||
if v1.Kind() != reflect.Struct {
|
||||
return false
|
||||
}
|
||||
return equalStruct(v1, v2)
|
||||
}
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
func equalStruct(v1, v2 reflect.Value) bool {
|
||||
sprop := GetProperties(v1.Type())
|
||||
for i := 0; i < v1.NumField(); i++ {
|
||||
f := v1.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
f1, f2 := v1.Field(i), v2.Field(i)
|
||||
if f.Type.Kind() == reflect.Ptr {
|
||||
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
|
||||
// both unset
|
||||
continue
|
||||
} else if n1 != n2 {
|
||||
// set/unset mismatch
|
||||
return false
|
||||
}
|
||||
b1, ok := f1.Interface().(raw)
|
||||
if ok {
|
||||
b2 := f2.Interface().(raw)
|
||||
// RawMessage
|
||||
if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
|
||||
return false
|
||||
}
|
||||
continue
|
||||
}
|
||||
f1, f2 = f1.Elem(), f2.Elem()
|
||||
}
|
||||
if !equalAny(f1, f2, sprop.Prop[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_extensions")
|
||||
if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
uf := v1.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return true
|
||||
}
|
||||
|
||||
u1 := uf.Bytes()
|
||||
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
|
||||
if !bytes.Equal(u1, u2) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
// prop may be nil.
|
||||
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
|
||||
if v1.Type() == protoMessageType {
|
||||
m1, _ := v1.Interface().(Message)
|
||||
m2, _ := v2.Interface().(Message)
|
||||
return Equal(m1, m2)
|
||||
}
|
||||
switch v1.Kind() {
|
||||
case reflect.Bool:
|
||||
return v1.Bool() == v2.Bool()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v1.Float() == v2.Float()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v1.Int() == v2.Int()
|
||||
case reflect.Interface:
|
||||
// Probably a oneof field; compare the inner values.
|
||||
n1, n2 := v1.IsNil(), v2.IsNil()
|
||||
if n1 || n2 {
|
||||
return n1 == n2
|
||||
}
|
||||
e1, e2 := v1.Elem(), v2.Elem()
|
||||
if e1.Type() != e2.Type() {
|
||||
return false
|
||||
}
|
||||
return equalAny(e1, e2, nil)
|
||||
case reflect.Map:
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for _, key := range v1.MapKeys() {
|
||||
val2 := v2.MapIndex(key)
|
||||
if !val2.IsValid() {
|
||||
// This key was not found in the second map.
|
||||
return false
|
||||
}
|
||||
if !equalAny(v1.MapIndex(key), val2, nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.Ptr:
|
||||
return equalAny(v1.Elem(), v2.Elem(), prop)
|
||||
case reflect.Slice:
|
||||
if v1.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// short circuit: []byte
|
||||
|
||||
// Edge case: if this is in a proto3 message, a zero length
|
||||
// bytes field is considered the zero value.
|
||||
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
|
||||
}
|
||||
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < v1.Len(); i++ {
|
||||
if !equalAny(v1.Index(i), v2.Index(i), prop) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.String:
|
||||
return v1.Interface().(string) == v2.Interface().(string)
|
||||
case reflect.Struct:
|
||||
return equalStruct(v1, v2)
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v1.Uint() == v2.Uint()
|
||||
}
|
||||
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to compare %v", v1)
|
||||
return false
|
||||
}
|
||||
|
||||
// base is the struct type that the extensions are based on.
|
||||
// em1 and em2 are extension maps.
|
||||
func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
||||
if len(em1) != len(em2) {
|
||||
return false
|
||||
}
|
||||
|
||||
for extNum, e1 := range em1 {
|
||||
e2, ok := em2[extNum]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
m1, m2 := e1.value, e2.value
|
||||
|
||||
if m1 != nil && m2 != nil {
|
||||
// Both are unencoded.
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// At least one is encoded. To do a semantically correct comparison
|
||||
// we need to unmarshal them first.
|
||||
var desc *ExtensionDesc
|
||||
if m := extensionMaps[base]; m != nil {
|
||||
desc = m[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
|
||||
continue
|
||||
}
|
||||
var err error
|
||||
if m1 == nil {
|
||||
m1, err = decodeExtension(e1.enc, desc)
|
||||
}
|
||||
if m2 == nil && err == nil {
|
||||
m2, err = decodeExtension(e2.enc, desc)
|
||||
}
|
||||
if err != nil {
|
||||
// The encoded form is invalid.
|
||||
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
|
||||
return false
|
||||
}
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
|
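The Equal semantics documented above have one easy-to-miss consequence: a floating-point NaN makes a message unequal even to itself. A short sketch; pb.Sample and its import path are hypothetical, while proto.Float64, Clone and Equal come from the package shown here:

package main

import (
	"fmt"
	"math"

	"github.com/golang/protobuf/proto"

	pb "example.com/demo/proto" // hypothetical generated package
)

func main() {
	a := &pb.Sample{Value: proto.Float64(math.NaN())} // pb.Sample: stand-in generated type
	b := proto.Clone(a).(*pb.Sample)
	fmt.Println(proto.Equal(a, b)) // false: NaN != NaN, as the Equal doc above notes
	fmt.Println(proto.Equal(a, a)) // false as well, for the same reason
}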
@ -1,399 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Types and routines for supporting protocol buffer extensions.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
|
||||
var ErrMissingExtension = errors.New("proto: missing extension")
|
||||
|
||||
// ExtensionRange represents a range of message extensions for a protocol buffer.
|
||||
// Used in code generated by the protocol compiler.
|
||||
type ExtensionRange struct {
|
||||
Start, End int32 // both inclusive
|
||||
}
|
||||
|
||||
// extendableProto is an interface implemented by any protocol buffer that may be extended.
|
||||
type extendableProto interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
ExtensionMap() map[int32]Extension
|
||||
}
|
||||
|
||||
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
|
||||
|
||||
// ExtensionDesc represents an extension specification.
|
||||
// Used in generated code from the protocol compiler.
|
||||
type ExtensionDesc struct {
|
||||
ExtendedType Message // nil pointer to the type that is being extended
|
||||
ExtensionType interface{} // nil pointer to the extension type
|
||||
Field int32 // field number
|
||||
Name string // fully-qualified name of extension, for text formatting
|
||||
Tag string // protobuf tag style
|
||||
}
|
||||
|
||||
func (ed *ExtensionDesc) repeated() bool {
|
||||
t := reflect.TypeOf(ed.ExtensionType)
|
||||
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
|
||||
}
|
||||
|
||||
// Extension represents an extension in a message.
|
||||
type Extension struct {
|
||||
// When an extension is stored in a message using SetExtension
|
||||
// only desc and value are set. When the message is marshaled
|
||||
// enc will be set to the encoded form of the message.
|
||||
//
|
||||
// When a message is unmarshaled and contains extensions, each
|
||||
// extension will have only enc set. When such an extension is
|
||||
// accessed using GetExtension (or GetExtensions) desc and value
|
||||
// will be set.
|
||||
desc *ExtensionDesc
|
||||
value interface{}
|
||||
enc []byte
|
||||
}
|
||||
|
||||
// SetRawExtension is for testing only.
|
||||
func SetRawExtension(base extendableProto, id int32, b []byte) {
|
||||
base.ExtensionMap()[id] = Extension{enc: b}
|
||||
}
|
||||
|
||||
// isExtensionField returns true iff the given field number is in an extension range.
|
||||
func isExtensionField(pb extendableProto, field int32) bool {
|
||||
for _, er := range pb.ExtensionRangeArray() {
|
||||
if er.Start <= field && field <= er.End {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// checkExtensionTypes checks that the given extension is valid for pb.
|
||||
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
|
||||
// Check the extended type.
|
||||
if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
|
||||
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
|
||||
}
|
||||
// Check the range.
|
||||
if !isExtensionField(pb, extension.Field) {
|
||||
return errors.New("proto: bad extension number; not in declared ranges")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// extPropKey is sufficient to uniquely identify an extension.
|
||||
type extPropKey struct {
|
||||
base reflect.Type
|
||||
field int32
|
||||
}
|
||||
|
||||
var extProp = struct {
|
||||
sync.RWMutex
|
||||
m map[extPropKey]*Properties
|
||||
}{
|
||||
m: make(map[extPropKey]*Properties),
|
||||
}
|
||||
|
||||
func extensionProperties(ed *ExtensionDesc) *Properties {
|
||||
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
|
||||
|
||||
extProp.RLock()
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
extProp.RUnlock()
|
||||
return prop
|
||||
}
|
||||
extProp.RUnlock()
|
||||
|
||||
extProp.Lock()
|
||||
defer extProp.Unlock()
|
||||
// Check again.
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
return prop
|
||||
}
|
||||
|
||||
prop := new(Properties)
|
||||
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
|
||||
extProp.m[key] = prop
|
||||
return prop
|
||||
}
|
||||
|
||||
// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
|
||||
func encodeExtensionMap(m map[int32]Extension) error {
|
||||
for k, e := range m {
|
||||
if e.value == nil || e.desc == nil {
|
||||
// Extension is only in its encoded form.
|
||||
continue
|
||||
}
|
||||
|
||||
// We don't skip extensions that have an encoded form set,
|
||||
// because the extension value may have been mutated after
|
||||
// the last time this function was called.
|
||||
|
||||
et := reflect.TypeOf(e.desc.ExtensionType)
|
||||
props := extensionProperties(e.desc)
|
||||
|
||||
p := NewBuffer(nil)
|
||||
// If e.value has type T, the encoder expects a *struct{ X T }.
|
||||
// Pass a *T with a zero field and hope it all works out.
|
||||
x := reflect.New(et)
|
||||
x.Elem().Set(reflect.ValueOf(e.value))
|
||||
if err := props.enc(p, props, toStructPointer(x)); err != nil {
|
||||
return err
|
||||
}
|
||||
e.enc = p.buf
|
||||
m[k] = e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func sizeExtensionMap(m map[int32]Extension) (n int) {
|
||||
for _, e := range m {
|
||||
if e.value == nil || e.desc == nil {
|
||||
// Extension is only in its encoded form.
|
||||
n += len(e.enc)
|
||||
continue
|
||||
}
|
||||
|
||||
// We don't skip extensions that have an encoded form set,
|
||||
// because the extension value may have been mutated after
|
||||
// the last time this function was called.
|
||||
|
||||
et := reflect.TypeOf(e.desc.ExtensionType)
|
||||
props := extensionProperties(e.desc)
|
||||
|
||||
// If e.value has type T, the encoder expects a *struct{ X T }.
|
||||
// Pass a *T with a zero field and hope it all works out.
|
||||
x := reflect.New(et)
|
||||
x.Elem().Set(reflect.ValueOf(e.value))
|
||||
n += props.size(props, toStructPointer(x))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// HasExtension returns whether the given extension is present in pb.
|
||||
func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
_, ok := pb.ExtensionMap()[extension.Field]
|
||||
return ok
|
||||
}
|
||||
|
||||
// ClearExtension removes the given extension from pb.
|
||||
func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
delete(pb.ExtensionMap(), extension.Field)
|
||||
}
|
||||
|
||||
// GetExtension parses and returns the given extension of pb.
|
||||
// If the extension is not present and has no default value it returns ErrMissingExtension.
|
||||
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
|
||||
if err := checkExtensionTypes(pb, extension); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
emap := pb.ExtensionMap()
|
||||
e, ok := emap[extension.Field]
|
||||
if !ok {
|
||||
// defaultExtensionValue returns the default value or
|
||||
// ErrMissingExtension if there is no default.
|
||||
return defaultExtensionValue(extension)
|
||||
}
|
||||
|
||||
if e.value != nil {
|
||||
// Already decoded. Check the descriptor, though.
|
||||
if e.desc != extension {
|
||||
// This shouldn't happen. If it does, it means that
|
||||
// GetExtension was called twice with two different
|
||||
// descriptors with the same field number.
|
||||
return nil, errors.New("proto: descriptor conflict")
|
||||
}
|
||||
return e.value, nil
|
||||
}
|
||||
|
||||
v, err := decodeExtension(e.enc, extension)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remember the decoded version and drop the encoded version.
|
||||
// That way it is safe to mutate what we return.
|
||||
e.value = v
|
||||
e.desc = extension
|
||||
e.enc = nil
|
||||
emap[extension.Field] = e
|
||||
return e.value, nil
|
||||
}
|
||||
|
||||
// defaultExtensionValue returns the default value for extension.
|
||||
// If no default for an extension is defined ErrMissingExtension is returned.
|
||||
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
|
||||
t := reflect.TypeOf(extension.ExtensionType)
|
||||
props := extensionProperties(extension)
|
||||
|
||||
sf, _, err := fieldDefault(t, props)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if sf == nil || sf.value == nil {
|
||||
// There is no default value.
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
|
||||
if t.Kind() != reflect.Ptr {
|
||||
// We do not need to return a Ptr, we can directly return sf.value.
|
||||
return sf.value, nil
|
||||
}
|
||||
|
||||
// We need to return an interface{} that is a pointer to sf.value.
|
||||
value := reflect.New(t).Elem()
|
||||
value.Set(reflect.New(value.Type().Elem()))
|
||||
if sf.kind == reflect.Int32 {
|
||||
// We may have an int32 or an enum, but the underlying data is int32.
|
||||
// Since we can't set an int32 into a non int32 reflect.value directly
|
||||
// set it as a int32.
|
||||
value.Elem().SetInt(int64(sf.value.(int32)))
|
||||
} else {
|
||||
value.Elem().Set(reflect.ValueOf(sf.value))
|
||||
}
|
||||
return value.Interface(), nil
|
||||
}
|
||||
|
||||
// decodeExtension decodes an extension encoded in b.
|
||||
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
|
||||
o := NewBuffer(b)
|
||||
|
||||
t := reflect.TypeOf(extension.ExtensionType)
|
||||
|
||||
props := extensionProperties(extension)
|
||||
|
||||
// t is a pointer to a struct, pointer to basic type or a slice.
|
||||
// Allocate a "field" to store the pointer/slice itself; the
|
||||
// pointer/slice will be stored here. We pass
|
||||
// the address of this field to props.dec.
|
||||
// This passes a zero field and a *t and lets props.dec
|
||||
// interpret it as a *struct{ x t }.
|
||||
value := reflect.New(t).Elem()
|
||||
|
||||
for {
|
||||
// Discard wire type and field number varint. It isn't needed.
|
||||
if _, err := o.DecodeVarint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if o.index >= len(o.buf) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return value.Interface(), nil
|
||||
}
|
||||
|
||||
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
|
||||
// The returned slice has the same length as es; missing extensions will appear as nil elements.
|
||||
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
|
||||
epb, ok := pb.(extendableProto)
|
||||
if !ok {
|
||||
err = errors.New("proto: not an extendable proto")
|
||||
return
|
||||
}
|
||||
extensions = make([]interface{}, len(es))
|
||||
for i, e := range es {
|
||||
extensions[i], err = GetExtension(epb, e)
|
||||
if err == ErrMissingExtension {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetExtension sets the specified extension of pb to the specified value.
|
||||
func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
|
||||
if err := checkExtensionTypes(pb, extension); err != nil {
|
||||
return err
|
||||
}
|
||||
typ := reflect.TypeOf(extension.ExtensionType)
|
||||
if typ != reflect.TypeOf(value) {
|
||||
return errors.New("proto: bad extension value type")
|
||||
}
|
||||
// nil extension values need to be caught early, because the
|
||||
// encoder can't distinguish an ErrNil due to a nil extension
|
||||
// from an ErrNil due to a missing field. Extensions are
|
||||
// always optional, so the encoder would just swallow the error
|
||||
// and drop all the extensions from the encoded message.
|
||||
if reflect.ValueOf(value).IsNil() {
|
||||
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
|
||||
}
|
||||
|
||||
pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A global registry of extensions.
|
||||
// The generated code will register the generated descriptors by calling RegisterExtension.
|
||||
|
||||
var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
|
||||
|
||||
// RegisterExtension is called from the generated code.
|
||||
func RegisterExtension(desc *ExtensionDesc) {
|
||||
st := reflect.TypeOf(desc.ExtendedType).Elem()
|
||||
m := extensionMaps[st]
|
||||
if m == nil {
|
||||
m = make(map[int32]*ExtensionDesc)
|
||||
extensionMaps[st] = m
|
||||
}
|
||||
if _, ok := m[desc.Field]; ok {
|
||||
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
|
||||
}
|
||||
m[desc.Field] = desc
|
||||
}
|
||||
|
||||
// RegisteredExtensions returns a map of the registered extensions of a
|
||||
// protocol buffer struct, indexed by the extension number.
|
||||
// The argument pb should be a nil pointer to the struct type.
|
||||
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
|
||||
return extensionMaps[reflect.TypeOf(pb).Elem()]
|
||||
}
|
|
@ -1,894 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
/*
|
||||
Package proto converts data structures to and from the wire format of
|
||||
protocol buffers. It works in concert with the Go source code generated
|
||||
for .proto files by the protocol compiler.
|
||||
|
||||
A summary of the properties of the protocol buffer interface
|
||||
for a protocol buffer variable v:
|
||||
|
||||
- Names are turned from camel_case to CamelCase for export.
|
||||
- There are no methods on v to set fields; just treat
|
||||
them as structure fields.
|
||||
- There are getters that return a field's value if set,
|
||||
and return the field's default value if unset.
|
||||
The getters work even if the receiver is a nil message.
|
||||
- The zero value for a struct is its correct initialization state.
|
||||
All desired fields must be set before marshaling.
|
||||
- A Reset() method will restore a protobuf struct to its zero state.
|
||||
- Non-repeated fields are pointers to the values; nil means unset.
|
||||
That is, optional or required field int32 f becomes F *int32.
|
||||
- Repeated fields are slices.
|
||||
- Helper functions are available to aid the setting of fields.
|
||||
msg.Foo = proto.String("hello") // set field
|
||||
- Constants are defined to hold the default values of all fields that
|
||||
have them. They have the form Default_StructName_FieldName.
|
||||
Because the getter methods handle defaulted values,
|
||||
direct use of these constants should be rare.
|
||||
- Enums are given type names and maps from names to values.
|
||||
Enum values are prefixed by the enclosing message's name, or by the
|
||||
enum's type name if it is a top-level enum. Enum types have a String
|
||||
method, and a Enum method to assist in message construction.
|
||||
- Nested messages, groups and enums have type names prefixed with the name of
|
||||
the surrounding message type.
|
||||
- Extensions are given descriptor names that start with E_,
|
||||
followed by an underscore-delimited list of the nested messages
|
||||
that contain it (if any) followed by the CamelCased name of the
|
||||
extension field itself. HasExtension, ClearExtension, GetExtension
|
||||
and SetExtension are functions for manipulating extensions.
|
||||
- Oneof field sets are given a single field in their message,
|
||||
with distinguished wrapper types for each possible field value.
|
||||
- Marshal and Unmarshal are functions to encode and decode the wire format.
|
||||
|
||||
When the .proto file specifies `syntax="proto3"`, there are some differences:
|
||||
|
||||
- Non-repeated fields of non-message type are values instead of pointers.
|
||||
- Getters are only generated for message and oneof fields.
|
||||
- Enum types do not get an Enum method.
|
||||
|
||||
The simplest way to describe this is to see an example.
|
||||
Given file test.proto, containing
|
||||
|
||||
package example;
|
||||
|
||||
enum FOO { X = 17; }
|
||||
|
||||
message Test {
|
||||
required string label = 1;
|
||||
optional int32 type = 2 [default=77];
|
||||
repeated int64 reps = 3;
|
||||
optional group OptionalGroup = 4 {
|
||||
required string RequiredField = 5;
|
||||
}
|
||||
oneof union {
|
||||
int32 number = 6;
|
||||
string name = 7;
|
||||
}
|
||||
}
|
||||
|
||||
The resulting file, test.pb.go, is:
|
||||
|
||||
package example
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import math "math"
|
||||
|
||||
type FOO int32
|
||||
const (
|
||||
FOO_X FOO = 17
|
||||
)
|
||||
var FOO_name = map[int32]string{
|
||||
17: "X",
|
||||
}
|
||||
var FOO_value = map[string]int32{
|
||||
"X": 17,
|
||||
}
|
||||
|
||||
func (x FOO) Enum() *FOO {
|
||||
p := new(FOO)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
func (x FOO) String() string {
|
||||
return proto.EnumName(FOO_name, int32(x))
|
||||
}
|
||||
func (x *FOO) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(FOO_value, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = FOO(value)
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test struct {
|
||||
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
|
||||
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
|
||||
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
|
||||
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
|
||||
// Types that are valid to be assigned to Union:
|
||||
// *Test_Number
|
||||
// *Test_Name
|
||||
Union isTest_Union `protobuf_oneof:"union"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
func (m *Test) Reset() { *m = Test{} }
|
||||
func (m *Test) String() string { return proto.CompactTextString(m) }
|
||||
func (*Test) ProtoMessage() {}
|
||||
|
||||
type isTest_Union interface {
|
||||
isTest_Union()
|
||||
}
|
||||
|
||||
type Test_Number struct {
|
||||
Number int32 `protobuf:"varint,6,opt,name=number"`
|
||||
}
|
||||
type Test_Name struct {
|
||||
Name string `protobuf:"bytes,7,opt,name=name"`
|
||||
}
|
||||
|
||||
func (*Test_Number) isTest_Union() {}
|
||||
func (*Test_Name) isTest_Union() {}
|
||||
|
||||
func (m *Test) GetUnion() isTest_Union {
|
||||
if m != nil {
|
||||
return m.Union
|
||||
}
|
||||
return nil
|
||||
}
|
||||
const Default_Test_Type int32 = 77
|
||||
|
||||
func (m *Test) GetLabel() string {
|
||||
if m != nil && m.Label != nil {
|
||||
return *m.Label
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Test) GetType() int32 {
|
||||
if m != nil && m.Type != nil {
|
||||
return *m.Type
|
||||
}
|
||||
return Default_Test_Type
|
||||
}
|
||||
|
||||
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
|
||||
if m != nil {
|
||||
return m.Optionalgroup
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test_OptionalGroup struct {
|
||||
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
|
||||
}
|
||||
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
|
||||
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
|
||||
|
||||
func (m *Test_OptionalGroup) GetRequiredField() string {
|
||||
if m != nil && m.RequiredField != nil {
|
||||
return *m.RequiredField
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Test) GetNumber() int32 {
|
||||
if x, ok := m.GetUnion().(*Test_Number); ok {
|
||||
return x.Number
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Test) GetName() string {
|
||||
if x, ok := m.GetUnion().(*Test_Name); ok {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
|
||||
}
|
||||
|
||||
To create and play with a Test object:
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
pb "./example.pb"
|
||||
)
|
||||
|
||||
func main() {
|
||||
test := &pb.Test{
|
||||
Label: proto.String("hello"),
|
||||
Type: proto.Int32(17),
|
||||
Reps: []int64{1, 2, 3},
|
||||
Optionalgroup: &pb.Test_OptionalGroup{
|
||||
RequiredField: proto.String("good bye"),
|
||||
},
|
||||
Union: &pb.Test_Name{"fred"},
|
||||
}
|
||||
data, err := proto.Marshal(test)
|
||||
if err != nil {
|
||||
log.Fatal("marshaling error: ", err)
|
||||
}
|
||||
newTest := &pb.Test{}
|
||||
err = proto.Unmarshal(data, newTest)
|
||||
if err != nil {
|
||||
log.Fatal("unmarshaling error: ", err)
|
||||
}
|
||||
// Now test and newTest contain the same data.
|
||||
if test.GetLabel() != newTest.GetLabel() {
|
||||
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
|
||||
}
|
||||
// Use a type switch to determine which oneof was set.
|
||||
switch u := test.Union.(type) {
|
||||
case *pb.Test_Number: // u.Number contains the number.
|
||||
case *pb.Test_Name: // u.Name contains the string.
|
||||
}
|
||||
// etc.
|
||||
}
|
||||
*/
|
||||
package proto
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Message is implemented by generated protocol buffer messages.
|
||||
type Message interface {
|
||||
Reset()
|
||||
String() string
|
||||
ProtoMessage()
|
||||
}
|
||||
|
||||
// Stats records allocation details about the protocol buffer encoders
|
||||
// and decoders. Useful for tuning the library itself.
|
||||
type Stats struct {
|
||||
Emalloc uint64 // mallocs in encode
|
||||
Dmalloc uint64 // mallocs in decode
|
||||
Encode uint64 // number of encodes
|
||||
Decode uint64 // number of decodes
|
||||
Chit uint64 // number of cache hits
|
||||
Cmiss uint64 // number of cache misses
|
||||
Size uint64 // number of sizes
|
||||
}
|
||||
|
||||
// Set to true to enable stats collection.
|
||||
const collectStats = false
|
||||
|
||||
var stats Stats
|
||||
|
||||
// GetStats returns a copy of the global Stats structure.
|
||||
func GetStats() Stats { return stats }
|
||||
|
||||
// A Buffer is a buffer manager for marshaling and unmarshaling
|
||||
// protocol buffers. It may be reused between invocations to
|
||||
// reduce memory usage. It is not necessary to use a Buffer;
|
||||
// the global functions Marshal and Unmarshal create a
|
||||
// temporary Buffer and are fine for most applications.
|
||||
type Buffer struct {
|
||||
buf []byte // encode/decode byte stream
|
||||
index int // write point
|
||||
|
||||
// pools of basic types to amortize allocation.
|
||||
bools []bool
|
||||
uint32s []uint32
|
||||
uint64s []uint64
|
||||
|
||||
// extra pools, only used with pointer_reflect.go
|
||||
int32s []int32
|
||||
int64s []int64
|
||||
float32s []float32
|
||||
float64s []float64
|
||||
}
|
||||
|
||||
// NewBuffer allocates a new Buffer and initializes its internal data to
|
||||
// the contents of the argument slice.
|
||||
func NewBuffer(e []byte) *Buffer {
|
||||
return &Buffer{buf: e}
|
||||
}
|
||||
|
||||
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
|
||||
func (p *Buffer) Reset() {
|
||||
p.buf = p.buf[0:0] // for reading/writing
|
||||
p.index = 0 // for reading
|
||||
}
|
||||
|
||||
// SetBuf replaces the internal buffer with the slice,
|
||||
// ready for unmarshaling the contents of the slice.
|
||||
func (p *Buffer) SetBuf(s []byte) {
|
||||
p.buf = s
|
||||
p.index = 0
|
||||
}
|
||||
|
||||
// Bytes returns the contents of the Buffer.
|
||||
func (p *Buffer) Bytes() []byte { return p.buf }
|
||||
|
||||
/*
|
||||
* Helper routines for simplifying the creation of optional fields of basic type.
|
||||
*/
|
||||
|
||||
// Bool is a helper routine that allocates a new bool value
|
||||
// to store v and returns a pointer to it.
|
||||
func Bool(v bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int32 is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int32(v int32) *int32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it, but unlike Int32
|
||||
// its argument value is an int.
|
||||
func Int(v int) *int32 {
|
||||
p := new(int32)
|
||||
*p = int32(v)
|
||||
return p
|
||||
}
|
||||
|
||||
// Int64 is a helper routine that allocates a new int64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int64(v int64) *int64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float32 is a helper routine that allocates a new float32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float32(v float32) *float32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float64 is a helper routine that allocates a new float64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float64(v float64) *float64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint32 is a helper routine that allocates a new uint32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint32(v uint32) *uint32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint64 is a helper routine that allocates a new uint64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint64(v uint64) *uint64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// String is a helper routine that allocates a new string value
|
||||
// to store v and returns a pointer to it.
|
||||
func String(v string) *string {
|
||||
return &v
|
||||
}
|
||||
|
||||
// EnumName is a helper function to simplify printing protocol buffer enums
|
||||
// by name. Given an enum map and a value, it returns a useful string.
|
||||
func EnumName(m map[int32]string, v int32) string {
|
||||
s, ok := m[v]
|
||||
if ok {
|
||||
return s
|
||||
}
|
||||
return strconv.Itoa(int(v))
|
||||
}
|
||||
|
||||
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
|
||||
// from their JSON-encoded representation. Given a map from the enum's symbolic
|
||||
// names to its int values, and a byte buffer containing the JSON-encoded
|
||||
// value, it returns an int32 that can be cast to the enum type by the caller.
|
||||
//
|
||||
// The function can deal with both JSON representations, numeric and symbolic.
|
||||
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
|
||||
if data[0] == '"' {
|
||||
// New style: enums are strings.
|
||||
var repr string
|
||||
if err := json.Unmarshal(data, &repr); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
val, ok := m[repr]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
// Old style: enums are ints.
|
||||
var val int32
|
||||
if err := json.Unmarshal(data, &val); err != nil {
|
||||
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// DebugPrint dumps the encoded data in b in a debugging format with a header
|
||||
// including the string s. Used in testing but made available for general debugging.
|
||||
func (p *Buffer) DebugPrint(s string, b []byte) {
|
||||
var u uint64
|
||||
|
||||
obuf := p.buf
|
||||
index := p.index
|
||||
p.buf = b
|
||||
p.index = 0
|
||||
depth := 0
|
||||
|
||||
fmt.Printf("\n--- %s ---\n", s)
|
||||
|
||||
out:
|
||||
for {
|
||||
for i := 0; i < depth; i++ {
|
||||
fmt.Print(" ")
|
||||
}
|
||||
|
||||
index := p.index
|
||||
if index == len(p.buf) {
|
||||
break
|
||||
}
|
||||
|
||||
op, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: fetching op err %v\n", index, err)
|
||||
break out
|
||||
}
|
||||
tag := op >> 3
|
||||
wire := op & 7
|
||||
|
||||
switch wire {
|
||||
default:
|
||||
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
|
||||
index, tag, wire)
|
||||
break out
|
||||
|
||||
case WireBytes:
|
||||
var r []byte
|
||||
|
||||
r, err = p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
|
||||
if len(r) <= 6 {
|
||||
for i := 0; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < 3; i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
fmt.Printf(" ..")
|
||||
for i := len(r) - 3; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
case WireFixed32:
|
||||
u, err = p.DecodeFixed32()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
|
||||
|
||||
case WireFixed64:
|
||||
u, err = p.DecodeFixed64()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
|
||||
|
||||
case WireVarint:
|
||||
u, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
|
||||
|
||||
case WireStartGroup:
|
||||
fmt.Printf("%3d: t=%3d start\n", index, tag)
|
||||
depth++
|
||||
|
||||
case WireEndGroup:
|
||||
depth--
|
||||
fmt.Printf("%3d: t=%3d end\n", index, tag)
|
||||
}
|
||||
}
|
||||
|
||||
if depth != 0 {
|
||||
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
p.buf = obuf
|
||||
p.index = index
|
||||
}
|
||||
|
||||
// SetDefaults sets unset protocol buffer fields to their default values.
|
||||
// It only modifies fields that are both unset and have defined defaults.
|
||||
// It recursively sets default values in any non-nil sub-messages.
|
||||
func SetDefaults(pb Message) {
|
||||
setDefaults(reflect.ValueOf(pb), true, false)
|
||||
}
|
||||
|
||||
// v is a pointer to a struct.
|
||||
func setDefaults(v reflect.Value, recur, zeros bool) {
|
||||
v = v.Elem()
|
||||
|
||||
defaultMu.RLock()
|
||||
dm, ok := defaults[v.Type()]
|
||||
defaultMu.RUnlock()
|
||||
if !ok {
|
||||
dm = buildDefaultMessage(v.Type())
|
||||
defaultMu.Lock()
|
||||
defaults[v.Type()] = dm
|
||||
defaultMu.Unlock()
|
||||
}
|
||||
|
||||
for _, sf := range dm.scalars {
|
||||
f := v.Field(sf.index)
|
||||
if !f.IsNil() {
|
||||
// field already set
|
||||
continue
|
||||
}
|
||||
dv := sf.value
|
||||
if dv == nil && !zeros {
|
||||
// no explicit default, and don't want to set zeros
|
||||
continue
|
||||
}
|
||||
fptr := f.Addr().Interface() // **T
|
||||
// TODO: Consider batching the allocations we do here.
|
||||
switch sf.kind {
|
||||
case reflect.Bool:
|
||||
b := new(bool)
|
||||
if dv != nil {
|
||||
*b = dv.(bool)
|
||||
}
|
||||
*(fptr.(**bool)) = b
|
||||
case reflect.Float32:
|
||||
f := new(float32)
|
||||
if dv != nil {
|
||||
*f = dv.(float32)
|
||||
}
|
||||
*(fptr.(**float32)) = f
|
||||
case reflect.Float64:
|
||||
f := new(float64)
|
||||
if dv != nil {
|
||||
*f = dv.(float64)
|
||||
}
|
||||
*(fptr.(**float64)) = f
|
||||
case reflect.Int32:
|
||||
// might be an enum
|
||||
if ft := f.Type(); ft != int32PtrType {
|
||||
// enum
|
||||
f.Set(reflect.New(ft.Elem()))
|
||||
if dv != nil {
|
||||
f.Elem().SetInt(int64(dv.(int32)))
|
||||
}
|
||||
} else {
|
||||
// int32 field
|
||||
i := new(int32)
|
||||
if dv != nil {
|
||||
*i = dv.(int32)
|
||||
}
|
||||
*(fptr.(**int32)) = i
|
||||
}
|
||||
case reflect.Int64:
|
||||
i := new(int64)
|
||||
if dv != nil {
|
||||
*i = dv.(int64)
|
||||
}
|
||||
*(fptr.(**int64)) = i
|
||||
case reflect.String:
|
||||
s := new(string)
|
||||
if dv != nil {
|
||||
*s = dv.(string)
|
||||
}
|
||||
*(fptr.(**string)) = s
|
||||
case reflect.Uint8:
|
||||
// exceptional case: []byte
|
||||
var b []byte
|
||||
if dv != nil {
|
||||
db := dv.([]byte)
|
||||
b = make([]byte, len(db))
|
||||
copy(b, db)
|
||||
} else {
|
||||
b = []byte{}
|
||||
}
|
||||
*(fptr.(*[]byte)) = b
|
||||
case reflect.Uint32:
|
||||
u := new(uint32)
|
||||
if dv != nil {
|
||||
*u = dv.(uint32)
|
||||
}
|
||||
*(fptr.(**uint32)) = u
|
||||
case reflect.Uint64:
|
||||
u := new(uint64)
|
||||
if dv != nil {
|
||||
*u = dv.(uint64)
|
||||
}
|
||||
*(fptr.(**uint64)) = u
|
||||
default:
|
||||
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
|
||||
}
|
||||
}
|
||||
|
||||
for _, ni := range dm.nested {
|
||||
f := v.Field(ni)
|
||||
// f is *T or []*T or map[T]*T
|
||||
switch f.Kind() {
|
||||
case reflect.Ptr:
|
||||
if f.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(f, recur, zeros)
|
||||
|
||||
case reflect.Slice:
|
||||
for i := 0; i < f.Len(); i++ {
|
||||
e := f.Index(i)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
for _, k := range f.MapKeys() {
|
||||
e := f.MapIndex(k)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// defaults maps a protocol buffer struct type to a slice of the fields,
|
||||
// with its scalar fields set to their proto-declared non-zero default values.
|
||||
defaultMu sync.RWMutex
|
||||
defaults = make(map[reflect.Type]defaultMessage)
|
||||
|
||||
int32PtrType = reflect.TypeOf((*int32)(nil))
|
||||
)
|
||||
|
||||
// defaultMessage represents information about the default values of a message.
|
||||
type defaultMessage struct {
|
||||
scalars []scalarField
|
||||
nested []int // struct field index of nested messages
|
||||
}
|
||||
|
||||
type scalarField struct {
|
||||
index int // struct field index
|
||||
kind reflect.Kind // element type (the T in *T or []T)
|
||||
value interface{} // the proto-declared default value, or nil
|
||||
}
|
||||
|
||||
// t is a struct type.
|
||||
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
|
||||
sprop := GetProperties(t)
|
||||
for _, prop := range sprop.Prop {
|
||||
fi, ok := sprop.decoderTags.get(prop.Tag)
|
||||
if !ok {
|
||||
// XXX_unrecognized
|
||||
continue
|
||||
}
|
||||
ft := t.Field(fi).Type
|
||||
|
||||
sf, nested, err := fieldDefault(ft, prop)
|
||||
switch {
|
||||
case err != nil:
|
||||
log.Print(err)
|
||||
case nested:
|
||||
dm.nested = append(dm.nested, fi)
|
||||
case sf != nil:
|
||||
sf.index = fi
|
||||
dm.scalars = append(dm.scalars, *sf)
|
||||
}
|
||||
}
|
||||
|
||||
return dm
|
||||
}
|
||||
|
||||
// fieldDefault returns the scalarField for field type ft.
|
||||
// sf will be nil if the field can not have a default.
|
||||
// nestedMessage will be true if this is a nested message.
|
||||
// Note that sf.index is not set on return.
|
||||
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
|
||||
var canHaveDefault bool
|
||||
switch ft.Kind() {
|
||||
case reflect.Ptr:
|
||||
if ft.Elem().Kind() == reflect.Struct {
|
||||
nestedMessage = true
|
||||
} else {
|
||||
canHaveDefault = true // proto2 scalar field
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Ptr:
|
||||
nestedMessage = true // repeated message
|
||||
case reflect.Uint8:
|
||||
canHaveDefault = true // bytes field
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
if ft.Elem().Kind() == reflect.Ptr {
|
||||
nestedMessage = true // map with message values
|
||||
}
|
||||
}
|
||||
|
||||
if !canHaveDefault {
|
||||
if nestedMessage {
|
||||
return nil, true, nil
|
||||
}
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
// We now know that ft is a pointer or slice.
|
||||
sf = &scalarField{kind: ft.Elem().Kind()}
|
||||
|
||||
// scalar fields without defaults
|
||||
if !prop.HasDefault {
|
||||
return sf, false, nil
|
||||
}
|
||||
|
||||
// a scalar field: either *T or []byte
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Bool:
|
||||
x, err := strconv.ParseBool(prop.Default)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Float32:
|
||||
x, err := strconv.ParseFloat(prop.Default, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = float32(x)
|
||||
case reflect.Float64:
|
||||
x, err := strconv.ParseFloat(prop.Default, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Int32:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = int32(x)
|
||||
case reflect.Int64:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.String:
|
||||
sf.value = prop.Default
|
||||
case reflect.Uint8:
|
||||
// []byte (not *uint8)
|
||||
sf.value = []byte(prop.Default)
|
||||
case reflect.Uint32:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = uint32(x)
|
||||
case reflect.Uint64:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
default:
|
||||
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
|
||||
}
|
||||
|
||||
return sf, false, nil
|
||||
}
|
||||
|
||||
// Map fields may have key types of non-float scalars, strings and enums.
|
||||
// The easiest way to sort them in some deterministic order is to use fmt.
|
||||
// If this turns out to be inefficient we can always consider other options,
|
||||
// such as doing a Schwartzian transform.
|
||||
|
||||
func mapKeys(vs []reflect.Value) sort.Interface {
|
||||
s := mapKeySorter{
|
||||
vs: vs,
|
||||
// default Less function: textual comparison
|
||||
less: func(a, b reflect.Value) bool {
|
||||
return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
|
||||
},
|
||||
}
|
||||
|
||||
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
|
||||
// numeric keys are sorted numerically.
|
||||
if len(vs) == 0 {
|
||||
return s
|
||||
}
|
||||
switch vs[0].Kind() {
|
||||
case reflect.Int32, reflect.Int64:
|
||||
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
type mapKeySorter struct {
|
||||
vs []reflect.Value
|
||||
less func(a, b reflect.Value) bool
|
||||
}
|
||||
|
||||
func (s mapKeySorter) Len() int { return len(s.vs) }
|
||||
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
|
||||
func (s mapKeySorter) Less(i, j int) bool {
|
||||
return s.less(s.vs[i], s.vs[j])
|
||||
}
|
||||
|
||||
// isProto3Zero reports whether v is a zero proto3 value.
|
||||
func isProto3Zero(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.String:
|
||||
return v.String() == ""
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
const ProtoPackageIsVersion1 = true
|
|
@ -1,280 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Support for message sets.
|
||||
*/
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
|
||||
// A message type ID is required for storing a protocol buffer in a message set.
|
||||
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
|
||||
|
||||
// The first two types (_MessageSet_Item and messageSet)
|
||||
// model what the protocol compiler produces for the following protocol message:
|
||||
// message MessageSet {
|
||||
// repeated group Item = 1 {
|
||||
// required int32 type_id = 2;
|
||||
// required string message = 3;
|
||||
// };
|
||||
// }
|
||||
// That is the MessageSet wire format. We can't use a proto to generate these
|
||||
// because that would introduce a circular dependency between it and this package.
|
||||
|
||||
type _MessageSet_Item struct {
|
||||
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
|
||||
Message []byte `protobuf:"bytes,3,req,name=message"`
|
||||
}
|
||||
|
||||
type messageSet struct {
|
||||
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
|
||||
XXX_unrecognized []byte
|
||||
// TODO: caching?
|
||||
}
|
||||
|
||||
// Make sure messageSet is a Message.
|
||||
var _ Message = (*messageSet)(nil)
|
||||
|
||||
// messageTypeIder is an interface satisfied by a protocol buffer type
|
||||
// that may be stored in a MessageSet.
|
||||
type messageTypeIder interface {
|
||||
MessageTypeId() int32
|
||||
}
|
||||
|
||||
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
id := mti.MessageTypeId()
|
||||
for _, item := range ms.Item {
|
||||
if *item.TypeId == id {
|
||||
return item
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Has(pb Message) bool {
|
||||
if ms.find(pb) != nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (ms *messageSet) Unmarshal(pb Message) error {
|
||||
if item := ms.find(pb); item != nil {
|
||||
return Unmarshal(item.Message, pb)
|
||||
}
|
||||
if _, ok := pb.(messageTypeIder); !ok {
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
return nil // TODO: return error instead?
|
||||
}
|
||||
|
||||
func (ms *messageSet) Marshal(pb Message) error {
|
||||
msg, err := Marshal(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if item := ms.find(pb); item != nil {
|
||||
// reuse existing item
|
||||
item.Message = msg
|
||||
return nil
|
||||
}
|
||||
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
|
||||
mtid := mti.MessageTypeId()
|
||||
ms.Item = append(ms.Item, &_MessageSet_Item{
|
||||
TypeId: &mtid,
|
||||
Message: msg,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Reset() { *ms = messageSet{} }
|
||||
func (ms *messageSet) String() string { return CompactTextString(ms) }
|
||||
func (*messageSet) ProtoMessage() {}
|
||||
|
||||
// Support for the message_set_wire_format message option.
|
||||
|
||||
func skipVarint(buf []byte) []byte {
|
||||
i := 0
|
||||
for ; buf[i]&0x80 != 0; i++ {
|
||||
}
|
||||
return buf[i+1:]
|
||||
}
|
||||
|
||||
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
|
||||
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
|
||||
if err := encodeExtensionMap(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Sort extension IDs to provide a deterministic encoding.
|
||||
// See also enc_map in encode.go.
|
||||
ids := make([]int, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, int(id))
|
||||
}
|
||||
sort.Ints(ids)
|
||||
|
||||
ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
|
||||
for _, id := range ids {
|
||||
e := m[int32(id)]
|
||||
// Remove the wire type and field number varint, as well as the length varint.
|
||||
msg := skipVarint(skipVarint(e.enc))
|
||||
|
||||
ms.Item = append(ms.Item, &_MessageSet_Item{
|
||||
TypeId: Int32(int32(id)),
|
||||
Message: msg,
|
||||
})
|
||||
}
|
||||
return Marshal(ms)
|
||||
}
|
||||
|
||||
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
||||
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
|
||||
ms := new(messageSet)
|
||||
if err := Unmarshal(buf, ms); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, item := range ms.Item {
|
||||
id := *item.TypeId
|
||||
msg := item.Message
|
||||
|
||||
// Restore wire type and field number varint, plus length varint.
|
||||
// Be careful to preserve duplicate items.
|
||||
b := EncodeVarint(uint64(id)<<3 | WireBytes)
|
||||
if ext, ok := m[id]; ok {
|
||||
// Existing data; rip off the tag and length varint
|
||||
// so we join the new data correctly.
|
||||
// We can assume that ext.enc is set because we are unmarshaling.
|
||||
o := ext.enc[len(b):] // skip wire type and field number
|
||||
_, n := DecodeVarint(o) // calculate length of length varint
|
||||
o = o[n:] // skip length varint
|
||||
msg = append(o, msg...) // join old data and new data
|
||||
}
|
||||
b = append(b, EncodeVarint(uint64(len(msg)))...)
|
||||
b = append(b, msg...)
|
||||
|
||||
m[id] = Extension{enc: b}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
|
||||
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
|
||||
var b bytes.Buffer
|
||||
b.WriteByte('{')
|
||||
|
||||
// Process the map in key order for deterministic output.
|
||||
ids := make([]int32, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
|
||||
|
||||
for i, id := range ids {
|
||||
ext := m[id]
|
||||
if i > 0 {
|
||||
b.WriteByte(',')
|
||||
}
|
||||
|
||||
msd, ok := messageSetMap[id]
|
||||
if !ok {
|
||||
// Unknown type; we can't render it, so skip it.
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(&b, `"[%s]":`, msd.name)
|
||||
|
||||
x := ext.value
|
||||
if x == nil {
|
||||
x = reflect.New(msd.t.Elem()).Interface()
|
||||
if err := Unmarshal(ext.enc, x.(Message)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
d, err := json.Marshal(x)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.Write(d)
|
||||
}
|
||||
b.WriteByte('}')
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
|
||||
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
|
||||
// Common-case fast path.
|
||||
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// This is fairly tricky, and it's not clear that it is needed.
|
||||
return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
|
||||
}
|
||||
|
||||
// A global registry of types that can be used in a MessageSet.
|
||||
|
||||
var messageSetMap = make(map[int32]messageSetDesc)
|
||||
|
||||
type messageSetDesc struct {
|
||||
t reflect.Type // pointer to struct
|
||||
name string
|
||||
}
|
||||
|
||||
// RegisterMessageSetType is called from the generated code.
|
||||
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
|
||||
messageSetMap[fieldNum] = messageSetDesc{
|
||||
t: reflect.TypeOf(m),
|
||||
name: name,
|
||||
}
|
||||
}
|
|
@ -1,479 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build appengine
|
||||
|
||||
// This file contains an implementation of proto field accesses using package reflect.
|
||||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
||||
// be used on App Engine.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// A structPointer is a pointer to a struct.
|
||||
type structPointer struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// toStructPointer returns a structPointer equivalent to the given reflect value.
|
||||
// The reflect value must itself be a pointer to a struct.
|
||||
func toStructPointer(v reflect.Value) structPointer {
|
||||
return structPointer{v}
|
||||
}
|
||||
|
||||
// IsNil reports whether p is nil.
|
||||
func structPointer_IsNil(p structPointer) bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
// Interface returns the struct pointer as an interface value.
|
||||
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
|
||||
return p.v.Interface()
|
||||
}
|
||||
|
||||
// A field identifies a field in a struct, accessible from a structPointer.
|
||||
// In this implementation, a field is identified by the sequence of field indices
|
||||
// passed to reflect's FieldByIndex.
|
||||
type field []int
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return f.Index
|
||||
}
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
var invalidField = field(nil)
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool { return f != nil }
|
||||
|
||||
// field returns the given field in the struct as a reflect value.
|
||||
func structPointer_field(p structPointer, f field) reflect.Value {
|
||||
// Special case: an extension map entry with a value of type T
|
||||
// passes a *T to the struct-handling code with a zero field,
|
||||
// expecting that it will be treated as equivalent to *struct{ X T },
|
||||
// which has the same memory layout. We have to handle that case
|
||||
// specially, because reflect will panic if we call FieldByIndex on a
|
||||
// non-struct.
|
||||
if f == nil {
|
||||
return p.v.Elem()
|
||||
}
|
||||
|
||||
return p.v.Elem().FieldByIndex(f)
|
||||
}
|
||||
|
||||
// ifield returns the given field in the struct as an interface value.
|
||||
func structPointer_ifield(p structPointer, f field) interface{} {
|
||||
return structPointer_field(p, f).Addr().Interface()
|
||||
}
|
||||
|
||||
// Bytes returns the address of a []byte field in the struct.
|
||||
func structPointer_Bytes(p structPointer, f field) *[]byte {
|
||||
return structPointer_ifield(p, f).(*[]byte)
|
||||
}
|
||||
|
||||
// BytesSlice returns the address of a [][]byte field in the struct.
|
||||
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
|
||||
return structPointer_ifield(p, f).(*[][]byte)
|
||||
}
|
||||
|
||||
// Bool returns the address of a *bool field in the struct.
|
||||
func structPointer_Bool(p structPointer, f field) **bool {
|
||||
return structPointer_ifield(p, f).(**bool)
|
||||
}
|
||||
|
||||
// BoolVal returns the address of a bool field in the struct.
|
||||
func structPointer_BoolVal(p structPointer, f field) *bool {
|
||||
return structPointer_ifield(p, f).(*bool)
|
||||
}
|
||||
|
||||
// BoolSlice returns the address of a []bool field in the struct.
|
||||
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
|
||||
return structPointer_ifield(p, f).(*[]bool)
|
||||
}
|
||||
|
||||
// String returns the address of a *string field in the struct.
|
||||
func structPointer_String(p structPointer, f field) **string {
|
||||
return structPointer_ifield(p, f).(**string)
|
||||
}
|
||||
|
||||
// StringVal returns the address of a string field in the struct.
|
||||
func structPointer_StringVal(p structPointer, f field) *string {
|
||||
return structPointer_ifield(p, f).(*string)
|
||||
}
|
||||
|
||||
// StringSlice returns the address of a []string field in the struct.
|
||||
func structPointer_StringSlice(p structPointer, f field) *[]string {
|
||||
return structPointer_ifield(p, f).(*[]string)
|
||||
}
|
||||
|
||||
// ExtMap returns the address of an extension map field in the struct.
|
||||
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
|
||||
return structPointer_ifield(p, f).(*map[int32]Extension)
|
||||
}
|
||||
|
||||
// NewAt returns the reflect.Value for a pointer to a field in the struct.
|
||||
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
|
||||
return structPointer_field(p, f).Addr()
|
||||
}
|
||||
|
||||
// SetStructPointer writes a *struct field in the struct.
|
||||
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
|
||||
structPointer_field(p, f).Set(q.v)
|
||||
}
|
||||
|
||||
// GetStructPointer reads a *struct field in the struct.
|
||||
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
|
||||
return structPointer{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// StructPointerSlice the address of a []*struct field in the struct.
|
||||
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
|
||||
return structPointerSlice{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// A structPointerSlice represents the address of a slice of pointers to structs
|
||||
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
|
||||
type structPointerSlice struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func (p structPointerSlice) Len() int { return p.v.Len() }
|
||||
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
|
||||
func (p structPointerSlice) Append(q structPointer) {
|
||||
p.v.Set(reflect.Append(p.v, q.v))
|
||||
}
|
||||
|
||||
var (
|
||||
int32Type = reflect.TypeOf(int32(0))
|
||||
uint32Type = reflect.TypeOf(uint32(0))
|
||||
float32Type = reflect.TypeOf(float32(0))
|
||||
int64Type = reflect.TypeOf(int64(0))
|
||||
uint64Type = reflect.TypeOf(uint64(0))
|
||||
float64Type = reflect.TypeOf(float64(0))
|
||||
)
|
||||
|
||||
// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
|
||||
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
|
||||
type word32 struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// IsNil reports whether p is nil.
|
||||
func word32_IsNil(p word32) bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
// Set sets p to point at a newly allocated word with bits set to x.
|
||||
func word32_Set(p word32, o *Buffer, x uint32) {
|
||||
t := p.v.Type().Elem()
|
||||
switch t {
|
||||
case int32Type:
|
||||
if len(o.int32s) == 0 {
|
||||
o.int32s = make([]int32, uint32PoolSize)
|
||||
}
|
||||
o.int32s[0] = int32(x)
|
||||
p.v.Set(reflect.ValueOf(&o.int32s[0]))
|
||||
o.int32s = o.int32s[1:]
|
||||
return
|
||||
case uint32Type:
|
||||
if len(o.uint32s) == 0 {
|
||||
o.uint32s = make([]uint32, uint32PoolSize)
|
||||
}
|
||||
o.uint32s[0] = x
|
||||
p.v.Set(reflect.ValueOf(&o.uint32s[0]))
|
||||
o.uint32s = o.uint32s[1:]
|
||||
return
|
||||
case float32Type:
|
||||
if len(o.float32s) == 0 {
|
||||
o.float32s = make([]float32, uint32PoolSize)
|
||||
}
|
||||
o.float32s[0] = math.Float32frombits(x)
|
||||
p.v.Set(reflect.ValueOf(&o.float32s[0]))
|
||||
o.float32s = o.float32s[1:]
|
||||
return
|
||||
}
|
||||
|
||||
// must be enum
|
||||
p.v.Set(reflect.New(t))
|
||||
p.v.Elem().SetInt(int64(int32(x)))
|
||||
}
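word32_Set above avoids one heap allocation per optional scalar by carving pointers out of slabs held on the Buffer (o.int32s, o.uint32s, o.float32s). A minimal sketch of that slab idea, with a hypothetical pool type and an assumed pool size:

package main

import "fmt"

const poolSize = 16 // assumed; the real pools are sized by uint32PoolSize

// int32Pool hands out *int32 values backed by a shared slab, so setting
// many optional fields does not allocate a separate object for each one.
type int32Pool struct{ slab []int32 }

func (p *int32Pool) new(x int32) *int32 {
    if len(p.slab) == 0 {
        p.slab = make([]int32, poolSize)
    }
    p.slab[0] = x
    ptr := &p.slab[0]
    p.slab = p.slab[1:] // consume one slot, exactly as word32_Set does
    return ptr
}

func main() {
    var pool int32Pool
    a, b := pool.new(1), pool.new(2)
    fmt.Println(*a, *b) // 1 2 -- two pointers into the same slab
}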
|
||||
|
||||
// Get gets the bits pointed at by p, as a uint32.
|
||||
func word32_Get(p word32) uint32 {
|
||||
elem := p.v.Elem()
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
return uint32(elem.Int())
|
||||
case reflect.Uint32:
|
||||
return uint32(elem.Uint())
|
||||
case reflect.Float32:
|
||||
return math.Float32bits(float32(elem.Float()))
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
|
||||
func structPointer_Word32(p structPointer, f field) word32 {
|
||||
return word32{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// A word32Val represents a field of type int32, uint32, float32, or enum.
|
||||
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
|
||||
type word32Val struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// Set sets *p to x.
|
||||
func word32Val_Set(p word32Val, x uint32) {
|
||||
switch p.v.Type() {
|
||||
case int32Type:
|
||||
p.v.SetInt(int64(x))
|
||||
return
|
||||
case uint32Type:
|
||||
p.v.SetUint(uint64(x))
|
||||
return
|
||||
case float32Type:
|
||||
p.v.SetFloat(float64(math.Float32frombits(x)))
|
||||
return
|
||||
}
|
||||
|
||||
// must be enum
|
||||
p.v.SetInt(int64(int32(x)))
|
||||
}
|
||||
|
||||
// Get gets the bits pointed at by p, as a uint32.
|
||||
func word32Val_Get(p word32Val) uint32 {
|
||||
elem := p.v
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
return uint32(elem.Int())
|
||||
case reflect.Uint32:
|
||||
return uint32(elem.Uint())
|
||||
case reflect.Float32:
|
||||
return math.Float32bits(float32(elem.Float()))
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
|
||||
func structPointer_Word32Val(p structPointer, f field) word32Val {
|
||||
return word32Val{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// A word32Slice is a slice of 32-bit values.
|
||||
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
|
||||
type word32Slice struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func (p word32Slice) Append(x uint32) {
|
||||
n, m := p.v.Len(), p.v.Cap()
|
||||
if n < m {
|
||||
p.v.SetLen(n + 1)
|
||||
} else {
|
||||
t := p.v.Type().Elem()
|
||||
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
|
||||
}
|
||||
elem := p.v.Index(n)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
elem.SetInt(int64(int32(x)))
|
||||
case reflect.Uint32:
|
||||
elem.SetUint(uint64(x))
|
||||
case reflect.Float32:
|
||||
elem.SetFloat(float64(math.Float32frombits(x)))
|
||||
}
|
||||
}
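Append above reuses spare capacity via SetLen and only falls back to reflect.Append when the slice is full, so repeated appends do not reallocate on every call. A standalone sketch of the same grow-or-reuse pattern (illustrative only):

package main

import (
    "fmt"
    "reflect"
)

// appendUint32 appends x to the []uint32 addressed by v (an addressable
// reflect.Value), reusing existing capacity when possible.
func appendUint32(v reflect.Value, x uint32) {
    n, m := v.Len(), v.Cap()
    if n < m {
        v.SetLen(n + 1) // spare capacity: just extend the length
    } else {
        v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
    }
    v.Index(n).SetUint(uint64(x))
}

func main() {
    s := make([]uint32, 0, 4)
    v := reflect.ValueOf(&s).Elem()
    for i := uint32(1); i <= 6; i++ {
        appendUint32(v, i)
    }
    fmt.Println(s) // [1 2 3 4 5 6]
}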
|
||||
|
||||
func (p word32Slice) Len() int {
|
||||
return p.v.Len()
|
||||
}
|
||||
|
||||
func (p word32Slice) Index(i int) uint32 {
|
||||
elem := p.v.Index(i)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
return uint32(elem.Int())
|
||||
case reflect.Uint32:
|
||||
return uint32(elem.Uint())
|
||||
case reflect.Float32:
|
||||
return math.Float32bits(float32(elem.Float()))
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
|
||||
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
|
||||
return word32Slice{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// word64 is like word32 but for 64-bit values.
|
||||
type word64 struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func word64_Set(p word64, o *Buffer, x uint64) {
|
||||
t := p.v.Type().Elem()
|
||||
switch t {
|
||||
case int64Type:
|
||||
if len(o.int64s) == 0 {
|
||||
o.int64s = make([]int64, uint64PoolSize)
|
||||
}
|
||||
o.int64s[0] = int64(x)
|
||||
p.v.Set(reflect.ValueOf(&o.int64s[0]))
|
||||
o.int64s = o.int64s[1:]
|
||||
return
|
||||
case uint64Type:
|
||||
if len(o.uint64s) == 0 {
|
||||
o.uint64s = make([]uint64, uint64PoolSize)
|
||||
}
|
||||
o.uint64s[0] = x
|
||||
p.v.Set(reflect.ValueOf(&o.uint64s[0]))
|
||||
o.uint64s = o.uint64s[1:]
|
||||
return
|
||||
case float64Type:
|
||||
if len(o.float64s) == 0 {
|
||||
o.float64s = make([]float64, uint64PoolSize)
|
||||
}
|
||||
o.float64s[0] = math.Float64frombits(x)
|
||||
p.v.Set(reflect.ValueOf(&o.float64s[0]))
|
||||
o.float64s = o.float64s[1:]
|
||||
return
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func word64_IsNil(p word64) bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
func word64_Get(p word64) uint64 {
|
||||
elem := p.v.Elem()
|
||||
switch elem.Kind() {
|
||||
case reflect.Int64:
|
||||
return uint64(elem.Int())
|
||||
case reflect.Uint64:
|
||||
return elem.Uint()
|
||||
case reflect.Float64:
|
||||
return math.Float64bits(elem.Float())
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func structPointer_Word64(p structPointer, f field) word64 {
|
||||
return word64{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// word64Val is like word32Val but for 64-bit values.
|
||||
type word64Val struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
|
||||
switch p.v.Type() {
|
||||
case int64Type:
|
||||
p.v.SetInt(int64(x))
|
||||
return
|
||||
case uint64Type:
|
||||
p.v.SetUint(x)
|
||||
return
|
||||
case float64Type:
|
||||
p.v.SetFloat(math.Float64frombits(x))
|
||||
return
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func word64Val_Get(p word64Val) uint64 {
|
||||
elem := p.v
|
||||
switch elem.Kind() {
|
||||
case reflect.Int64:
|
||||
return uint64(elem.Int())
|
||||
case reflect.Uint64:
|
||||
return elem.Uint()
|
||||
case reflect.Float64:
|
||||
return math.Float64bits(elem.Float())
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func structPointer_Word64Val(p structPointer, f field) word64Val {
|
||||
return word64Val{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
type word64Slice struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func (p word64Slice) Append(x uint64) {
|
||||
n, m := p.v.Len(), p.v.Cap()
|
||||
if n < m {
|
||||
p.v.SetLen(n + 1)
|
||||
} else {
|
||||
t := p.v.Type().Elem()
|
||||
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
|
||||
}
|
||||
elem := p.v.Index(n)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int64:
|
||||
elem.SetInt(int64(int64(x)))
|
||||
case reflect.Uint64:
|
||||
elem.SetUint(uint64(x))
|
||||
case reflect.Float64:
|
||||
elem.SetFloat(float64(math.Float64frombits(x)))
|
||||
}
|
||||
}
|
||||
|
||||
func (p word64Slice) Len() int {
|
||||
return p.v.Len()
|
||||
}
|
||||
|
||||
func (p word64Slice) Index(i int) uint64 {
|
||||
elem := p.v.Index(i)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int64:
|
||||
return uint64(elem.Int())
|
||||
case reflect.Uint64:
|
||||
return uint64(elem.Uint())
|
||||
case reflect.Float64:
|
||||
return math.Float64bits(float64(elem.Float()))
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
|
||||
return word64Slice{structPointer_field(p, f)}
|
||||
}
|
|
@ -1,266 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build !appengine
|
||||
|
||||
// This file contains the implementation of the proto field accesses using package unsafe.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// NOTE: These type_Foo functions would more idiomatically be methods,
|
||||
// but Go does not allow methods on pointer types, and we must preserve
|
||||
// some pointer type for the garbage collector. We use these
|
||||
// funcs with clunky names as our poor approximation to methods.
|
||||
//
|
||||
// An alternative would be
|
||||
// type structPointer struct { p unsafe.Pointer }
|
||||
// but that does not registerize as well.
|
||||
|
||||
// A structPointer is a pointer to a struct.
|
||||
type structPointer unsafe.Pointer
|
||||
|
||||
// toStructPointer returns a structPointer equivalent to the given reflect value.
|
||||
func toStructPointer(v reflect.Value) structPointer {
|
||||
return structPointer(unsafe.Pointer(v.Pointer()))
|
||||
}
|
||||
|
||||
// IsNil reports whether p is nil.
|
||||
func structPointer_IsNil(p structPointer) bool {
|
||||
return p == nil
|
||||
}
|
||||
|
||||
// Interface returns the struct pointer, assumed to have element type t,
|
||||
// as an interface value.
|
||||
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
|
||||
return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
|
||||
}
|
||||
|
||||
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr

// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
    return field(f.Offset)
}

// invalidField is an invalid field identifier.
const invalidField = ^field(0)

// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
    return f != ^field(0)
}

// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
    return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
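In this unsafe variant a field is nothing more than its byte offset, so every accessor is a single pointer addition. A standalone sketch of that offset arithmetic; the msg type is hypothetical and not part of this file:

package main

import (
    "fmt"
    "reflect"
    "unsafe"
)

type msg struct {
    A int32
    B []byte
}

func main() {
    m := &msg{A: 1}

    // A "field" in this scheme is just the byte offset of B inside msg.
    f, _ := reflect.TypeOf(*m).FieldByName("B")
    off := f.Offset

    // Accessor: base pointer + offset, reinterpreted as *[]byte,
    // mirroring structPointer_Bytes above.
    bp := (*[]byte)(unsafe.Pointer(uintptr(unsafe.Pointer(m)) + off))
    *bp = []byte("hi")

    fmt.Println(string(m.B)) // hi
}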
|
||||
|
||||
// BytesSlice returns the address of a [][]byte field in the struct.
|
||||
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
|
||||
return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// Bool returns the address of a *bool field in the struct.
|
||||
func structPointer_Bool(p structPointer, f field) **bool {
|
||||
return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// BoolVal returns the address of a bool field in the struct.
|
||||
func structPointer_BoolVal(p structPointer, f field) *bool {
|
||||
return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// BoolSlice returns the address of a []bool field in the struct.
|
||||
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
|
||||
return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// String returns the address of a *string field in the struct.
|
||||
func structPointer_String(p structPointer, f field) **string {
|
||||
return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// StringVal returns the address of a string field in the struct.
|
||||
func structPointer_StringVal(p structPointer, f field) *string {
|
||||
return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// StringSlice returns the address of a []string field in the struct.
|
||||
func structPointer_StringSlice(p structPointer, f field) *[]string {
|
||||
return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// ExtMap returns the address of an extension map field in the struct.
|
||||
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
|
||||
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// NewAt returns the reflect.Value for a pointer to a field in the struct.
|
||||
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
|
||||
return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
|
||||
}
|
||||
|
||||
// SetStructPointer writes a *struct field in the struct.
|
||||
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
|
||||
*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
|
||||
}
|
||||
|
||||
// GetStructPointer reads a *struct field in the struct.
|
||||
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
|
||||
return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// StructPointerSlice the address of a []*struct field in the struct.
|
||||
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
|
||||
return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
|
||||
type structPointerSlice []structPointer
|
||||
|
||||
func (v *structPointerSlice) Len() int { return len(*v) }
|
||||
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
|
||||
func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
|
||||
|
||||
// A word32 is the address of a "pointer to 32-bit value" field.
|
||||
type word32 **uint32
|
||||
|
||||
// IsNil reports whether *v is nil.
|
||||
func word32_IsNil(p word32) bool {
|
||||
return *p == nil
|
||||
}
|
||||
|
||||
// Set sets *v to point at a newly allocated word set to x.
|
||||
func word32_Set(p word32, o *Buffer, x uint32) {
|
||||
if len(o.uint32s) == 0 {
|
||||
o.uint32s = make([]uint32, uint32PoolSize)
|
||||
}
|
||||
o.uint32s[0] = x
|
||||
*p = &o.uint32s[0]
|
||||
o.uint32s = o.uint32s[1:]
|
||||
}
|
||||
|
||||
// Get gets the value pointed at by *v.
|
||||
func word32_Get(p word32) uint32 {
|
||||
return **p
|
||||
}
|
||||
|
||||
// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
|
||||
func structPointer_Word32(p structPointer, f field) word32 {
|
||||
return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
|
||||
}
|
||||
|
||||
// A word32Val is the address of a 32-bit value field.
|
||||
type word32Val *uint32
|
||||
|
||||
// Set sets *p to x.
|
||||
func word32Val_Set(p word32Val, x uint32) {
|
||||
*p = x
|
||||
}
|
||||
|
||||
// Get gets the value pointed at by p.
|
||||
func word32Val_Get(p word32Val) uint32 {
|
||||
return *p
|
||||
}
|
||||
|
||||
// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
|
||||
func structPointer_Word32Val(p structPointer, f field) word32Val {
|
||||
return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
|
||||
}
|
||||
|
||||
// A word32Slice is a slice of 32-bit values.
|
||||
type word32Slice []uint32
|
||||
|
||||
func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
|
||||
func (v *word32Slice) Len() int { return len(*v) }
|
||||
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
|
||||
|
||||
// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
|
||||
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
|
||||
return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// word64 is like word32 but for 64-bit values.
|
||||
type word64 **uint64
|
||||
|
||||
func word64_Set(p word64, o *Buffer, x uint64) {
|
||||
if len(o.uint64s) == 0 {
|
||||
o.uint64s = make([]uint64, uint64PoolSize)
|
||||
}
|
||||
o.uint64s[0] = x
|
||||
*p = &o.uint64s[0]
|
||||
o.uint64s = o.uint64s[1:]
|
||||
}
|
||||
|
||||
func word64_IsNil(p word64) bool {
|
||||
return *p == nil
|
||||
}
|
||||
|
||||
func word64_Get(p word64) uint64 {
|
||||
return **p
|
||||
}
|
||||
|
||||
func structPointer_Word64(p structPointer, f field) word64 {
|
||||
return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
|
||||
}
|
||||
|
||||
// word64Val is like word32Val but for 64-bit values.
|
||||
type word64Val *uint64
|
||||
|
||||
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
|
||||
*p = x
|
||||
}
|
||||
|
||||
func word64Val_Get(p word64Val) uint64 {
|
||||
return *p
|
||||
}
|
||||
|
||||
func structPointer_Word64Val(p structPointer, f field) word64Val {
|
||||
return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
|
||||
}
|
||||
|
||||
// word64Slice is like word32Slice but for 64-bit values.
|
||||
type word64Slice []uint64
|
||||
|
||||
func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
|
||||
func (v *word64Slice) Len() int { return len(*v) }
|
||||
func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
|
||||
|
||||
func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
|
||||
return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
|
@ -1,846 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for encoding data into the wire format for protocol buffers.
|
||||
*/
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const debug bool = false
|
||||
|
||||
// Constants that identify the encoding of a value on the wire.
const (
    WireVarint     = 0
    WireFixed64    = 1
    WireBytes      = 2
    WireStartGroup = 3
    WireEndGroup   = 4
    WireFixed32    = 5
)

const startSize = 10 // initial slice/string sizes
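Every encoded field begins with a varint key that combines the field tag with one of the wire types above: key = tag<<3 | wireType. Worked by hand (not taken from this file), tag 49 with WireBytes gives 49<<3|2 = 394, which encodes as the two varint bytes 8a 03; the tagcode precalculation later in this file uses the same formula. A small self-contained check:

package main

import "fmt"

// encodeKey computes the wire key for a field tag and wire type and
// returns its varint encoding.
func encodeKey(tag, wire uint32) []byte {
    x := tag<<3 | wire
    var out []byte
    for x > 127 {
        out = append(out, byte(0x80|x&0x7F))
        x >>= 7
    }
    return append(out, byte(x))
}

func main() {
    fmt.Printf("% x\n", encodeKey(49, 2)) // 8a 03
}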
// Encoders are defined in encode.go
|
||||
// An encoder outputs the full representation of a field, including its
|
||||
// tag and encoder type.
|
||||
type encoder func(p *Buffer, prop *Properties, base structPointer) error
|
||||
|
||||
// A valueEncoder encodes a single integer in a particular encoding.
|
||||
type valueEncoder func(o *Buffer, x uint64) error
|
||||
|
||||
// Sizers are defined in encode.go
|
||||
// A sizer returns the encoded size of a field, including its tag and encoder
|
||||
// type.
|
||||
type sizer func(prop *Properties, base structPointer) int
|
||||
|
||||
// A valueSizer returns the encoded size of a single integer in a particular
|
||||
// encoding.
|
||||
type valueSizer func(x uint64) int
|
||||
|
||||
// Decoders are defined in decode.go
|
||||
// A decoder creates a value from its wire representation.
|
||||
// Unrecognized subelements are saved in unrec.
|
||||
type decoder func(p *Buffer, prop *Properties, base structPointer) error
|
||||
|
||||
// A valueDecoder decodes a single integer in a particular encoding.
|
||||
type valueDecoder func(o *Buffer) (x uint64, err error)
|
||||
|
||||
// A oneofMarshaler does the marshaling for all oneof fields in a message.
|
||||
type oneofMarshaler func(Message, *Buffer) error
|
||||
|
||||
// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
|
||||
type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
|
||||
|
||||
// A oneofSizer does the sizing for all oneof fields in a message.
|
||||
type oneofSizer func(Message) int
|
||||
|
||||
// tagMap is an optimization over map[int]int for typical protocol buffer
|
||||
// use-cases. Encoded protocol buffers are often in tag order with small tag
|
||||
// numbers.
|
||||
type tagMap struct {
|
||||
fastTags []int
|
||||
slowTags map[int]int
|
||||
}
|
||||
|
||||
// tagMapFastLimit is the upper bound on the tag number that will be stored in
|
||||
// the tagMap slice rather than its map.
|
||||
const tagMapFastLimit = 1024
|
||||
|
||||
func (p *tagMap) get(t int) (int, bool) {
|
||||
if t > 0 && t < tagMapFastLimit {
|
||||
if t >= len(p.fastTags) {
|
||||
return 0, false
|
||||
}
|
||||
fi := p.fastTags[t]
|
||||
return fi, fi >= 0
|
||||
}
|
||||
fi, ok := p.slowTags[t]
|
||||
return fi, ok
|
||||
}
|
||||
|
||||
func (p *tagMap) put(t int, fi int) {
|
||||
if t > 0 && t < tagMapFastLimit {
|
||||
for len(p.fastTags) < t+1 {
|
||||
p.fastTags = append(p.fastTags, -1)
|
||||
}
|
||||
p.fastTags[t] = fi
|
||||
return
|
||||
}
|
||||
if p.slowTags == nil {
|
||||
p.slowTags = make(map[int]int)
|
||||
}
|
||||
p.slowTags[t] = fi
|
||||
}
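tagMap above exists because decoded messages usually carry small, dense tag numbers, so an array lookup beats a map; -1 in fastTags marks an absent entry, and only tags at or above tagMapFastLimit spill into the slowTags map. A compact standalone sketch of the same lookup discipline (the names and the small limit are illustrative):

package main

import "fmt"

func main() {
    const fastLimit = 8 // stands in for tagMapFastLimit (1024 above)
    fast := make([]int, fastLimit)
    for i := range fast {
        fast[i] = -1 // -1 means "no field carries this tag"
    }
    slow := map[int]int{}

    put := func(tag, fi int) {
        if tag > 0 && tag < fastLimit {
            fast[tag] = fi
        } else {
            slow[tag] = fi
        }
    }
    get := func(tag int) (int, bool) {
        if tag > 0 && tag < fastLimit {
            fi := fast[tag]
            return fi, fi >= 0
        }
        fi, ok := slow[tag]
        return fi, ok
    }

    put(3, 0)
    put(5000, 1)
    fmt.Println(get(3))    // 0 true
    fmt.Println(get(4))    // -1 false (never registered)
    fmt.Println(get(5000)) // 1 true
}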
|
||||
|
||||
// StructProperties represents properties for all the fields of a struct.
|
||||
// decoderTags and decoderOrigNames should only be used by the decoder.
|
||||
type StructProperties struct {
|
||||
Prop []*Properties // properties for each field
|
||||
reqCount int // required count
|
||||
decoderTags tagMap // map from proto tag to struct field number
|
||||
decoderOrigNames map[string]int // map from original name to struct field number
|
||||
order []int // list of struct field numbers in tag order
|
||||
unrecField field // field id of the XXX_unrecognized []byte field
|
||||
extendable bool // is this an extendable proto
|
||||
|
||||
oneofMarshaler oneofMarshaler
|
||||
oneofUnmarshaler oneofUnmarshaler
|
||||
oneofSizer oneofSizer
|
||||
stype reflect.Type
|
||||
|
||||
// OneofTypes contains information about the oneof fields in this message.
|
||||
// It is keyed by the original name of a field.
|
||||
OneofTypes map[string]*OneofProperties
|
||||
}
|
||||
|
||||
// OneofProperties represents information about a specific field in a oneof.
|
||||
type OneofProperties struct {
|
||||
Type reflect.Type // pointer to generated struct type for this oneof field
|
||||
Field int // struct field number of the containing oneof in the message
|
||||
Prop *Properties
|
||||
}
|
||||
|
||||
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
|
||||
// See encode.go, (*Buffer).enc_struct.
|
||||
|
||||
func (sp *StructProperties) Len() int { return len(sp.order) }
|
||||
func (sp *StructProperties) Less(i, j int) bool {
|
||||
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
|
||||
}
|
||||
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
|
||||
|
||||
// Properties represents the protocol-specific behavior of a single struct field.
|
||||
type Properties struct {
|
||||
Name string // name of the field, for error messages
|
||||
OrigName string // original name before protocol compiler (always set)
|
||||
JSONName string // name to use for JSON; determined by protoc
|
||||
Wire string
|
||||
WireType int
|
||||
Tag int
|
||||
Required bool
|
||||
Optional bool
|
||||
Repeated bool
|
||||
Packed bool // relevant for repeated primitives only
|
||||
Enum string // set for enum types only
|
||||
proto3 bool // whether this is known to be a proto3 field; set for []byte only
|
||||
oneof bool // whether this is a oneof field
|
||||
|
||||
Default string // default value
|
||||
HasDefault bool // whether an explicit default was provided
|
||||
def_uint64 uint64
|
||||
|
||||
enc encoder
|
||||
valEnc valueEncoder // set for bool and numeric types only
|
||||
field field
|
||||
tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
|
||||
tagbuf [8]byte
|
||||
stype reflect.Type // set for struct types only
|
||||
sprop *StructProperties // set for struct types only
|
||||
isMarshaler bool
|
||||
isUnmarshaler bool
|
||||
|
||||
mtype reflect.Type // set for map types only
|
||||
mkeyprop *Properties // set for map types only
|
||||
mvalprop *Properties // set for map types only
|
||||
|
||||
size sizer
|
||||
valSize valueSizer // set for bool and numeric types only
|
||||
|
||||
dec decoder
|
||||
valDec valueDecoder // set for bool and numeric types only
|
||||
|
||||
// If this is a packable field, this will be the decoder for the packed version of the field.
|
||||
packedDec decoder
|
||||
}
|
||||
|
||||
// String formats the properties in the protobuf struct field tag style.
|
||||
func (p *Properties) String() string {
|
||||
s := p.Wire
|
||||
s += ","
|
||||
s += strconv.Itoa(p.Tag)
|
||||
if p.Required {
|
||||
s += ",req"
|
||||
}
|
||||
if p.Optional {
|
||||
s += ",opt"
|
||||
}
|
||||
if p.Repeated {
|
||||
s += ",rep"
|
||||
}
|
||||
if p.Packed {
|
||||
s += ",packed"
|
||||
}
|
||||
s += ",name=" + p.OrigName
|
||||
if p.JSONName != p.OrigName {
|
||||
s += ",json=" + p.JSONName
|
||||
}
|
||||
if p.proto3 {
|
||||
s += ",proto3"
|
||||
}
|
||||
if p.oneof {
|
||||
s += ",oneof"
|
||||
}
|
||||
if len(p.Enum) > 0 {
|
||||
s += ",enum=" + p.Enum
|
||||
}
|
||||
if p.HasDefault {
|
||||
s += ",def=" + p.Default
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Parse populates p by parsing a string in the protobuf struct field tag style.
|
||||
func (p *Properties) Parse(s string) {
|
||||
// "bytes,49,opt,name=foo,def=hello!"
|
||||
fields := strings.Split(s, ",") // breaks def=, but handled below.
|
||||
if len(fields) < 2 {
|
||||
fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
|
||||
return
|
||||
}
|
||||
|
||||
p.Wire = fields[0]
|
||||
switch p.Wire {
|
||||
case "varint":
|
||||
p.WireType = WireVarint
|
||||
p.valEnc = (*Buffer).EncodeVarint
|
||||
p.valDec = (*Buffer).DecodeVarint
|
||||
p.valSize = sizeVarint
|
||||
case "fixed32":
|
||||
p.WireType = WireFixed32
|
||||
p.valEnc = (*Buffer).EncodeFixed32
|
||||
p.valDec = (*Buffer).DecodeFixed32
|
||||
p.valSize = sizeFixed32
|
||||
case "fixed64":
|
||||
p.WireType = WireFixed64
|
||||
p.valEnc = (*Buffer).EncodeFixed64
|
||||
p.valDec = (*Buffer).DecodeFixed64
|
||||
p.valSize = sizeFixed64
|
||||
case "zigzag32":
|
||||
p.WireType = WireVarint
|
||||
p.valEnc = (*Buffer).EncodeZigzag32
|
||||
p.valDec = (*Buffer).DecodeZigzag32
|
||||
p.valSize = sizeZigzag32
|
||||
case "zigzag64":
|
||||
p.WireType = WireVarint
|
||||
p.valEnc = (*Buffer).EncodeZigzag64
|
||||
p.valDec = (*Buffer).DecodeZigzag64
|
||||
p.valSize = sizeZigzag64
|
||||
case "bytes", "group":
|
||||
p.WireType = WireBytes
|
||||
// no numeric converter for non-numeric types
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
|
||||
return
|
||||
}
|
||||
|
||||
var err error
|
||||
p.Tag, err = strconv.Atoi(fields[1])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for i := 2; i < len(fields); i++ {
|
||||
f := fields[i]
|
||||
switch {
|
||||
case f == "req":
|
||||
p.Required = true
|
||||
case f == "opt":
|
||||
p.Optional = true
|
||||
case f == "rep":
|
||||
p.Repeated = true
|
||||
case f == "packed":
|
||||
p.Packed = true
|
||||
case strings.HasPrefix(f, "name="):
|
||||
p.OrigName = f[5:]
|
||||
case strings.HasPrefix(f, "json="):
|
||||
p.JSONName = f[5:]
|
||||
case strings.HasPrefix(f, "enum="):
|
||||
p.Enum = f[5:]
|
||||
case f == "proto3":
|
||||
p.proto3 = true
|
||||
case f == "oneof":
|
||||
p.oneof = true
|
||||
case strings.HasPrefix(f, "def="):
|
||||
p.HasDefault = true
|
||||
p.Default = f[4:] // rest of string
|
||||
if i+1 < len(fields) {
|
||||
// Commas aren't escaped, and def is always last.
|
||||
p.Default += "," + strings.Join(fields[i+1:], ",")
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
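Parse consumes the struct tag emitted by protoc-gen-go in the format noted above ("bytes,49,opt,name=foo,def=hello!"). A small illustration of such a tag and what Parse would extract from it; the Greeting field is hypothetical:

package main

import (
    "fmt"
    "reflect"
)

// Hypothetical generated field carrying the tag format Parse understands.
type example struct {
    Greeting *string `protobuf:"bytes,49,opt,name=greeting,def=hello!"`
}

func main() {
    f, _ := reflect.TypeOf(example{}).FieldByName("Greeting")
    fmt.Println(f.Tag.Get("protobuf"))
    // Parse on this string would set: Wire="bytes", Tag=49, Optional=true,
    // OrigName="greeting", HasDefault=true, Default="hello!". Note that
    // "def=" must come last, since its value may itself contain commas.
}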
|
||||
|
||||
func logNoSliceEnc(t1, t2 reflect.Type) {
|
||||
fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
|
||||
}
|
||||
|
||||
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
|
||||
|
||||
// Initialize the fields for encoding and decoding.
|
||||
func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
|
||||
p.enc = nil
|
||||
p.dec = nil
|
||||
p.size = nil
|
||||
|
||||
switch t1 := typ; t1.Kind() {
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
|
||||
|
||||
// proto3 scalar types
|
||||
|
||||
case reflect.Bool:
|
||||
p.enc = (*Buffer).enc_proto3_bool
|
||||
p.dec = (*Buffer).dec_proto3_bool
|
||||
p.size = size_proto3_bool
|
||||
case reflect.Int32:
|
||||
p.enc = (*Buffer).enc_proto3_int32
|
||||
p.dec = (*Buffer).dec_proto3_int32
|
||||
p.size = size_proto3_int32
|
||||
case reflect.Uint32:
|
||||
p.enc = (*Buffer).enc_proto3_uint32
|
||||
p.dec = (*Buffer).dec_proto3_int32 // can reuse
|
||||
p.size = size_proto3_uint32
|
||||
case reflect.Int64, reflect.Uint64:
|
||||
p.enc = (*Buffer).enc_proto3_int64
|
||||
p.dec = (*Buffer).dec_proto3_int64
|
||||
p.size = size_proto3_int64
|
||||
case reflect.Float32:
|
||||
p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
|
||||
p.dec = (*Buffer).dec_proto3_int32
|
||||
p.size = size_proto3_uint32
|
||||
case reflect.Float64:
|
||||
p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
|
||||
p.dec = (*Buffer).dec_proto3_int64
|
||||
p.size = size_proto3_int64
|
||||
case reflect.String:
|
||||
p.enc = (*Buffer).enc_proto3_string
|
||||
p.dec = (*Buffer).dec_proto3_string
|
||||
p.size = size_proto3_string
|
||||
|
||||
case reflect.Ptr:
|
||||
switch t2 := t1.Elem(); t2.Kind() {
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
|
||||
break
|
||||
case reflect.Bool:
|
||||
p.enc = (*Buffer).enc_bool
|
||||
p.dec = (*Buffer).dec_bool
|
||||
p.size = size_bool
|
||||
case reflect.Int32:
|
||||
p.enc = (*Buffer).enc_int32
|
||||
p.dec = (*Buffer).dec_int32
|
||||
p.size = size_int32
|
||||
case reflect.Uint32:
|
||||
p.enc = (*Buffer).enc_uint32
|
||||
p.dec = (*Buffer).dec_int32 // can reuse
|
||||
p.size = size_uint32
|
||||
case reflect.Int64, reflect.Uint64:
|
||||
p.enc = (*Buffer).enc_int64
|
||||
p.dec = (*Buffer).dec_int64
|
||||
p.size = size_int64
|
||||
case reflect.Float32:
|
||||
p.enc = (*Buffer).enc_uint32 // can just treat them as bits
|
||||
p.dec = (*Buffer).dec_int32
|
||||
p.size = size_uint32
|
||||
case reflect.Float64:
|
||||
p.enc = (*Buffer).enc_int64 // can just treat them as bits
|
||||
p.dec = (*Buffer).dec_int64
|
||||
p.size = size_int64
|
||||
case reflect.String:
|
||||
p.enc = (*Buffer).enc_string
|
||||
p.dec = (*Buffer).dec_string
|
||||
p.size = size_string
|
||||
case reflect.Struct:
|
||||
p.stype = t1.Elem()
|
||||
p.isMarshaler = isMarshaler(t1)
|
||||
p.isUnmarshaler = isUnmarshaler(t1)
|
||||
if p.Wire == "bytes" {
|
||||
p.enc = (*Buffer).enc_struct_message
|
||||
p.dec = (*Buffer).dec_struct_message
|
||||
p.size = size_struct_message
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_struct_group
|
||||
p.dec = (*Buffer).dec_struct_group
|
||||
p.size = size_struct_group
|
||||
}
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
switch t2 := t1.Elem(); t2.Kind() {
|
||||
default:
|
||||
logNoSliceEnc(t1, t2)
|
||||
break
|
||||
case reflect.Bool:
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_bool
|
||||
p.size = size_slice_packed_bool
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_bool
|
||||
p.size = size_slice_bool
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_bool
|
||||
p.packedDec = (*Buffer).dec_slice_packed_bool
|
||||
case reflect.Int32:
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_int32
|
||||
p.size = size_slice_packed_int32
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_int32
|
||||
p.size = size_slice_int32
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int32
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int32
|
||||
case reflect.Uint32:
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_uint32
|
||||
p.size = size_slice_packed_uint32
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_uint32
|
||||
p.size = size_slice_uint32
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int32
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int32
|
||||
case reflect.Int64, reflect.Uint64:
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_int64
|
||||
p.size = size_slice_packed_int64
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_int64
|
||||
p.size = size_slice_int64
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int64
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int64
|
||||
case reflect.Uint8:
|
||||
p.enc = (*Buffer).enc_slice_byte
|
||||
p.dec = (*Buffer).dec_slice_byte
|
||||
p.size = size_slice_byte
|
||||
// This is a []byte, which is either a bytes field,
|
||||
// or the value of a map field. In the latter case,
|
||||
// we always encode an empty []byte, so we should not
|
||||
// use the proto3 enc/size funcs.
|
||||
// f == nil iff this is the key/value of a map field.
|
||||
if p.proto3 && f != nil {
|
||||
p.enc = (*Buffer).enc_proto3_slice_byte
|
||||
p.size = size_proto3_slice_byte
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
switch t2.Bits() {
|
||||
case 32:
|
||||
// can just treat them as bits
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_uint32
|
||||
p.size = size_slice_packed_uint32
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_uint32
|
||||
p.size = size_slice_uint32
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int32
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int32
|
||||
case 64:
|
||||
// can just treat them as bits
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_int64
|
||||
p.size = size_slice_packed_int64
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_int64
|
||||
p.size = size_slice_int64
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int64
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int64
|
||||
default:
|
||||
logNoSliceEnc(t1, t2)
|
||||
break
|
||||
}
|
||||
case reflect.String:
|
||||
p.enc = (*Buffer).enc_slice_string
|
||||
p.dec = (*Buffer).dec_slice_string
|
||||
p.size = size_slice_string
|
||||
case reflect.Ptr:
|
||||
switch t3 := t2.Elem(); t3.Kind() {
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
|
||||
break
|
||||
case reflect.Struct:
|
||||
p.stype = t2.Elem()
|
||||
p.isMarshaler = isMarshaler(t2)
|
||||
p.isUnmarshaler = isUnmarshaler(t2)
|
||||
if p.Wire == "bytes" {
|
||||
p.enc = (*Buffer).enc_slice_struct_message
|
||||
p.dec = (*Buffer).dec_slice_struct_message
|
||||
p.size = size_slice_struct_message
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_struct_group
|
||||
p.dec = (*Buffer).dec_slice_struct_group
|
||||
p.size = size_slice_struct_group
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
switch t2.Elem().Kind() {
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
|
||||
break
|
||||
case reflect.Uint8:
|
||||
p.enc = (*Buffer).enc_slice_slice_byte
|
||||
p.dec = (*Buffer).dec_slice_slice_byte
|
||||
p.size = size_slice_slice_byte
|
||||
}
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
p.enc = (*Buffer).enc_new_map
|
||||
p.dec = (*Buffer).dec_new_map
|
||||
p.size = size_new_map
|
||||
|
||||
p.mtype = t1
|
||||
p.mkeyprop = &Properties{}
|
||||
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
|
||||
p.mvalprop = &Properties{}
|
||||
vtype := p.mtype.Elem()
|
||||
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
|
||||
// The value type is not a message (*T) or bytes ([]byte),
|
||||
// so we need encoders for the pointer to this type.
|
||||
vtype = reflect.PtrTo(vtype)
|
||||
}
|
||||
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
||||
}
|
||||
|
||||
// precalculate tag code
|
||||
wire := p.WireType
|
||||
if p.Packed {
|
||||
wire = WireBytes
|
||||
}
|
||||
x := uint32(p.Tag)<<3 | uint32(wire)
|
||||
i := 0
|
||||
for i = 0; x > 127; i++ {
|
||||
p.tagbuf[i] = 0x80 | uint8(x&0x7F)
|
||||
x >>= 7
|
||||
}
|
||||
p.tagbuf[i] = uint8(x)
|
||||
p.tagcode = p.tagbuf[0 : i+1]
|
||||
|
||||
if p.stype != nil {
|
||||
if lockGetProp {
|
||||
p.sprop = GetProperties(p.stype)
|
||||
} else {
|
||||
p.sprop = getPropertiesLocked(p.stype)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
|
||||
)
|
||||
|
||||
// isMarshaler reports whether type t implements Marshaler.
|
||||
func isMarshaler(t reflect.Type) bool {
|
||||
// We're checking for (likely) pointer-receiver methods
|
||||
// so if t is not a pointer, something is very wrong.
|
||||
// The calls above only invoke isMarshaler on pointer types.
|
||||
if t.Kind() != reflect.Ptr {
|
||||
panic("proto: misuse of isMarshaler")
|
||||
}
|
||||
return t.Implements(marshalerType)
|
||||
}
|
||||
|
||||
// isUnmarshaler reports whether type t implements Unmarshaler.
|
||||
func isUnmarshaler(t reflect.Type) bool {
|
||||
// We're checking for (likely) pointer-receiver methods
|
||||
// so if t is not a pointer, something is very wrong.
|
||||
// The calls above only invoke isUnmarshaler on pointer types.
|
||||
if t.Kind() != reflect.Ptr {
|
||||
panic("proto: misuse of isUnmarshaler")
|
||||
}
|
||||
return t.Implements(unmarshalerType)
|
||||
}
|
||||
|
||||
// Init populates the properties from a protocol buffer struct tag.
|
||||
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
|
||||
p.init(typ, name, tag, f, true)
|
||||
}
|
||||
|
||||
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
|
||||
// "bytes,49,opt,def=hello!"
|
||||
p.Name = name
|
||||
p.OrigName = name
|
||||
if f != nil {
|
||||
p.field = toField(f)
|
||||
}
|
||||
if tag == "" {
|
||||
return
|
||||
}
|
||||
p.Parse(tag)
|
||||
p.setEncAndDec(typ, f, lockGetProp)
|
||||
}
|
||||
|
||||
var (
|
||||
propertiesMu sync.RWMutex
|
||||
propertiesMap = make(map[reflect.Type]*StructProperties)
|
||||
)
|
||||
|
||||
// GetProperties returns the list of properties for the type represented by t.
// t must represent a generated struct type of a protocol message.
func GetProperties(t reflect.Type) *StructProperties {
    if t.Kind() != reflect.Struct {
        panic("proto: type must have kind struct")
    }

    // Most calls to GetProperties in a long-running program will be
    // retrieving details for types we have seen before.
    propertiesMu.RLock()
    sprop, ok := propertiesMap[t]
    propertiesMu.RUnlock()
    if ok {
        if collectStats {
            stats.Chit++
        }
        return sprop
    }

    propertiesMu.Lock()
    sprop = getPropertiesLocked(t)
    propertiesMu.Unlock()
    return sprop
}
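GetProperties follows the classic read-mostly cache shape: a cheap RLock fast path, then a full Lock in which getPropertiesLocked re-checks the map before doing the expensive work. A standalone sketch of that locking pattern with a stand-in value type (illustrative; not the real StructProperties computation):

package main

import (
    "fmt"
    "reflect"
    "sync"
)

// typeCache mirrors the locking discipline of GetProperties.
type typeCache struct {
    mu    sync.RWMutex
    cache map[reflect.Type]int // stand-in for *StructProperties
}

func (c *typeCache) get(t reflect.Type) int {
    c.mu.RLock()
    v, ok := c.cache[t]
    c.mu.RUnlock()
    if ok {
        return v
    }

    c.mu.Lock()
    defer c.mu.Unlock()
    if v, ok := c.cache[t]; ok { // re-check: another goroutine may have won
        return v
    }
    if c.cache == nil {
        c.cache = make(map[reflect.Type]int)
    }
    v = t.NumField() // stand-in for building the properties
    c.cache[t] = v
    return v
}

func main() {
    var c typeCache
    type m struct{ A, B int }
    fmt.Println(c.get(reflect.TypeOf(m{}))) // 2, computed
    fmt.Println(c.get(reflect.TypeOf(m{}))) // 2, from cache
}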
// getPropertiesLocked requires that propertiesMu is held.
|
||||
func getPropertiesLocked(t reflect.Type) *StructProperties {
|
||||
if prop, ok := propertiesMap[t]; ok {
|
||||
if collectStats {
|
||||
stats.Chit++
|
||||
}
|
||||
return prop
|
||||
}
|
||||
if collectStats {
|
||||
stats.Cmiss++
|
||||
}
|
||||
|
||||
prop := new(StructProperties)
|
||||
// in case of recursive protos, fill this in now.
|
||||
propertiesMap[t] = prop
|
||||
|
||||
// build properties
|
||||
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
|
||||
prop.unrecField = invalidField
|
||||
prop.Prop = make([]*Properties, t.NumField())
|
||||
prop.order = make([]int, t.NumField())
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
p := new(Properties)
|
||||
name := f.Name
|
||||
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
|
||||
|
||||
if f.Name == "XXX_extensions" { // special case
|
||||
p.enc = (*Buffer).enc_map
|
||||
p.dec = nil // not needed
|
||||
p.size = size_map
|
||||
}
|
||||
if f.Name == "XXX_unrecognized" { // special case
|
||||
prop.unrecField = toField(&f)
|
||||
}
|
||||
oneof := f.Tag.Get("protobuf_oneof") != "" // special case
|
||||
prop.Prop[i] = p
|
||||
prop.order[i] = i
|
||||
if debug {
|
||||
print(i, " ", f.Name, " ", t.String(), " ")
|
||||
if p.Tag > 0 {
|
||||
print(p.String())
|
||||
}
|
||||
print("\n")
|
||||
}
|
||||
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
|
||||
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
|
||||
}
|
||||
}
|
||||
|
||||
// Re-order prop.order.
|
||||
sort.Sort(prop)
|
||||
|
||||
type oneofMessage interface {
|
||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
||||
}
|
||||
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
|
||||
var oots []interface{}
|
||||
prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
|
||||
prop.stype = t
|
||||
|
||||
// Interpret oneof metadata.
|
||||
prop.OneofTypes = make(map[string]*OneofProperties)
|
||||
for _, oot := range oots {
|
||||
oop := &OneofProperties{
|
||||
Type: reflect.ValueOf(oot).Type(), // *T
|
||||
Prop: new(Properties),
|
||||
}
|
||||
sft := oop.Type.Elem().Field(0)
|
||||
oop.Prop.Name = sft.Name
|
||||
oop.Prop.Parse(sft.Tag.Get("protobuf"))
|
||||
// There will be exactly one interface field that
|
||||
// this new value is assignable to.
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if f.Type.Kind() != reflect.Interface {
|
||||
continue
|
||||
}
|
||||
if !oop.Type.AssignableTo(f.Type) {
|
||||
continue
|
||||
}
|
||||
oop.Field = i
|
||||
break
|
||||
}
|
||||
prop.OneofTypes[oop.Prop.OrigName] = oop
|
||||
}
|
||||
}
|
||||
|
||||
// build required counts
|
||||
// build tags
|
||||
reqCount := 0
|
||||
prop.decoderOrigNames = make(map[string]int)
|
||||
for i, p := range prop.Prop {
|
||||
if strings.HasPrefix(p.Name, "XXX_") {
|
||||
// Internal fields should not appear in tags/origNames maps.
|
||||
// They are handled specially when encoding and decoding.
|
||||
continue
|
||||
}
|
||||
if p.Required {
|
||||
reqCount++
|
||||
}
|
||||
prop.decoderTags.put(p.Tag, i)
|
||||
prop.decoderOrigNames[p.OrigName] = i
|
||||
}
|
||||
prop.reqCount = reqCount
|
||||
|
||||
return prop
|
||||
}
|
||||
|
||||
// Return the Properties object for the x[0]'th field of the structure.
|
||||
func propByIndex(t reflect.Type, x []int) *Properties {
|
||||
if len(x) != 1 {
|
||||
fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
|
||||
return nil
|
||||
}
|
||||
prop := GetProperties(t)
|
||||
return prop.Prop[x[0]]
|
||||
}
|
||||
|
||||
// Get the address and type of a pointer to a struct from an interface.
|
||||
func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
|
||||
if pb == nil {
|
||||
err = ErrNil
|
||||
return
|
||||
}
|
||||
// get the reflect type of the pointer to the struct.
|
||||
t = reflect.TypeOf(pb)
|
||||
// get the address of the struct.
|
||||
value := reflect.ValueOf(pb)
|
||||
b = toStructPointer(value)
|
||||
return
|
||||
}
|
||||
|
||||
// A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum.

var enumValueMaps = make(map[string]map[string]int32)

// RegisterEnum is called from the generated code to install the enum descriptor
// maps into the global table to aid parsing text format protocol buffers.
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
    if _, ok := enumValueMaps[typeName]; ok {
        panic("proto: duplicate enum registered: " + typeName)
    }
    enumValueMaps[typeName] = valueMap
}

// EnumValueMap returns the mapping from names to integers of the
// enum type enumType, or nil if not found.
func EnumValueMap(enumType string) map[string]int32 {
    return enumValueMaps[enumType]
}
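RegisterEnum and EnumValueMap are the two halves of the enum name registry: generated code fills it in at init time, and the text-format parser reads it back. A usage sketch with a hypothetical enum, assuming the package is importable as github.com/golang/protobuf/proto:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
)

// Hand-written stand-ins for what protoc-gen-go would emit for a
// hypothetical enum "example.Color".
var (
    colorName  = map[int32]string{0: "RED", 1: "BLUE"}
    colorValue = map[string]int32{"RED": 0, "BLUE": 1}
)

func main() {
    // Generated code registers each enum exactly once, like this.
    proto.RegisterEnum("example.Color", colorName, colorValue)

    // Later, name-to-value lookups go through the registry.
    fmt.Println(proto.EnumValueMap("example.Color")["BLUE"]) // 1
}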
// A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message").
var (
    protoTypes    = make(map[string]reflect.Type)
    revProtoTypes = make(map[reflect.Type]string)
)

// RegisterType is called from generated code and maps from the fully qualified
// proto name to the type (pointer to struct) of the protocol buffer.
func RegisterType(x Message, name string) {
    if _, ok := protoTypes[name]; ok {
        // TODO: Some day, make this a panic.
        log.Printf("proto: duplicate proto type registered: %s", name)
        return
    }
    t := reflect.TypeOf(x)
    protoTypes[name] = t
    revProtoTypes[t] = name
}

// MessageName returns the fully-qualified proto name for the given message type.
func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }

// MessageType returns the message type (pointer to struct) for a named message.
func MessageType(name string) reflect.Type { return protoTypes[name] }
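RegisterType, MessageName and MessageType form the analogous registry for message types, keyed by the fully-qualified proto name. A usage sketch with a minimal hand-written message standing in for generated code (same import-path assumption as above):

package main

import (
    "fmt"
    "reflect"

    "github.com/golang/protobuf/proto"
)

// Ping is a hypothetical message; protoc-gen-go would normally emit this
// type plus the RegisterType call below.
type Ping struct{}

func (*Ping) Reset()         {}
func (*Ping) String() string { return "Ping{}" }
func (*Ping) ProtoMessage()  {}

func main() {
    proto.RegisterType((*Ping)(nil), "example.Ping")

    fmt.Println(proto.MessageName(&Ping{}))                                   // example.Ping
    fmt.Println(proto.MessageType("example.Ping") == reflect.TypeOf(&Ping{})) // true
}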
|
|
@ -1,751 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// Functions for writing the text protocol buffer format.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
newline = []byte("\n")
|
||||
spaces = []byte(" ")
|
||||
gtNewline = []byte(">\n")
|
||||
endBraceNewline = []byte("}\n")
|
||||
backslashN = []byte{'\\', 'n'}
|
||||
backslashR = []byte{'\\', 'r'}
|
||||
backslashT = []byte{'\\', 't'}
|
||||
backslashDQ = []byte{'\\', '"'}
|
||||
backslashBS = []byte{'\\', '\\'}
|
||||
posInf = []byte("inf")
|
||||
negInf = []byte("-inf")
|
||||
nan = []byte("nan")
|
||||
)
|
||||
|
||||
type writer interface {
|
||||
io.Writer
|
||||
WriteByte(byte) error
|
||||
}
|
||||
|
||||
// textWriter is an io.Writer that tracks its indentation level.
|
||||
type textWriter struct {
|
||||
ind int
|
||||
complete bool // if the current position is a complete line
|
||||
compact bool // whether to write out as a one-liner
|
||||
w writer
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteString(s string) (n int, err error) {
|
||||
if !strings.Contains(s, "\n") {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.complete = false
|
||||
return io.WriteString(w.w, s)
|
||||
}
|
||||
// WriteString is typically called without newlines, so this
|
||||
// codepath and its copy are rare. We copy to avoid
|
||||
// duplicating all of Write's logic here.
|
||||
return w.Write([]byte(s))
|
||||
}
|
||||
|
||||
func (w *textWriter) Write(p []byte) (n int, err error) {
|
||||
newlines := bytes.Count(p, newline)
|
||||
if newlines == 0 {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
n, err = w.w.Write(p)
|
||||
w.complete = false
|
||||
return n, err
|
||||
}
|
||||
|
||||
frags := bytes.SplitN(p, newline, newlines+1)
|
||||
if w.compact {
|
||||
for i, frag := range frags {
|
||||
if i > 0 {
|
||||
if err := w.w.WriteByte(' '); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n++
|
||||
}
|
||||
nn, err := w.w.Write(frag)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
for i, frag := range frags {
|
||||
if w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
nn, err := w.w.Write(frag)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
if i+1 < len(frags) {
|
||||
if err := w.w.WriteByte('\n'); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n++
|
||||
}
|
||||
}
|
||||
w.complete = len(frags[len(frags)-1]) == 0
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteByte(c byte) error {
|
||||
if w.compact && c == '\n' {
|
||||
c = ' '
|
||||
}
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
err := w.w.WriteByte(c)
|
||||
w.complete = c == '\n'
|
||||
return err
|
||||
}
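textWriter's bookkeeping is purely cosmetic: it remembers whether the last byte written completed a line and, if so, emits the current indentation before the next content. A standalone sketch of that idea with a simplified line-oriented writer (hypothetical, and much smaller than the real thing):

package main

import (
    "bufio"
    "bytes"
    "fmt"
)

// indentWriter mirrors the complete/indent bookkeeping of textWriter.
type indentWriter struct {
    w        *bufio.Writer
    ind      int
    complete bool
}

func (w *indentWriter) writeLine(s string) {
    if w.complete {
        for i := 0; i < w.ind; i++ {
            w.w.WriteString("  ")
        }
    }
    w.w.WriteString(s)
    w.w.WriteByte('\n')
    w.complete = true
}

func main() {
    var buf bytes.Buffer
    w := &indentWriter{w: bufio.NewWriter(&buf), complete: true}
    w.writeLine("name: <")
    w.ind++
    w.writeLine(`first: "Ann"`)
    w.ind--
    w.writeLine(">")
    w.w.Flush()
    fmt.Print(buf.String())
    // name: <
    //   first: "Ann"
    // >
}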
|
||||
|
||||
func (w *textWriter) indent() { w.ind++ }
|
||||
|
||||
func (w *textWriter) unindent() {
|
||||
if w.ind == 0 {
|
||||
log.Printf("proto: textWriter unindented too far")
|
||||
return
|
||||
}
|
||||
w.ind--
|
||||
}
|
||||
|
||||
func writeName(w *textWriter, props *Properties) error {
|
||||
if _, err := w.WriteString(props.OrigName); err != nil {
|
||||
return err
|
||||
}
|
||||
if props.Wire != "group" {
|
||||
return w.WriteByte(':')
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// raw is the interface satisfied by RawMessage.
|
||||
type raw interface {
|
||||
Bytes() []byte
|
||||
}
|
||||
|
||||
func writeStruct(w *textWriter, sv reflect.Value) error {
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < sv.NumField(); i++ {
|
||||
fv := sv.Field(i)
|
||||
props := sprops.Prop[i]
|
||||
name := st.Field(i).Name
|
||||
|
||||
if strings.HasPrefix(name, "XXX_") {
|
||||
// There are two XXX_ fields:
|
||||
// XXX_unrecognized []byte
|
||||
// XXX_extensions map[int32]proto.Extension
|
||||
// The first is handled here;
|
||||
// the second is handled at the bottom of this function.
|
||||
if name == "XXX_unrecognized" && !fv.IsNil() {
|
||||
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
||||
// Field not filled in. This could be an optional field or
|
||||
// a required field that wasn't filled in. Either way, there
|
||||
// isn't anything we can show for it.
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Slice && fv.IsNil() {
|
||||
// Repeated field that is empty, or a bytes field that is unused.
|
||||
continue
|
||||
}
|
||||
|
||||
if props.Repeated && fv.Kind() == reflect.Slice {
|
||||
// Repeated field.
|
||||
for j := 0; j < fv.Len(); j++ {
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
v := fv.Index(j)
|
||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
// A nil message in a repeated field is not valid,
|
||||
// but we can handle that more gracefully than panicking.
|
||||
if _, err := w.Write([]byte("<nil>\n")); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err := writeAny(w, v, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Map {
|
||||
// Map fields are rendered as a repeated struct with key/value fields.
|
||||
keys := fv.MapKeys()
|
||||
sort.Sort(mapKeys(keys))
|
||||
for _, key := range keys {
|
||||
val := fv.MapIndex(key)
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// open struct
|
||||
if err := w.WriteByte('<'); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
// key
|
||||
if _, err := w.WriteString("key:"); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := writeAny(w, key, props.mkeyprop); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
// nil values aren't legal, but we can avoid panicking because of them.
|
||||
if val.Kind() != reflect.Ptr || !val.IsNil() {
|
||||
// value
|
||||
if _, err := w.WriteString("value:"); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := writeAny(w, val, props.mvalprop); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// close struct
|
||||
w.unindent()
|
||||
if err := w.WriteByte('>'); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
|
||||
// empty bytes field
|
||||
continue
|
||||
}
|
||||
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
|
||||
// proto3 non-repeated scalar field; skip if zero value
|
||||
if isProto3Zero(fv) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if fv.Kind() == reflect.Interface {
|
||||
// Check if it is a oneof.
|
||||
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
|
||||
// fv is nil, or holds a pointer to generated struct.
|
||||
// That generated struct has exactly one field,
|
||||
// which has a protobuf struct tag.
|
||||
if fv.IsNil() {
|
||||
continue
|
||||
}
|
||||
inner := fv.Elem().Elem() // interface -> *T -> T
|
||||
tag := inner.Type().Field(0).Tag.Get("protobuf")
|
||||
props = new(Properties) // Overwrite the outer props var, but not its pointee.
|
||||
props.Parse(tag)
|
||||
// Write the value in the oneof, not the oneof itself.
|
||||
fv = inner.Field(0)
|
||||
|
||||
// Special case to cope with malformed messages gracefully:
|
||||
// If the value in the oneof is a nil pointer, don't panic
|
||||
// in writeAny.
|
||||
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
||||
// Use errors.New so writeAny won't render quotes.
|
||||
msg := errors.New("/* nil */")
|
||||
fv = reflect.ValueOf(&msg).Elem()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if b, ok := fv.Interface().(raw); ok {
|
||||
if err := writeRaw(w, b.Bytes()); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Enums have a String method, so writeAny will work fine.
|
||||
if err := writeAny(w, fv, props); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Extensions (the XXX_extensions field).
|
||||
pv := sv.Addr()
|
||||
if pv.Type().Implements(extendableProtoType) {
|
||||
if err := writeExtensions(w, pv); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeRaw writes an uninterpreted raw message.
|
||||
func writeRaw(w *textWriter, b []byte) error {
|
||||
if err := w.WriteByte('<'); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
if err := writeUnknownStruct(w, b); err != nil {
|
||||
return err
|
||||
}
|
||||
w.unindent()
|
||||
if err := w.WriteByte('>'); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeAny writes an arbitrary field.
|
||||
func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||
v = reflect.Indirect(v)
|
||||
|
||||
// Floats have special cases.
|
||||
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
|
||||
x := v.Float()
|
||||
var b []byte
|
||||
switch {
|
||||
case math.IsInf(x, 1):
|
||||
b = posInf
|
||||
case math.IsInf(x, -1):
|
||||
b = negInf
|
||||
case math.IsNaN(x):
|
||||
b = nan
|
||||
}
|
||||
if b != nil {
|
||||
_, err := w.Write(b)
|
||||
return err
|
||||
}
|
||||
// Other values are handled below.
|
||||
}
|
||||
|
||||
// We don't attempt to serialise every possible value type; only those
|
||||
// that can occur in protocol buffers.
|
||||
switch v.Kind() {
|
||||
case reflect.Slice:
|
||||
// Should only be a []byte; repeated fields are handled in writeStruct.
|
||||
if err := writeString(w, string(v.Interface().([]byte))); err != nil {
|
||||
return err
|
||||
}
|
||||
case reflect.String:
|
||||
if err := writeString(w, v.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
case reflect.Struct:
|
||||
// Required/optional group/message.
|
||||
var bra, ket byte = '<', '>'
|
||||
if props != nil && props.Wire == "group" {
|
||||
bra, ket = '{', '}'
|
||||
}
|
||||
if err := w.WriteByte(bra); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
||||
text, err := tm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = w.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if err := writeStruct(w, v); err != nil {
|
||||
return err
|
||||
}
|
||||
w.unindent()
|
||||
if err := w.WriteByte(ket); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
_, err := fmt.Fprint(w, v.Interface())
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
// equivalent to C's isprint.
func isprint(c byte) bool {
	return c >= 0x20 && c < 0x7f
}

// writeString writes a string in the protocol buffer text format.
// It is similar to strconv.Quote except we don't use Go escape sequences,
// we treat the string as a byte sequence, and we use octal escapes.
// These differences are to maintain interoperability with the other
// languages' implementations of the text format.
func writeString(w *textWriter, s string) error {
	// use WriteByte here to get any needed indent
	if err := w.WriteByte('"'); err != nil {
		return err
	}
	// Loop over the bytes, not the runes.
	for i := 0; i < len(s); i++ {
		var err error
		// Divergence from C++: we don't escape apostrophes.
		// There's no need to escape them, and the C++ parser
		// copes with a naked apostrophe.
		switch c := s[i]; c {
		case '\n':
			_, err = w.w.Write(backslashN)
		case '\r':
			_, err = w.w.Write(backslashR)
		case '\t':
			_, err = w.w.Write(backslashT)
		case '"':
			_, err = w.w.Write(backslashDQ)
		case '\\':
			_, err = w.w.Write(backslashBS)
		default:
			if isprint(c) {
				err = w.w.WriteByte(c)
			} else {
				_, err = fmt.Fprintf(w.w, "\\%03o", c)
			}
		}
		if err != nil {
			return err
		}
	}
	return w.WriteByte('"')
}
|
||||
|
||||
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
|
||||
if !w.compact {
|
||||
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
b := NewBuffer(data)
|
||||
for b.index < len(b.buf) {
|
||||
x, err := b.DecodeVarint()
|
||||
if err != nil {
|
||||
_, err := fmt.Fprintf(w, "/* %v */\n", err)
|
||||
return err
|
||||
}
|
||||
wire, tag := x&7, x>>3
|
||||
if wire == WireEndGroup {
|
||||
w.unindent()
|
||||
if _, err := w.Write(endBraceNewline); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if _, err := fmt.Fprint(w, tag); err != nil {
|
||||
return err
|
||||
}
|
||||
if wire != WireStartGroup {
|
||||
if err := w.WriteByte(':'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if !w.compact || wire == WireStartGroup {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
switch wire {
|
||||
case WireBytes:
|
||||
buf, e := b.DecodeRawBytes(false)
|
||||
if e == nil {
|
||||
_, err = fmt.Fprintf(w, "%q", buf)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w, "/* %v */", e)
|
||||
}
|
||||
case WireFixed32:
|
||||
x, err = b.DecodeFixed32()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
case WireFixed64:
|
||||
x, err = b.DecodeFixed64()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
case WireStartGroup:
|
||||
err = w.WriteByte('{')
|
||||
w.indent()
|
||||
case WireVarint:
|
||||
x, err = b.DecodeVarint()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
default:
|
||||
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeUnknownInt(w *textWriter, x uint64, err error) error {
	if err == nil {
		_, err = fmt.Fprint(w, x)
	} else {
		_, err = fmt.Fprintf(w, "/* %v */", err)
	}
	return err
}

type int32Slice []int32

func (s int32Slice) Len() int           { return len(s) }
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// writeExtensions writes all the extensions in pv.
|
||||
// pv is assumed to be a pointer to a protocol message struct that is extendable.
|
||||
func writeExtensions(w *textWriter, pv reflect.Value) error {
|
||||
emap := extensionMaps[pv.Type().Elem()]
|
||||
ep := pv.Interface().(extendableProto)
|
||||
|
||||
// Order the extensions by ID.
|
||||
// This isn't strictly necessary, but it will give us
|
||||
// canonical output, which will also make testing easier.
|
||||
m := ep.ExtensionMap()
|
||||
ids := make([]int32, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Sort(int32Slice(ids))
|
||||
|
||||
for _, extNum := range ids {
|
||||
ext := m[extNum]
|
||||
var desc *ExtensionDesc
|
||||
if emap != nil {
|
||||
desc = emap[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
// Unknown extension.
|
||||
if err := writeUnknownStruct(w, ext.enc); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
pb, err := GetExtension(ep, desc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed getting extension: %v", err)
|
||||
}
|
||||
|
||||
// Repeated extensions will appear as a slice.
|
||||
if !desc.repeated() {
|
||||
if err := writeExtension(w, desc.Name, pb); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
v := reflect.ValueOf(pb)
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeExtension(w *textWriter, name string, pb interface{}) error {
|
||||
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}

func (w *textWriter) writeIndent() {
	if !w.complete {
		return
	}
	remain := w.ind * 2
	for remain > 0 {
		n := remain
		if n > len(spaces) {
			n = len(spaces)
		}
		w.w.Write(spaces[:n])
		remain -= n
	}
	w.complete = false
}

func marshalText(w io.Writer, pb Message, compact bool) error {
	val := reflect.ValueOf(pb)
	if pb == nil || val.IsNil() {
		w.Write([]byte("<nil>"))
		return nil
	}
	var bw *bufio.Writer
	ww, ok := w.(writer)
	if !ok {
		bw = bufio.NewWriter(w)
		ww = bw
	}
	aw := &textWriter{
		w:        ww,
		complete: true,
		compact:  compact,
	}

	if tm, ok := pb.(encoding.TextMarshaler); ok {
		text, err := tm.MarshalText()
		if err != nil {
			return err
		}
		if _, err = aw.Write(text); err != nil {
			return err
		}
		if bw != nil {
			return bw.Flush()
		}
		return nil
	}
	// Dereference the received pointer so we don't have outer < and >.
	v := reflect.Indirect(val)
	if err := writeStruct(aw, v); err != nil {
		return err
	}
	if bw != nil {
		return bw.Flush()
	}
	return nil
}

// MarshalText writes a given protocol buffer in text format.
// The only errors returned are from w.
func MarshalText(w io.Writer, pb Message) error {
	return marshalText(w, pb, false)
}

// MarshalTextString is the same as MarshalText, but returns the string directly.
func MarshalTextString(pb Message) string {
	var buf bytes.Buffer
	marshalText(&buf, pb, false)
	return buf.String()
}

// CompactText writes a given protocol buffer in compact text format (one line).
func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }

// CompactTextString is the same as CompactText, but returns the string directly.
func CompactTextString(pb Message) string {
	var buf bytes.Buffer
	marshalText(&buf, pb, true)
	return buf.String()
}
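
The exported text-format helpers above (MarshalText, MarshalTextString, CompactText, CompactTextString) are driven purely by reflection over protobuf struct tags, so a minimal usage sketch can get by with a hand-written message type. The Example type below is hypothetical and only illustrative; a real program would use a protoc-generated type.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Example is a hypothetical, hand-written stand-in for a protoc-generated
// message; only the protobuf struct tags matter to the text marshaler.
type Example struct {
	Name  *string `protobuf:"bytes,1,opt,name=name"`
	Count *int32  `protobuf:"varint,2,opt,name=count"`
}

func (m *Example) Reset()         { *m = Example{} }
func (m *Example) String() string { return proto.CompactTextString(m) }
func (*Example) ProtoMessage()    {}

func main() {
	msg := &Example{Name: proto.String("demo"), Count: proto.Int32(3)}

	// Multi-line text form, one "name: value" pair per line.
	fmt.Print(proto.MarshalTextString(msg))

	// Compact one-line form.
	fmt.Println(proto.CompactTextString(msg))
}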
|
|
@ -1,806 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// Functions for parsing the Text protocol buffer format.
|
||||
// TODO: message sets.
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type ParseError struct {
	Message string
	Line    int // 1-based line number
	Offset  int // 0-based byte offset from start of input
}

func (p *ParseError) Error() string {
	if p.Line == 1 {
		// show offset only for first line
		return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
	}
	return fmt.Sprintf("line %d: %v", p.Line, p.Message)
}

type token struct {
	value    string
	err      *ParseError
	line     int    // line number
	offset   int    // byte number from start of input, not start of line
	unquoted string // the unquoted version of value, if it was a quoted string
}

func (t *token) String() string {
	if t.err == nil {
		return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
	}
	return fmt.Sprintf("parse error: %v", t.err)
}

type textParser struct {
	s            string // remaining input
	done         bool   // whether the parsing is finished (success or error)
	backed       bool   // whether back() was called
	offset, line int
	cur          token
}

func newTextParser(s string) *textParser {
	p := new(textParser)
	p.s = s
	p.line = 1
	p.cur.line = 1
	return p
}
|
||||
|
||||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
||||
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
||||
p.cur.err = pe
|
||||
p.done = true
|
||||
return pe
|
||||
}
|
||||
|
||||
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
func isIdentOrNumberChar(c byte) bool {
	switch {
	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
		return true
	case '0' <= c && c <= '9':
		return true
	}
	switch c {
	case '-', '+', '.', '_':
		return true
	}
	return false
}

func isWhitespace(c byte) bool {
	switch c {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}

func isQuote(c byte) bool {
	switch c {
	case '"', '\'':
		return true
	}
	return false
}
|
||||
|
||||
func (p *textParser) skipWhitespace() {
|
||||
i := 0
|
||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
||||
if p.s[i] == '#' {
|
||||
// comment; skip to end of line or input
|
||||
for i < len(p.s) && p.s[i] != '\n' {
|
||||
i++
|
||||
}
|
||||
if i == len(p.s) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if p.s[i] == '\n' {
|
||||
p.line++
|
||||
}
|
||||
i++
|
||||
}
|
||||
p.offset += i
|
||||
p.s = p.s[i:len(p.s)]
|
||||
if len(p.s) == 0 {
|
||||
p.done = true
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textParser) advance() {
|
||||
// Skip whitespace
|
||||
p.skipWhitespace()
|
||||
if p.done {
|
||||
return
|
||||
}
|
||||
|
||||
// Start of non-whitespace
|
||||
p.cur.err = nil
|
||||
p.cur.offset, p.cur.line = p.offset, p.line
|
||||
p.cur.unquoted = ""
|
||||
switch p.s[0] {
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',':
|
||||
// Single symbol
|
||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||
case '"', '\'':
|
||||
// Quoted string
|
||||
i := 1
|
||||
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
||||
if p.s[i] == '\\' && i+1 < len(p.s) {
|
||||
// skip escaped char
|
||||
i++
|
||||
}
|
||||
i++
|
||||
}
|
||||
if i >= len(p.s) || p.s[i] != p.s[0] {
|
||||
p.errorf("unmatched quote")
|
||||
return
|
||||
}
|
||||
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
||||
if err != nil {
|
||||
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
||||
p.cur.unquoted = unq
|
||||
default:
|
||||
i := 0
|
||||
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
||||
i++
|
||||
}
|
||||
if i == 0 {
|
||||
p.errorf("unexpected byte %#x", p.s[0])
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
||||
}
|
||||
p.offset += len(p.cur.value)
|
||||
}
|
||||
|
||||
var (
|
||||
errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||
errBadHex = errors.New("proto: bad hexadecimal")
|
||||
)
|
||||
|
||||
func unquoteC(s string, quote rune) (string, error) {
|
||||
// This is based on C++'s tokenizer.cc.
|
||||
// Despite its name, this is *not* parsing C syntax.
|
||||
// For instance, "\0" is an invalid quoted string.
|
||||
|
||||
// Avoid allocation in trivial cases.
|
||||
simple := true
|
||||
for _, r := range s {
|
||||
if r == '\\' || r == quote {
|
||||
simple = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if simple {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
buf := make([]byte, 0, 3*len(s)/2)
|
||||
for len(s) > 0 {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
if r != '\\' {
|
||||
if r < utf8.RuneSelf {
|
||||
buf = append(buf, byte(r))
|
||||
} else {
|
||||
buf = append(buf, string(r)...)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
ch, tail, err := unescape(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf = append(buf, ch...)
|
||||
s = tail
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func unescape(s string) (ch string, tail string, err error) {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
switch r {
|
||||
case 'a':
|
||||
return "\a", s, nil
|
||||
case 'b':
|
||||
return "\b", s, nil
|
||||
case 'f':
|
||||
return "\f", s, nil
|
||||
case 'n':
|
||||
return "\n", s, nil
|
||||
case 'r':
|
||||
return "\r", s, nil
|
||||
case 't':
|
||||
return "\t", s, nil
|
||||
case 'v':
|
||||
return "\v", s, nil
|
||||
case '?':
|
||||
return "?", s, nil // trigraph workaround
|
||||
case '\'', '"', '\\':
|
||||
return string(r), s, nil
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
|
||||
if len(s) < 2 {
|
||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||
}
|
||||
base := 8
|
||||
ss := s[:2]
|
||||
s = s[2:]
|
||||
if r == 'x' || r == 'X' {
|
||||
base = 16
|
||||
} else {
|
||||
ss = string(r) + ss
|
||||
}
|
||||
i, err := strconv.ParseUint(ss, base, 8)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
case 'u', 'U':
|
||||
n := 4
|
||||
if r == 'U' {
|
||||
n = 8
|
||||
}
|
||||
if len(s) < n {
|
||||
return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
|
||||
}
|
||||
|
||||
bs := make([]byte, n/2)
|
||||
for i := 0; i < n; i += 2 {
|
||||
a, ok1 := unhex(s[i])
|
||||
b, ok2 := unhex(s[i+1])
|
||||
if !ok1 || !ok2 {
|
||||
return "", "", errBadHex
|
||||
}
|
||||
bs[i/2] = a<<4 | b
|
||||
}
|
||||
s = s[n:]
|
||||
return string(bs), s, nil
|
||||
}
|
||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||
}
|
||||
|
||||
// Adapted from src/pkg/strconv/quote.go.
func unhex(b byte) (v byte, ok bool) {
	switch {
	case '0' <= b && b <= '9':
		return b - '0', true
	case 'a' <= b && b <= 'f':
		return b - 'a' + 10, true
	case 'A' <= b && b <= 'F':
		return b - 'A' + 10, true
	}
	return 0, false
}
|
||||
|
||||
// Back off the parser by one token. Can only be done between calls to next().
|
||||
// It makes the next advance() a no-op.
|
||||
func (p *textParser) back() { p.backed = true }
|
||||
|
||||
// Advances the parser and returns the new current token.
|
||||
func (p *textParser) next() *token {
|
||||
if p.backed || p.done {
|
||||
p.backed = false
|
||||
return &p.cur
|
||||
}
|
||||
p.advance()
|
||||
if p.done {
|
||||
p.cur.value = ""
|
||||
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
||||
// Look for multiple quoted strings separated by whitespace,
|
||||
// and concatenate them.
|
||||
cat := p.cur
|
||||
for {
|
||||
p.skipWhitespace()
|
||||
if p.done || !isQuote(p.s[0]) {
|
||||
break
|
||||
}
|
||||
p.advance()
|
||||
if p.cur.err != nil {
|
||||
return &p.cur
|
||||
}
|
||||
cat.value += " " + p.cur.value
|
||||
cat.unquoted += p.cur.unquoted
|
||||
}
|
||||
p.done = false // parser may have seen EOF, but we want to return cat
|
||||
p.cur = cat
|
||||
}
|
||||
return &p.cur
|
||||
}
|
||||
|
||||
func (p *textParser) consumeToken(s string) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != s {
|
||||
p.back()
|
||||
return p.errorf("expected %q, found %q", s, tok.value)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Return a RequiredNotSetError indicating which required field was not set.
|
||||
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < st.NumField(); i++ {
|
||||
if !isNil(sv.Field(i)) {
|
||||
continue
|
||||
}
|
||||
|
||||
props := sprops.Prop[i]
|
||||
if props.Required {
|
||||
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
|
||||
}
|
||||
}
|
||||
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
|
||||
}
|
||||
|
||||
// Returns the index in the struct for the named field, as well as the parsed tag properties.
|
||||
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
|
||||
i, ok := sprops.decoderOrigNames[name]
|
||||
if ok {
|
||||
return i, sprops.Prop[i], true
|
||||
}
|
||||
return -1, nil, false
|
||||
}
|
||||
|
||||
// Consume a ':' from the input stream (if the next token is a colon),
|
||||
// returning an error if a colon is needed but not present.
|
||||
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ":" {
|
||||
// Colon is optional when the field is a group or message.
|
||||
needColon := true
|
||||
switch props.Wire {
|
||||
case "group":
|
||||
needColon = false
|
||||
case "bytes":
|
||||
// A "bytes" field is either a message, a string, or a repeated field;
|
||||
// those three become *T, *string and []T respectively, so we can check for
|
||||
// this field being a pointer to a non-string.
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
// *T or *string
|
||||
if typ.Elem().Kind() == reflect.String {
|
||||
break
|
||||
}
|
||||
} else if typ.Kind() == reflect.Slice {
|
||||
// []T or []*T
|
||||
if typ.Elem().Kind() != reflect.Ptr {
|
||||
break
|
||||
}
|
||||
} else if typ.Kind() == reflect.String {
|
||||
// The proto3 exception is for a string field,
|
||||
// which requires a colon.
|
||||
break
|
||||
}
|
||||
needColon = false
|
||||
}
|
||||
if needColon {
|
||||
return p.errorf("expected ':', found %q", tok.value)
|
||||
}
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
reqCount := sprops.reqCount
|
||||
var reqFieldErr error
|
||||
fieldSet := make(map[string]bool)
|
||||
// A struct is a sequence of "name: value", terminated by one of
|
||||
// '>' or '}', or the end of the input. A name may also be
|
||||
// "[extension]".
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
if tok.value == "[" {
|
||||
// Looks like an extension.
|
||||
//
|
||||
// TODO: Check whether we need to handle
|
||||
// namespace rooted names (e.g. ".something.Foo").
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
var desc *ExtensionDesc
|
||||
// This could be faster, but it's functional.
|
||||
// TODO: Do something smarter than a linear scan.
|
||||
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
|
||||
if d.Name == tok.value {
|
||||
desc = d
|
||||
break
|
||||
}
|
||||
}
|
||||
if desc == nil {
|
||||
return p.errorf("unrecognized extension %q", tok.value)
|
||||
}
|
||||
// Check the extension terminator.
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != "]" {
|
||||
return p.errorf("unrecognized extension terminator %q", tok.value)
|
||||
}
|
||||
|
||||
props := &Properties{}
|
||||
props.Parse(desc.Tag)
|
||||
|
||||
typ := reflect.TypeOf(desc.ExtensionType)
|
||||
if err := p.checkForColon(props, typ); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rep := desc.repeated()
|
||||
|
||||
// Read the extension structure, and set it in
|
||||
// the value we're constructing.
|
||||
var ext reflect.Value
|
||||
if !rep {
|
||||
ext = reflect.New(typ).Elem()
|
||||
} else {
|
||||
ext = reflect.New(typ.Elem()).Elem()
|
||||
}
|
||||
if err := p.readAny(ext, props); err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
||||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
}
|
||||
ep := sv.Addr().Interface().(extendableProto)
|
||||
if !rep {
|
||||
SetExtension(ep, desc, ext.Interface())
|
||||
} else {
|
||||
old, err := GetExtension(ep, desc)
|
||||
var sl reflect.Value
|
||||
if err == nil {
|
||||
sl = reflect.ValueOf(old) // existing slice
|
||||
} else {
|
||||
sl = reflect.MakeSlice(typ, 0, 1)
|
||||
}
|
||||
sl = reflect.Append(sl, ext)
|
||||
SetExtension(ep, desc, sl.Interface())
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// This is a normal, non-extension field.
|
||||
name := tok.value
|
||||
var dst reflect.Value
|
||||
fi, props, ok := structFieldByName(sprops, name)
|
||||
if ok {
|
||||
dst = sv.Field(fi)
|
||||
} else if oop, ok := sprops.OneofTypes[name]; ok {
|
||||
// It is a oneof.
|
||||
props = oop.Prop
|
||||
nv := reflect.New(oop.Type.Elem())
|
||||
dst = nv.Elem().Field(0)
|
||||
sv.Field(oop.Field).Set(nv)
|
||||
}
|
||||
if !dst.IsValid() {
|
||||
return p.errorf("unknown field name %q in %v", name, st)
|
||||
}
|
||||
|
||||
if dst.Kind() == reflect.Map {
|
||||
// Consume any colon.
|
||||
if err := p.checkForColon(props, dst.Type()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Construct the map if it doesn't already exist.
|
||||
if dst.IsNil() {
|
||||
dst.Set(reflect.MakeMap(dst.Type()))
|
||||
}
|
||||
key := reflect.New(dst.Type().Key()).Elem()
|
||||
val := reflect.New(dst.Type().Elem()).Elem()
|
||||
|
||||
// The map entry should be this sequence of tokens:
|
||||
// < key : KEY value : VALUE >
|
||||
// Technically the "key" and "value" could come in any order,
|
||||
// but in practice they won't.
|
||||
|
||||
tok := p.next()
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
if err := p.consumeToken("key"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeToken(":"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(key, props.mkeyprop); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeToken("value"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(val, props.mvalprop); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeToken(terminator); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dst.SetMapIndex(key, val)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check that it's not already set if it's not a repeated field.
|
||||
if !props.Repeated && fieldSet[name] {
|
||||
return p.errorf("non-repeated field %q was repeated", name)
|
||||
}
|
||||
|
||||
if err := p.checkForColon(props, dst.Type()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse into the field.
|
||||
fieldSet[name] = true
|
||||
if err := p.readAny(dst, props); err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
||||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
} else if props.Required {
|
||||
reqCount--
|
||||
}
|
||||
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if reqCount > 0 {
|
||||
return p.missingRequiredFieldError(sv)
|
||||
}
|
||||
return reqFieldErr
|
||||
}
|
||||
|
||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
||||
// It is used in readStruct to provide backward compatibility.
|
||||
func (p *textParser) consumeOptionalSeparator() error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ";" && tok.value != "," {
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == "" {
|
||||
return p.errorf("unexpected EOF")
|
||||
}
|
||||
|
||||
switch fv := v; fv.Kind() {
|
||||
case reflect.Slice:
|
||||
at := v.Type()
|
||||
if at.Elem().Kind() == reflect.Uint8 {
|
||||
// Special case for []byte
|
||||
if tok.value[0] != '"' && tok.value[0] != '\'' {
|
||||
// Deliberately written out here, as the error after
|
||||
// this switch statement would write "invalid []byte: ...",
|
||||
// which is not as user-friendly.
|
||||
return p.errorf("invalid string: %v", tok.value)
|
||||
}
|
||||
bytes := []byte(tok.unquoted)
|
||||
fv.Set(reflect.ValueOf(bytes))
|
||||
return nil
|
||||
}
|
||||
// Repeated field.
|
||||
if tok.value == "[" {
|
||||
// Repeated field with list notation, like [1,2,3].
|
||||
for {
|
||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||
err := p.readAny(fv.Index(fv.Len()-1), props)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == "]" {
|
||||
break
|
||||
}
|
||||
if tok.value != "," {
|
||||
return p.errorf("Expected ']' or ',' found %q", tok.value)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// One value of the repeated field.
|
||||
p.back()
|
||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||
return p.readAny(fv.Index(fv.Len()-1), props)
|
||||
case reflect.Bool:
|
||||
// Either "true", "false", 1 or 0.
|
||||
switch tok.value {
|
||||
case "true", "1":
|
||||
fv.SetBool(true)
|
||||
return nil
|
||||
case "false", "0":
|
||||
fv.SetBool(false)
|
||||
return nil
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
v := tok.value
|
||||
// Ignore 'f' for compatibility with output generated by C++, but don't
|
||||
// remove 'f' when the value is "-inf" or "inf".
|
||||
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
|
||||
v = v[:len(v)-1]
|
||||
}
|
||||
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
|
||||
fv.SetFloat(f)
|
||||
return nil
|
||||
}
|
||||
case reflect.Int32:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||
fv.SetInt(x)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(props.Enum) == 0 {
|
||||
break
|
||||
}
|
||||
m, ok := enumValueMaps[props.Enum]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
x, ok := m[tok.value]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
fv.SetInt(int64(x))
|
||||
return nil
|
||||
case reflect.Int64:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
||||
fv.SetInt(x)
|
||||
return nil
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// A basic field (indirected through pointer), or a repeated message/group
|
||||
p.back()
|
||||
fv.Set(reflect.New(fv.Type().Elem()))
|
||||
return p.readAny(fv.Elem(), props)
|
||||
case reflect.String:
|
||||
if tok.value[0] == '"' || tok.value[0] == '\'' {
|
||||
fv.SetString(tok.unquoted)
|
||||
return nil
|
||||
}
|
||||
case reflect.Struct:
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "{":
|
||||
terminator = "}"
|
||||
case "<":
|
||||
terminator = ">"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
|
||||
return p.readStruct(fv, terminator)
|
||||
case reflect.Uint32:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
fv.SetUint(uint64(x))
|
||||
return nil
|
||||
}
|
||||
case reflect.Uint64:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||
fv.SetUint(x)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return p.errorf("invalid %v: %v", v.Type(), tok.value)
|
||||
}
|
||||
|
||||
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
|
||||
// before starting to unmarshal, so any existing data in pb is always removed.
|
||||
// If a required field is not set and no other error occurs,
|
||||
// UnmarshalText returns *RequiredNotSetError.
|
||||
func UnmarshalText(s string, pb Message) error {
|
||||
if um, ok := pb.(encoding.TextUnmarshaler); ok {
|
||||
err := um.UnmarshalText([]byte(s))
|
||||
return err
|
||||
}
|
||||
pb.Reset()
|
||||
v := reflect.ValueOf(pb)
|
||||
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
|
||||
return pe
|
||||
}
|
||||
return nil
|
||||
}
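
For the parser removed in this same hunk, a correspondingly small round-trip sketch (reusing the hypothetical Example type from the marshaling sketch earlier) could look like this; parseExample is an illustrative helper, not part of the library.

// parseExample shows the UnmarshalText entry point; the text parsed here is
// the same format MarshalTextString produces.
func parseExample(s string) (*Example, error) {
	msg := new(Example)
	if err := proto.UnmarshalText(s, msg); err != nil {
		return nil, err
	}
	return msg, nil
}

// parseExample(`name: "demo" count: 3`) yields a message equivalent to the one
// marshaled above; an unset required field surfaces as a *proto.RequiredNotSetError.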
|
|
@ -1,13 +0,0 @@
|
|||
package jmespath

import "github.com/jmespath/go-jmespath"

// Fuzz will fuzz test the JMESPath parser.
func Fuzz(data []byte) int {
	p := jmespath.NewParser()
	_, err := p.Parse(string(data))
	if err != nil {
		return 1
	}
	return 0
}
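
This Fuzz entry point follows the go-fuzz (github.com/dvyukov/go-fuzz) convention: it must never panic, and its integer return value is only a corpus-priority hint to the fuzzing driver. A minimal sanity check that exercises it outside the driver might look like the sketch below; the test name and inputs are illustrative assumptions.

package jmespath

import "testing"

// TestFuzzSmoke is a hypothetical smoke test: Fuzz must return without
// panicking for both valid and invalid JMESPath expressions.
func TestFuzzSmoke(t *testing.T) {
	for _, in := range []string{"foo.bar", "a[0]", "!!", ""} {
		Fuzz([]byte(in))
	}
}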
|
|
@ -1,201 +0,0 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|