Merge pull request #1714 from hashicorp/e-godeps

Manage dependencies via Godep

commit 87678817f9

GNUmakefile | 34
@@ -1,12 +1,11 @@
 GOTOOLS = github.com/mitchellh/gox golang.org/x/tools/cmd/stringer \
 	github.com/jteeuwen/go-bindata/... github.com/elazarl/go-bindata-assetfs/...
-DEPS = $(shell go list -f '{{range .TestImports}}{{.}} {{end}}' ./...)
-PACKAGES = $(shell go list ./...)
+PACKAGES=$(shell go list ./... | grep -v '^github.com/hashicorp/consul/vendor/')
 VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods \
 	-nilfunc -printf -rangeloops -shift -structtags -unsafeptr
 VERSION?=$(shell awk -F\" '/^const Version/ { print $$2; exit }' version.go)

-all: deps format
+all: format tools
 	@mkdir -p bin/
 	@bash --norc -i ./scripts/build.sh

@@ -26,30 +25,16 @@ cov:
 	gocov test ./... | gocov-html > /tmp/coverage.html
 	open /tmp/coverage.html

-deps:
-	@echo "--> Installing build dependencies"
-	@go get -v $(GOTOOLS)
-	@go get -d -v ./... $(DEPS)
-
-updatedeps: deps
-	go get -u -v $(GOTOOLS)
-	go list ./... \
-		| xargs go list -f '{{join .Deps "\n"}}' \
-		| grep -v github.com/hashicorp/consul \
-		| grep -v '/internal/' \
-		| sort -u \
-		| xargs go get -f -u -v
-
-test: deps
+test:
 	@$(MAKE) vet
 	@./scripts/verify_no_uuid.sh
 	@./scripts/test.sh

-cover: deps
+cover:
 	./scripts/verify_no_uuid.sh
 	go list ./... | xargs -n1 go test --cover

-format: deps
+format:
 	@echo "--> Running go fmt"
 	@go fmt $(PACKAGES)

@@ -65,21 +50,24 @@ vet:
 	fi

 # generate runs `go generate` to build the dynamically generated source files
-generate: deps
+generate:
 	find . -type f -name '.DS_Store' -delete
 	go generate ./...

 # generates the static web ui
-static-assets: deps
+static-assets:
 	@echo "--> Generating static assets"
 	@go-bindata-assetfs -pkg agent -prefix pkg ./pkg/web_ui/...
 	@mv bindata_assetfs.go command/agent
 	$(MAKE) format

+tools:
+	go get -u -v $(GOTOOLS)
+
 web:
 	./scripts/website_run.sh

 web-push:
 	./scripts/website_push.sh

-.PHONY: all bin dev dist cov deps test vet web web-push generate test-nodep static-assets
+.PHONY: all bin dev dist cov test vet web web-push generate static-assets tools
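With the `deps` and `updatedeps` targets gone, third-party packages are expected to come from the vendored tree that the new `PACKAGES` filter excludes from formatting and testing; only the build tools are still fetched over the network. The resulting local workflow looks roughly like this (the target names come from the Makefile above; running them in this order is my assumption, not something the diff prescribes):

```sh
# One-time setup: install the build tools listed in GOTOOLS
# (gox, stringer, go-bindata, go-bindata-assetfs)
make tools

# Build; `all` now depends on format + tools instead of deps
make

# Vet and run the suite; no `go get` of library dependencies happens here,
# since everything is pinned under the vendor/ directory
make test
```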
Godeps/Godeps.json

@@ -1,6 +1,9 @@
 {
 	"ImportPath": "github.com/hashicorp/consul",
 	"GoVersion": "go1.5",
+	"Packages": [
+		"./..."
+	],
 	"Deps": [
 		{
 			"ImportPath": "github.com/DataDog/datadog-go/statsd",
@@ -16,7 +19,11 @@
 		},
 		{
 			"ImportPath": "github.com/armon/go-radix",
-			"Rev": "fbd82e84e2b13651f3abc5ffd26b65ba71bc8f93"
+			"Rev": "4239b77079c7b5d1243b7b4736304ce8ddb6f0f2"
+		},
+		{
+			"ImportPath": "github.com/beorn7/perks/quantile",
+			"Rev": "b965b613227fddccbfffe13eae360ed3fa822f8d"
 		},
 		{
 			"ImportPath": "github.com/bgentry/speakeasy",
@@ -24,8 +31,8 @@
 		},
 		{
 			"ImportPath": "github.com/boltdb/bolt",
-			"Comment": "v1.1.0-40-g34a0fa5",
-			"Rev": "34a0fa5307f7562980fb8e7ff4723f7987edf49b"
+			"Comment": "v1.1.0-65-gee4a088",
+			"Rev": "ee4a0888a9abe7eefe5a0992ca4cb06864839873"
 		},
 		{
 			"ImportPath": "github.com/elazarl/go-bindata-assetfs",
@@ -33,7 +40,11 @@
 		},
 		{
 			"ImportPath": "github.com/fsouza/go-dockerclient",
-			"Rev": "299d728486342c894e7fafd68e3a4b89623bef1d"
+			"Rev": "7b651349f9479f5114913eefbfd3c4eeddd79ab4"
+		},
+		{
+			"ImportPath": "github.com/golang/protobuf/proto",
+			"Rev": "0dfe8f37844c14cb32c7247925270e0f7ba90973"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/errwrap",
@@ -71,13 +82,17 @@
 			"ImportPath": "github.com/hashicorp/go-syslog",
 			"Rev": "42a2b573b664dbf281bd48c3cc12c086b17a39ba"
 		},
+		{
+			"ImportPath": "github.com/hashicorp/go-uuid",
+			"Rev": "36289988d83ca270bc07c234c36f364b0dd9c9a7"
+		},
 		{
 			"ImportPath": "github.com/hashicorp/golang-lru",
 			"Rev": "5c7531c003d8bf158b0fe5063649a2f41a822146"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl",
-			"Rev": "197e8d3cf42199cfd53cd775deb37f3637234635"
+			"Rev": "578dd9746824a54637686b51a41bad457a56bcef"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/logutils",
@@ -93,7 +108,7 @@
 		},
 		{
 			"ImportPath": "github.com/hashicorp/raft",
-			"Rev": "d136cd15dfb7876fd7c89cad1995bc4f19ceb294"
+			"Rev": "057b893fd996696719e98b6c44649ea14968c811"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/raft-boltdb",
@@ -105,13 +120,13 @@
 		},
 		{
 			"ImportPath": "github.com/hashicorp/serf/coordinate",
-			"Comment": "v0.7.0-1-g39c7c06",
-			"Rev": "39c7c06298b480560202bec00c2c77e974e88792"
+			"Comment": "v0.7.0-12-ge4ec8cc",
+			"Rev": "e4ec8cc423bbe20d26584b96efbeb9102e16d05f"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/serf/serf",
-			"Comment": "v0.7.0-1-g39c7c06",
-			"Rev": "39c7c06298b480560202bec00c2c77e974e88792"
+			"Comment": "v0.7.0-12-ge4ec8cc",
+			"Rev": "e4ec8cc423bbe20d26584b96efbeb9102e16d05f"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/yamux",
@@ -125,18 +140,48 @@
 			"ImportPath": "github.com/mattn/go-isatty",
 			"Rev": "56b76bdf51f7708750eac80fa38b952bb9f32639"
 		},
+		{
+			"ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
+			"Rev": "d0c3fe89de86839aecf2e0579c40ba3bb336a453"
+		},
 		{
 			"ImportPath": "github.com/miekg/dns",
-			"Rev": "1756430e42a7b2ecded216a9fdd37d002c116df5"
+			"Rev": "75e6e86cc601825c5dbcd4e0c209eab180997cd7"
 		},
 		{
 			"ImportPath": "github.com/mitchellh/cli",
-			"Rev": "43a4bc367e0d53f561d3d985b9dca84e15bd0554"
+			"Rev": "cb6853d606ea4a12a15ac83cc43503df99fd28fb"
 		},
 		{
 			"ImportPath": "github.com/mitchellh/mapstructure",
 			"Rev": "281073eb9eb092240d33ef253c404f1cca550309"
 		},
+		{
+			"ImportPath": "github.com/prometheus/client_golang/prometheus",
+			"Comment": "0.7.0-70-g15006a7",
+			"Rev": "15006a7ed88e73201c4e6142a2e66b54ae5fdf00"
+		},
+		{
+			"ImportPath": "github.com/prometheus/client_model/go",
+			"Comment": "model-0.0.2-12-gfa8ad6f",
+			"Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6"
+		},
+		{
+			"ImportPath": "github.com/prometheus/common/expfmt",
+			"Rev": "23070236b1ebff452f494ae831569545c2b61d26"
+		},
+		{
+			"ImportPath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
+			"Rev": "23070236b1ebff452f494ae831569545c2b61d26"
+		},
+		{
+			"ImportPath": "github.com/prometheus/common/model",
+			"Rev": "23070236b1ebff452f494ae831569545c2b61d26"
+		},
+		{
+			"ImportPath": "github.com/prometheus/procfs",
+			"Rev": "406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8"
+		},
 		{
 			"ImportPath": "github.com/ryanuber/columnize",
 			"Comment": "v2.0.1-8-g983d3a5",
@@ -144,7 +189,7 @@
 		},
 		{
 			"ImportPath": "golang.org/x/sys/unix",
-			"Rev": "833a04a10549a95dc34458c195cbad61bbb6cb4d"
+			"Rev": "20457ee8ea8546920d3f4e19e405da45250dc5a5"
 		}
 	]
 }
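Beyond the vendoring switch itself, the manifest bumps the pinned revisions of several libraries (go-radix, bolt, go-dockerclient, raft, serf, dns, cli, x/sys) and records new ones (go-uuid, protobuf, perks, the prometheus packages). A revision bump like the raft one above would typically be produced with godep roughly as follows (a sketch of the usual godep workflow, not something recorded in the commit):

```sh
# Pull a newer revision of the dependency into GOPATH
go get -u github.com/hashicorp/raft

# Rewrite the vendored copy and its Godeps/Godeps.json entry to match
godep update github.com/hashicorp/raft
```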
Godeps/Readme

@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
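godep derives this tree from whatever dependency revisions are checked out in GOPATH, so the round trip looks roughly like this (a minimal sketch, assuming godep is installed from the URL above):

```sh
# Install godep itself
go get github.com/tools/godep

# Snapshot the GOPATH copies of all dependencies into the manifest + vendor tree
godep save ./...

# On another checkout, restore GOPATH to the pinned revisions
godep restore
```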
@@ -1,72 +0,0 @@
{
	"ImportPath": "github.com/hashicorp/consul",
	"GoVersion": "go1.3rc1",
	"Deps": [
		{
			"ImportPath": "github.com/armon/circbuf",
			"Rev": "f092b4f207b6e5cce0569056fba9e1a2735cb6cf"
		},
		{
			"ImportPath": "github.com/armon/go-metrics",
			"Rev": "02567bbc4f518a43853d262b651a3c8257c3f141"
		},
		{
			"ImportPath": "github.com/armon/gomdb",
			"Rev": "a8e036c4dabe7437014ecf9dbc03c6f6f0766ef8"
		},
		{
			"ImportPath": "github.com/hashicorp/go-syslog",
			"Rev": "ac3963b72ac367e48b1e68a831e62b93fb69091c"
		},
		{
			"ImportPath": "github.com/hashicorp/logutils",
			"Rev": "8e0820fe7ac5eb2b01626b1d99df47c5449eb2d8"
		},
		{
			"ImportPath": "github.com/hashicorp/memberlist",
			"Rev": "17d39b695094be943bfb98442a80b082e6b9ac47"
		},
		{
			"ImportPath": "github.com/hashicorp/raft",
			"Rev": "8bdafd1e83e7d85ffeb5dc8f0857dbddd61edba5"
		},
		{
			"ImportPath": "github.com/hashicorp/raft-mdb",
			"Rev": "70e1c88f4b6fb06fc94cc02109243160a443609d"
		},
		{
			"ImportPath": "github.com/hashicorp/serf/serf",
			"Comment": "v0.6.1-7-g83f220b",
			"Rev": "83f220b4faa0614f49649156118b750b5b12fafb"
		},
		{
			"ImportPath": "github.com/hashicorp/yamux",
			"Rev": "35417c7dfab4085d7c921b33e4d5ea6cf9ceef65"
		},
		{
			"ImportPath": "github.com/inconshreveable/muxado",
			"Rev": "f693c7e88ba316d1a0ae3e205e22a01aa3ec2848"
		},
		{
			"ImportPath": "github.com/miekg/dns",
			"Rev": "05cfaca9f0712f44206ecbfa65a6769434164e7a"
		},
		{
			"ImportPath": "github.com/mitchellh/cli",
			"Rev": "975a7477b1507ea6bb888c48108e05d26fb30434"
		},
		{
			"ImportPath": "github.com/mitchellh/mapstructure",
			"Rev": "6fb2c832bcac61d01212ab1d172f7a14a8585b07"
		},
		{
			"ImportPath": "github.com/ryanuber/columnize",
			"Comment": "v2.0.1",
			"Rev": "785d943a7b6886e0bb2f139a60487b823dd8d9de"
		},
		{
			"ImportPath": "github.com/ugorji/go/codec",
			"Rev": "71c2886f5a673a35f909803f38ece5810165097b"
		}
	]
}
@@ -1,72 +0,0 @@
{
	"ImportPath": "github.com/hashicorp/consul",
	"GoVersion": "go1.3",
	"Deps": [
		{
			"ImportPath": "github.com/armon/circbuf",
			"Rev": "f092b4f207b6e5cce0569056fba9e1a2735cb6cf"
		},
		{
			"ImportPath": "github.com/armon/go-metrics",
			"Rev": "02567bbc4f518a43853d262b651a3c8257c3f141"
		},
		{
			"ImportPath": "github.com/armon/gomdb",
			"Rev": "a8e036c4dabe7437014ecf9dbc03c6f6f0766ef8"
		},
		{
			"ImportPath": "github.com/hashicorp/go-syslog",
			"Rev": "ac3963b72ac367e48b1e68a831e62b93fb69091c"
		},
		{
			"ImportPath": "github.com/hashicorp/logutils",
			"Rev": "8e0820fe7ac5eb2b01626b1d99df47c5449eb2d8"
		},
		{
			"ImportPath": "github.com/hashicorp/memberlist",
			"Rev": "e6a282556f0e8f15e9a53dcb0d14912a3c2fb141"
		},
		{
			"ImportPath": "github.com/hashicorp/raft",
			"Rev": "35f5fa082f5a064595d84715b0cf8821f002e9ac"
		},
		{
			"ImportPath": "github.com/hashicorp/raft-mdb",
			"Rev": "9076b4b956c1c4c8a47117608b612bda2cb5f481"
		},
		{
			"ImportPath": "github.com/hashicorp/serf/serf",
			"Comment": "v0.6.3-1-g7f260e7",
			"Rev": "7f260e70a89739bd38c1f0bf3b74c0e1c1ee617f"
		},
		{
			"ImportPath": "github.com/hashicorp/yamux",
			"Rev": "35417c7dfab4085d7c921b33e4d5ea6cf9ceef65"
		},
		{
			"ImportPath": "github.com/inconshreveable/muxado",
			"Rev": "f693c7e88ba316d1a0ae3e205e22a01aa3ec2848"
		},
		{
			"ImportPath": "github.com/miekg/dns",
			"Rev": "9af5c1f8a8a71bc5c8539d16cdc40b4a47ee7024"
		},
		{
			"ImportPath": "github.com/mitchellh/cli",
			"Rev": "eaf0e415fc517a431dca53c7b2e7559d42238ebe"
		},
		{
			"ImportPath": "github.com/mitchellh/mapstructure",
			"Rev": "6fb2c832bcac61d01212ab1d172f7a14a8585b07"
		},
		{
			"ImportPath": "github.com/ryanuber/columnize",
			"Comment": "v2.0.1",
			"Rev": "785d943a7b6886e0bb2f139a60487b823dd8d9de"
		},
		{
			"ImportPath": "github.com/ugorji/go/codec",
			"Rev": "71c2886f5a673a35f909803f38ece5810165097b"
		}
	]
}
@@ -1,97 +0,0 @@
{
	"ImportPath": "github.com/hashicorp/consul",
	"GoVersion": "go1.3.1",
	"Deps": [
		{
			"ImportPath": "github.com/armon/circbuf",
			"Rev": "f092b4f207b6e5cce0569056fba9e1a2735cb6cf"
		},
		{
			"ImportPath": "github.com/armon/consul-api",
			"Rev": "045662de1042be0662fe4a1e21b57c8f7669261a"
		},
		{
			"ImportPath": "github.com/armon/go-metrics",
			"Rev": "2b75159ce5d3641fb35b5a159cff309ac3cf4177"
		},
		{
			"ImportPath": "github.com/armon/go-radix",
			"Rev": "b045fc0ad3587e8620fb42a0dea882cf8c08aef9"
		},
		{
			"ImportPath": "github.com/armon/gomdb",
			"Rev": "a8e036c4dabe7437014ecf9dbc03c6f6f0766ef8"
		},
		{
			"ImportPath": "github.com/hashicorp/go-checkpoint",
			"Rev": "0bf0bbaa84938f68abdc85d158c6818674a9ae5d"
		},
		{
			"ImportPath": "github.com/hashicorp/go-syslog",
			"Rev": "ac3963b72ac367e48b1e68a831e62b93fb69091c"
		},
		{
			"ImportPath": "github.com/hashicorp/golang-lru",
			"Rev": "4dfff096c4973178c8f35cf6dd1a732a0a139370"
		},
		{
			"ImportPath": "github.com/hashicorp/hcl",
			"Rev": "a0a5d2873ec34d649ced122e53b180c27474f3a3"
		},
		{
			"ImportPath": "github.com/hashicorp/logutils",
			"Rev": "8e0820fe7ac5eb2b01626b1d99df47c5449eb2d8"
		},
		{
			"ImportPath": "github.com/hashicorp/memberlist",
			"Rev": "def5afe3702fce72d72922fb44ef2b8e5608b205"
		},
		{
			"ImportPath": "github.com/hashicorp/raft",
			"Rev": "35f5fa082f5a064595d84715b0cf8821f002e9ac"
		},
		{
			"ImportPath": "github.com/hashicorp/raft-mdb",
			"Rev": "9076b4b956c1c4c8a47117608b612bda2cb5f481"
		},
		{
			"ImportPath": "github.com/hashicorp/serf/serf",
			"Comment": "v0.6.3-7-g4d121f5",
			"Rev": "4d121f5786d0c1eac445189b83f901bdd63bd6e6"
		},
		{
			"ImportPath": "github.com/hashicorp/terraform/helper/multierror",
			"Comment": "v0.2.1-14-g58a482d",
			"Rev": "58a482d67742b9bc43d28831b1fe70388101b9c4"
		},
		{
			"ImportPath": "github.com/hashicorp/yamux",
			"Rev": "35417c7dfab4085d7c921b33e4d5ea6cf9ceef65"
		},
		{
			"ImportPath": "github.com/inconshreveable/muxado",
			"Rev": "f693c7e88ba316d1a0ae3e205e22a01aa3ec2848"
		},
		{
			"ImportPath": "github.com/miekg/dns",
			"Rev": "818abf8202e379c51de8ebce6df6ed5b042824d0"
		},
		{
			"ImportPath": "github.com/mitchellh/cli",
			"Rev": "bfacda5ba006a32b10ddfe2abad56c11661573eb"
		},
		{
			"ImportPath": "github.com/mitchellh/mapstructure",
			"Rev": "740c764bc6149d3f1806231418adb9f52c11bcbf"
		},
		{
			"ImportPath": "github.com/ryanuber/columnize",
			"Comment": "v2.0.1-5-gda1ef32",
			"Rev": "da1ef32ab26acc19d36b7382ef853892a544a983"
		},
		{
			"ImportPath": "github.com/ugorji/go/codec",
			"Rev": "e906e395b9d45d3230e800c8ad1f92f99764e753"
		}
	]
}
@@ -1,97 +0,0 @@
{
	"ImportPath": "github.com/hashicorp/consul",
	"GoVersion": "go1.3.3",
	"Deps": [
		{
			"ImportPath": "github.com/armon/circbuf",
			"Rev": "f092b4f207b6e5cce0569056fba9e1a2735cb6cf"
		},
		{
			"ImportPath": "github.com/armon/consul-api",
			"Rev": "1b81c8e0c4cbf1d382310e4c0dc11221632e79d1"
		},
		{
			"ImportPath": "github.com/armon/go-metrics",
			"Rev": "2b75159ce5d3641fb35b5a159cff309ac3cf4177"
		},
		{
			"ImportPath": "github.com/armon/go-radix",
			"Rev": "b045fc0ad3587e8620fb42a0dea882cf8c08aef9"
		},
		{
			"ImportPath": "github.com/armon/gomdb",
			"Rev": "a8e036c4dabe7437014ecf9dbc03c6f6f0766ef8"
		},
		{
			"ImportPath": "github.com/hashicorp/go-checkpoint",
			"Rev": "89ef2a697dd8cdb4623097d5bb9acdb19a470767"
		},
		{
			"ImportPath": "github.com/hashicorp/go-msgpack/codec",
			"Rev": "71c2886f5a673a35f909803f38ece5810165097b"
		},
		{
			"ImportPath": "github.com/hashicorp/go-syslog",
			"Rev": "ac3963b72ac367e48b1e68a831e62b93fb69091c"
		},
		{
			"ImportPath": "github.com/hashicorp/golang-lru",
			"Rev": "253b2dc1ca8bae42c3b5b6e53dd2eab1a7551116"
		},
		{
			"ImportPath": "github.com/hashicorp/hcl",
			"Rev": "e51eabcdf801f663738fa12f4340fbad13062738"
		},
		{
			"ImportPath": "github.com/hashicorp/logutils",
			"Rev": "23b0af5510a2d1442103ef83ffcf53eb82f3debc"
		},
		{
			"ImportPath": "github.com/hashicorp/memberlist",
			"Rev": "16d947e2d4b3f1fe508ee1d9b6ec34b8fd2e96d8"
		},
		{
			"ImportPath": "github.com/hashicorp/raft",
			"Rev": "cc9710ab540985954a67c108f414aa3152f5916f"
		},
		{
			"ImportPath": "github.com/hashicorp/raft-mdb",
			"Rev": "6f52d0ce62a34e3f5bd29aa4d7068030d700d94a"
		},
		{
			"ImportPath": "github.com/hashicorp/serf/serf",
			"Comment": "v0.6.3-60-g0479bc1",
			"Rev": "0479bc1b942fd84205587f7e73867ac78809966b"
		},
		{
			"ImportPath": "github.com/hashicorp/terraform/helper/multierror",
			"Comment": "v0.3.1-25-g2d11732",
			"Rev": "2d117326edb33b7155d1ec9d0ab9d3542ba1b230"
		},
		{
			"ImportPath": "github.com/hashicorp/yamux",
			"Rev": "9feabe6854fadca1abec9cd3bd2a613fe9a34000"
		},
		{
			"ImportPath": "github.com/inconshreveable/muxado",
			"Rev": "f693c7e88ba316d1a0ae3e205e22a01aa3ec2848"
		},
		{
			"ImportPath": "github.com/miekg/dns",
			"Rev": "dc30c7cd4ed2fc8af73d49da4ee285404958b8bd"
		},
		{
			"ImportPath": "github.com/mitchellh/cli",
			"Rev": "e3c2e3d39391e9beb9660ccd6b4bd9a2f38dd8a0"
		},
		{
			"ImportPath": "github.com/mitchellh/mapstructure",
			"Rev": "740c764bc6149d3f1806231418adb9f52c11bcbf"
		},
		{
			"ImportPath": "github.com/ryanuber/columnize",
			"Comment": "v2.0.1-6-g44cb478",
			"Rev": "44cb4788b2ec3c3d158dd3d1b50aba7d66f4b59a"
		}
	]
}
@@ -1,142 +0,0 @@
{
	"ImportPath": "github.com/hashicorp/consul",
	"GoVersion": "go1.4.2",
	"Deps": [
		{
			"ImportPath": "github.com/armon/circbuf",
			"Rev": "f092b4f207b6e5cce0569056fba9e1a2735cb6cf"
		},
		{
			"ImportPath": "github.com/armon/go-metrics",
			"Rev": "88b7658f24511c4b885942b26e9ea7a61ee37ebc"
		},
		{
			"ImportPath": "github.com/armon/go-radix",
			"Rev": "e39d623f12e8e41c7b5529e9a9dd67a1e2261f80"
		},
		{
			"ImportPath": "github.com/armon/gomdb",
			"Rev": "151f2e08ef45cb0e57d694b2562f351955dff572"
		},
		{
			"ImportPath": "github.com/golang/protobuf/proto",
			"Rev": "5677a0e3d5e89854c9974e1256839ee23f8233ca"
		},
		{
			"ImportPath": "github.com/hashicorp/go-checkpoint",
			"Rev": "88326f6851319068e7b34981032128c0b1a6524d"
		},
		{
			"ImportPath": "github.com/hashicorp/go-msgpack/codec",
			"Rev": "71c2886f5a673a35f909803f38ece5810165097b"
		},
		{
			"ImportPath": "github.com/hashicorp/go-multierror",
			"Rev": "fcdddc395df1ddf4247c69bd436e84cfa0733f7e"
		},
		{
			"ImportPath": "github.com/hashicorp/go-syslog",
			"Rev": "42a2b573b664dbf281bd48c3cc12c086b17a39ba"
		},
		{
			"ImportPath": "github.com/hashicorp/golang-lru",
			"Rev": "f09f965649501e2ac1b0c310c632a7bebdbdc1d4"
		},
		{
			"ImportPath": "github.com/hashicorp/hcl",
			"Rev": "513e04c400ee2e81e97f5e011c08fb42c6f69b84"
		},
		{
			"ImportPath": "github.com/hashicorp/logutils",
			"Rev": "23b0af5510a2d1442103ef83ffcf53eb82f3debc"
		},
		{
			"ImportPath": "github.com/hashicorp/memberlist",
			"Rev": "9a1e242e454d2443df330bdd51a436d5a9058fc4"
		},
		{
			"ImportPath": "github.com/hashicorp/net-rpc-msgpackrpc",
			"Rev": "d377902b7aba83dd3895837b902f6cf3f71edcb2"
		},
		{
			"ImportPath": "github.com/hashicorp/raft",
			"Rev": "a88bfa8385bc52c1f25d0fc02d1b55a2708d04ab"
		},
		{
			"ImportPath": "github.com/hashicorp/raft-mdb",
			"Rev": "4ec3694ffbc74d34f7532e70ef2e9c3546a0c0b0"
		},
		{
			"ImportPath": "github.com/hashicorp/scada-client",
			"Rev": "c26580cfe35393f6f4bf1b9ba55e6afe33176bae"
		},
		{
			"ImportPath": "github.com/hashicorp/serf/serf",
			"Comment": "v0.6.4-3-gcc95df9",
			"Rev": "cc95df950d69acfd117128c98a3bc6b64c1da3fd"
		},
		{
			"ImportPath": "github.com/hashicorp/yamux",
			"Rev": "b4f943b3f25da97dec8e26bee1c3269019de070d"
		},
		{
			"ImportPath": "github.com/inconshreveable/muxado",
			"Rev": "f693c7e88ba316d1a0ae3e205e22a01aa3ec2848"
		},
		{
			"ImportPath": "github.com/matttproud/golang_protobuf_extensions/ext",
			"Rev": "ba7d65ac66e9da93a714ca18f6d1bc7a0c09100c"
		},
		{
			"ImportPath": "github.com/miekg/dns",
			"Rev": "64fea017a260b4ccbfbb3f60ab027e0398cd8f6f"
		},
		{
			"ImportPath": "github.com/mitchellh/cli",
			"Rev": "e3c2e3d39391e9beb9660ccd6b4bd9a2f38dd8a0"
		},
		{
			"ImportPath": "github.com/mitchellh/mapstructure",
			"Rev": "442e588f213303bec7936deba67901f8fc8f18b1"
		},
		{
			"ImportPath": "github.com/prometheus/client_golang/_vendor/goautoneg",
			"Comment": "0.1.0-33-g38dbb2e",
			"Rev": "38dbb2e26868d15db0275ddbaac0cbe450ec03de"
		},
		{
			"ImportPath": "github.com/prometheus/client_golang/_vendor/perks/quantile",
			"Comment": "0.1.0-33-g38dbb2e",
			"Rev": "38dbb2e26868d15db0275ddbaac0cbe450ec03de"
		},
		{
			"ImportPath": "github.com/prometheus/client_golang/model",
			"Comment": "0.1.0-33-g38dbb2e",
			"Rev": "38dbb2e26868d15db0275ddbaac0cbe450ec03de"
		},
		{
			"ImportPath": "github.com/prometheus/client_golang/prometheus",
			"Comment": "0.1.0-33-g38dbb2e",
			"Rev": "38dbb2e26868d15db0275ddbaac0cbe450ec03de"
		},
		{
			"ImportPath": "github.com/prometheus/client_golang/text",
			"Comment": "0.1.0-33-g38dbb2e",
			"Rev": "38dbb2e26868d15db0275ddbaac0cbe450ec03de"
		},
		{
			"ImportPath": "github.com/prometheus/client_model/go",
			"Comment": "model-0.0.2-12-gfa8ad6f",
			"Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6"
		},
		{
			"ImportPath": "github.com/prometheus/procfs",
			"Rev": "92faa308558161acab0ada1db048e9996ecec160"
		},
		{
			"ImportPath": "github.com/ryanuber/columnize",
			"Comment": "v2.0.1-6-g44cb478",
			"Rev": "44cb4788b2ec3c3d158dd3d1b50aba7d66f4b59a"
		}
	]
}
@@ -1,118 +0,0 @@
{
	"ImportPath": "github.com/hashicorp/consul",
	"GoVersion": "go1.4.2",
	"Deps": [
		{
			"ImportPath": "github.com/armon/circbuf",
			"Rev": "f092b4f207b6e5cce0569056fba9e1a2735cb6cf"
		},
		{
			"ImportPath": "github.com/armon/go-metrics",
			"Rev": "a54701ebec11868993bc198c3f315353e9de2ed6"
		},
		{
			"ImportPath": "github.com/armon/go-radix",
			"Rev": "0bab926c3433cfd6490c6d3c504a7b471362390c"
		},
		{
			"ImportPath": "github.com/armon/gomdb",
			"Rev": "151f2e08ef45cb0e57d694b2562f351955dff572"
		},
		{
			"ImportPath": "github.com/boltdb/bolt",
			"Comment": "v1.0-79-g2c04100",
			"Rev": "2c04100eb9793f2b8541d243494e2909d2112325"
		},
		{
			"ImportPath": "github.com/hashicorp/consul-migrate/migrator",
			"Comment": "v0.1.0",
			"Rev": "4977886fc950a0db1a6f0bbadca56dfabf170f9c"
		},
		{
			"ImportPath": "github.com/hashicorp/go-checkpoint",
			"Rev": "88326f6851319068e7b34981032128c0b1a6524d"
		},
		{
			"ImportPath": "github.com/hashicorp/go-msgpack/codec",
			"Rev": "71c2886f5a673a35f909803f38ece5810165097b"
		},
		{
			"ImportPath": "github.com/hashicorp/go-multierror",
			"Rev": "fcdddc395df1ddf4247c69bd436e84cfa0733f7e"
		},
		{
			"ImportPath": "github.com/hashicorp/go-syslog",
			"Rev": "42a2b573b664dbf281bd48c3cc12c086b17a39ba"
		},
		{
			"ImportPath": "github.com/hashicorp/golang-lru",
			"Rev": "995efda3e073b6946b175ed93901d729ad47466a"
		},
		{
			"ImportPath": "github.com/hashicorp/hcl",
			"Rev": "513e04c400ee2e81e97f5e011c08fb42c6f69b84"
		},
		{
			"ImportPath": "github.com/hashicorp/logutils",
			"Rev": "367a65d59043b4f846d179341d138f01f988c186"
		},
		{
			"ImportPath": "github.com/hashicorp/memberlist",
			"Rev": "6025015f2dc659ca2c735112d37e753bda6e329d"
		},
		{
			"ImportPath": "github.com/hashicorp/net-rpc-msgpackrpc",
			"Rev": "d377902b7aba83dd3895837b902f6cf3f71edcb2"
		},
		{
			"ImportPath": "github.com/hashicorp/raft",
			"Rev": "a8065f298505708bf60f518c09178149f3c06f21"
		},
		{
			"ImportPath": "github.com/hashicorp/raft-boltdb",
			"Rev": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee"
		},
		{
			"ImportPath": "github.com/hashicorp/raft-mdb",
			"Rev": "4ec3694ffbc74d34f7532e70ef2e9c3546a0c0b0"
		},
		{
			"ImportPath": "github.com/hashicorp/scada-client",
			"Rev": "c26580cfe35393f6f4bf1b9ba55e6afe33176bae"
		},
		{
			"ImportPath": "github.com/hashicorp/serf/serf",
			"Comment": "v0.6.4-12-g320787d",
			"Rev": "320787d8567bdcf9d77f8c5d65031fcb19e71ebc"
		},
		{
			"ImportPath": "github.com/hashicorp/yamux",
			"Rev": "b2e55852ddaf823a85c67f798080eb7d08acd71d"
		},
		{
			"ImportPath": "github.com/inconshreveable/muxado",
			"Rev": "f693c7e88ba316d1a0ae3e205e22a01aa3ec2848"
		},
		{
			"ImportPath": "github.com/miekg/dns",
			"Rev": "bb1103f648f811d2018d4bedcb2d4b2bce34a0f1"
		},
		{
			"ImportPath": "github.com/mitchellh/cli",
			"Rev": "6cc8bc522243675a2882b81662b0b0d2e04b99c9"
		},
		{
			"ImportPath": "github.com/mitchellh/mapstructure",
			"Rev": "442e588f213303bec7936deba67901f8fc8f18b1"
		},
		{
			"ImportPath": "github.com/ryanuber/columnize",
			"Comment": "v2.0.1-6-g44cb478",
			"Rev": "44cb4788b2ec3c3d158dd3d1b50aba7d66f4b59a"
		},
		{
			"ImportPath": "golang.org/x/crypto/ssh/terminal",
			"Rev": "74f810a0152f4c50a16195f6b9ff44afc35594e8"
		}
	]
}
@@ -1,118 +0,0 @@
{
	"ImportPath": "github.com/hashicorp/consul",
	"GoVersion": "go1.4.2",
	"Deps": [
		{
			"ImportPath": "github.com/armon/circbuf",
			"Rev": "f092b4f207b6e5cce0569056fba9e1a2735cb6cf"
		},
		{
			"ImportPath": "github.com/armon/go-metrics",
			"Rev": "a54701ebec11868993bc198c3f315353e9de2ed6"
		},
		{
			"ImportPath": "github.com/armon/go-radix",
			"Rev": "0bab926c3433cfd6490c6d3c504a7b471362390c"
		},
		{
			"ImportPath": "github.com/armon/gomdb",
			"Rev": "151f2e08ef45cb0e57d694b2562f351955dff572"
		},
		{
			"ImportPath": "github.com/boltdb/bolt",
			"Comment": "v1.0-79-g2c04100",
			"Rev": "2c04100eb9793f2b8541d243494e2909d2112325"
		},
		{
			"ImportPath": "github.com/hashicorp/consul-migrate/migrator",
			"Comment": "v0.1.0",
			"Rev": "4977886fc950a0db1a6f0bbadca56dfabf170f9c"
		},
		{
			"ImportPath": "github.com/hashicorp/go-checkpoint",
			"Rev": "88326f6851319068e7b34981032128c0b1a6524d"
		},
		{
			"ImportPath": "github.com/hashicorp/go-msgpack/codec",
			"Rev": "71c2886f5a673a35f909803f38ece5810165097b"
		},
		{
			"ImportPath": "github.com/hashicorp/go-multierror",
			"Rev": "fcdddc395df1ddf4247c69bd436e84cfa0733f7e"
		},
		{
			"ImportPath": "github.com/hashicorp/go-syslog",
			"Rev": "42a2b573b664dbf281bd48c3cc12c086b17a39ba"
		},
		{
			"ImportPath": "github.com/hashicorp/golang-lru",
			"Rev": "995efda3e073b6946b175ed93901d729ad47466a"
		},
		{
			"ImportPath": "github.com/hashicorp/hcl",
			"Rev": "513e04c400ee2e81e97f5e011c08fb42c6f69b84"
		},
		{
			"ImportPath": "github.com/hashicorp/logutils",
			"Rev": "367a65d59043b4f846d179341d138f01f988c186"
		},
		{
			"ImportPath": "github.com/hashicorp/memberlist",
			"Rev": "6025015f2dc659ca2c735112d37e753bda6e329d"
		},
		{
			"ImportPath": "github.com/hashicorp/net-rpc-msgpackrpc",
			"Rev": "d377902b7aba83dd3895837b902f6cf3f71edcb2"
		},
		{
			"ImportPath": "github.com/hashicorp/raft",
			"Rev": "a8065f298505708bf60f518c09178149f3c06f21"
		},
		{
			"ImportPath": "github.com/hashicorp/raft-boltdb",
			"Rev": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee"
		},
		{
			"ImportPath": "github.com/hashicorp/raft-mdb",
			"Rev": "4ec3694ffbc74d34f7532e70ef2e9c3546a0c0b0"
		},
		{
			"ImportPath": "github.com/hashicorp/scada-client",
			"Rev": "c26580cfe35393f6f4bf1b9ba55e6afe33176bae"
		},
		{
			"ImportPath": "github.com/hashicorp/serf/serf",
			"Comment": "v0.6.4-13-g558a687",
			"Rev": "558a6876882b2c5c61df29fd3990fb1765fd71d3"
		},
		{
			"ImportPath": "github.com/hashicorp/yamux",
			"Rev": "b2e55852ddaf823a85c67f798080eb7d08acd71d"
		},
		{
			"ImportPath": "github.com/inconshreveable/muxado",
			"Rev": "f693c7e88ba316d1a0ae3e205e22a01aa3ec2848"
		},
		{
			"ImportPath": "github.com/miekg/dns",
			"Rev": "bb1103f648f811d2018d4bedcb2d4b2bce34a0f1"
		},
		{
			"ImportPath": "github.com/mitchellh/cli",
			"Rev": "6cc8bc522243675a2882b81662b0b0d2e04b99c9"
		},
		{
			"ImportPath": "github.com/mitchellh/mapstructure",
			"Rev": "442e588f213303bec7936deba67901f8fc8f18b1"
		},
		{
			"ImportPath": "github.com/ryanuber/columnize",
			"Comment": "v2.0.1-6-g44cb478",
			"Rev": "44cb4788b2ec3c3d158dd3d1b50aba7d66f4b59a"
		},
		{
			"ImportPath": "golang.org/x/crypto/ssh/terminal",
			"Rev": "74f810a0152f4c50a16195f6b9ff44afc35594e8"
		}
	]
}
@@ -1,134 +0,0 @@
{
	"ImportPath": "github.com/hashicorp/consul",
	"GoVersion": "go1.5.1",
	"Deps": [
		{
			"ImportPath": "github.com/DataDog/datadog-go/statsd",
			"Rev": "b050cd8f4d7c394545fd7d966c8e2909ce89d552"
		},
		{
			"ImportPath": "github.com/armon/circbuf",
			"Rev": "bbbad097214e2918d8543d5201d12bfd7bca254d"
		},
		{
			"ImportPath": "github.com/armon/go-metrics",
			"Rev": "6c5fa0d8f48f4661c9ba8709799c88d425ad20f0"
		},
		{
			"ImportPath": "github.com/armon/go-radix",
			"Rev": "fbd82e84e2b13651f3abc5ffd26b65ba71bc8f93"
		},
		{
			"ImportPath": "github.com/boltdb/bolt",
			"Comment": "v1.1.0-19-g0b00eff",
			"Rev": "0b00effdd7a8270ebd91c24297e51643e370dd52"
		},
		{
			"ImportPath": "github.com/fsouza/go-dockerclient",
			"Rev": "2350d7bc12bb04f2d7d6824c7718012b1397b760"
		},
		{
			"ImportPath": "github.com/hashicorp/errwrap",
			"Rev": "7554cd9344cec97297fa6649b055a8c98c2a1e55"
		},
		{
			"ImportPath": "github.com/hashicorp/go-checkpoint",
			"Rev": "e4b2dc34c0f698ee04750bf2035d8b9384233e1b"
		},
		{
			"ImportPath": "github.com/hashicorp/go-cleanhttp",
			"Rev": "5df5ddc69534f1a4697289f1dca2193fbb40213f"
		},
		{
			"ImportPath": "github.com/hashicorp/go-immutable-radix",
			"Rev": "aca1bd0689e10884f20d114aff148ddb849ece80"
		},
		{
			"ImportPath": "github.com/hashicorp/go-memdb",
			"Rev": "9ea975be0e31ada034a5760340d4892f3f543d20"
		},
		{
			"ImportPath": "github.com/hashicorp/go-msgpack/codec",
			"Rev": "fa3f63826f7c23912c15263591e65d54d080b458"
		},
		{
			"ImportPath": "github.com/hashicorp/go-multierror",
			"Rev": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5"
		},
		{
			"ImportPath": "github.com/hashicorp/go-syslog",
			"Rev": "42a2b573b664dbf281bd48c3cc12c086b17a39ba"
		},
		{
			"ImportPath": "github.com/hashicorp/golang-lru",
			"Rev": "a6091bb5d00e2e9c4a16a0e739e306f8a3071a3c"
		},
		{
			"ImportPath": "github.com/hashicorp/hcl",
			"Rev": "2deb1d1db27ed473f38fe65a16044572b9ff9d30"
		},
		{
			"ImportPath": "github.com/hashicorp/logutils",
			"Rev": "0dc08b1671f34c4250ce212759ebd880f743d883"
		},
		{
			"ImportPath": "github.com/hashicorp/memberlist",
			"Rev": "28424fb38c7c3e30f366b72b1a55f690d318d8f3"
		},
		{
			"ImportPath": "github.com/hashicorp/net-rpc-msgpackrpc",
			"Rev": "a14192a58a694c123d8fe5481d4a4727d6ae82f3"
		},
		{
			"ImportPath": "github.com/hashicorp/raft",
			"Rev": "d136cd15dfb7876fd7c89cad1995bc4f19ceb294"
		},
		{
			"ImportPath": "github.com/hashicorp/raft-boltdb",
			"Rev": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee"
		},
		{
			"ImportPath": "github.com/hashicorp/scada-client",
			"Rev": "84989fd23ad4cc0e7ad44d6a871fd793eb9beb0a"
		},
		{
			"ImportPath": "github.com/hashicorp/serf/coordinate",
			"Comment": "v0.6.4-145-ga72c045",
			"Rev": "a72c0453da2ba628a013e98bf323a76be4aa1443"
		},
		{
			"ImportPath": "github.com/hashicorp/serf/serf",
			"Comment": "v0.6.4-145-ga72c045",
			"Rev": "a72c0453da2ba628a013e98bf323a76be4aa1443"
		},
		{
			"ImportPath": "github.com/hashicorp/yamux",
			"Rev": "ddcd0a6ec7c55e29f235e27935bf98d302281bd3"
		},
		{
			"ImportPath": "github.com/inconshreveable/muxado",
			"Rev": "f693c7e88ba316d1a0ae3e205e22a01aa3ec2848"
		},
		{
			"ImportPath": "github.com/miekg/dns",
			"Rev": "d27455715200c7d3e321a1e5cadb27c9ee0b0f02"
		},
		{
			"ImportPath": "github.com/mitchellh/cli",
			"Rev": "8102d0ed5ea2709ade1243798785888175f6e415"
		},
		{
			"ImportPath": "github.com/mitchellh/mapstructure",
			"Rev": "281073eb9eb092240d33ef253c404f1cca550309"
		},
		{
			"ImportPath": "github.com/ryanuber/columnize",
			"Comment": "v2.0.1-8-g983d3a5",
			"Rev": "983d3a5fab1bf04d1b412465d2d9f8430e2e917e"
		},
		{
			"ImportPath": "golang.org/x/crypto/ssh/terminal",
			"Rev": "346896d57731cb5670b36c6178fc5519f3225980"
		}
	]
}
@@ -1,134 +0,0 @@
{
	"ImportPath": "github.com/hashicorp/consul",
	"GoVersion": "go1.5.1",
	"Deps": [
		{
			"ImportPath": "github.com/DataDog/datadog-go/statsd",
			"Rev": "b050cd8f4d7c394545fd7d966c8e2909ce89d552"
		},
		{
			"ImportPath": "github.com/armon/circbuf",
			"Rev": "bbbad097214e2918d8543d5201d12bfd7bca254d"
		},
		{
			"ImportPath": "github.com/armon/go-metrics",
			"Rev": "6c5fa0d8f48f4661c9ba8709799c88d425ad20f0"
		},
		{
			"ImportPath": "github.com/armon/go-radix",
			"Rev": "fbd82e84e2b13651f3abc5ffd26b65ba71bc8f93"
		},
		{
			"ImportPath": "github.com/boltdb/bolt",
			"Comment": "v1.1.0-19-g0b00eff",
			"Rev": "0b00effdd7a8270ebd91c24297e51643e370dd52"
		},
		{
			"ImportPath": "github.com/fsouza/go-dockerclient",
			"Rev": "2350d7bc12bb04f2d7d6824c7718012b1397b760"
		},
		{
			"ImportPath": "github.com/hashicorp/errwrap",
			"Rev": "7554cd9344cec97297fa6649b055a8c98c2a1e55"
		},
		{
			"ImportPath": "github.com/hashicorp/go-checkpoint",
			"Rev": "e4b2dc34c0f698ee04750bf2035d8b9384233e1b"
		},
		{
			"ImportPath": "github.com/hashicorp/go-cleanhttp",
			"Rev": "5df5ddc69534f1a4697289f1dca2193fbb40213f"
		},
		{
			"ImportPath": "github.com/hashicorp/go-immutable-radix",
			"Rev": "aca1bd0689e10884f20d114aff148ddb849ece80"
		},
		{
			"ImportPath": "github.com/hashicorp/go-memdb",
			"Rev": "9ea975be0e31ada034a5760340d4892f3f543d20"
		},
		{
			"ImportPath": "github.com/hashicorp/go-msgpack/codec",
			"Rev": "fa3f63826f7c23912c15263591e65d54d080b458"
		},
		{
			"ImportPath": "github.com/hashicorp/go-multierror",
			"Rev": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5"
		},
		{
			"ImportPath": "github.com/hashicorp/go-syslog",
			"Rev": "42a2b573b664dbf281bd48c3cc12c086b17a39ba"
		},
		{
			"ImportPath": "github.com/hashicorp/golang-lru",
			"Rev": "a6091bb5d00e2e9c4a16a0e739e306f8a3071a3c"
		},
		{
			"ImportPath": "github.com/hashicorp/hcl",
			"Rev": "2deb1d1db27ed473f38fe65a16044572b9ff9d30"
		},
		{
			"ImportPath": "github.com/hashicorp/logutils",
			"Rev": "0dc08b1671f34c4250ce212759ebd880f743d883"
		},
		{
			"ImportPath": "github.com/hashicorp/memberlist",
			"Rev": "28424fb38c7c3e30f366b72b1a55f690d318d8f3"
		},
		{
			"ImportPath": "github.com/hashicorp/net-rpc-msgpackrpc",
			"Rev": "a14192a58a694c123d8fe5481d4a4727d6ae82f3"
		},
		{
			"ImportPath": "github.com/hashicorp/raft",
			"Rev": "d136cd15dfb7876fd7c89cad1995bc4f19ceb294"
		},
		{
			"ImportPath": "github.com/hashicorp/raft-boltdb",
			"Rev": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee"
		},
		{
			"ImportPath": "github.com/hashicorp/scada-client",
			"Rev": "84989fd23ad4cc0e7ad44d6a871fd793eb9beb0a"
		},
		{
			"ImportPath": "github.com/hashicorp/serf/coordinate",
			"Comment": "v0.6.4-145-ga72c045",
			"Rev": "a72c0453da2ba628a013e98bf323a76be4aa1443"
		},
		{
			"ImportPath": "github.com/hashicorp/serf/serf",
			"Comment": "v0.6.4-145-ga72c045",
			"Rev": "a72c0453da2ba628a013e98bf323a76be4aa1443"
		},
		{
			"ImportPath": "github.com/hashicorp/yamux",
			"Rev": "df949784da9ed028ee76df44652e42d37a09d7e4"
		},
		{
			"ImportPath": "github.com/inconshreveable/muxado",
			"Rev": "f693c7e88ba316d1a0ae3e205e22a01aa3ec2848"
		},
		{
			"ImportPath": "github.com/miekg/dns",
			"Rev": "d27455715200c7d3e321a1e5cadb27c9ee0b0f02"
		},
		{
			"ImportPath": "github.com/mitchellh/cli",
			"Rev": "8102d0ed5ea2709ade1243798785888175f6e415"
		},
		{
			"ImportPath": "github.com/mitchellh/mapstructure",
			"Rev": "281073eb9eb092240d33ef253c404f1cca550309"
		},
		{
			"ImportPath": "github.com/ryanuber/columnize",
			"Comment": "v2.0.1-8-g983d3a5",
			"Rev": "983d3a5fab1bf04d1b412465d2d9f8430e2e917e"
		},
		{
			"ImportPath": "golang.org/x/crypto/ssh/terminal",
			"Rev": "346896d57731cb5670b36c6178fc5519f3225980"
		}
	]
}
@@ -1,150 +0,0 @@
{
	"ImportPath": "github.com/hashicorp/consul",
	"GoVersion": "go1.5",
	"Deps": [
		{
			"ImportPath": "github.com/DataDog/datadog-go/statsd",
			"Rev": "b050cd8f4d7c394545fd7d966c8e2909ce89d552"
		},
		{
			"ImportPath": "github.com/armon/circbuf",
			"Rev": "bbbad097214e2918d8543d5201d12bfd7bca254d"
		},
		{
			"ImportPath": "github.com/armon/go-metrics",
			"Rev": "345426c77237ece5dab0e1605c3e4b35c3f54757"
		},
		{
			"ImportPath": "github.com/armon/go-radix",
			"Rev": "fbd82e84e2b13651f3abc5ffd26b65ba71bc8f93"
		},
		{
			"ImportPath": "github.com/bgentry/speakeasy",
			"Rev": "36e9cfdd690967f4f690c6edcc9ffacd006014a0"
		},
		{
			"ImportPath": "github.com/boltdb/bolt",
			"Comment": "v1.1.0-40-g34a0fa5",
			"Rev": "34a0fa5307f7562980fb8e7ff4723f7987edf49b"
		},
		{
			"ImportPath": "github.com/elazarl/go-bindata-assetfs",
			"Rev": "57eb5e1fc594ad4b0b1dbea7b286d299e0cb43c2"
		},
		{
			"ImportPath": "github.com/fsouza/go-dockerclient",
			"Rev": "299d728486342c894e7fafd68e3a4b89623bef1d"
		},
		{
			"ImportPath": "github.com/hashicorp/errwrap",
			"Rev": "7554cd9344cec97297fa6649b055a8c98c2a1e55"
		},
		{
			"ImportPath": "github.com/hashicorp/go-checkpoint",
			"Rev": "e4b2dc34c0f698ee04750bf2035d8b9384233e1b"
		},
		{
			"ImportPath": "github.com/hashicorp/go-cleanhttp",
			"Rev": "ce617e79981a8fff618bb643d155133a8f38db96"
		},
		{
			"ImportPath": "github.com/hashicorp/go-immutable-radix",
			"Rev": "12e90058b2897552deea141eff51bb7a07a09e63"
		},
		{
			"ImportPath": "github.com/hashicorp/go-memdb",
			"Rev": "31949d523ade8a236956c6f1761e9dcf902d1638"
		},
		{
			"ImportPath": "github.com/hashicorp/go-msgpack/codec",
			"Rev": "fa3f63826f7c23912c15263591e65d54d080b458"
		},
		{
			"ImportPath": "github.com/hashicorp/go-multierror",
			"Rev": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5"
		},
		{
			"ImportPath": "github.com/hashicorp/go-reap",
			"Rev": "e300e334a8de28a7931fc05331345f9e985128de"
		},
		{
			"ImportPath": "github.com/hashicorp/go-syslog",
			"Rev": "42a2b573b664dbf281bd48c3cc12c086b17a39ba"
		},
		{
			"ImportPath": "github.com/hashicorp/golang-lru",
			"Rev": "5c7531c003d8bf158b0fe5063649a2f41a822146"
		},
		{
			"ImportPath": "github.com/hashicorp/hcl",
			"Rev": "197e8d3cf42199cfd53cd775deb37f3637234635"
		},
		{
			"ImportPath": "github.com/hashicorp/logutils",
			"Rev": "0dc08b1671f34c4250ce212759ebd880f743d883"
		},
		{
			"ImportPath": "github.com/hashicorp/memberlist",
			"Rev": "9888dc523910e5d22c5be4f6e34520943df21809"
		},
		{
			"ImportPath": "github.com/hashicorp/net-rpc-msgpackrpc",
			"Rev": "a14192a58a694c123d8fe5481d4a4727d6ae82f3"
		},
		{
			"ImportPath": "github.com/hashicorp/raft",
			"Rev": "d136cd15dfb7876fd7c89cad1995bc4f19ceb294"
		},
		{
			"ImportPath": "github.com/hashicorp/raft-boltdb",
			"Rev": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee"
		},
		{
			"ImportPath": "github.com/hashicorp/scada-client",
			"Rev": "84989fd23ad4cc0e7ad44d6a871fd793eb9beb0a"
		},
		{
			"ImportPath": "github.com/hashicorp/serf/coordinate",
			"Comment": "v0.7.0-1-g39c7c06",
			"Rev": "39c7c06298b480560202bec00c2c77e974e88792"
		},
		{
			"ImportPath": "github.com/hashicorp/serf/serf",
			"Comment": "v0.7.0-1-g39c7c06",
			"Rev": "39c7c06298b480560202bec00c2c77e974e88792"
		},
		{
			"ImportPath": "github.com/hashicorp/yamux",
			"Rev": "df949784da9ed028ee76df44652e42d37a09d7e4"
		},
		{
			"ImportPath": "github.com/inconshreveable/muxado",
			"Rev": "f693c7e88ba316d1a0ae3e205e22a01aa3ec2848"
		},
		{
			"ImportPath": "github.com/mattn/go-isatty",
			"Rev": "56b76bdf51f7708750eac80fa38b952bb9f32639"
		},
		{
			"ImportPath": "github.com/miekg/dns",
			"Rev": "1756430e42a7b2ecded216a9fdd37d002c116df5"
		},
		{
			"ImportPath": "github.com/mitchellh/cli",
			"Rev": "43a4bc367e0d53f561d3d985b9dca84e15bd0554"
		},
		{
			"ImportPath": "github.com/mitchellh/mapstructure",
			"Rev": "281073eb9eb092240d33ef253c404f1cca550309"
		},
		{
			"ImportPath": "github.com/ryanuber/columnize",
			"Comment": "v2.0.1-8-g983d3a5",
			"Rev": "983d3a5fab1bf04d1b412465d2d9f8430e2e917e"
		},
		{
			"ImportPath": "golang.org/x/sys/unix",
			"Rev": "833a04a10549a95dc34458c195cbad61bbb6cb4d"
		}
	]
}
@@ -1,150 +0,0 @@
{
	"ImportPath": "github.com/hashicorp/consul",
	"GoVersion": "go1.5",
	"Deps": [
		{
			"ImportPath": "github.com/DataDog/datadog-go/statsd",
			"Rev": "b050cd8f4d7c394545fd7d966c8e2909ce89d552"
		},
		{
			"ImportPath": "github.com/armon/circbuf",
			"Rev": "bbbad097214e2918d8543d5201d12bfd7bca254d"
		},
		{
			"ImportPath": "github.com/armon/go-metrics",
			"Rev": "345426c77237ece5dab0e1605c3e4b35c3f54757"
		},
		{
			"ImportPath": "github.com/armon/go-radix",
			"Rev": "fbd82e84e2b13651f3abc5ffd26b65ba71bc8f93"
		},
		{
			"ImportPath": "github.com/bgentry/speakeasy",
			"Rev": "36e9cfdd690967f4f690c6edcc9ffacd006014a0"
		},
		{
			"ImportPath": "github.com/boltdb/bolt",
			"Comment": "v1.1.0-40-g34a0fa5",
			"Rev": "34a0fa5307f7562980fb8e7ff4723f7987edf49b"
		},
		{
			"ImportPath": "github.com/elazarl/go-bindata-assetfs",
			"Rev": "57eb5e1fc594ad4b0b1dbea7b286d299e0cb43c2"
		},
		{
			"ImportPath": "github.com/fsouza/go-dockerclient",
			"Rev": "299d728486342c894e7fafd68e3a4b89623bef1d"
		},
		{
			"ImportPath": "github.com/hashicorp/errwrap",
			"Rev": "7554cd9344cec97297fa6649b055a8c98c2a1e55"
		},
		{
			"ImportPath": "github.com/hashicorp/go-checkpoint",
			"Rev": "e4b2dc34c0f698ee04750bf2035d8b9384233e1b"
		},
		{
			"ImportPath": "github.com/hashicorp/go-cleanhttp",
			"Rev": "ce617e79981a8fff618bb643d155133a8f38db96"
		},
		{
			"ImportPath": "github.com/hashicorp/go-immutable-radix",
			"Rev": "12e90058b2897552deea141eff51bb7a07a09e63"
		},
		{
			"ImportPath": "github.com/hashicorp/go-memdb",
			"Rev": "31949d523ade8a236956c6f1761e9dcf902d1638"
		},
		{
			"ImportPath": "github.com/hashicorp/go-msgpack/codec",
			"Rev": "fa3f63826f7c23912c15263591e65d54d080b458"
		},
		{
			"ImportPath": "github.com/hashicorp/go-multierror",
			"Rev": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5"
		},
		{
			"ImportPath": "github.com/hashicorp/go-reap",
			"Rev": "e300e334a8de28a7931fc05331345f9e985128de"
		},
		{
			"ImportPath": "github.com/hashicorp/go-syslog",
			"Rev": "42a2b573b664dbf281bd48c3cc12c086b17a39ba"
		},
		{
			"ImportPath": "github.com/hashicorp/golang-lru",
			"Rev": "5c7531c003d8bf158b0fe5063649a2f41a822146"
		},
		{
			"ImportPath": "github.com/hashicorp/hcl",
			"Rev": "197e8d3cf42199cfd53cd775deb37f3637234635"
		},
		{
			"ImportPath": "github.com/hashicorp/logutils",
			"Rev": "0dc08b1671f34c4250ce212759ebd880f743d883"
		},
		{
			"ImportPath": "github.com/hashicorp/memberlist",
			"Rev": "9888dc523910e5d22c5be4f6e34520943df21809"
		},
		{
			"ImportPath": "github.com/hashicorp/net-rpc-msgpackrpc",
			"Rev": "a14192a58a694c123d8fe5481d4a4727d6ae82f3"
		},
		{
			"ImportPath": "github.com/hashicorp/raft",
			"Rev": "d136cd15dfb7876fd7c89cad1995bc4f19ceb294"
		},
		{
			"ImportPath": "github.com/hashicorp/raft-boltdb",
			"Rev": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee"
		},
		{
			"ImportPath": "github.com/hashicorp/scada-client",
			"Rev": "84989fd23ad4cc0e7ad44d6a871fd793eb9beb0a"
		},
		{
			"ImportPath": "github.com/hashicorp/serf/coordinate",
			"Comment": "v0.7.0-1-g39c7c06",
			"Rev": "39c7c06298b480560202bec00c2c77e974e88792"
		},
		{
			"ImportPath": "github.com/hashicorp/serf/serf",
			"Comment": "v0.7.0-1-g39c7c06",
			"Rev": "39c7c06298b480560202bec00c2c77e974e88792"
		},
		{
			"ImportPath": "github.com/hashicorp/yamux",
			"Rev": "df949784da9ed028ee76df44652e42d37a09d7e4"
		},
		{
			"ImportPath": "github.com/inconshreveable/muxado",
			"Rev": "f693c7e88ba316d1a0ae3e205e22a01aa3ec2848"
		},
		{
			"ImportPath": "github.com/mattn/go-isatty",
			"Rev": "56b76bdf51f7708750eac80fa38b952bb9f32639"
		},
		{
			"ImportPath": "github.com/miekg/dns",
			"Rev": "1756430e42a7b2ecded216a9fdd37d002c116df5"
		},
		{
			"ImportPath": "github.com/mitchellh/cli",
			"Rev": "43a4bc367e0d53f561d3d985b9dca84e15bd0554"
		},
		{
			"ImportPath": "github.com/mitchellh/mapstructure",
			"Rev": "281073eb9eb092240d33ef253c404f1cca550309"
		},
		{
			"ImportPath": "github.com/ryanuber/columnize",
			"Comment": "v2.0.1-8-g983d3a5",
			"Rev": "983d3a5fab1bf04d1b412465d2d9f8430e2e917e"
		},
		{
			"ImportPath": "golang.org/x/sys/unix",
			"Rev": "833a04a10549a95dc34458c195cbad61bbb6cb4d"
		}
	]
}
scripts/build.sh

@@ -3,6 +3,8 @@
 # This script builds the application from source for multiple platforms.
 set -e

+export GO15VENDOREXPERIMENT=1
+
 # Get the parent directory of where this script is.
 SOURCE="${BASH_SOURCE[0]}"
 while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
@@ -20,10 +22,6 @@ GIT_DESCRIBE=$(git describe --tags)
 XC_ARCH=${XC_ARCH:-"386 amd64 arm"}
 XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"}

-# Install dependencies
-echo "==> Getting dependencies..."
-go get ./...
-
 # Delete the old dir
 echo "==> Removing old directory..."
 rm -f bin/*
@@ -1,6 +1,8 @@
 #!/usr/bin/env bash
 set -e

+export GO15VENDOREXPERIMENT=1
+
 # Get the version from the command line
 VERSION=$1
 if [ -z $VERSION ]; then
scripts/test.sh

@@ -1,5 +1,7 @@
 #!/usr/bin/env bash

+export GO15VENDOREXPERIMENT=1
+
 # Create a temp dir and clean it up on exit
 TEMPDIR=`mktemp -d -t consul-test.XXX`
 trap "rm -rf $TEMPDIR" EXIT HUP INT QUIT TERM
@@ -10,4 +12,4 @@ go build -o $TEMPDIR/consul || exit 1

 # Run the tests
 echo "--> Running tests"
-go list ./... | PATH=$TEMPDIR:$PATH xargs -n1 go test
+go list ./... | grep -v ^github.com/hashicorp/consul/vendor/ | PATH=$TEMPDIR:$PATH xargs -n1 go test
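All three scripts now export GO15VENDOREXPERIMENT=1, the opt-in flag that tells Go 1.5 to resolve imports from a vendor/ directory before GOPATH; without it the vendored packages would be invisible and the builds above would fall back to whatever happens to be checked out in GOPATH. A quick way to see the difference (a sketch, run from the repository root):

```sh
# Flag off: imports resolve from GOPATH/GOROOT only
GO15VENDOREXPERIMENT=0 go build ./...

# Flag on: packages under vendor/ satisfy the imports, as the scripts assume
GO15VENDOREXPERIMENT=1 go build ./...
```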
@@ -0,0 +1,45 @@
## Overview

Package `statsd` provides a Go [dogstatsd](http://docs.datadoghq.com/guides/dogstatsd/) client. Dogstatsd extends Statsd, adding tags
and histograms.

## Get the code

    $ go get github.com/DataDog/datadog-go/statsd

## Usage

```go
// Create the client
c, err := statsd.New("127.0.0.1:8125")
if err != nil {
    log.Fatal(err)
}
// Prefix every metric with the app name
c.Namespace = "flubber."
// Send the EC2 availability zone as a tag with every metric
c.Tags = append(c.Tags, "us-east-1a")
err = c.Gauge("request.duration", 1.2, nil, 1)
```

## Buffering Client

Dogstatsd accepts packets with multiple statsd payloads in them. Using the BufferingClient via `NewBufferingClient` will buffer up commands and send them when the buffer is reached or after 100msec.

## Development

Run the tests with:

    $ go test

## Documentation

Please see: http://godoc.org/github.com/DataDog/datadog-go/statsd

## License

go-dogstatsd is released under the [MIT license](http://www.opensource.org/licenses/mit-license.php).

## Credits

Original code by [ooyala](https://github.com/ooyala/go-dogstatsd).
@ -0,0 +1,353 @@
|
|||
// Copyright 2013 Ooyala, Inc.
|
||||
|
||||
/*
|
||||
Package statsd provides a Go dogstatsd client. Dogstatsd extends the popular statsd,
|
||||
adding tags and histograms and pushing upstream to Datadog.
|
||||
|
||||
Refer to http://docs.datadoghq.com/guides/dogstatsd/ for information about DogStatsD.
|
||||
|
||||
Example Usage:
|
||||
|
||||
// Create the client
|
||||
c, err := statsd.New("127.0.0.1:8125")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// Prefix every metric with the app name
|
||||
c.Namespace = "flubber."
|
||||
// Send the EC2 availability zone as a tag with every metric
|
||||
c.Tags = append(c.Tags, "us-east-1a")
|
||||
err = c.Gauge("request.duration", 1.2, nil, 1)
|
||||
|
||||
statsd is based on go-statsd-client.
|
||||
*/
|
||||
package statsd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A Client is a handle for sending udp messages to dogstatsd. It is safe to
|
||||
// use one Client from multiple goroutines simultaneously.
|
||||
type Client struct {
|
||||
conn net.Conn
|
||||
// Namespace to prepend to all statsd calls
|
||||
Namespace string
|
||||
// Tags are global tags to be added to every statsd call
|
||||
Tags []string
|
||||
// BufferLength is the length of the buffer in commands.
|
||||
bufferLength int
|
||||
flushTime time.Duration
|
||||
commands []string
|
||||
stop bool
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
// New returns a pointer to a new Client given an addr in the format "hostname:port".
|
||||
func New(addr string) (*Client, error) {
|
||||
udpAddr, err := net.ResolveUDPAddr("udp", addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conn, err := net.DialUDP("udp", nil, udpAddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := &Client{conn: conn}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// NewBuffered returns a Client that buffers its output and sends it in chunks.
|
||||
// Buflen is the length of the buffer in number of commands.
|
||||
func NewBuffered(addr string, buflen int) (*Client, error) {
|
||||
client, err := New(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.bufferLength = buflen
|
||||
client.commands = make([]string, 0, buflen)
|
||||
client.flushTime = time.Millisecond * 100
|
||||
go client.watch()
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// format a message from its name, value, tags and rate. Also adds global
|
||||
// namespace and tags.
|
||||
func (c *Client) format(name, value string, tags []string, rate float64) string {
|
||||
var buf bytes.Buffer
|
||||
if c.Namespace != "" {
|
||||
buf.WriteString(c.Namespace)
|
||||
}
|
||||
buf.WriteString(name)
|
||||
buf.WriteString(":")
|
||||
buf.WriteString(value)
|
||||
if rate < 1 {
|
||||
buf.WriteString(`|@`)
|
||||
buf.WriteString(strconv.FormatFloat(rate, 'f', -1, 64))
|
||||
}
|
||||
|
||||
tags = append(c.Tags, tags...)
|
||||
if len(tags) > 0 {
|
||||
buf.WriteString("|#")
|
||||
buf.WriteString(tags[0])
|
||||
for _, tag := range tags[1:] {
|
||||
buf.WriteString(",")
|
||||
buf.WriteString(tag)
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (c *Client) watch() {
|
||||
for _ = range time.Tick(c.flushTime) {
|
||||
if c.stop {
|
||||
return
|
||||
}
|
||||
c.Lock()
|
||||
if len(c.commands) > 0 {
|
||||
// FIXME: eating error here
|
||||
c.flush()
|
||||
}
|
||||
c.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) append(cmd string) error {
|
||||
c.Lock()
|
||||
c.commands = append(c.commands, cmd)
|
||||
// if we should flush, lets do it
|
||||
if len(c.commands) == c.bufferLength {
|
||||
if err := c.flush(); err != nil {
|
||||
c.Unlock()
|
||||
return err
|
||||
}
|
||||
}
|
||||
c.Unlock()
|
||||
return nil
|
||||
}

// flush the commands in the buffer. Lock must be held by caller.
func (c *Client) flush() error {
	data := strings.Join(c.commands, "\n")
	_, err := c.conn.Write([]byte(data))
	// clear the slice with a slice op, doesn't realloc
	c.commands = c.commands[:0]
	return err
}

func (c *Client) sendMsg(msg string) error {
	// if this client is buffered, then we'll just append this
	if c.bufferLength > 0 {
		return c.append(msg)
	}
	c.Lock()
	_, err := c.conn.Write([]byte(msg))
	c.Unlock()
	return err
}

// send handles sampling and sends the message over UDP. It also adds global namespace prefixes and tags.
func (c *Client) send(name, value string, tags []string, rate float64) error {
	if c == nil {
		return nil
	}
	if rate < 1 && rand.Float64() > rate {
		return nil
	}
	data := c.format(name, value, tags, rate)
	return c.sendMsg(data)
}

// Gauge measures the value of a metric at a particular time.
func (c *Client) Gauge(name string, value float64, tags []string, rate float64) error {
	stat := fmt.Sprintf("%f|g", value)
	return c.send(name, stat, tags, rate)
}

// Count tracks how many times something happened per second.
func (c *Client) Count(name string, value int64, tags []string, rate float64) error {
	stat := fmt.Sprintf("%d|c", value)
	return c.send(name, stat, tags, rate)
}

// Histogram tracks the statistical distribution of a set of values.
func (c *Client) Histogram(name string, value float64, tags []string, rate float64) error {
	stat := fmt.Sprintf("%f|h", value)
	return c.send(name, stat, tags, rate)
}

// Set counts the number of unique elements in a group.
func (c *Client) Set(name string, value string, tags []string, rate float64) error {
	stat := fmt.Sprintf("%s|s", value)
	return c.send(name, stat, tags, rate)
}

// TimeInMilliseconds sends timing information in milliseconds.
// It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing)
func (c *Client) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error {
	stat := fmt.Sprintf("%f|ms", value)
	return c.send(name, stat, tags, rate)
}

// Event sends the provided Event.
func (c *Client) Event(e *Event) error {
	stat, err := e.Encode(c.Tags...)
	if err != nil {
		return err
	}
	return c.sendMsg(stat)
}

// SimpleEvent sends an event with the provided title and text.
func (c *Client) SimpleEvent(title, text string) error {
	e := NewEvent(title, text)
	return c.Event(e)
}

// Close closes the client connection.
func (c *Client) Close() error {
	if c == nil {
		return nil
	}
	c.stop = true
	return c.conn.Close()
}

// Events support

type eventAlertType string

const (
	// Info is the "info" AlertType for events
	Info eventAlertType = "info"
	// Error is the "error" AlertType for events
	Error eventAlertType = "error"
	// Warning is the "warning" AlertType for events
	Warning eventAlertType = "warning"
	// Success is the "success" AlertType for events
	Success eventAlertType = "success"
)

type eventPriority string

const (
	// Normal is the "normal" Priority for events
	Normal eventPriority = "normal"
	// Low is the "low" Priority for events
	Low eventPriority = "low"
)

// An Event is an object that can be posted to your DataDog event stream.
type Event struct {
	// Title of the event. Required.
	Title string
	// Text is the description of the event. Required.
	Text string
	// Timestamp is a timestamp for the event. If not provided, the dogstatsd
	// server will set this to the current time.
	Timestamp time.Time
	// Hostname for the event.
	Hostname string
	// AggregationKey groups this event with others of the same key.
	AggregationKey string
	// Priority of the event. Can be statsd.Low or statsd.Normal.
	Priority eventPriority
	// SourceTypeName is a source type for the event.
	SourceTypeName string
	// AlertType can be statsd.Info, statsd.Error, statsd.Warning, or statsd.Success.
	// If absent, the default value applied by the dogstatsd server is Info.
	AlertType eventAlertType
	// Tags for the event.
	Tags []string
}

// NewEvent creates a new event with the given title and text. Error checking
// against these values is done at send-time, or upon running e.Check.
func NewEvent(title, text string) *Event {
	return &Event{
		Title: title,
		Text:  text,
	}
}

// Check verifies that an event is valid.
func (e Event) Check() error {
	if len(e.Title) == 0 {
		return fmt.Errorf("statsd.Event title is required")
	}
	if len(e.Text) == 0 {
		return fmt.Errorf("statsd.Event text is required")
	}
	return nil
}
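
// For illustration: Event{Title: "hello", Text: "world"} encodes to
// "_e{5,5}:hello|world" (the braces carry the title and text lengths);
// optional fields are appended as pipe-separated sections, e.g.
// "|d:1449529000" for the timestamp and "|#tag1,tag2" for tags.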

// Encode returns the dogstatsd wire protocol representation for an event.
// Tags may be passed which will be added to the encoded output but not to
// the Event's list of tags, e.g. for default tags.
func (e Event) Encode(tags ...string) (string, error) {
	err := e.Check()
	if err != nil {
		return "", err
	}
	var buffer bytes.Buffer
	buffer.WriteString("_e{")
	buffer.WriteString(strconv.FormatInt(int64(len(e.Title)), 10))
	buffer.WriteRune(',')
	buffer.WriteString(strconv.FormatInt(int64(len(e.Text)), 10))
	buffer.WriteString("}:")
	buffer.WriteString(e.Title)
	buffer.WriteRune('|')
	buffer.WriteString(e.Text)

	if !e.Timestamp.IsZero() {
		buffer.WriteString("|d:")
		buffer.WriteString(strconv.FormatInt(e.Timestamp.Unix(), 10))
	}

	if len(e.Hostname) != 0 {
		buffer.WriteString("|h:")
		buffer.WriteString(e.Hostname)
	}

	if len(e.AggregationKey) != 0 {
		buffer.WriteString("|k:")
		buffer.WriteString(e.AggregationKey)
	}

	if len(e.Priority) != 0 {
		buffer.WriteString("|p:")
		buffer.WriteString(string(e.Priority))
	}

	if len(e.SourceTypeName) != 0 {
		buffer.WriteString("|s:")
		buffer.WriteString(e.SourceTypeName)
	}

	if len(e.AlertType) != 0 {
		buffer.WriteString("|t:")
		buffer.WriteString(string(e.AlertType))
	}

	if len(tags)+len(e.Tags) > 0 {
		all := make([]string, 0, len(tags)+len(e.Tags))
		all = append(all, tags...)
		all = append(all, e.Tags...)
		buffer.WriteString("|#")
		buffer.WriteString(all[0])
		for _, tag := range all[1:] {
			buffer.WriteString(",")
			buffer.WriteString(tag)
		}
	}

	return buffer.String(), nil
}
@@ -0,0 +1,22 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
@@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2013 Armon Dadgar

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,28 @@
circbuf
=======

This repository provides the `circbuf` package. This provides a `Buffer` object
which is a circular (or ring) buffer. It has a fixed size, but can be written
to infinitely. Only the last `size` bytes are ever retained. The buffer implements
the `io.Writer` interface.

Documentation
=============

Full documentation can be found on [Godoc](http://godoc.org/github.com/armon/circbuf)

Usage
=====

The `circbuf` package is very easy to use:

```go
buf, _ := NewBuffer(6)
buf.Write([]byte("hello world"))

if string(buf.Bytes()) != " world" {
	panic("should only have last 6 bytes!")
}
```
@@ -0,0 +1,92 @@
package circbuf

import (
	"fmt"
)

// Buffer implements a circular buffer. It is a fixed size,
// and new writes overwrite older data, such that for a buffer
// of size N, for any amount of writes, only the last N bytes
// are retained.
type Buffer struct {
	data        []byte
	size        int64
	writeCursor int64
	written     int64
}

// NewBuffer creates a new buffer of a given size. The size
// must be greater than 0.
func NewBuffer(size int64) (*Buffer, error) {
	if size <= 0 {
		return nil, fmt.Errorf("Size must be positive")
	}

	b := &Buffer{
		size: size,
		data: make([]byte, size),
	}
	return b, nil
}

// Write writes up to len(buf) bytes to the internal ring,
// overwriting older data if necessary.
func (b *Buffer) Write(buf []byte) (int, error) {
	// Account for total bytes written
	n := len(buf)
	b.written += int64(n)

	// If the write is larger than our buffer, then we only care
	// about the last size bytes anyways
	if int64(n) > b.size {
		buf = buf[int64(n)-b.size:]
	}

	// Copy in place
	remain := b.size - b.writeCursor
	copy(b.data[b.writeCursor:], buf)
	if int64(len(buf)) > remain {
		copy(b.data, buf[remain:])
	}

	// Update location of the cursor
	b.writeCursor = ((b.writeCursor + int64(len(buf))) % b.size)
	return n, nil
}

// Size returns the size of the buffer
func (b *Buffer) Size() int64 {
	return b.size
}

// TotalWritten provides the total number of bytes written
func (b *Buffer) TotalWritten() int64 {
	return b.written
}
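
// The three cases in Bytes below: if the buffer filled exactly to its end,
// the data is already in order; if it has wrapped, the oldest bytes begin
// at writeCursor and are stitched back in front of the newest; otherwise
// only the first writeCursor bytes have ever been written.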

// Bytes provides a slice of the bytes written. This
// slice should not be written to.
func (b *Buffer) Bytes() []byte {
	switch {
	case b.written >= b.size && b.writeCursor == 0:
		return b.data
	case b.written > b.size:
		out := make([]byte, b.size)
		copy(out, b.data[b.writeCursor:])
		copy(out[b.size-b.writeCursor:], b.data[:b.writeCursor])
		return out
	default:
		return b.data[:b.writeCursor]
	}
}

// Reset resets the buffer so it has no content.
func (b *Buffer) Reset() {
	b.writeCursor = 0
	b.written = 0
}

// String returns the contents of the buffer as a string
func (b *Buffer) String() string {
	return string(b.Bytes())
}
@@ -0,0 +1,22 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
@@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2013 Armon Dadgar

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,71 @@
go-metrics
==========

This library provides a `metrics` package which can be used to instrument code,
expose application metrics, and profile runtime performance in a flexible manner.

Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics)

Sinks
=====

The `metrics` package makes use of a `MetricSink` interface to support delivery
to any type of backend. Currently the following sinks are provided:

* StatsiteSink: Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP)
* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP)
* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes)
* InmemSink: Provides in-memory aggregation, can be used to export stats
* FanoutSink: Sinks to multiple sinks, enabling writing to multiple statsite instances for example (see the sketch below)
* BlackholeSink: Sinks to nowhere
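
For example, a `FanoutSink` can duplicate every write to several sinks at
once. A minimal sketch (assuming a reachable statsite instance; error
handling elided):

    // Combine a statsite sink with in-memory aggregation
    statsite, _ := metrics.NewStatsiteSink("statsite:8125")
    inm := metrics.NewInmemSink(10*time.Second, time.Minute)
    fanout := metrics.FanoutSink{statsite, inm}
    metrics.NewGlobal(metrics.DefaultConfig("service-name"), fanout)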

In addition to the sinks, the `InmemSignal` can be used to catch a signal,
and dump a formatted output of recent metrics. For example, when a process gets
a SIGUSR1, it can dump to stderr recent performance metrics for debugging.

Examples
========

Here is an example of using the package:

    func SlowMethod() {
        // Profiling the runtime of a method
        defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now())
    }

    // Configure a statsite sink as the global metrics sink
    sink, _ := metrics.NewStatsiteSink("statsite:8125")
    metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink)

    // Emit a Key/Value pair
    metrics.EmitKey([]string{"questions", "meaning of life"}, 42)

Here is an example of setting up a signal handler:

    // Setup the inmem sink and signal handler
    inm := metrics.NewInmemSink(10*time.Second, time.Minute)
    sig := metrics.DefaultInmemSignal(inm)
    metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm)

    // Run some code
    inm.SetGauge([]string{"foo"}, 42)
    inm.EmitKey([]string{"bar"}, 30)

    inm.IncrCounter([]string{"baz"}, 42)
    inm.IncrCounter([]string{"baz"}, 1)
    inm.IncrCounter([]string{"baz"}, 80)

    inm.AddSample([]string{"method", "wow"}, 42)
    inm.AddSample([]string{"method", "wow"}, 100)
    inm.AddSample([]string{"method", "wow"}, 22)

    ....

When a signal comes in, output like the following will be dumped to stderr:

    [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000
    [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000
    [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509
    [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513
@@ -0,0 +1,12 @@
// +build !windows

package metrics

import (
	"syscall"
)

const (
	// DefaultSignal is used with DefaultInmemSignal
	DefaultSignal = syscall.SIGUSR1
)
@@ -0,0 +1,13 @@
// +build windows

package metrics

import (
	"syscall"
)

const (
	// DefaultSignal is used with DefaultInmemSignal
	// Windows has no SIGUSR1, use SIGBREAK
	DefaultSignal = syscall.Signal(21)
)
@@ -0,0 +1,125 @@
package datadog

import (
	"fmt"
	"strings"

	"github.com/DataDog/datadog-go/statsd"
)

// DogStatsdSink provides a MetricSink that can be used
// with a dogstatsd server. It utilizes the Dogstatsd client at github.com/DataDog/datadog-go/statsd
type DogStatsdSink struct {
	client            *statsd.Client
	hostName          string
	propagateHostname bool
}

// NewDogStatsdSink is used to create a new DogStatsdSink with sane defaults
func NewDogStatsdSink(addr string, hostName string) (*DogStatsdSink, error) {
	client, err := statsd.New(addr)
	if err != nil {
		return nil, err
	}
	sink := &DogStatsdSink{
		client:            client,
		hostName:          hostName,
		propagateHostname: false,
	}
	return sink, nil
}

// SetTags sets common tags on the Dogstatsd Client that will be sent
// along with all dogstatsd packets.
// Ref: http://docs.datadoghq.com/guides/dogstatsd/#tags
func (s *DogStatsdSink) SetTags(tags []string) {
	s.client.Tags = tags
}

// EnableHostNamePropagation forces a Dogstatsd `host` tag with the value specified by `s.hostName`.
// Since the go-metrics package has its own mechanism for attaching a hostname to metrics,
// setting the `propagateHostname` flag ensures that `s.hostName` overrides the host tag naively set by the DogStatsd server.
func (s *DogStatsdSink) EnableHostNamePropagation() {
	s.propagateHostname = true
}

func (s *DogStatsdSink) flattenKey(parts []string) string {
	joined := strings.Join(parts, ".")
	return strings.Map(func(r rune) rune {
		switch r {
		case ':':
			fallthrough
		case ' ':
			return '_'
		default:
			return r
		}
	}, joined)
}

func (s *DogStatsdSink) parseKey(key []string) ([]string, []string) {
	// Since DogStatsd supports dimensionality via tags on metric keys, this sink's approach is to splice the hostname out of the key in favor of a `host` tag.
	// The `host` tag is either forced here, or set downstream by the DogStatsd server.

	var tags []string
	hostName := s.hostName

	// Splice the hostname out of the key
	for i, el := range key {
		if el == hostName {
			key = append(key[:i], key[i+1:]...)
		}
	}

	if s.propagateHostname {
		tags = append(tags, fmt.Sprintf("host:%s", hostName))
	}
	return key, tags
}
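
// For illustration: with hostName "node1" and propagateHostname enabled,
// parseKey([]string{"consul", "node1", "latency"}) returns the key
// []string{"consul", "latency"} and the tags []string{"host:node1"}.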

// Implementation of methods in the MetricSink interface

func (s *DogStatsdSink) SetGauge(key []string, val float32) {
	s.SetGaugeWithTags(key, val, []string{})
}

func (s *DogStatsdSink) IncrCounter(key []string, val float32) {
	s.IncrCounterWithTags(key, val, []string{})
}

// EmitKey is not implemented since DogStatsd does not provide a metric type that holds an
// arbitrary number of values
func (s *DogStatsdSink) EmitKey(key []string, val float32) {
}

func (s *DogStatsdSink) AddSample(key []string, val float32) {
	s.AddSampleWithTags(key, val, []string{})
}

// The following ...WithTags methods correspond to Datadog's Tag extension to Statsd.
// http://docs.datadoghq.com/guides/dogstatsd/#tags

func (s *DogStatsdSink) SetGaugeWithTags(key []string, val float32, tags []string) {
	flatKey, tags := s.getFlatkeyAndCombinedTags(key, tags)
	rate := 1.0
	s.client.Gauge(flatKey, float64(val), tags, rate)
}

func (s *DogStatsdSink) IncrCounterWithTags(key []string, val float32, tags []string) {
	flatKey, tags := s.getFlatkeyAndCombinedTags(key, tags)
	rate := 1.0
	s.client.Count(flatKey, int64(val), tags, rate)
}

func (s *DogStatsdSink) AddSampleWithTags(key []string, val float32, tags []string) {
	flatKey, tags := s.getFlatkeyAndCombinedTags(key, tags)
	rate := 1.0
	s.client.TimeInMilliseconds(flatKey, float64(val), tags, rate)
}

func (s *DogStatsdSink) getFlatkeyAndCombinedTags(key []string, tags []string) (flattenedKey string, combinedTags []string) {
	key, hostTags := s.parseKey(key)
	flatKey := s.flattenKey(key)
	tags = append(tags, hostTags...)
	return flatKey, tags
}
@@ -0,0 +1,241 @@
package metrics

import (
	"fmt"
	"math"
	"strings"
	"sync"
	"time"
)

// InmemSink provides a MetricSink that does in-memory aggregation
// without sending metrics over a network. It can be embedded within
// an application to provide profiling information.
type InmemSink struct {
	// How long is each aggregation interval
	interval time.Duration

	// retain controls how many metric intervals we keep
	retain time.Duration

	// maxIntervals is the maximum length of intervals.
	// It is retain / interval.
	maxIntervals int

	// intervals is a slice of the retained intervals
	intervals    []*IntervalMetrics
	intervalLock sync.RWMutex
}

// IntervalMetrics stores the aggregated metrics
// for a specific interval
type IntervalMetrics struct {
	sync.RWMutex

	// The start time of the interval
	Interval time.Time

	// Gauges maps the key to the last set value
	Gauges map[string]float32

	// Points maps the string to the list of emitted values
	// from EmitKey
	Points map[string][]float32

	// Counters maps the string key to a sum of the counter
	// values
	Counters map[string]*AggregateSample

	// Samples maps the key to an AggregateSample,
	// which has the rolled up view of a sample
	Samples map[string]*AggregateSample
}

// NewIntervalMetrics creates a new IntervalMetrics for a given interval
func NewIntervalMetrics(intv time.Time) *IntervalMetrics {
	return &IntervalMetrics{
		Interval: intv,
		Gauges:   make(map[string]float32),
		Points:   make(map[string][]float32),
		Counters: make(map[string]*AggregateSample),
		Samples:  make(map[string]*AggregateSample),
	}
}

// AggregateSample is used to hold aggregate metrics
// about a sample
type AggregateSample struct {
	Count       int       // The count of emitted pairs
	Sum         float64   // The sum of values
	SumSq       float64   // The sum of squared values
	Min         float64   // Minimum value
	Max         float64   // Maximum value
	LastUpdated time.Time // When value was last updated
}
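
// Stddev below is the sample standard deviation computed from running sums:
// sqrt((n*sum(x^2) - sum(x)^2) / (n*(n-1))), so individual values need not
// be stored.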

// Stddev computes the standard deviation of the values
func (a *AggregateSample) Stddev() float64 {
	num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2)
	div := float64(a.Count * (a.Count - 1))
	if div == 0 {
		return 0
	}
	return math.Sqrt(num / div)
}

// Mean computes the mean of the values
func (a *AggregateSample) Mean() float64 {
	if a.Count == 0 {
		return 0
	}
	return a.Sum / float64(a.Count)
}

// Ingest is used to update a sample
func (a *AggregateSample) Ingest(v float64) {
	a.Count++
	a.Sum += v
	a.SumSq += (v * v)
	if v < a.Min || a.Count == 1 {
		a.Min = v
	}
	if v > a.Max || a.Count == 1 {
		a.Max = v
	}
	a.LastUpdated = time.Now()
}

func (a *AggregateSample) String() string {
	if a.Count == 0 {
		return "Count: 0"
	} else if a.Stddev() == 0 {
		return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated)
	} else {
		return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s",
			a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated)
	}
}

// NewInmemSink is used to construct a new in-memory sink.
// Uses an aggregation interval and maximum retention period.
func NewInmemSink(interval, retain time.Duration) *InmemSink {
	i := &InmemSink{
		interval:     interval,
		retain:       retain,
		maxIntervals: int(retain / interval),
	}
	i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals)
	return i
}

func (i *InmemSink) SetGauge(key []string, val float32) {
	k := i.flattenKey(key)
	intv := i.getInterval()

	intv.Lock()
	defer intv.Unlock()
	intv.Gauges[k] = val
}

func (i *InmemSink) EmitKey(key []string, val float32) {
	k := i.flattenKey(key)
	intv := i.getInterval()

	intv.Lock()
	defer intv.Unlock()
	vals := intv.Points[k]
	intv.Points[k] = append(vals, val)
}

func (i *InmemSink) IncrCounter(key []string, val float32) {
	k := i.flattenKey(key)
	intv := i.getInterval()

	intv.Lock()
	defer intv.Unlock()

	agg := intv.Counters[k]
	if agg == nil {
		agg = &AggregateSample{}
		intv.Counters[k] = agg
	}
	agg.Ingest(float64(val))
}

func (i *InmemSink) AddSample(key []string, val float32) {
	k := i.flattenKey(key)
	intv := i.getInterval()

	intv.Lock()
	defer intv.Unlock()

	agg := intv.Samples[k]
	if agg == nil {
		agg = &AggregateSample{}
		intv.Samples[k] = agg
	}
	agg.Ingest(float64(val))
}

// Data is used to retrieve all the aggregated metrics.
// Intervals may be in use, and a read lock should be acquired.
func (i *InmemSink) Data() []*IntervalMetrics {
	// Get the current interval, forces creation
	i.getInterval()

	i.intervalLock.RLock()
	defer i.intervalLock.RUnlock()

	intervals := make([]*IntervalMetrics, len(i.intervals))
	copy(intervals, i.intervals)
	return intervals
}

func (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics {
	i.intervalLock.RLock()
	defer i.intervalLock.RUnlock()

	n := len(i.intervals)
	if n > 0 && i.intervals[n-1].Interval == intv {
		return i.intervals[n-1]
	}
	return nil
}

func (i *InmemSink) createInterval(intv time.Time) *IntervalMetrics {
	i.intervalLock.Lock()
	defer i.intervalLock.Unlock()

	// Check for an existing interval
	n := len(i.intervals)
	if n > 0 && i.intervals[n-1].Interval == intv {
		return i.intervals[n-1]
	}

	// Add the current interval
	current := NewIntervalMetrics(intv)
	i.intervals = append(i.intervals, current)
	n++

	// Truncate the intervals if they are too long
	if n >= i.maxIntervals {
		copy(i.intervals[0:], i.intervals[n-i.maxIntervals:])
		i.intervals = i.intervals[:i.maxIntervals]
	}
	return current
}

// getInterval returns the current interval to write to
func (i *InmemSink) getInterval() *IntervalMetrics {
	intv := time.Now().Truncate(i.interval)
	if m := i.getExistingInterval(intv); m != nil {
		return m
	}
	return i.createInterval(intv)
}

// Flattens the key for formatting, removes spaces
func (i *InmemSink) flattenKey(parts []string) string {
	joined := strings.Join(parts, ".")
	return strings.Replace(joined, " ", "_", -1)
}
@@ -0,0 +1,100 @@
package metrics

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"os/signal"
	"sync"
	"syscall"
)

// InmemSignal is used to listen for a given signal, and when received,
// to dump the current metrics from the InmemSink to an io.Writer
type InmemSignal struct {
	signal syscall.Signal
	inm    *InmemSink
	w      io.Writer
	sigCh  chan os.Signal

	stop     bool
	stopCh   chan struct{}
	stopLock sync.Mutex
}

// NewInmemSignal creates a new InmemSignal which listens for a given signal,
// and dumps the current metrics out to a writer
func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal {
	i := &InmemSignal{
		signal: sig,
		inm:    inmem,
		w:      w,
		sigCh:  make(chan os.Signal, 1),
		stopCh: make(chan struct{}),
	}
	signal.Notify(i.sigCh, sig)
	go i.run()
	return i
}

// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1
// and writes output to stderr. Windows uses SIGBREAK
func DefaultInmemSignal(inmem *InmemSink) *InmemSignal {
	return NewInmemSignal(inmem, DefaultSignal, os.Stderr)
}

// Stop is used to stop the InmemSignal from listening
func (i *InmemSignal) Stop() {
	i.stopLock.Lock()
	defer i.stopLock.Unlock()

	if i.stop {
		return
	}
	i.stop = true
	close(i.stopCh)
	signal.Stop(i.sigCh)
}

// run is a long running routine that handles signals
func (i *InmemSignal) run() {
	for {
		select {
		case <-i.sigCh:
			i.dumpStats()
		case <-i.stopCh:
			return
		}
	}
}

// dumpStats is used to dump the data to output writer
func (i *InmemSignal) dumpStats() {
	buf := bytes.NewBuffer(nil)

	data := i.inm.Data()
	// Skip the last period which is still being aggregated
	for i := 0; i < len(data)-1; i++ {
		intv := data[i]
		intv.RLock()
		for name, val := range intv.Gauges {
			fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val)
		}
		for name, vals := range intv.Points {
			for _, val := range vals {
				fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val)
			}
		}
		for name, agg := range intv.Counters {
			fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg)
		}
		for name, agg := range intv.Samples {
			fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg)
		}
		intv.RUnlock()
	}

	// Write out the bytes
	i.w.Write(buf.Bytes())
}
@@ -0,0 +1,115 @@
package metrics

import (
	"runtime"
	"time"
)

func (m *Metrics) SetGauge(key []string, val float32) {
	if m.HostName != "" && m.EnableHostname {
		key = insert(0, m.HostName, key)
	}
	if m.EnableTypePrefix {
		key = insert(0, "gauge", key)
	}
	if m.ServiceName != "" {
		key = insert(0, m.ServiceName, key)
	}
	m.sink.SetGauge(key, val)
}

func (m *Metrics) EmitKey(key []string, val float32) {
	if m.EnableTypePrefix {
		key = insert(0, "kv", key)
	}
	if m.ServiceName != "" {
		key = insert(0, m.ServiceName, key)
	}
	m.sink.EmitKey(key, val)
}

func (m *Metrics) IncrCounter(key []string, val float32) {
	if m.EnableTypePrefix {
		key = insert(0, "counter", key)
	}
	if m.ServiceName != "" {
		key = insert(0, m.ServiceName, key)
	}
	m.sink.IncrCounter(key, val)
}

func (m *Metrics) AddSample(key []string, val float32) {
	if m.EnableTypePrefix {
		key = insert(0, "sample", key)
	}
	if m.ServiceName != "" {
		key = insert(0, m.ServiceName, key)
	}
	m.sink.AddSample(key, val)
}

func (m *Metrics) MeasureSince(key []string, start time.Time) {
	if m.EnableTypePrefix {
		key = insert(0, "timer", key)
	}
	if m.ServiceName != "" {
		key = insert(0, m.ServiceName, key)
	}
	now := time.Now()
	elapsed := now.Sub(start)
	msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity)
	m.sink.AddSample(key, msec)
}

// Periodically collects runtime stats to publish
func (m *Metrics) collectStats() {
	for {
		time.Sleep(m.ProfileInterval)
		m.emitRuntimeStats()
	}
}

// Emits various runtime statistics
func (m *Metrics) emitRuntimeStats() {
	// Export number of Goroutines
	numRoutines := runtime.NumGoroutine()
	m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines))

	// Export memory stats
	var stats runtime.MemStats
	runtime.ReadMemStats(&stats)
	m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc))
	m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys))
	m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs))
	m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees))
	m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects))
	m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs))
	m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC))

	// Export info about the last few GC runs
	num := stats.NumGC

	// Handle wrap around
	if num < m.lastNumGC {
		m.lastNumGC = 0
	}

	// Ensure we don't scan more than 256 pauses; PauseNs is a circular
	// buffer holding only the last 256 GC pause durations
	if num-m.lastNumGC >= 256 {
		m.lastNumGC = num - 255
	}

	for i := m.lastNumGC; i < num; i++ {
		pause := stats.PauseNs[i%256]
		m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause))
	}
	m.lastNumGC = num
}

// Inserts a string value at an index into the slice
func insert(i int, v string, s []string) []string {
	s = append(s, "")
	copy(s[i+1:], s[i:])
	s[i] = v
	return s
}
@@ -0,0 +1,88 @@
// +build go1.3

package prometheus

import (
	"strings"
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

type PrometheusSink struct {
	mu        sync.Mutex
	gauges    map[string]prometheus.Gauge
	summaries map[string]prometheus.Summary
	counters  map[string]prometheus.Counter
}

func NewPrometheusSink() (*PrometheusSink, error) {
	return &PrometheusSink{
		gauges:    make(map[string]prometheus.Gauge),
		summaries: make(map[string]prometheus.Summary),
		counters:  make(map[string]prometheus.Counter),
	}, nil
}

func (p *PrometheusSink) flattenKey(parts []string) string {
	joined := strings.Join(parts, "_")
	joined = strings.Replace(joined, " ", "_", -1)
	joined = strings.Replace(joined, ".", "_", -1)
	joined = strings.Replace(joined, "-", "_", -1)
	return joined
}
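
// For illustration: flattenKey([]string{"consul", "node-1", "rpc latency"})
// yields "consul_node_1_rpc_latency", since Prometheus metric names cannot
// contain '.', '-', or spaces.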

func (p *PrometheusSink) SetGauge(parts []string, val float32) {
	p.mu.Lock()
	defer p.mu.Unlock()
	key := p.flattenKey(parts)
	g, ok := p.gauges[key]
	if !ok {
		g = prometheus.NewGauge(prometheus.GaugeOpts{
			Name: key,
			Help: key,
		})
		prometheus.MustRegister(g)
		p.gauges[key] = g
	}
	g.Set(float64(val))
}

func (p *PrometheusSink) AddSample(parts []string, val float32) {
	p.mu.Lock()
	defer p.mu.Unlock()
	key := p.flattenKey(parts)
	g, ok := p.summaries[key]
	if !ok {
		g = prometheus.NewSummary(prometheus.SummaryOpts{
			Name:   key,
			Help:   key,
			MaxAge: 10 * time.Second,
		})
		prometheus.MustRegister(g)
		p.summaries[key] = g
	}
	g.Observe(float64(val))
}

// EmitKey is not implemented. Prometheus doesn’t offer a type for which an
// arbitrary number of values is retained, as Prometheus works with a pull
// model, rather than a push model.
func (p *PrometheusSink) EmitKey(key []string, val float32) {
}

func (p *PrometheusSink) IncrCounter(parts []string, val float32) {
	p.mu.Lock()
	defer p.mu.Unlock()
	key := p.flattenKey(parts)
	g, ok := p.counters[key]
	if !ok {
		g = prometheus.NewCounter(prometheus.CounterOpts{
			Name: key,
			Help: key,
		})
		prometheus.MustRegister(g)
		p.counters[key] = g
	}
	g.Add(float64(val))
}
@@ -0,0 +1,52 @@
package metrics

// The MetricSink interface is used to transmit metrics information
// to an external system
type MetricSink interface {
	// A Gauge should retain the last value it is set to
	SetGauge(key []string, val float32)

	// Should emit a Key/Value pair for each call
	EmitKey(key []string, val float32)

	// Counters should accumulate values
	IncrCounter(key []string, val float32)

	// Samples are for timing information, where quantiles are used
	AddSample(key []string, val float32)
}

// BlackholeSink is used to just blackhole messages
type BlackholeSink struct{}

func (*BlackholeSink) SetGauge(key []string, val float32)    {}
func (*BlackholeSink) EmitKey(key []string, val float32)     {}
func (*BlackholeSink) IncrCounter(key []string, val float32) {}
func (*BlackholeSink) AddSample(key []string, val float32)   {}

// FanoutSink is used to fan out values to multiple sinks
type FanoutSink []MetricSink

func (fh FanoutSink) SetGauge(key []string, val float32) {
	for _, s := range fh {
		s.SetGauge(key, val)
	}
}

func (fh FanoutSink) EmitKey(key []string, val float32) {
	for _, s := range fh {
		s.EmitKey(key, val)
	}
}

func (fh FanoutSink) IncrCounter(key []string, val float32) {
	for _, s := range fh {
		s.IncrCounter(key, val)
	}
}

func (fh FanoutSink) AddSample(key []string, val float32) {
	for _, s := range fh {
		s.AddSample(key, val)
	}
}
@@ -0,0 +1,95 @@
package metrics

import (
	"os"
	"time"
)

// Config is used to configure metrics settings
type Config struct {
	ServiceName          string        // Prefixed with keys to separate services
	HostName             string        // Hostname to use. If not provided and EnableHostname, it will be os.Hostname
	EnableHostname       bool          // Enable prefixing gauge values with hostname
	EnableRuntimeMetrics bool          // Enables profiling of runtime metrics (GC, Goroutines, Memory)
	EnableTypePrefix     bool          // Prefixes key with a type ("counter", "gauge", "timer")
	TimerGranularity     time.Duration // Granularity of timers.
	ProfileInterval      time.Duration // Interval to profile runtime metrics
}

// Metrics represents an instance of a metrics sink that can
// be used to emit metrics
type Metrics struct {
	Config
	lastNumGC uint32
	sink      MetricSink
}

// Shared global metrics instance
var globalMetrics *Metrics

func init() {
	// Initialize to a blackhole sink to avoid errors
	globalMetrics = &Metrics{sink: &BlackholeSink{}}
}

// DefaultConfig provides a sane default configuration
func DefaultConfig(serviceName string) *Config {
	c := &Config{
		ServiceName:          serviceName, // Use client provided service
		HostName:             "",
		EnableHostname:       true,             // Enable hostname prefix
		EnableRuntimeMetrics: true,             // Enable runtime profiling
		EnableTypePrefix:     false,            // Disable type prefix
		TimerGranularity:     time.Millisecond, // Timers are in milliseconds
		ProfileInterval:      time.Second,      // Poll runtime every second
	}

	// Try to get the hostname
	name, _ := os.Hostname()
	c.HostName = name
	return c
}

// New is used to create a new instance of Metrics
func New(conf *Config, sink MetricSink) (*Metrics, error) {
	met := &Metrics{}
	met.Config = *conf
	met.sink = sink

	// Start the runtime collector
	if conf.EnableRuntimeMetrics {
		go met.collectStats()
	}
	return met, nil
}

// NewGlobal is the same as New, but it assigns the metrics object to be
// used globally as well as returning it.
func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) {
	metrics, err := New(conf, sink)
	if err == nil {
		globalMetrics = metrics
	}
	return metrics, err
}

// Proxy all the methods to the globalMetrics instance

func SetGauge(key []string, val float32) {
	globalMetrics.SetGauge(key, val)
}

func EmitKey(key []string, val float32) {
	globalMetrics.EmitKey(key, val)
}

func IncrCounter(key []string, val float32) {
	globalMetrics.IncrCounter(key, val)
}

func AddSample(key []string, val float32) {
	globalMetrics.AddSample(key, val)
}

func MeasureSince(key []string, start time.Time) {
	globalMetrics.MeasureSince(key, start)
}
@@ -0,0 +1,154 @@
package metrics

import (
	"bytes"
	"fmt"
	"log"
	"net"
	"strings"
	"time"
)

const (
	// statsdMaxLen is the maximum size of a packet
	// to send to statsd
	statsdMaxLen = 1400
)

// StatsdSink provides a MetricSink that can be used
// with a statsite or statsd metrics server. It uses
// only UDP packets, while StatsiteSink uses TCP.
type StatsdSink struct {
	addr        string
	metricQueue chan string
}

// NewStatsdSink is used to create a new StatsdSink
func NewStatsdSink(addr string) (*StatsdSink, error) {
	s := &StatsdSink{
		addr:        addr,
		metricQueue: make(chan string, 4096),
	}
	go s.flushMetrics()
	return s, nil
}

// Shutdown is used to stop flushing to statsd
func (s *StatsdSink) Shutdown() {
	close(s.metricQueue)
}

func (s *StatsdSink) SetGauge(key []string, val float32) {
	flatKey := s.flattenKey(key)
	s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
}

func (s *StatsdSink) EmitKey(key []string, val float32) {
	flatKey := s.flattenKey(key)
	s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
}

func (s *StatsdSink) IncrCounter(key []string, val float32) {
	flatKey := s.flattenKey(key)
	s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
}

func (s *StatsdSink) AddSample(key []string, val float32) {
	flatKey := s.flattenKey(key)
	s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
}

// Flattens the key for formatting, removes spaces
func (s *StatsdSink) flattenKey(parts []string) string {
	joined := strings.Join(parts, ".")
	return strings.Map(func(r rune) rune {
		switch r {
		case ':':
			fallthrough
		case ' ':
			return '_'
		default:
			return r
		}
	}, joined)
}

// Does a non-blocking push to the metrics queue
func (s *StatsdSink) pushMetric(m string) {
	select {
	case s.metricQueue <- m:
	default:
	}
}

// Flushes metrics
func (s *StatsdSink) flushMetrics() {
	var sock net.Conn
	var err error
	var wait <-chan time.Time
	ticker := time.NewTicker(flushInterval)
	defer ticker.Stop()
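
	// Connection state machine: dial the server, batch queued metrics
	// into packets no larger than statsdMaxLen, and on any write error
	// jump to WAIT, which drains the queue for five seconds before
	// reconnecting at CONNECT. QUIT exits once the queue is closed.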

CONNECT:
	// Create a buffer
	buf := bytes.NewBuffer(nil)

	// Attempt to connect
	sock, err = net.Dial("udp", s.addr)
	if err != nil {
		log.Printf("[ERR] Error connecting to statsd! Err: %s", err)
		goto WAIT
	}

	for {
		select {
		case metric, ok := <-s.metricQueue:
			// Get a metric from the queue
			if !ok {
				goto QUIT
			}

			// Check if this would overflow the packet size
			if len(metric)+buf.Len() > statsdMaxLen {
				_, err := sock.Write(buf.Bytes())
				buf.Reset()
				if err != nil {
					log.Printf("[ERR] Error writing to statsd! Err: %s", err)
					goto WAIT
				}
			}

			// Append to the buffer
			buf.WriteString(metric)

		case <-ticker.C:
			if buf.Len() == 0 {
				continue
			}

			_, err := sock.Write(buf.Bytes())
			buf.Reset()
			if err != nil {
				log.Printf("[ERR] Error flushing to statsd! Err: %s", err)
				goto WAIT
			}
		}
	}

WAIT:
	// Wait for a while
	wait = time.After(time.Duration(5) * time.Second)
	for {
		select {
		// Dequeue the messages to avoid backlog
		case _, ok := <-s.metricQueue:
			if !ok {
				goto QUIT
			}
		case <-wait:
			goto CONNECT
		}
	}
QUIT:
	s.metricQueue = nil
}
@@ -0,0 +1,142 @@
package metrics

import (
	"bufio"
	"fmt"
	"log"
	"net"
	"strings"
	"time"
)

const (
	// We force flush the statsite metrics after this period of
	// inactivity. Prevents stats from getting stuck in a buffer
	// forever.
	flushInterval = 100 * time.Millisecond
)

// StatsiteSink provides a MetricSink that can be used with a
// statsite metrics server
type StatsiteSink struct {
	addr        string
	metricQueue chan string
}

// NewStatsiteSink is used to create a new StatsiteSink
func NewStatsiteSink(addr string) (*StatsiteSink, error) {
	s := &StatsiteSink{
		addr:        addr,
		metricQueue: make(chan string, 4096),
	}
	go s.flushMetrics()
	return s, nil
}

// Shutdown is used to stop flushing to statsite
func (s *StatsiteSink) Shutdown() {
	close(s.metricQueue)
}

func (s *StatsiteSink) SetGauge(key []string, val float32) {
	flatKey := s.flattenKey(key)
	s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
}

func (s *StatsiteSink) EmitKey(key []string, val float32) {
	flatKey := s.flattenKey(key)
	s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
}

func (s *StatsiteSink) IncrCounter(key []string, val float32) {
	flatKey := s.flattenKey(key)
	s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
}

func (s *StatsiteSink) AddSample(key []string, val float32) {
	flatKey := s.flattenKey(key)
	s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
}

// Flattens the key for formatting, removes spaces
func (s *StatsiteSink) flattenKey(parts []string) string {
	joined := strings.Join(parts, ".")
	return strings.Map(func(r rune) rune {
		switch r {
		case ':':
			fallthrough
		case ' ':
			return '_'
		default:
			return r
		}
	}, joined)
}

// Does a non-blocking push to the metrics queue
func (s *StatsiteSink) pushMetric(m string) {
	select {
	case s.metricQueue <- m:
	default:
	}
}

// Flushes metrics
func (s *StatsiteSink) flushMetrics() {
	var sock net.Conn
	var err error
	var wait <-chan time.Time
	var buffered *bufio.Writer
	ticker := time.NewTicker(flushInterval)
	defer ticker.Stop()

CONNECT:
	// Attempt to connect
	sock, err = net.Dial("tcp", s.addr)
	if err != nil {
		log.Printf("[ERR] Error connecting to statsite! Err: %s", err)
		goto WAIT
	}

	// Create a buffered writer
	buffered = bufio.NewWriter(sock)

	for {
		select {
		case metric, ok := <-s.metricQueue:
			// Get a metric from the queue
			if !ok {
				goto QUIT
			}

			// Try to send to statsite
			_, err := buffered.Write([]byte(metric))
			if err != nil {
				log.Printf("[ERR] Error writing to statsite! Err: %s", err)
				goto WAIT
			}
		case <-ticker.C:
			if err := buffered.Flush(); err != nil {
				log.Printf("[ERR] Error flushing to statsite! Err: %s", err)
				goto WAIT
			}
		}
	}

WAIT:
	// Wait for a while
	wait = time.After(time.Duration(5) * time.Second)
	for {
		select {
		// Dequeue the messages to avoid backlog
		case _, ok := <-s.metricQueue:
			if !ok {
				goto QUIT
			}
		case <-wait:
			goto CONNECT
		}
	}
QUIT:
	s.metricQueue = nil
}
@@ -0,0 +1,22 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
@@ -0,0 +1,3 @@
language: go
go:
  - tip
@@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2014 Armon Dadgar

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,38 @@
go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix)
=========

Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
The package only provides a single `Tree` implementation, optimized for sparse nodes.

As a radix tree, it provides the following:
 * O(k) operations. In many cases, this can be faster than a hash table since
   the hash function is an O(k) operation, and hash tables have very poor cache locality.
 * Minimum / Maximum value lookups
 * Ordered iteration

For an immutable variant, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix).

Documentation
=============

The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix).

Example
=======

Below is a simple example of usage:

```go
// Create a tree
r := radix.New()
r.Insert("foo", 1)
r.Insert("bar", 2)
r.Insert("foobar", 2)

// Find the longest prefix match
m, _, _ := r.LongestPrefix("foozip")
if m != "foo" {
	panic("should be foo")
}
```
@@ -0,0 +1,496 @@
package radix

import (
	"sort"
	"strings"
)

// WalkFn is used when walking the tree. Takes a
// key and value, returning if iteration should
// be terminated.
type WalkFn func(s string, v interface{}) bool

// leafNode is used to represent a value
type leafNode struct {
	key string
	val interface{}
}

// edge is used to represent an edge node
type edge struct {
	label byte
	node  *node
}

type node struct {
	// leaf is used to store possible leaf
	leaf *leafNode

	// prefix is the common prefix we ignore
	prefix string

	// Edges should be stored in-order for iteration.
	// We avoid a fully materialized slice to save memory,
	// since in most cases we expect to be sparse
	edges edges
}

func (n *node) isLeaf() bool {
	return n.leaf != nil
}

func (n *node) addEdge(e edge) {
	n.edges = append(n.edges, e)
	n.edges.Sort()
}

func (n *node) replaceEdge(e edge) {
	num := len(n.edges)
	idx := sort.Search(num, func(i int) bool {
		return n.edges[i].label >= e.label
	})
	if idx < num && n.edges[idx].label == e.label {
		n.edges[idx].node = e.node
		return
	}
	panic("replacing missing edge")
}

func (n *node) getEdge(label byte) *node {
	num := len(n.edges)
	idx := sort.Search(num, func(i int) bool {
		return n.edges[i].label >= label
	})
	if idx < num && n.edges[idx].label == label {
		return n.edges[idx].node
	}
	return nil
}

func (n *node) delEdge(label byte) {
	num := len(n.edges)
	idx := sort.Search(num, func(i int) bool {
		return n.edges[i].label >= label
	})
	if idx < num && n.edges[idx].label == label {
		copy(n.edges[idx:], n.edges[idx+1:])
		n.edges[len(n.edges)-1] = edge{}
		n.edges = n.edges[:len(n.edges)-1]
	}
}

type edges []edge

func (e edges) Len() int {
	return len(e)
}

func (e edges) Less(i, j int) bool {
	return e[i].label < e[j].label
}

func (e edges) Swap(i, j int) {
	e[i], e[j] = e[j], e[i]
}

func (e edges) Sort() {
	sort.Sort(e)
}

// Tree implements a radix tree. This can be treated as a
// Dictionary abstract data type. The main advantage over
// a standard hash map is prefix-based lookups and
// ordered iteration.
type Tree struct {
	root *node
	size int
}

// New returns an empty Tree
func New() *Tree {
	return NewFromMap(nil)
}

// NewFromMap returns a new tree containing the keys
// from an existing map
func NewFromMap(m map[string]interface{}) *Tree {
	t := &Tree{root: &node{}}
	for k, v := range m {
		t.Insert(k, v)
	}
	return t
}

// Len is used to return the number of elements in the tree
func (t *Tree) Len() int {
	return t.size
}

// longestPrefix finds the length of the shared prefix
// of two strings
func longestPrefix(k1, k2 string) int {
	max := len(k1)
	if l := len(k2); l < max {
		max = l
	}
	var i int
	for i = 0; i < max; i++ {
		if k1[i] != k2[i] {
			break
		}
	}
	return i
}

// Insert is used to add a new entry or update
// an existing entry. Returns if updated.
func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) {
	var parent *node
	n := t.root
	search := s
	for {
		// Handle key exhaustion
		if len(search) == 0 {
			if n.isLeaf() {
				old := n.leaf.val
				n.leaf.val = v
				return old, true
			}

			n.leaf = &leafNode{
				key: s,
				val: v,
			}
			t.size++
			return nil, false
		}

		// Look for the edge
		parent = n
		n = n.getEdge(search[0])

		// No edge, create one
		if n == nil {
			e := edge{
				label: search[0],
				node: &node{
					leaf: &leafNode{
						key: s,
						val: v,
					},
					prefix: search,
				},
			}
			parent.addEdge(e)
			t.size++
			return nil, false
		}

		// Determine longest prefix of the search key on match
		commonPrefix := longestPrefix(search, n.prefix)
		if commonPrefix == len(n.prefix) {
			search = search[commonPrefix:]
			continue
		}
|
||||
|
||||
// Split the node
|
||||
t.size++
|
||||
child := &node{
|
||||
prefix: search[:commonPrefix],
|
||||
}
|
||||
parent.replaceEdge(edge{
|
||||
label: search[0],
|
||||
node: child,
|
||||
})
|
||||
|
||||
// Restore the existing node
|
||||
child.addEdge(edge{
|
||||
label: n.prefix[commonPrefix],
|
||||
node: n,
|
||||
})
|
||||
n.prefix = n.prefix[commonPrefix:]
|
||||
|
||||
// Create a new leaf node
|
||||
leaf := &leafNode{
|
||||
key: s,
|
||||
val: v,
|
||||
}
|
||||
|
||||
// If the new key is a subset, add to to this node
|
||||
search = search[commonPrefix:]
|
||||
if len(search) == 0 {
|
||||
child.leaf = leaf
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Create a new edge for the node
|
||||
child.addEdge(edge{
|
||||
label: search[0],
|
||||
node: &node{
|
||||
leaf: leaf,
|
||||
prefix: search,
|
||||
},
|
||||
})
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
|
||||
// Delete is used to delete a key, returning the previous
|
||||
// value and if it was deleted
|
||||
func (t *Tree) Delete(s string) (interface{}, bool) {
|
||||
var parent *node
|
||||
var label byte
|
||||
n := t.root
|
||||
search := s
|
||||
for {
|
||||
// Check for key exhaution
|
||||
if len(search) == 0 {
|
||||
if !n.isLeaf() {
|
||||
break
|
||||
}
|
||||
goto DELETE
|
||||
}
|
||||
|
||||
// Look for an edge
|
||||
parent = n
|
||||
label = search[0]
|
||||
n = n.getEdge(label)
|
||||
if n == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Consume the search prefix
|
||||
if strings.HasPrefix(search, n.prefix) {
|
||||
search = search[len(n.prefix):]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
|
||||
DELETE:
|
||||
// Delete the leaf
|
||||
leaf := n.leaf
|
||||
n.leaf = nil
|
||||
t.size--
|
||||
|
||||
// Check if we should delete this node from the parent
|
||||
if parent != nil && len(n.edges) == 0 {
|
||||
parent.delEdge(label)
|
||||
}
|
||||
|
||||
// Check if we should merge this node
|
||||
if n != t.root && len(n.edges) == 1 {
|
||||
n.mergeChild()
|
||||
}
|
||||
|
||||
// Check if we should merge the parent's other child
|
||||
if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() {
|
||||
parent.mergeChild()
|
||||
}
|
||||
|
||||
return leaf.val, true
|
||||
}
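
// mergeChild merges a node that is left with a single edge into its
// child, concatenating the prefixes and adopting the child's leaf and edges.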
func (n *node) mergeChild() {
	e := n.edges[0]
	child := e.node
	n.prefix = n.prefix + child.prefix
	n.leaf = child.leaf
	n.edges = child.edges
}

// Get is used to lookup a specific key, returning
// the value and if it was found
func (t *Tree) Get(s string) (interface{}, bool) {
	n := t.root
	search := s
	for {
		// Check for key exhaustion
		if len(search) == 0 {
			if n.isLeaf() {
				return n.leaf.val, true
			}
			break
		}

		// Look for an edge
		n = n.getEdge(search[0])
		if n == nil {
			break
		}

		// Consume the search prefix
		if strings.HasPrefix(search, n.prefix) {
			search = search[len(n.prefix):]
		} else {
			break
		}
	}
	return nil, false
}

// LongestPrefix is like Get, but instead of an
// exact match, it will return the longest prefix match.
func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) {
	var last *leafNode
	n := t.root
	search := s
	for {
		// Look for a leaf node
		if n.isLeaf() {
			last = n.leaf
		}

		// Check for key exhaustion
		if len(search) == 0 {
			break
		}

		// Look for an edge
		n = n.getEdge(search[0])
		if n == nil {
			break
		}

		// Consume the search prefix
		if strings.HasPrefix(search, n.prefix) {
			search = search[len(n.prefix):]
		} else {
			break
		}
	}
	if last != nil {
		return last.key, last.val, true
	}
	return "", nil, false
}

// Minimum is used to return the minimum value in the tree
func (t *Tree) Minimum() (string, interface{}, bool) {
	n := t.root
	for {
		if n.isLeaf() {
			return n.leaf.key, n.leaf.val, true
		}
		if len(n.edges) > 0 {
			n = n.edges[0].node
		} else {
			break
		}
	}
	return "", nil, false
}

// Maximum is used to return the maximum value in the tree
func (t *Tree) Maximum() (string, interface{}, bool) {
	n := t.root
	for {
		if num := len(n.edges); num > 0 {
			n = n.edges[num-1].node
			continue
		}
		if n.isLeaf() {
			return n.leaf.key, n.leaf.val, true
		}
		break
	}
	return "", nil, false
}

// Walk is used to walk the tree
func (t *Tree) Walk(fn WalkFn) {
	recursiveWalk(t.root, fn)
}

// WalkPrefix is used to walk the tree under a prefix
func (t *Tree) WalkPrefix(prefix string, fn WalkFn) {
	n := t.root
	search := prefix
	for {
		// Check for key exhaustion
		if len(search) == 0 {
			recursiveWalk(n, fn)
			return
		}

		// Look for an edge
		n = n.getEdge(search[0])
		if n == nil {
			break
		}

		// Consume the search prefix
		if strings.HasPrefix(search, n.prefix) {
			search = search[len(n.prefix):]
		} else if strings.HasPrefix(n.prefix, search) {
			// Child may be under our search prefix
			recursiveWalk(n, fn)
			return
		} else {
			break
		}
	}
}

// WalkPath is used to walk the tree, but only visiting nodes
// from the root down to a given leaf. Where WalkPrefix walks
// all the entries *under* the given prefix, this walks the
// entries *above* the given prefix.
func (t *Tree) WalkPath(path string, fn WalkFn) {
	n := t.root
	search := path
	for {
		// Visit the leaf values if any
		if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
			return
		}

		// Check for key exhaustion
		if len(search) == 0 {
			return
		}

		// Look for an edge
		n = n.getEdge(search[0])
		if n == nil {
			return
		}

		// Consume the search prefix
		if strings.HasPrefix(search, n.prefix) {
			search = search[len(n.prefix):]
		} else {
			break
		}
	}
}

// recursiveWalk is used to do a pre-order walk of a node
// recursively. Returns true if the walk should be aborted
func recursiveWalk(n *node, fn WalkFn) bool {
	// Visit the leaf values if any
	if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
		return true
	}

	// Recurse on the children
	for _, e := range n.edges {
		if recursiveWalk(e.node, fn) {
			return true
		}
	}
	return false
}

// ToMap is used to walk the tree and convert it into a map
func (t *Tree) ToMap() map[string]interface{} {
	out := make(map[string]interface{}, t.size)
	t.Walk(func(k string, v interface{}) bool {
		out[k] = v
		return false
	})
	return out
}

File diff suppressed because it is too large

@@ -0,0 +1,292 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile
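
// A minimal usage sketch (illustrative only, not part of the vendored
// source; it assumes the exported API defined below):
//
//	s := NewTargeted(map[float64]float64{0.50: 0.005, 0.99: 0.001})
//	for _, v := range observations {
//		s.Insert(v)
//	}
//	p99 := s.Query(0.99)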

import (
	"math"
	"sort"
)

// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
	Value float64 `json:",string"`
	Width float64 `json:",string"`
	Delta float64 `json:",string"`
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int           { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

type invariant func(s *stream, r float64) float64

// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * r
	}
	return newStream(ƒ)
}

// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * (s.n - r)
	}
	return newStream(ƒ)
}

// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targets map[float64]float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		var m = math.MaxFloat64
		var f float64
		for quantile, epsilon := range targets {
			if quantile*s.n <= r {
				f = (2 * epsilon * r) / quantile
			} else {
				f = (2 * epsilon * (s.n - r)) / (1 - quantile)
			}
			if f < m {
				m = f
			}
		}
		return m
	}
	return newStream(ƒ)
}

// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream
	b      Samples
	sorted bool
}

func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	s.insert(Sample{Value: v, Width: 1})
}

func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}

// Query returns the computed qth percentile value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
	if !s.flushed() {
		// Fast path when there hasn't been enough data for a flush;
		// this also yields better accuracy for small sets of data.
		l := len(s.b)
		if l == 0 {
			return 0
		}
		i := int(float64(l) * q)
		if i > 0 {
			i -= 1
		}
		s.maybeSort()
		return s.b[i].Value
	}
	s.flush()
	return s.stream.query(q)
}

// Merge merges samples into the underlying stream's samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	sort.Sort(samples)
	s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		return s.b
	}
	s.flush()
	return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	return len(s.b) + s.stream.count()
}

func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}

type stream struct {
	n float64
	l []Sample
	ƒ invariant
}

func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}

func (s *stream) merge(samples Samples) {
	// TODO(beorn7): This tries to merge not only individual samples, but
	// whole summaries. The paper doesn't mention merging summaries at
	// all. Unittests show that the merging is inaccurate. Find out how to
	// do merges properly.
	var r float64
	i := 0
	for _, sample := range samples {
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
		i++
	inserted:
		s.n += sample.Width
		r += sample.Width
	}
	s.compress()
}

func (s *stream) count() int {
	return int(s.n)
}

func (s *stream) query(q float64) float64 {
	t := math.Ceil(q * s.n)
	t += math.Ceil(s.ƒ(s, t) / 2)
	p := s.l[0]
	var r float64
	for _, c := range s.l[1:] {
		r += p.Width
		if r+c.Width+c.Delta > t {
			return p.Value
		}
		p = c
	}
	return p.Value
}

func (s *stream) compress() {
	if len(s.l) < 2 {
		return
	}
	x := s.l[len(s.l)-1]
	xi := len(s.l) - 1
	r := s.n - 1 - x.Width

	for i := len(s.l) - 2; i >= 0; i-- {
		c := s.l[i]
		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
			x.Width += c.Width
			s.l[xi] = x
			// Remove element at i.
			copy(s.l[i:], s.l[i+1:])
			s.l = s.l[:len(s.l)-1]
			xi -= 1
		} else {
			x = c
			xi = i
		}
		r -= c.Width
	}
}

func (s *stream) samples() Samples {
	samples := make(Samples, len(s.l))
	copy(samples, s.l)
	return samples
}

@@ -0,0 +1,2 @@
example/example
example/example.exe

@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [2013] [the CloudFoundry Authors]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

@@ -0,0 +1,30 @@
# Speakeasy

This package provides cross-platform Go (#golang) helpers for taking user input
from the terminal while not echoing the input back (similar to `getpasswd`). The
package uses syscalls to avoid any dependence on cgo, and is therefore
compatible with cross-compiling.

[![GoDoc](https://godoc.org/github.com/bgentry/speakeasy?status.png)][godoc]
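
A minimal usage sketch (it mirrors the example program vendored below; error
handling shortened):

```go
// Prompt for a password without echoing the typed characters.
password, err := speakeasy.Ask("Please enter a password: ")
if err != nil {
	log.Fatal(err)
}
fmt.Printf("Read %d characters\n", len(password))
```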

## Unicode

Multi-byte unicode characters work successfully on Mac OS X. On Windows,
however, this may be problematic (as is UTF in general on Windows). Other
platforms have not been tested.

## License

The code herein was not written by me, but was compiled from two separate open
source packages. Unix portions were imported from [gopass][gopass], while
Windows portions were imported from the [CloudFoundry Go CLI][cf-cli]'s
[Windows terminal helpers][cf-ui-windows].

The [license for the windows portion](./LICENSE_WINDOWS) has been copied exactly
from the source (though I attempted to fill in the correct owner in the
boilerplate copyright notice).

[cf-cli]: https://github.com/cloudfoundry/cli "CloudFoundry Go CLI"
[cf-ui-windows]: https://github.com/cloudfoundry/cli/blob/master/src/cf/terminal/ui_windows.go "CloudFoundry Go CLI Windows input helpers"
[godoc]: https://godoc.org/github.com/bgentry/speakeasy "speakeasy on Godoc.org"
[gopass]: https://code.google.com/p/gopass "gopass"

@@ -0,0 +1,18 @@
package main

import (
	"fmt"
	"os"

	"github.com/bgentry/speakeasy"
)

func main() {
	password, err := speakeasy.Ask("Please enter a password: ")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	fmt.Printf("Password result: %q\n", password)
	fmt.Printf("Password len: %d\n", len(password))
}

@@ -0,0 +1,47 @@
package speakeasy

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// Ask the user to enter a password with input hidden. prompt is a string to
// display before the user's input. Returns the provided password, or an error
// if the command failed.
func Ask(prompt string) (password string, err error) {
	return FAsk(os.Stdout, prompt)
}

// FAsk is the same as the Ask function, except it is possible to specify the
// file to write the prompt to.
func FAsk(file *os.File, prompt string) (password string, err error) {
	if prompt != "" {
		fmt.Fprint(file, prompt) // Display the prompt.
	}
	password, err = getPassword()

	// Carriage return after the user input.
	fmt.Fprintln(file, "")
	return
}

func readline() (value string, err error) {
	var valb []byte
	var n int
	b := make([]byte, 1)
	for {
		// read one byte at a time so we don't accidentally read extra bytes
		n, err = os.Stdin.Read(b)
		if err != nil && err != io.EOF {
			return "", err
		}
		if n == 0 || b[0] == '\n' {
			break
		}
		valb = append(valb, b[0])
	}

	return strings.TrimSuffix(string(valb), "\r"), nil
}

@@ -0,0 +1,93 @@
// based on https://code.google.com/p/gopass
// Author: johnsiilver@gmail.com (John Doak)
//
// Original code is based on code by RogerV in the golang-nuts thread:
// https://groups.google.com/group/golang-nuts/browse_thread/thread/40cc41e9d9fc9247

// +build darwin freebsd linux netbsd openbsd solaris

package speakeasy

import (
	"fmt"
	"os"
	"os/signal"
	"strings"
	"syscall"
)

const sttyArg0 = "/bin/stty"

var (
	sttyArgvEOff = []string{"stty", "-echo"}
	sttyArgvEOn  = []string{"stty", "echo"}
)

// getPassword gets input hidden from the terminal from a user. This is
// accomplished by turning off terminal echo, reading input from the user and
// finally turning on terminal echo.
func getPassword() (password string, err error) {
	sig := make(chan os.Signal, 10)
	brk := make(chan bool)

	// File descriptors for stdin, stdout, and stderr.
	fd := []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()}

	// Set up notifications of termination signals to channel sig, and start
	// a goroutine to watch for these signals so we can turn echo back on if
	// need be.
	signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT,
		syscall.SIGTERM)
	go catchSignal(fd, sig, brk)

	// Turn off the terminal echo.
	pid, err := echoOff(fd)
	if err != nil {
		return "", err
	}

	// Turn on the terminal echo and stop listening for signals.
	defer signal.Stop(sig)
	defer close(brk)
	defer echoOn(fd)

	syscall.Wait4(pid, nil, 0, nil)

	line, err := readline()
	if err == nil {
		password = strings.TrimSpace(line)
	} else {
		err = fmt.Errorf("failed during password entry: %s", err)
	}

	return password, err
}

// echoOff turns off the terminal echo.
func echoOff(fd []uintptr) (int, error) {
	pid, err := syscall.ForkExec(sttyArg0, sttyArgvEOff, &syscall.ProcAttr{Dir: "", Files: fd})
	if err != nil {
		return 0, fmt.Errorf("failed turning off console echo for password entry:\n\t%s", err)
	}
	return pid, nil
}

// echoOn turns back on the terminal echo.
func echoOn(fd []uintptr) {
	// Turn on the terminal echo.
	pid, e := syscall.ForkExec(sttyArg0, sttyArgvEOn, &syscall.ProcAttr{Dir: "", Files: fd})
	if e == nil {
		syscall.Wait4(pid, nil, 0, nil)
	}
}

// catchSignal tries to catch SIGKILL, SIGQUIT and SIGINT so that we can turn
// terminal echo back on before the program ends. Otherwise the user is left
// with echo off on their terminal.
func catchSignal(fd []uintptr, sig chan os.Signal, brk chan bool) {
	select {
	case <-sig:
		echoOn(fd)
		os.Exit(-1)
	case <-brk:
	}
}

@@ -0,0 +1,43 @@
// +build windows

package speakeasy

import (
	"os"
	"syscall"
)

// SetConsoleMode function can be used to change value of ENABLE_ECHO_INPUT:
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
const ENABLE_ECHO_INPUT = 0x0004

func getPassword() (password string, err error) {
	hStdin := syscall.Handle(os.Stdin.Fd())
	var oldMode uint32

	err = syscall.GetConsoleMode(hStdin, &oldMode)
	if err != nil {
		return
	}

	var newMode uint32 = (oldMode &^ ENABLE_ECHO_INPUT)

	err = setConsoleMode(hStdin, newMode)
	defer setConsoleMode(hStdin, oldMode)
	if err != nil {
		return
	}

	return readline()
}

func setConsoleMode(console syscall.Handle, mode uint32) (err error) {
	dll := syscall.MustLoadDLL("kernel32")
	proc := dll.MustFindProc("SetConsoleMode")
	r, _, err := proc.Call(uintptr(console), uintptr(mode))

	if r == 0 {
		return err
	}
	return nil
}

@@ -0,0 +1,4 @@
*.prof
*.test
*.swp
/bin/

@@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2013 Ben Johnson

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@@ -0,0 +1,18 @@
BRANCH=`git rev-parse --abbrev-ref HEAD`
COMMIT=`git rev-parse --short HEAD`
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"

default: build

race:
	@go test -v -race -test.run="TestSimulate_(100op|1000op)"

# go get github.com/kisielk/errcheck
errcheck:
	@errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt

test:
	@go test -v -cover .
	@go test -v ./cmd/bolt

.PHONY: fmt test

@@ -0,0 +1,840 @@
Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.0-green.svg)
====

Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
[LMDB project][lmdb]. The goal of the project is to provide a simple,
fast, and reliable database for projects that don't require a full database
server such as Postgres or MySQL.

Since Bolt is meant to be used as such a low-level piece of functionality,
simplicity is key. The API will be small and only focus on getting values
and setting values. That's it.

[hyc_symas]: https://twitter.com/hyc_symas
[lmdb]: http://symas.com/mdb/

## Project Status

Bolt is stable and the API is fixed. Full unit test coverage and randomized
black box testing are used to ensure database consistency and thread safety.
Bolt is currently in high-load production environments serving databases as
large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
services every day.

## Table of Contents

- [Getting Started](#getting-started)
  - [Installing](#installing)
  - [Opening a database](#opening-a-database)
  - [Transactions](#transactions)
    - [Read-write transactions](#read-write-transactions)
    - [Read-only transactions](#read-only-transactions)
    - [Batch read-write transactions](#batch-read-write-transactions)
    - [Managing transactions manually](#managing-transactions-manually)
  - [Using buckets](#using-buckets)
  - [Using key/value pairs](#using-keyvalue-pairs)
  - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
  - [Iterating over keys](#iterating-over-keys)
    - [Prefix scans](#prefix-scans)
    - [Range scans](#range-scans)
    - [ForEach()](#foreach)
  - [Nested buckets](#nested-buckets)
  - [Database backups](#database-backups)
  - [Statistics](#statistics)
  - [Read-Only Mode](#read-only-mode)
  - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
- [Resources](#resources)
- [Comparison with other databases](#comparison-with-other-databases)
  - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
  - [LevelDB, RocksDB](#leveldb-rocksdb)
  - [LMDB](#lmdb)
- [Caveats & Limitations](#caveats--limitations)
- [Reading the Source](#reading-the-source)
- [Other Projects Using Bolt](#other-projects-using-bolt)

## Getting Started

### Installing

To start using Bolt, install Go and run `go get`:

```sh
$ go get github.com/boltdb/bolt/...
```

This will retrieve the library and install the `bolt` command line utility into
your `$GOBIN` path.


### Opening a database

The top-level object in Bolt is a `DB`. It is represented as a single file on
your disk and represents a consistent snapshot of your data.

To open your database, simply use the `bolt.Open()` function:

```go
package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	// Open the my.db data file in your current directory.
	// It will be created if it doesn't exist.
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	...
}
```

Please note that Bolt obtains a file lock on the data file so multiple processes
cannot open the same database at the same time. Opening an already open Bolt
database will cause it to hang until the other process closes it. To prevent
an indefinite wait you can pass a timeout option to the `Open()` function:

```go
db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
```


### Transactions

Bolt allows only one read-write transaction at a time but allows as many
read-only transactions as you want at a time. Each transaction has a consistent
view of the data as it existed when the transaction started.

Individual transactions and all objects created from them (e.g. buckets, keys)
are not thread safe. To work with data in multiple goroutines you must start
a transaction for each one or use locking to ensure only one goroutine accesses
a transaction at a time. Creating a transaction from the `DB` is thread safe.

Read-only transactions and read-write transactions should not depend on one
another and generally shouldn't be opened simultaneously in the same goroutine.
This can cause a deadlock as the read-write transaction needs to periodically
re-map the data file but it cannot do so while a read-only transaction is open.


#### Read-write transactions

To start a read-write transaction, you can use the `DB.Update()` function:

```go
err := db.Update(func(tx *bolt.Tx) error {
	...
	return nil
})
```

Inside the closure, you have a consistent view of the database. You commit the
transaction by returning `nil` at the end. You can also rollback the transaction
at any point by returning an error. All database operations are allowed inside
a read-write transaction.

Always check the return error as it will report any disk failures that can cause
your transaction to not complete. If you return an error within your closure
it will be passed through.


#### Read-only transactions

To start a read-only transaction, you can use the `DB.View()` function:

```go
err := db.View(func(tx *bolt.Tx) error {
	...
	return nil
})
```

You also get a consistent view of the database within this closure, however,
no mutating operations are allowed within a read-only transaction. You can only
retrieve buckets, retrieve values, and copy the database within a read-only
transaction.


#### Batch read-write transactions

Each `DB.Update()` waits for disk to commit the writes. This overhead
can be minimized by combining multiple updates with the `DB.Batch()`
function:

```go
err := db.Batch(func(tx *bolt.Tx) error {
	...
	return nil
})
```

Concurrent Batch calls are opportunistically combined into larger
transactions. Batch is only useful when there are multiple goroutines
calling it.

The trade-off is that `Batch` can call the given
function multiple times, if parts of the transaction fail. The
function must be idempotent and side effects must take effect only
after a successful return from `DB.Batch()`.

For example: don't display messages from inside the function, instead
set variables in the enclosing scope:

```go
var id uint64
err := db.Batch(func(tx *bolt.Tx) error {
	// Find last key in bucket, decode as bigendian uint64, increment
	// by one, encode back to []byte, and add new key.
	...
	id = newValue
	return nil
})
if err != nil {
	return ...
}
fmt.Printf("Allocated ID %d\n", id)
```


#### Managing transactions manually

The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
function. These helper functions will start the transaction, execute a function,
and then safely close your transaction if an error is returned. This is the
recommended way to use Bolt transactions.

However, sometimes you may want to manually start and end your transactions.
You can use the `DB.Begin()` function directly but **please** be sure to close
the transaction.

```go
// Start a writable transaction.
tx, err := db.Begin(true)
if err != nil {
	return err
}
defer tx.Rollback()

// Use the transaction...
_, err = tx.CreateBucket([]byte("MyBucket"))
if err != nil {
	return err
}

// Commit the transaction and check for error.
if err := tx.Commit(); err != nil {
	return err
}
```

The first argument to `DB.Begin()` is a boolean stating if the transaction
should be writable.
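
A read-only transaction can be managed the same way; a minimal sketch (note
that read-only transactions are released with `Rollback()` rather than
committed):

```go
tx, err := db.Begin(false)
if err != nil {
	return err
}
defer tx.Rollback()

// Read from the transaction...
b := tx.Bucket([]byte("MyBucket"))
_ = b
```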

### Using buckets

Buckets are collections of key/value pairs within the database. All keys in a
bucket must be unique. You can create a bucket using the `Tx.CreateBucket()`
function:

```go
db.Update(func(tx *bolt.Tx) error {
	_, err := tx.CreateBucket([]byte("MyBucket"))
	if err != nil {
		return fmt.Errorf("create bucket: %s", err)
	}
	return nil
})
```

You can also create a bucket only if it doesn't exist by using the
`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
function for all your top-level buckets after you open your database so you can
guarantee that they exist for future transactions.
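
For instance, a minimal sketch of that pattern (the `setup` helper and bucket
name are illustrative):

```go
func setup(db *bolt.DB) error {
	return db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte("MyBucket"))
		return err
	})
}
```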

To delete a bucket, simply call the `Tx.DeleteBucket()` function.


### Using key/value pairs

To save a key/value pair to a bucket, use the `Bucket.Put()` function:

```go
db.Update(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	err := b.Put([]byte("answer"), []byte("42"))
	return err
})
```

This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
bucket. To retrieve this value, we can use the `Bucket.Get()` function:

```go
db.View(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	v := b.Get([]byte("answer"))
	fmt.Printf("The answer is: %s\n", v)
	return nil
})
```

The `Get()` function does not return an error because its operation is
guaranteed to work (unless there is some kind of system failure). If the key
exists then it will return its byte slice value. If it doesn't exist then it
will return `nil`. It's important to note that you can have a zero-length value
set to a key which is different than the key not existing.

Use the `Bucket.Delete()` function to delete a key from the bucket.

Please note that values returned from `Get()` are only valid while the
transaction is open. If you need to use a value outside of the transaction
then you must use `copy()` to copy it to another byte slice.
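
For example, a sketch of copying a value out before the transaction closes
(reusing the bucket and key from above):

```go
var answer []byte
db.View(func(tx *bolt.Tx) error {
	v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer"))
	answer = make([]byte, len(v))
	copy(answer, v) // answer remains valid after the transaction ends
	return nil
})
```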

### Autoincrementing integer for the bucket

By using the `NextSequence()` function, you can let Bolt determine a sequence
which can be used as the unique identifier for your key/value pairs. See the
example below.

```go
// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
func (s *Store) CreateUser(u *User) error {
	return s.db.Update(func(tx *bolt.Tx) error {
		// Retrieve the users bucket.
		// This should be created when the DB is first opened.
		b := tx.Bucket([]byte("users"))

		// Generate ID for the user.
		// This returns an error only if the Tx is closed or not writeable.
		// That can't happen in an Update() call so I ignore the error check.
		id, _ := b.NextSequence()
		u.ID = int(id)

		// Marshal user data into bytes.
		buf, err := json.Marshal(u)
		if err != nil {
			return err
		}

		// Persist bytes to users bucket.
		return b.Put(itob(u.ID), buf)
	})
}

// itob returns an 8-byte big endian representation of v.
func itob(v int) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(v))
	return b
}

type User struct {
	ID int
	...
}
```

### Iterating over keys

Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
iteration over these keys extremely fast. To iterate over keys we'll use a
`Cursor`:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume bucket exists and has keys
	b := tx.Bucket([]byte("MyBucket"))

	c := b.Cursor()

	for k, v := c.First(); k != nil; k, v = c.Next() {
		fmt.Printf("key=%s, value=%s\n", k, v)
	}

	return nil
})
```

The cursor allows you to move to a specific point in the list of keys and move
forward or backward through the keys one at a time.

The following functions are available on the cursor:

```
First()  Move to the first key.
Last()   Move to the last key.
Seek()   Move to a specific key.
Next()   Move to the next key.
Prev()   Move to the previous key.
```

Each of those functions has a return signature of `(key []byte, value []byte)`.
When you have iterated to the end of the cursor then `Next()` will return a
`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
before calling `Next()` or `Prev()`. If you do not seek to a position then
these functions will return a `nil` key.

During iteration, if the key is non-`nil` but the value is `nil`, that means
the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
access the sub-bucket.
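
A sketch of handling that case inside a cursor loop (assuming `b` and `c` from
the cursor example above, with nested buckets present):

```go
for k, v := c.First(); k != nil; k, v = c.Next() {
	if v == nil {
		sub := b.Bucket(k) // k names a nested bucket
		_ = sub
		continue
	}
	fmt.Printf("key=%s, value=%s\n", k, v)
}
```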

#### Prefix scans

To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume bucket exists and has keys
	c := tx.Bucket([]byte("MyBucket")).Cursor()

	prefix := []byte("1234")
	for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
		fmt.Printf("key=%s, value=%s\n", k, v)
	}

	return nil
})
```

#### Range scans

Another common use case is scanning over a range such as a time range. If you
use a sortable time encoding such as RFC3339 then you can query a specific
date range like this:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume our events bucket exists and has RFC3339 encoded time keys.
	c := tx.Bucket([]byte("Events")).Cursor()

	// Our time range spans the 90's decade.
	min := []byte("1990-01-01T00:00:00Z")
	max := []byte("2000-01-01T00:00:00Z")

	// Iterate over the 90's.
	for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
		fmt.Printf("%s: %s\n", k, v)
	}

	return nil
})
```


#### ForEach()

You can also use the function `ForEach()` if you know you'll be iterating over
all the keys in a bucket:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume bucket exists and has keys
	b := tx.Bucket([]byte("MyBucket"))

	b.ForEach(func(k, v []byte) error {
		fmt.Printf("key=%s, value=%s\n", k, v)
		return nil
	})
	return nil
})
```


### Nested buckets

You can also store a bucket in a key to create nested buckets. The API is the
same as the bucket management API on the `DB` object:

```go
func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
func (*Bucket) DeleteBucket(key []byte) error
```
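
A brief usage sketch (the bucket names are illustrative):

```go
db.Update(func(tx *bolt.Tx) error {
	parent, err := tx.CreateBucketIfNotExists([]byte("accounts"))
	if err != nil {
		return err
	}
	child, err := parent.CreateBucketIfNotExists([]byte("settings"))
	if err != nil {
		return err
	}
	return child.Put([]byte("theme"), []byte("dark"))
})
```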

### Database backups

Bolt is a single file so it's easy to back up. You can use the `Tx.WriteTo()`
function to write a consistent view of the database to a writer. If you call
this from a read-only transaction, it will perform a hot backup and not block
your other database reads and writes.

By default, it will use a regular file handle which will utilize the operating
system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
documentation for information about optimizing for larger-than-RAM datasets.

One common use case is to back up over HTTP so you can use tools like `cURL` to
do database backups:

```go
func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
	err := db.View(func(tx *bolt.Tx) error {
		w.Header().Set("Content-Type", "application/octet-stream")
		w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
		w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
		_, err := tx.WriteTo(w)
		return err
	})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
```

Then you can back up using this command:

```sh
$ curl http://localhost/backup > my.db
```

Or you can open your browser to `http://localhost/backup` and it will download
automatically.

If you want to back up to another file you can use the `Tx.CopyFile()` helper
function.
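
For instance (a minimal sketch; the destination path is illustrative):

```go
err := db.View(func(tx *bolt.Tx) error {
	// Write a consistent snapshot of the database to another file.
	return tx.CopyFile("/path/to/backup.db", 0600)
})
```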

### Statistics

The database keeps a running count of many of the internal operations it
performs so you can better understand what's going on. By grabbing a snapshot
of these stats at two points in time we can see what operations were performed
in that time range.

For example, we could start a goroutine to log stats every 10 seconds:

```go
go func() {
	// Grab the initial stats.
	prev := db.Stats()

	for {
		// Wait for 10s.
		time.Sleep(10 * time.Second)

		// Grab the current stats and diff them.
		stats := db.Stats()
		diff := stats.Sub(&prev)

		// Encode stats to JSON and print to STDERR.
		json.NewEncoder(os.Stderr).Encode(diff)

		// Save stats for the next loop.
		prev = stats
	}
}()
```

It's also useful to pipe these stats to a service such as statsd for monitoring
or to provide an HTTP endpoint that will perform a fixed-length sample.

### Read-Only Mode

Sometimes it is useful to create a shared, read-only Bolt database. To do
this, set the `Options.ReadOnly` flag when opening your database. Read-only
mode uses a shared lock to allow multiple processes to read from the database
but it will block any processes from opening the database in read-write mode.

```go
db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
if err != nil {
	log.Fatal(err)
}
```

### Mobile Use (iOS/Android)

Bolt is able to run on mobile devices by leveraging the binding feature of the
[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
contain your database logic and a reference to a `*bolt.DB` with an
initializing constructor that takes in a filepath where the database file will
be stored. Neither Android nor iOS require extra permissions or cleanup from
using this method.

```go
func NewBoltDB(filepath string) *BoltDB {
	db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}

	return &BoltDB{db}
}

type BoltDB struct {
	db *bolt.DB
	...
}

func (b *BoltDB) Path() string {
	return b.db.Path()
}

func (b *BoltDB) Close() {
	b.db.Close()
}
```

Database logic should be defined as methods on this wrapper struct.

To initialize this struct from the native language (both platforms now sync
their local storage to the cloud; these snippets disable that functionality
for the database file):

#### Android

```java
String path;
if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.LOLLIPOP) {
    path = getNoBackupFilesDir().getAbsolutePath();
} else {
    path = getFilesDir().getAbsolutePath();
}
Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path);
```

#### iOS

```objc
- (void)demo {
    NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
                                                          NSUserDomainMask,
                                                          YES) objectAtIndex:0];
    GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
    [self addSkipBackupAttributeToItemAtPath:demo.path];
    //Some DB Logic would go here
    [demo close];
}

- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
{
    NSURL* URL= [NSURL fileURLWithPath: filePathString];
    assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);

    NSError *error = nil;
    BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
                                  forKey: NSURLIsExcludedFromBackupKey error: &error];
    if (!success) {
        NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
    }
    return success;
}
```

## Resources

For more information on getting started with Bolt, check out the following articles:

* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville

## Comparison with other databases

### Postgres, MySQL, & other relational databases

Relational databases structure data into rows and are only accessible through
the use of SQL. This approach provides flexibility in how you store and query
your data but also incurs overhead in parsing and planning SQL statements. Bolt
accesses all data by a byte slice key. This makes Bolt fast to read and write
data by key but provides no built-in support for joining values together.

Most relational databases (with the exception of SQLite) are standalone servers
that run separately from your application. This gives your systems
flexibility to connect multiple application servers to a single database
server but also adds overhead in serializing and transporting data over the
network. Bolt runs as a library included in your application so all data access
has to go through your application's process. This brings data closer to your
application but limits multi-process access to the data.

### LevelDB, RocksDB

LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
they are libraries bundled into the application; however, their underlying
structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
random writes by using a write-ahead log and multi-tiered, sorted files called
SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
have trade-offs.

If you require a high random write throughput (>10,000 w/sec) or you need to use
spinning disks then LevelDB could be a good choice. If your application is
read-heavy or does a lot of range scans then Bolt could be a good choice.

One other important consideration is that LevelDB does not have transactions.
It supports batch writing of key/value pairs and it supports read snapshots
but it will not give you the ability to do a compare-and-swap operation safely.
Bolt supports fully serializable ACID transactions.

### LMDB

Bolt was originally a port of LMDB so it is architecturally similar. Both use
a B+tree, have ACID semantics with fully serializable transactions, and support
lock-free MVCC using a single writer and multiple readers.

The two projects have somewhat diverged. LMDB heavily focuses on raw performance
while Bolt has focused on simplicity and ease of use. For example, LMDB allows
several unsafe actions such as direct writes for the sake of performance. Bolt
opts to disallow actions which can leave the database in a corrupted state. The
only exception to this in Bolt is `DB.NoSync`.

There are also a few differences in API. LMDB requires a maximum mmap size when
opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
automatically. LMDB overloads the getter and setter functions with multiple
flags whereas Bolt splits these specialized cases into their own functions.

## Caveats & Limitations

It's important to pick the right tool for the job and Bolt is no exception.
Here are a few things to note when evaluating and using Bolt:

* Bolt is good for read intensive workloads. Sequential write performance is
  also fast but random writes can be slow. You can use `DB.Batch()` or add a
  write-ahead log to help mitigate this issue.

* Bolt uses a B+tree internally so there can be a lot of random page access.
  SSDs provide a significant performance boost over spinning disks.

* Try to avoid long running read transactions. Bolt uses copy-on-write so
  old pages cannot be reclaimed while an old transaction is using them.

* Byte slices returned from Bolt are only valid during a transaction. Once the
  transaction has been committed or rolled back then the memory they point to
  can be reused by a new page or can be unmapped from virtual memory and you'll
  see an `unexpected fault address` panic when accessing it. Copy any data you
  need to keep before the transaction ends (see the sketch after this list).

* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
  buckets that have random inserts will cause your database to have very poor
  page utilization.

* Use larger buckets in general. Smaller buckets cause poor page utilization
  once they become larger than the page size (typically 4KB).

* Bulk loading a lot of random writes into a new bucket can be slow as the
  page will not split until the transaction is committed. Randomly inserting
  more than 100,000 key/value pairs into a single new bucket in a single
  transaction is not advised.

* Bolt uses a memory-mapped file so the underlying operating system handles the
  caching of the data. Typically, the OS will cache as much of the file as it
  can in memory and will release memory as needed to other processes. This means
  that Bolt can show very high memory usage when working with large databases.
  However, this is expected and the OS will release memory as needed. Bolt can
  handle databases much larger than the available physical RAM, provided its
  memory-map fits in the process virtual address space. It may be problematic
  on 32-bit systems.

* The data structures in the Bolt database are memory mapped so the data file
  will be endian specific. This means that you cannot copy a Bolt file from a
  little endian machine to a big endian machine and have it work. For most
  users this is not a concern since most modern CPUs are little endian.

* Because of the way pages are laid out on disk, Bolt cannot truncate data files
  and return free pages back to the disk. Instead, Bolt maintains a free list
  of unused pages within its data file. These free pages can be reused by later
  transactions. This works well for many use cases as databases generally tend
  to grow. However, it's important to note that deleting large chunks of data
  will not allow you to reclaim that space on disk.

  For more information on page allocation, [see this comment][page-allocation].

[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
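
As a sketch of the byte-slice caveat above, copy any value you need to keep
before the transaction ends (the bucket and key names are illustrative, and
the bucket is assumed to exist):

```go
var out []byte
err := db.View(func(tx *bolt.Tx) error {
	v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer"))
	// v points into the mmap and becomes invalid once the transaction
	// ends, so copy it into memory we own.
	out = append([]byte(nil), v...)
	return nil
})
if err != nil {
	log.Fatal(err)
}
```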

## Reading the Source

Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
transactional key/value database so it can be a good starting point for people
interested in how databases work.

The best places to start are the main entry points into Bolt:

- `Open()` - Initializes the reference to the database. It's responsible for
  creating the database if it doesn't exist, obtaining an exclusive lock on the
  file, reading the meta pages, & memory-mapping the file.

- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
  value of the `writable` argument. This requires briefly obtaining the "meta"
  lock to keep track of open transactions. Only one read-write transaction can
  exist at a time so the "rwlock" is acquired during the life of a read-write
  transaction.

- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
  arguments, a cursor is used to traverse the B+tree to the page and position
  where the key & value will be written. Once the position is found, the bucket
  materializes the underlying page and the page's parent pages into memory as
  "nodes". These nodes are where mutations occur during read-write transactions.
  These changes get flushed to disk during commit.

- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
  to move to the page & position of a key/value pair. During a read-only
  transaction, the key and value data is returned as a direct reference to the
  underlying mmap file so there's no allocation overhead. For read-write
  transactions, this data may reference the mmap file or one of the in-memory
  node values.

- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
  or in-memory nodes. It can seek to a specific key, move to the first or last
  value, or it can move forward or backward. The cursor handles the movement up
  and down the B+tree transparently to the end user.

- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
  into pages to be written to disk. Writing to disk then occurs in two phases.
  First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
  new meta page with an incremented transaction ID is written and another
  `fsync()` occurs. This two-phase write ensures that partially written data
  pages are ignored in the event of a crash since the meta page pointing to them
  is never written. Partially written meta pages are invalidated because they
  are written with a checksum.

If you have additional notes that could be helpful for others, please submit
them via pull request.

## Other Projects Using Bolt

Below is a list of public, open source projects that use Bolt:

* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
* [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database.
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects backed by boltdb.
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining simple tx and key scans.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service

If you are using Bolt in a project please send a pull request to add it to the list.

@ -0,0 +1,7 @@
package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

@ -0,0 +1,7 @@
package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

@ -0,0 +1,7 @@
package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

@ -0,0 +1,9 @@
// +build arm64

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

@ -0,0 +1,10 @@
package bolt

import (
	"syscall"
)

// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
	return syscall.Fdatasync(int(db.file.Fd()))
}

@ -0,0 +1,27 @@
package bolt

import (
	"syscall"
	"unsafe"
)

const (
	msAsync      = 1 << iota // perform asynchronous writes
	msSync                   // perform synchronous writes
	msInvalidate             // invalidate cached data
)

func msync(db *DB) error {
	_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
	if errno != 0 {
		return errno
	}
	return nil
}

func fdatasync(db *DB) error {
	if db.data != nil {
		return msync(db)
	}
	return db.file.Sync()
}

@ -0,0 +1,9 @@
// +build ppc64le

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

@ -0,0 +1,9 @@
// +build s390x

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

@ -0,0 +1,89 @@
// +build !windows,!plan9,!solaris

package bolt

import (
	"fmt"
	"os"
	"syscall"
	"time"
	"unsafe"
)

// flock acquires an advisory lock on a file descriptor.
func flock(f *os.File, exclusive bool, timeout time.Duration) error {
	var t time.Time
	for {
		// If we're beyond our timeout then return an error.
		// This can only occur after we've attempted a flock once.
		if t.IsZero() {
			t = time.Now()
		} else if timeout > 0 && time.Since(t) > timeout {
			return ErrTimeout
		}
		flag := syscall.LOCK_SH
		if exclusive {
			flag = syscall.LOCK_EX
		}

		// Otherwise attempt to obtain the lock without blocking.
		err := syscall.Flock(int(f.Fd()), flag|syscall.LOCK_NB)
		if err == nil {
			return nil
		} else if err != syscall.EWOULDBLOCK {
			return err
		}

		// Wait for a bit and try again.
		time.Sleep(50 * time.Millisecond)
	}
}

// funlock releases an advisory lock on a file descriptor.
func funlock(f *os.File) error {
	return syscall.Flock(int(f.Fd()), syscall.LOCK_UN)
}

// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
	// Map the data file to memory.
	b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
	if err != nil {
		return err
	}

	// Advise the kernel that the mmap is accessed randomly.
	if err := madvise(b, syscall.MADV_RANDOM); err != nil {
		return fmt.Errorf("madvise: %s", err)
	}

	// Save the original byte slice and convert to a byte array pointer.
	db.dataref = b
	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
	db.datasz = sz
	return nil
}

// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
	// Ignore the unmap if we have no mapped data.
	if db.dataref == nil {
		return nil
	}

	// Unmap using the original byte slice.
	err := syscall.Munmap(db.dataref)
	db.dataref = nil
	db.data = nil
	db.datasz = 0
	return err
}

// NOTE: This function is copied from stdlib because it is not available on darwin.
func madvise(b []byte, advice int) (err error) {
	_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
	if e1 != 0 {
		err = e1
	}
	return
}

@ -0,0 +1,90 @@
package bolt

import (
	"fmt"
	"os"
	"syscall"
	"time"
	"unsafe"

	"golang.org/x/sys/unix"
)

// flock acquires an advisory lock on a file descriptor.
func flock(f *os.File, exclusive bool, timeout time.Duration) error {
	var t time.Time
	for {
		// If we're beyond our timeout then return an error.
		// This can only occur after we've attempted a flock once.
		if t.IsZero() {
			t = time.Now()
		} else if timeout > 0 && time.Since(t) > timeout {
			return ErrTimeout
		}
		var lock syscall.Flock_t
		lock.Start = 0
		lock.Len = 0
		lock.Pid = 0
		lock.Whence = 0
		if exclusive {
			lock.Type = syscall.F_WRLCK
		} else {
			lock.Type = syscall.F_RDLCK
		}
		err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock)
		if err == nil {
			return nil
		} else if err != syscall.EAGAIN {
			return err
		}

		// Wait for a bit and try again.
		time.Sleep(50 * time.Millisecond)
	}
}

// funlock releases an advisory lock on a file descriptor.
func funlock(f *os.File) error {
	var lock syscall.Flock_t
	lock.Start = 0
	lock.Len = 0
	lock.Type = syscall.F_UNLCK
	lock.Whence = 0
	return syscall.FcntlFlock(uintptr(f.Fd()), syscall.F_SETLK, &lock)
}

// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
	// Map the data file to memory.
	b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
	if err != nil {
		return err
	}

	// Advise the kernel that the mmap is accessed randomly.
	if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
		return fmt.Errorf("madvise: %s", err)
	}

	// Save the original byte slice and convert to a byte array pointer.
	db.dataref = b
	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
	db.datasz = sz
	return nil
}

// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
	// Ignore the unmap if we have no mapped data.
	if db.dataref == nil {
		return nil
	}

	// Unmap using the original byte slice.
	err := unix.Munmap(db.dataref)
	db.dataref = nil
	db.data = nil
	db.datasz = 0
	return err
}

@ -0,0 +1,130 @@
package bolt

import (
	"fmt"
	"os"
	"syscall"
	"time"
	"unsafe"
)

// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
var (
	modkernel32      = syscall.NewLazyDLL("kernel32.dll")
	procLockFileEx   = modkernel32.NewProc("LockFileEx")
	procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
)

const (
	// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
	flagLockExclusive       = 2
	flagLockFailImmediately = 1

	// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
	errLockViolation syscall.Errno = 0x21
)

func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
	r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
	if r == 0 {
		return err
	}
	return nil
}

func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
	r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
	if r == 0 {
		return err
	}
	return nil
}

// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
	return db.file.Sync()
}

// flock acquires an advisory lock on a file descriptor.
func flock(f *os.File, exclusive bool, timeout time.Duration) error {
	var t time.Time
	for {
		// If we're beyond our timeout then return an error.
		// This can only occur after we've attempted a flock once.
		if t.IsZero() {
			t = time.Now()
		} else if timeout > 0 && time.Since(t) > timeout {
			return ErrTimeout
		}

		var flag uint32 = flagLockFailImmediately
		if exclusive {
			flag |= flagLockExclusive
		}

		err := lockFileEx(syscall.Handle(f.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
		if err == nil {
			return nil
		} else if err != errLockViolation {
			return err
		}

		// Wait for a bit and try again.
		time.Sleep(50 * time.Millisecond)
	}
}

// funlock releases an advisory lock on a file descriptor.
func funlock(f *os.File) error {
	return unlockFileEx(syscall.Handle(f.Fd()), 0, 1, 0, &syscall.Overlapped{})
}

// mmap memory maps a DB's data file.
// Based on: https://github.com/edsrzf/mmap-go
func mmap(db *DB, sz int) error {
	if !db.readOnly {
		// Truncate the database to the size of the mmap.
		if err := db.file.Truncate(int64(sz)); err != nil {
			return fmt.Errorf("truncate: %s", err)
		}
	}

	// Open a file mapping handle. CreateFileMapping takes the maximum size
	// split into its high and low 32-bit halves.
	sizehi := uint32(sz >> 32)
	sizelo := uint32(sz) & 0xffffffff
	h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil)
	if h == 0 {
		return os.NewSyscallError("CreateFileMapping", errno)
	}

	// Create the memory map.
	addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
	if addr == 0 {
		return os.NewSyscallError("MapViewOfFile", errno)
	}

	// Close mapping handle.
	if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
		return os.NewSyscallError("CloseHandle", err)
	}

	// Convert to a byte array.
	db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
	db.datasz = sz

	return nil
}

// munmap unmaps a pointer from a file.
// Based on: https://github.com/edsrzf/mmap-go
func munmap(db *DB) error {
	if db.data == nil {
		return nil
	}

	addr := (uintptr)(unsafe.Pointer(&db.data[0]))
	if err := syscall.UnmapViewOfFile(addr); err != nil {
		return os.NewSyscallError("UnmapViewOfFile", err)
	}
	return nil
}

@ -0,0 +1,8 @@
// +build !windows,!plan9,!linux,!openbsd

package bolt

// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
	return db.file.Sync()
}

@ -0,0 +1,748 @@
package bolt

import (
	"bytes"
	"fmt"
	"unsafe"
)

const (
	// MaxKeySize is the maximum length of a key, in bytes.
	MaxKeySize = 32768

	// MaxValueSize is the maximum length of a value, in bytes.
	MaxValueSize = (1 << 31) - 2
)

const (
	maxUint = ^uint(0)
	minUint = 0
	maxInt  = int(^uint(0) >> 1)
	minInt  = -maxInt - 1
)

const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))

const (
	minFillPercent = 0.1
	maxFillPercent = 1.0
)

// DefaultFillPercent is the percentage that split pages are filled.
// This value can be changed by setting Bucket.FillPercent.
const DefaultFillPercent = 0.5

// Bucket represents a collection of key/value pairs inside the database.
type Bucket struct {
	*bucket
	tx       *Tx                // the associated transaction
	buckets  map[string]*Bucket // subbucket cache
	page     *page              // inline page reference
	rootNode *node              // materialized node for the root page.
	nodes    map[pgid]*node     // node cache

	// Sets the threshold for filling nodes when they split. By default,
	// the bucket will fill to 50% but it can be useful to increase this
	// amount if you know that your write workloads are mostly append-only.
	//
	// This is non-persisted across transactions so it must be set in every Tx.
	FillPercent float64
}

// bucket represents the on-file representation of a bucket.
// This is stored as the "value" of a bucket key. If the bucket is small enough,
// then its root page can be stored inline in the "value", after the bucket
// header. In the case of inline buckets, the "root" will be 0.
type bucket struct {
	root     pgid   // page id of the bucket's root-level page
	sequence uint64 // monotonically incrementing, used by NextSequence()
}

// newBucket returns a new bucket associated with a transaction.
func newBucket(tx *Tx) Bucket {
	var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
	if tx.writable {
		b.buckets = make(map[string]*Bucket)
		b.nodes = make(map[pgid]*node)
	}
	return b
}

// Tx returns the tx of the bucket.
func (b *Bucket) Tx() *Tx {
	return b.tx
}

// Root returns the root of the bucket.
func (b *Bucket) Root() pgid {
	return b.root
}

// Writable returns whether the bucket is writable.
func (b *Bucket) Writable() bool {
	return b.tx.writable
}

// Cursor creates a cursor associated with the bucket.
// The cursor is only valid as long as the transaction is open.
// Do not use a cursor after the transaction is closed.
func (b *Bucket) Cursor() *Cursor {
	// Update transaction statistics.
	b.tx.stats.CursorCount++

	// Allocate and return a cursor.
	return &Cursor{
		bucket: b,
		stack:  make([]elemRef, 0),
	}
}

// Bucket retrieves a nested bucket by name.
// Returns nil if the bucket does not exist.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) Bucket(name []byte) *Bucket {
	if b.buckets != nil {
		if child := b.buckets[string(name)]; child != nil {
			return child
		}
	}

	// Move cursor to key.
	c := b.Cursor()
	k, v, flags := c.seek(name)

	// Return nil if the key doesn't exist or it is not a bucket.
	if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
		return nil
	}

	// Otherwise create a bucket and cache it.
	var child = b.openBucket(v)
	if b.buckets != nil {
		b.buckets[string(name)] = child
	}

	return child
}

// Helper method that re-interprets a sub-bucket value
// from a parent into a Bucket
func (b *Bucket) openBucket(value []byte) *Bucket {
	var child = newBucket(b.tx)

	// If this is a writable transaction then we need to copy the bucket entry.
	// Read-only transactions can point directly at the mmap entry.
	if b.tx.writable {
		child.bucket = &bucket{}
		*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
	} else {
		child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
	}

	// Save a reference to the inline page if the bucket is inline.
	if child.root == 0 {
		child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
	}

	return &child
}

// CreateBucket creates a new bucket at the given key and returns the new bucket.
// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
	if b.tx.db == nil {
		return nil, ErrTxClosed
	} else if !b.tx.writable {
		return nil, ErrTxNotWritable
	} else if len(key) == 0 {
		return nil, ErrBucketNameRequired
	}

	// Move cursor to correct position.
	c := b.Cursor()
	k, _, flags := c.seek(key)

	// Return an error if there is an existing key.
	if bytes.Equal(key, k) {
		if (flags & bucketLeafFlag) != 0 {
			return nil, ErrBucketExists
		} else {
			return nil, ErrIncompatibleValue
		}
	}

	// Create empty, inline bucket.
	var bucket = Bucket{
		bucket:      &bucket{},
		rootNode:    &node{isLeaf: true},
		FillPercent: DefaultFillPercent,
	}
	var value = bucket.write()

	// Insert into node.
	key = cloneBytes(key)
	c.node().put(key, key, value, 0, bucketLeafFlag)

	// Since subbuckets are not allowed on inline buckets, we need to
	// dereference the inline page, if it exists. This will cause the bucket
	// to be treated as a regular, non-inline bucket for the rest of the tx.
	b.page = nil

	return b.Bucket(key), nil
}

// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
// Returns an error if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
	child, err := b.CreateBucket(key)
	if err == ErrBucketExists {
		return b.Bucket(key), nil
	} else if err != nil {
		return nil, err
	}
	return child, nil
}

// DeleteBucket deletes a bucket at the given key.
// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
func (b *Bucket) DeleteBucket(key []byte) error {
	if b.tx.db == nil {
		return ErrTxClosed
	} else if !b.Writable() {
		return ErrTxNotWritable
	}

	// Move cursor to correct position.
	c := b.Cursor()
	k, _, flags := c.seek(key)

	// Return an error if bucket doesn't exist or is not a bucket.
	if !bytes.Equal(key, k) {
		return ErrBucketNotFound
	} else if (flags & bucketLeafFlag) == 0 {
		return ErrIncompatibleValue
	}

	// Recursively delete all child buckets.
	child := b.Bucket(key)
	err := child.ForEach(func(k, v []byte) error {
		if v == nil {
			if err := child.DeleteBucket(k); err != nil {
				return fmt.Errorf("delete bucket: %s", err)
			}
		}
		return nil
	})
	if err != nil {
		return err
	}

	// Remove cached copy.
	delete(b.buckets, string(key))

	// Release all bucket pages to freelist.
	child.nodes = nil
	child.rootNode = nil
	child.free()

	// Delete the node if we have a matching key.
	c.node().del(key)

	return nil
}

// Get retrieves the value for a key in the bucket.
// Returns a nil value if the key does not exist or if the key is a nested bucket.
// The returned value is only valid for the life of the transaction.
func (b *Bucket) Get(key []byte) []byte {
	k, v, flags := b.Cursor().seek(key)

	// Return nil if this is a bucket.
	if (flags & bucketLeafFlag) != 0 {
		return nil
	}

	// If our target node isn't the same key as what's passed in then return nil.
	if !bytes.Equal(key, k) {
		return nil
	}
	return v
}

// Put sets the value for a key in the bucket.
// If the key exists then its previous value will be overwritten.
// Supplied value must remain valid for the life of the transaction.
// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
func (b *Bucket) Put(key []byte, value []byte) error {
	if b.tx.db == nil {
		return ErrTxClosed
	} else if !b.Writable() {
		return ErrTxNotWritable
	} else if len(key) == 0 {
		return ErrKeyRequired
	} else if len(key) > MaxKeySize {
		return ErrKeyTooLarge
	} else if int64(len(value)) > MaxValueSize {
		return ErrValueTooLarge
	}

	// Move cursor to correct position.
	c := b.Cursor()
	k, _, flags := c.seek(key)

	// Return an error if there is an existing key with a bucket value.
	if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
		return ErrIncompatibleValue
	}

	// Insert into node.
	key = cloneBytes(key)
	c.node().put(key, key, value, 0, 0)

	return nil
}

// Delete removes a key from the bucket.
// If the key does not exist then nothing is done and a nil error is returned.
// Returns an error if the bucket was created from a read-only transaction.
func (b *Bucket) Delete(key []byte) error {
	if b.tx.db == nil {
		return ErrTxClosed
	} else if !b.Writable() {
		return ErrTxNotWritable
	}

	// Move cursor to correct position.
	c := b.Cursor()
	_, _, flags := c.seek(key)

	// Return an error if there is already existing bucket value.
	if (flags & bucketLeafFlag) != 0 {
		return ErrIncompatibleValue
	}

	// Delete the node if we have a matching key.
	c.node().del(key)

	return nil
}

// NextSequence returns an autoincrementing integer for the bucket.
func (b *Bucket) NextSequence() (uint64, error) {
	if b.tx.db == nil {
		return 0, ErrTxClosed
	} else if !b.Writable() {
		return 0, ErrTxNotWritable
	}

	// Materialize the root node if it hasn't been already so that the
	// bucket will be saved during commit.
	if b.rootNode == nil {
		_ = b.node(b.root, nil)
	}

	// Increment and return the sequence.
	b.bucket.sequence++
	return b.bucket.sequence, nil
}

// ForEach executes a function for each key/value pair in a bucket.
// If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller. The provided function must not modify
// the bucket; this will result in undefined behavior.
func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
	if b.tx.db == nil {
		return ErrTxClosed
	}
	c := b.Cursor()
	for k, v := c.First(); k != nil; k, v = c.Next() {
		if err := fn(k, v); err != nil {
			return err
		}
	}
	return nil
}

// Stats returns stats on a bucket.
func (b *Bucket) Stats() BucketStats {
	var s, subStats BucketStats
	pageSize := b.tx.db.pageSize
	s.BucketN += 1
	if b.root == 0 {
		s.InlineBucketN += 1
	}
	b.forEachPage(func(p *page, depth int) {
		if (p.flags & leafPageFlag) != 0 {
			s.KeyN += int(p.count)

			// used totals the used bytes for the page
			used := pageHeaderSize

			if p.count != 0 {
				// If page has any elements, add all element headers.
				used += leafPageElementSize * int(p.count-1)

				// Add all element key, value sizes.
				// The computation takes advantage of the fact that the position
				// of the last element's key/value equals the total of the sizes
				// of all previous elements' keys and values.
				// It also includes the last element's header.
				lastElement := p.leafPageElement(p.count - 1)
				used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
			}

			if b.root == 0 {
				// For inlined bucket just update the inline stats
				s.InlineBucketInuse += used
			} else {
				// For non-inlined bucket update all the leaf stats
				s.LeafPageN++
				s.LeafInuse += used
				s.LeafOverflowN += int(p.overflow)

				// Collect stats from sub-buckets.
				// Do that by iterating over all element headers
				// looking for the ones with the bucketLeafFlag.
				for i := uint16(0); i < p.count; i++ {
					e := p.leafPageElement(i)
					if (e.flags & bucketLeafFlag) != 0 {
						// For any bucket element, open the element value
						// and recursively call Stats on the contained bucket.
						subStats.Add(b.openBucket(e.value()).Stats())
					}
				}
			}
		} else if (p.flags & branchPageFlag) != 0 {
			s.BranchPageN++
			lastElement := p.branchPageElement(p.count - 1)

			// used totals the used bytes for the page
			// Add header and all element headers.
			used := pageHeaderSize + (branchPageElementSize * int(p.count-1))

			// Add size of all keys and values.
			// Again, use the fact that the last element's position equals
			// the total of the key, value sizes of all previous elements.
			used += int(lastElement.pos + lastElement.ksize)
			s.BranchInuse += used
			s.BranchOverflowN += int(p.overflow)
		}

		// Keep track of maximum page depth.
		if depth+1 > s.Depth {
			s.Depth = (depth + 1)
		}
	})

	// Alloc stats can be computed from page counts and pageSize.
	s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
	s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize

	// Add the max depth of sub-buckets to get total nested depth.
	s.Depth += subStats.Depth
	// Add the stats for all sub-buckets
	s.Add(subStats)
	return s
}

// forEachPage iterates over every page in a bucket, including inline pages.
func (b *Bucket) forEachPage(fn func(*page, int)) {
	// If we have an inline page then just use that.
	if b.page != nil {
		fn(b.page, 0)
		return
	}

	// Otherwise traverse the page hierarchy.
	b.tx.forEachPage(b.root, 0, fn)
}

// forEachPageNode iterates over every page (or node) in a bucket.
// This also includes inline pages.
func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
	// If we have an inline page or root node then just use that.
	if b.page != nil {
		fn(b.page, nil, 0)
		return
	}
	b._forEachPageNode(b.root, 0, fn)
}

func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
	var p, n = b.pageNode(pgid)

	// Execute function.
	fn(p, n, depth)

	// Recursively loop over children.
	if p != nil {
		if (p.flags & branchPageFlag) != 0 {
			for i := 0; i < int(p.count); i++ {
				elem := p.branchPageElement(uint16(i))
				b._forEachPageNode(elem.pgid, depth+1, fn)
			}
		}
	} else {
		if !n.isLeaf {
			for _, inode := range n.inodes {
				b._forEachPageNode(inode.pgid, depth+1, fn)
			}
		}
	}
}

// spill writes all the nodes for this bucket to dirty pages.
func (b *Bucket) spill() error {
	// Spill all child buckets first.
	for name, child := range b.buckets {
		// If the child bucket is small enough and it has no child buckets then
		// write it inline into the parent bucket's page. Otherwise spill it
		// like a normal bucket and make the parent value a pointer to the page.
		var value []byte
		if child.inlineable() {
			child.free()
			value = child.write()
		} else {
			if err := child.spill(); err != nil {
				return err
			}

			// Update the child bucket header in this bucket.
			value = make([]byte, unsafe.Sizeof(bucket{}))
			var bucket = (*bucket)(unsafe.Pointer(&value[0]))
			*bucket = *child.bucket
		}

		// Skip writing the bucket if there are no materialized nodes.
		if child.rootNode == nil {
			continue
		}

		// Update parent node.
		var c = b.Cursor()
		k, _, flags := c.seek([]byte(name))
		if !bytes.Equal([]byte(name), k) {
			panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
		}
		if flags&bucketLeafFlag == 0 {
			panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
		}
		c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
	}

	// Ignore if there's not a materialized root node.
	if b.rootNode == nil {
		return nil
	}

	// Spill nodes.
	if err := b.rootNode.spill(); err != nil {
		return err
	}
	b.rootNode = b.rootNode.root()

	// Update the root node for this bucket.
	if b.rootNode.pgid >= b.tx.meta.pgid {
		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
	}
	b.root = b.rootNode.pgid

	return nil
}

// inlineable returns true if a bucket is small enough to be written inline
// and if it contains no subbuckets. Otherwise returns false.
func (b *Bucket) inlineable() bool {
	var n = b.rootNode

	// Bucket must only contain a single leaf node.
	if n == nil || !n.isLeaf {
		return false
	}

	// Bucket is not inlineable if it contains subbuckets or if it goes beyond
	// our threshold for inline bucket size.
	var size = pageHeaderSize
	for _, inode := range n.inodes {
		size += leafPageElementSize + len(inode.key) + len(inode.value)

		if inode.flags&bucketLeafFlag != 0 {
			return false
		} else if size > b.maxInlineBucketSize() {
			return false
		}
	}

	return true
}

// Returns the maximum total size of a bucket to make it a candidate for inlining.
func (b *Bucket) maxInlineBucketSize() int {
	return b.tx.db.pageSize / 4
}

// write allocates and writes a bucket to a byte slice.
func (b *Bucket) write() []byte {
	// Allocate the appropriate size.
	var n = b.rootNode
	var value = make([]byte, bucketHeaderSize+n.size())

	// Write a bucket header.
	var bucket = (*bucket)(unsafe.Pointer(&value[0]))
	*bucket = *b.bucket

	// Convert byte slice to a fake page and write the root node.
	var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
	n.write(p)

	return value
}

// rebalance attempts to balance all nodes.
func (b *Bucket) rebalance() {
	for _, n := range b.nodes {
		n.rebalance()
	}
	for _, child := range b.buckets {
		child.rebalance()
	}
}

// node creates a node from a page and associates it with a given parent.
func (b *Bucket) node(pgid pgid, parent *node) *node {
	_assert(b.nodes != nil, "nodes map expected")

	// Retrieve node if it's already been created.
	if n := b.nodes[pgid]; n != nil {
		return n
	}

	// Otherwise create a node and cache it.
	n := &node{bucket: b, parent: parent}
	if parent == nil {
		b.rootNode = n
	} else {
		parent.children = append(parent.children, n)
	}

	// Use the inline page if this is an inline bucket.
	var p = b.page
	if p == nil {
		p = b.tx.page(pgid)
	}

	// Read the page into the node and cache it.
	n.read(p)
	b.nodes[pgid] = n

	// Update statistics.
	b.tx.stats.NodeCount++

	return n
}

// free recursively frees all pages in the bucket.
func (b *Bucket) free() {
	if b.root == 0 {
		return
	}

	var tx = b.tx
	b.forEachPageNode(func(p *page, n *node, _ int) {
		if p != nil {
			tx.db.freelist.free(tx.meta.txid, p)
		} else {
			n.free()
		}
	})
	b.root = 0
}

// dereference removes all references to the old mmap.
func (b *Bucket) dereference() {
	if b.rootNode != nil {
		b.rootNode.root().dereference()
	}

	for _, child := range b.buckets {
		child.dereference()
	}
}

// pageNode returns the in-memory node, if it exists.
// Otherwise returns the underlying page.
func (b *Bucket) pageNode(id pgid) (*page, *node) {
	// Inline buckets have a fake page embedded in their value so treat them
	// differently. We'll return the rootNode (if available) or the fake page.
	if b.root == 0 {
		if id != 0 {
			panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
		}
		if b.rootNode != nil {
			return nil, b.rootNode
		}
		return b.page, nil
	}

	// Check the node cache for non-inline buckets.
	if b.nodes != nil {
		if n := b.nodes[id]; n != nil {
			return nil, n
		}
	}

	// Finally lookup the page from the transaction if no node is materialized.
	return b.tx.page(id), nil
}

// BucketStats records statistics about resources used by a bucket.
type BucketStats struct {
	// Page count statistics.
	BranchPageN     int // number of logical branch pages
	BranchOverflowN int // number of physical branch overflow pages
	LeafPageN       int // number of logical leaf pages
	LeafOverflowN   int // number of physical leaf overflow pages

	// Tree statistics.
	KeyN  int // number of key/value pairs
	Depth int // number of levels in B+tree

	// Page size utilization.
	BranchAlloc int // bytes allocated for physical branch pages
	BranchInuse int // bytes actually used for branch data
	LeafAlloc   int // bytes allocated for physical leaf pages
	LeafInuse   int // bytes actually used for leaf data

	// Bucket statistics
	BucketN           int // total number of buckets including the top bucket
	InlineBucketN     int // total number of inlined buckets
	InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
}

func (s *BucketStats) Add(other BucketStats) {
	s.BranchPageN += other.BranchPageN
	s.BranchOverflowN += other.BranchOverflowN
	s.LeafPageN += other.LeafPageN
	s.LeafOverflowN += other.LeafOverflowN
	s.KeyN += other.KeyN
	if s.Depth < other.Depth {
		s.Depth = other.Depth
	}
	s.BranchAlloc += other.BranchAlloc
	s.BranchInuse += other.BranchInuse
	s.LeafAlloc += other.LeafAlloc
	s.LeafInuse += other.LeafInuse

	s.BucketN += other.BucketN
	s.InlineBucketN += other.InlineBucketN
	s.InlineBucketInuse += other.InlineBucketInuse
}

// cloneBytes returns a copy of a given slice.
func cloneBytes(v []byte) []byte {
	var clone = make([]byte, len(v))
	copy(clone, v)
	return clone
}

File diff suppressed because it is too large
@ -0,0 +1,400 @@
|
|||
package bolt

import (
	"bytes"
	"fmt"
	"sort"
)

// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
// Cursors see nested buckets with value == nil.
// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
//
// Keys and values returned from the cursor are only valid for the life of the transaction.
//
// Changing data while traversing with a cursor may cause it to be invalidated
// and return unexpected keys and/or values. You must reposition your cursor
// after mutating data.
type Cursor struct {
	bucket *Bucket
	stack  []elemRef
}

// Bucket returns the bucket that this cursor was created from.
func (c *Cursor) Bucket() *Bucket {
	return c.bucket
}

// First moves the cursor to the first item in the bucket and returns its key and value.
// If the bucket is empty then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) First() (key []byte, value []byte) {
	_assert(c.bucket.tx.db != nil, "tx closed")
	c.stack = c.stack[:0]
	p, n := c.bucket.pageNode(c.bucket.root)
	c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
	c.first()

	// If we land on an empty page then move to the next value.
	// https://github.com/boltdb/bolt/issues/450
	if c.stack[len(c.stack)-1].count() == 0 {
		c.next()
	}

	k, v, flags := c.keyValue()
	if (flags & uint32(bucketLeafFlag)) != 0 {
		return k, nil
	}
	return k, v
}

// Last moves the cursor to the last item in the bucket and returns its key and value.
// If the bucket is empty then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Last() (key []byte, value []byte) {
	_assert(c.bucket.tx.db != nil, "tx closed")
	c.stack = c.stack[:0]
	p, n := c.bucket.pageNode(c.bucket.root)
	ref := elemRef{page: p, node: n}
	ref.index = ref.count() - 1
	c.stack = append(c.stack, ref)
	c.last()
	k, v, flags := c.keyValue()
	if (flags & uint32(bucketLeafFlag)) != 0 {
		return k, nil
	}
	return k, v
}

// Next moves the cursor to the next item in the bucket and returns its key and value.
// If the cursor is at the end of the bucket then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Next() (key []byte, value []byte) {
	_assert(c.bucket.tx.db != nil, "tx closed")
	k, v, flags := c.next()
	if (flags & uint32(bucketLeafFlag)) != 0 {
		return k, nil
	}
	return k, v
}

// Prev moves the cursor to the previous item in the bucket and returns its key and value.
// If the cursor is at the beginning of the bucket then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Prev() (key []byte, value []byte) {
	_assert(c.bucket.tx.db != nil, "tx closed")

	// Attempt to move back one element until we're successful.
	// Move up the stack as we hit the beginning of each page in our stack.
	for i := len(c.stack) - 1; i >= 0; i-- {
		elem := &c.stack[i]
		if elem.index > 0 {
			elem.index--
			break
		}
		c.stack = c.stack[:i]
	}

	// If we've hit the end then return nil.
	if len(c.stack) == 0 {
		return nil, nil
	}

	// Move down the stack to find the last element of the last leaf under this branch.
	c.last()
	k, v, flags := c.keyValue()
	if (flags & uint32(bucketLeafFlag)) != 0 {
		return k, nil
	}
	return k, v
}

// Seek moves the cursor to a given key and returns it.
// If the key does not exist then the next key is used. If no keys
// follow, a nil key is returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
	k, v, flags := c.seek(seek)

	// If we ended up after the last element of a page then move to the next one.
	if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
		k, v, flags = c.next()
	}

	if k == nil {
		return nil, nil
	} else if (flags & uint32(bucketLeafFlag)) != 0 {
		return k, nil
	}
	return k, v
}
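
Reviewer's note: a minimal sketch of how these cursor methods are typically driven; both the full ordered scan and the prefix scan below are the standard Bolt patterns (the bucket name "events" and prefix are illustrative only):

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("cursor.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("events"))
		if b == nil {
			return nil // nothing to scan
		}
		c := b.Cursor()

		// Full ordered scan: First/Next until the nil key.
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("%s = %s\n", k, v)
		}

		// Prefix scan: Seek to the prefix, stop when keys no longer match.
		prefix := []byte("2015-")
		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
			fmt.Printf("%s = %s\n", k, v)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}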
// Delete removes the current key/value under the cursor from the bucket.
// Delete fails if current key/value is a bucket or if the transaction is not writable.
func (c *Cursor) Delete() error {
	if c.bucket.tx.db == nil {
		return ErrTxClosed
	} else if !c.bucket.Writable() {
		return ErrTxNotWritable
	}

	key, _, flags := c.keyValue()
	// Return an error if current value is a bucket.
	if (flags & bucketLeafFlag) != 0 {
		return ErrIncompatibleValue
	}
	c.node().del(key)

	return nil
}

// seek moves the cursor to a given key and returns it.
// If the key does not exist then the next key is used.
func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
	_assert(c.bucket.tx.db != nil, "tx closed")

	// Start from root page/node and traverse to correct page.
	c.stack = c.stack[:0]
	c.search(seek, c.bucket.root)
	ref := &c.stack[len(c.stack)-1]

	// If the cursor is pointing to the end of page/node then return nil.
	if ref.index >= ref.count() {
		return nil, nil, 0
	}

	// If this is a bucket then return a nil value.
	return c.keyValue()
}

// first moves the cursor to the first leaf element under the last page in the stack.
func (c *Cursor) first() {
	for {
		// Exit when we hit a leaf page.
		var ref = &c.stack[len(c.stack)-1]
		if ref.isLeaf() {
			break
		}

		// Keep adding pages pointing to the first element to the stack.
		var pgid pgid
		if ref.node != nil {
			pgid = ref.node.inodes[ref.index].pgid
		} else {
			pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
		}
		p, n := c.bucket.pageNode(pgid)
		c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
	}
}

// last moves the cursor to the last leaf element under the last page in the stack.
func (c *Cursor) last() {
	for {
		// Exit when we hit a leaf page.
		ref := &c.stack[len(c.stack)-1]
		if ref.isLeaf() {
			break
		}

		// Keep adding pages pointing to the last element in the stack.
		var pgid pgid
		if ref.node != nil {
			pgid = ref.node.inodes[ref.index].pgid
		} else {
			pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
		}
		p, n := c.bucket.pageNode(pgid)

		var nextRef = elemRef{page: p, node: n}
		nextRef.index = nextRef.count() - 1
		c.stack = append(c.stack, nextRef)
	}
}

// next moves to the next leaf element and returns the key and value.
// If the cursor is at the last leaf element then it stays there and returns nil.
func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
	for {
		// Attempt to move over one element until we're successful.
		// Move up the stack as we hit the end of each page in our stack.
		var i int
		for i = len(c.stack) - 1; i >= 0; i-- {
			elem := &c.stack[i]
			if elem.index < elem.count()-1 {
				elem.index++
				break
			}
		}

		// If we've hit the root page then stop and return. This will leave the
		// cursor on the last element of the last page.
		if i == -1 {
			return nil, nil, 0
		}

		// Otherwise start from where we left off in the stack and find the
		// first element of the first leaf page.
		c.stack = c.stack[:i+1]
		c.first()

		// If this is an empty page then restart and move back up the stack.
		// https://github.com/boltdb/bolt/issues/450
		if c.stack[len(c.stack)-1].count() == 0 {
			continue
		}

		return c.keyValue()
	}
}

// search recursively performs a binary search against a given page/node until it finds a given key.
func (c *Cursor) search(key []byte, pgid pgid) {
	p, n := c.bucket.pageNode(pgid)
	if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
		panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
	}
	e := elemRef{page: p, node: n}
	c.stack = append(c.stack, e)

	// If we're on a leaf page/node then find the specific node.
	if e.isLeaf() {
		c.nsearch(key)
		return
	}

	if n != nil {
		c.searchNode(key, n)
		return
	}
	c.searchPage(key, p)
}

func (c *Cursor) searchNode(key []byte, n *node) {
	var exact bool
	index := sort.Search(len(n.inodes), func(i int) bool {
		// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
		// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
		ret := bytes.Compare(n.inodes[i].key, key)
		if ret == 0 {
			exact = true
		}
		return ret != -1
	})
	if !exact && index > 0 {
		index--
	}
	c.stack[len(c.stack)-1].index = index

	// Recursively search to the next page.
	c.search(key, n.inodes[index].pgid)
}

func (c *Cursor) searchPage(key []byte, p *page) {
	// Binary search for the correct range.
	inodes := p.branchPageElements()

	var exact bool
	index := sort.Search(int(p.count), func(i int) bool {
		// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
		// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
		ret := bytes.Compare(inodes[i].key(), key)
		if ret == 0 {
			exact = true
		}
		return ret != -1
	})
	if !exact && index > 0 {
		index--
	}
	c.stack[len(c.stack)-1].index = index

	// Recursively search to the next page.
	c.search(key, inodes[index].pgid)
}

// nsearch searches the leaf node on the top of the stack for a key.
func (c *Cursor) nsearch(key []byte) {
	e := &c.stack[len(c.stack)-1]
	p, n := e.page, e.node

	// If we have a node then search its inodes.
	if n != nil {
		index := sort.Search(len(n.inodes), func(i int) bool {
			return bytes.Compare(n.inodes[i].key, key) != -1
		})
		e.index = index
		return
	}

	// If we have a page then search its leaf elements.
	inodes := p.leafPageElements()
	index := sort.Search(int(p.count), func(i int) bool {
		return bytes.Compare(inodes[i].key(), key) != -1
	})
	e.index = index
}

// keyValue returns the key and value of the current leaf element.
func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
	ref := &c.stack[len(c.stack)-1]
	if ref.count() == 0 || ref.index >= ref.count() {
		return nil, nil, 0
	}

	// Retrieve value from node.
	if ref.node != nil {
		inode := &ref.node.inodes[ref.index]
		return inode.key, inode.value, inode.flags
	}

	// Or retrieve value from page.
	elem := ref.page.leafPageElement(uint16(ref.index))
	return elem.key(), elem.value(), elem.flags
}

// node returns the node that the cursor is currently positioned on.
func (c *Cursor) node() *node {
	_assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")

	// If the top of the stack is a leaf node then just return it.
	if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
		return ref.node
	}

	// Start from root and traverse down the hierarchy.
	var n = c.stack[0].node
	if n == nil {
		n = c.bucket.node(c.stack[0].page.id, nil)
	}
	for _, ref := range c.stack[:len(c.stack)-1] {
		_assert(!n.isLeaf, "expected branch node")
		n = n.childAt(int(ref.index))
	}
	_assert(n.isLeaf, "expected leaf node")
	return n
}

// elemRef represents a reference to an element on a given page/node.
type elemRef struct {
	page  *page
	node  *node
	index int
}

// isLeaf returns whether the ref is pointing at a leaf page/node.
func (r *elemRef) isLeaf() bool {
	if r.node != nil {
		return r.node.isLeaf
	}
	return (r.page.flags & leafPageFlag) != 0
}

// count returns the number of inodes or page elements.
func (r *elemRef) count() int {
	if r.node != nil {
		return len(r.node.inodes)
	}
	return int(r.page.count)
}
@@ -0,0 +1,986 @@
package bolt

import (
	"errors"
	"fmt"
	"hash/fnv"
	"log"
	"os"
	"runtime"
	"runtime/debug"
	"strings"
	"sync"
	"time"
	"unsafe"
)

// The largest step that can be taken when remapping the mmap.
const maxMmapStep = 1 << 30 // 1GB

// The data file format version.
const version = 2

// Represents a marker value to indicate that a file is a Bolt DB.
const magic uint32 = 0xED0CDAED

// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
// syncing changes to a file. This is required as some operating systems,
// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
// must be synchronized using the msync(2) syscall.
const IgnoreNoSync = runtime.GOOS == "openbsd"

// Default values if not set in a DB instance.
const (
	DefaultMaxBatchSize  int = 1000
	DefaultMaxBatchDelay     = 10 * time.Millisecond
	DefaultAllocSize         = 16 * 1024 * 1024
)

// DB represents a collection of buckets persisted to a file on disk.
// All data access is performed through transactions which can be obtained through the DB.
// All the functions on DB will return an ErrDatabaseNotOpen if accessed before Open() is called.
type DB struct {
	// When enabled, the database will perform a Check() after every commit.
	// A panic is issued if the database is in an inconsistent state. This
	// flag has a large performance impact so it should only be used for
	// debugging purposes.
	StrictMode bool

	// Setting the NoSync flag will cause the database to skip fsync()
	// calls after each commit. This can be useful when bulk loading data
	// into a database and you can restart the bulk load in the event of
	// a system failure or database corruption. Do not set this flag for
	// normal use.
	//
	// If the package global IgnoreNoSync constant is true, this value is
	// ignored. See the comment on that constant for more details.
	//
	// THIS IS UNSAFE. PLEASE USE WITH CAUTION.
	NoSync bool

	// When true, skips the truncate call when growing the database.
	// Setting this to true is only safe on non-ext3/ext4 systems.
	// Skipping truncation avoids preallocation of hard drive space and
	// bypasses a truncate() and fsync() syscall on remapping.
	//
	// https://github.com/boltdb/bolt/issues/284
	NoGrowSync bool

	// If you want to read the entire database fast, you can set MmapFlags to
	// syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead.
	MmapFlags int

	// MaxBatchSize is the maximum size of a batch. Default value is
	// copied from DefaultMaxBatchSize in Open.
	//
	// If <=0, disables batching.
	//
	// Do not change concurrently with calls to Batch.
	MaxBatchSize int

	// MaxBatchDelay is the maximum delay before a batch starts.
	// Default value is copied from DefaultMaxBatchDelay in Open.
	//
	// If <=0, effectively disables batching.
	//
	// Do not change concurrently with calls to Batch.
	MaxBatchDelay time.Duration

	// AllocSize is the amount of space allocated when the database
	// needs to create new pages. This is done to amortize the cost
	// of truncate() and fsync() when growing the data file.
	AllocSize int

	path     string
	file     *os.File
	dataref  []byte // mmap'ed readonly, write throws SEGV
	data     *[maxMapSize]byte
	datasz   int
	filesz   int // current on disk file size
	meta0    *meta
	meta1    *meta
	pageSize int
	opened   bool
	rwtx     *Tx
	txs      []*Tx
	freelist *freelist
	stats    Stats

	batchMu sync.Mutex
	batch   *batch

	rwlock   sync.Mutex   // Allows only one writer at a time.
	metalock sync.Mutex   // Protects meta page access.
	mmaplock sync.RWMutex // Protects mmap access during remapping.
	statlock sync.RWMutex // Protects stats access.

	ops struct {
		writeAt func(b []byte, off int64) (n int, err error)
	}

	// Read only mode.
	// When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately.
	readOnly bool
}

// Path returns the path to the currently open database file.
func (db *DB) Path() string {
	return db.path
}

// GoString returns the Go string representation of the database.
func (db *DB) GoString() string {
	return fmt.Sprintf("bolt.DB{path:%q}", db.path)
}

// String returns the string representation of the database.
func (db *DB) String() string {
	return fmt.Sprintf("DB<%q>", db.path)
}

// Open creates and opens a database at the given path.
// If the file does not exist then it will be created automatically.
// Passing in nil options will cause Bolt to open the database with the default options.
func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
	var db = &DB{opened: true}

	// Set default options if no options are provided.
	if options == nil {
		options = DefaultOptions
	}
	db.NoGrowSync = options.NoGrowSync
	db.MmapFlags = options.MmapFlags

	// Set default values for later DB operations.
	db.MaxBatchSize = DefaultMaxBatchSize
	db.MaxBatchDelay = DefaultMaxBatchDelay
	db.AllocSize = DefaultAllocSize

	flag := os.O_RDWR
	if options.ReadOnly {
		flag = os.O_RDONLY
		db.readOnly = true
	}

	// Open data file and separate sync handler for metadata writes.
	db.path = path
	var err error
	if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil {
		_ = db.close()
		return nil, err
	}

	// Lock file so that other processes using Bolt in read-write mode cannot
	// use the database at the same time. This would cause corruption since
	// the two processes would write meta pages and free pages separately.
	// The database file is locked exclusively (only one process can grab the lock)
	// if !options.ReadOnly.
	// The database file is locked using the shared lock (more than one process may
	// hold a lock at the same time) otherwise (options.ReadOnly is set).
	if err := flock(db.file, !db.readOnly, options.Timeout); err != nil {
		_ = db.close()
		return nil, err
	}

	// Default values for test hooks
	db.ops.writeAt = db.file.WriteAt

	// Initialize the database if it doesn't exist.
	if info, err := db.file.Stat(); err != nil {
		return nil, err
	} else if info.Size() == 0 {
		// Initialize new files with meta pages.
		if err := db.init(); err != nil {
			return nil, err
		}
	} else {
		// Read the first meta page to determine the page size.
		var buf [0x1000]byte
		if _, err := db.file.ReadAt(buf[:], 0); err == nil {
			m := db.pageInBuffer(buf[:], 0).meta()
			if err := m.validate(); err != nil {
				return nil, err
			}
			db.pageSize = int(m.pageSize)
		}
	}

	// Memory map the data file.
	if err := db.mmap(options.InitialMmapSize); err != nil {
		_ = db.close()
		return nil, err
	}

	// Read in the freelist.
	db.freelist = newFreelist()
	db.freelist.read(db.page(db.meta().freelist))

	// Mark the database as opened and return.
	return db, nil
}
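
Reviewer's note: a minimal sketch of calling Open with explicit options (the path and timeout are illustrative; a Timeout of zero would block indefinitely waiting on the file lock):

package main

import (
	"log"
	"time"

	"github.com/boltdb/bolt"
)

func main() {
	// Wait at most one second for the file lock instead of forever.
	db, err := bolt.Open("app.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	log.Println("opened", db.Path())
}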
// mmap opens the underlying memory-mapped file and initializes the meta references.
// minsz is the minimum size that the new mmap can be.
func (db *DB) mmap(minsz int) error {
	db.mmaplock.Lock()
	defer db.mmaplock.Unlock()

	info, err := db.file.Stat()
	if err != nil {
		return fmt.Errorf("mmap stat error: %s", err)
	} else if int(info.Size()) < db.pageSize*2 {
		return fmt.Errorf("file size too small")
	}

	// Ensure the size is at least the minimum size.
	var size = int(info.Size())
	if size < minsz {
		size = minsz
	}
	size, err = db.mmapSize(size)
	if err != nil {
		return err
	}

	// Dereference all mmap references before unmapping.
	if db.rwtx != nil {
		db.rwtx.root.dereference()
	}

	// Unmap existing data before continuing.
	if err := db.munmap(); err != nil {
		return err
	}

	// Memory-map the data file as a byte slice.
	if err := mmap(db, size); err != nil {
		return err
	}

	// Save references to the meta pages.
	db.meta0 = db.page(0).meta()
	db.meta1 = db.page(1).meta()

	// Validate the meta pages.
	if err := db.meta0.validate(); err != nil {
		return err
	}
	if err := db.meta1.validate(); err != nil {
		return err
	}

	return nil
}

// munmap unmaps the data file from memory.
func (db *DB) munmap() error {
	if err := munmap(db); err != nil {
		return fmt.Errorf("unmap error: %s", err)
	}
	return nil
}

// mmapSize determines the appropriate size for the mmap given the current size
// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
// Returns an error if the new mmap size is greater than the max allowed.
func (db *DB) mmapSize(size int) (int, error) {
	// Double the size from 32KB until 1GB.
	for i := uint(15); i <= 30; i++ {
		if size <= 1<<i {
			return 1 << i, nil
		}
	}

	// Verify the requested size is not above the maximum allowed.
	if size > maxMapSize {
		return 0, fmt.Errorf("mmap too large")
	}

	// If larger than 1GB then grow by 1GB at a time.
	sz := int64(size)
	if remainder := sz % int64(maxMmapStep); remainder > 0 {
		sz += int64(maxMmapStep) - remainder
	}

	// Ensure that the mmap size is a multiple of the page size.
	// This should always be true since we're incrementing in 1GB steps.
	pageSize := int64(db.pageSize)
	if (sz % pageSize) != 0 {
		sz = ((sz / pageSize) + 1) * pageSize
	}

	// If we've exceeded the max size then only grow up to the max size.
	if sz > maxMapSize {
		sz = maxMapSize
	}

	return int(sz), nil
}
// init creates a new database file and initializes its meta pages.
func (db *DB) init() error {
	// Set the page size to the OS page size.
	db.pageSize = os.Getpagesize()

	// Create two meta pages on a buffer.
	buf := make([]byte, db.pageSize*4)
	for i := 0; i < 2; i++ {
		p := db.pageInBuffer(buf[:], pgid(i))
		p.id = pgid(i)
		p.flags = metaPageFlag

		// Initialize the meta page.
		m := p.meta()
		m.magic = magic
		m.version = version
		m.pageSize = uint32(db.pageSize)
		m.freelist = 2
		m.root = bucket{root: 3}
		m.pgid = 4
		m.txid = txid(i)
	}

	// Write an empty freelist at page 2.
	p := db.pageInBuffer(buf[:], pgid(2))
	p.id = pgid(2)
	p.flags = freelistPageFlag
	p.count = 0

	// Write an empty leaf page at page 3.
	p = db.pageInBuffer(buf[:], pgid(3))
	p.id = pgid(3)
	p.flags = leafPageFlag
	p.count = 0

	// Write the buffer to our data file.
	if _, err := db.ops.writeAt(buf, 0); err != nil {
		return err
	}
	if err := fdatasync(db); err != nil {
		return err
	}

	return nil
}

// Close releases all database resources.
// All transactions must be closed before closing the database.
func (db *DB) Close() error {
	db.rwlock.Lock()
	defer db.rwlock.Unlock()

	db.metalock.Lock()
	defer db.metalock.Unlock()

	db.mmaplock.RLock()
	defer db.mmaplock.RUnlock()

	return db.close()
}

func (db *DB) close() error {
	db.opened = false

	db.freelist = nil
	db.path = ""

	// Clear ops.
	db.ops.writeAt = nil

	// Close the mmap.
	if err := db.munmap(); err != nil {
		return err
	}

	// Close file handles.
	if db.file != nil {
		// No need to unlock read-only file.
		if !db.readOnly {
			// Unlock the file.
			if err := funlock(db.file); err != nil {
				log.Printf("bolt.Close(): funlock error: %s", err)
			}
		}

		// Close the file descriptor.
		if err := db.file.Close(); err != nil {
			return fmt.Errorf("db file close: %s", err)
		}
		db.file = nil
	}

	return nil
}

// Begin starts a new transaction.
// Multiple read-only transactions can be used concurrently but only one
// write transaction can be used at a time. Starting multiple write transactions
// will cause the calls to block and be serialized until the current write
// transaction finishes.
//
// Transactions should not be dependent on one another. Opening a read
// transaction and a write transaction in the same goroutine can cause the
// writer to deadlock because the database periodically needs to re-mmap itself
// as it grows and it cannot do that while a read transaction is open.
//
// If a long running read transaction (for example, a snapshot transaction) is
// needed, you might want to set Options.InitialMmapSize to a large enough value
// to avoid potential blocking of the write transaction.
//
// IMPORTANT: You must close read-only transactions after you are finished or
// else the database will not reclaim old pages.
func (db *DB) Begin(writable bool) (*Tx, error) {
	if writable {
		return db.beginRWTx()
	}
	return db.beginTx()
}

func (db *DB) beginTx() (*Tx, error) {
	// Lock the meta pages while we initialize the transaction. We obtain
	// the meta lock before the mmap lock because that's the order that the
	// write transaction will obtain them.
	db.metalock.Lock()

	// Obtain a read-only lock on the mmap. When the mmap is remapped it will
	// obtain a write lock so all transactions must finish before it can be
	// remapped.
	db.mmaplock.RLock()

	// Exit if the database is not open yet.
	if !db.opened {
		db.mmaplock.RUnlock()
		db.metalock.Unlock()
		return nil, ErrDatabaseNotOpen
	}

	// Create a transaction associated with the database.
	t := &Tx{}
	t.init(db)

	// Keep track of transaction until it closes.
	db.txs = append(db.txs, t)
	n := len(db.txs)

	// Unlock the meta pages.
	db.metalock.Unlock()

	// Update the transaction stats.
	db.statlock.Lock()
	db.stats.TxN++
	db.stats.OpenTxN = n
	db.statlock.Unlock()

	return t, nil
}

func (db *DB) beginRWTx() (*Tx, error) {
	// If the database was opened with Options.ReadOnly, return an error.
	if db.readOnly {
		return nil, ErrDatabaseReadOnly
	}

	// Obtain writer lock. This is released by the transaction when it closes.
	// This enforces only one writer transaction at a time.
	db.rwlock.Lock()

	// Once we have the writer lock then we can lock the meta pages so that
	// we can set up the transaction.
	db.metalock.Lock()
	defer db.metalock.Unlock()

	// Exit if the database is not open yet.
	if !db.opened {
		db.rwlock.Unlock()
		return nil, ErrDatabaseNotOpen
	}

	// Create a transaction associated with the database.
	t := &Tx{writable: true}
	t.init(db)
	db.rwtx = t

	// Free any pages associated with closed read-only transactions.
	var minid txid = 0xFFFFFFFFFFFFFFFF
	for _, t := range db.txs {
		if t.meta.txid < minid {
			minid = t.meta.txid
		}
	}
	if minid > 0 {
		db.freelist.release(minid - 1)
	}

	return t, nil
}

// removeTx removes a transaction from the database.
func (db *DB) removeTx(tx *Tx) {
	// Release the read lock on the mmap.
	db.mmaplock.RUnlock()

	// Use the meta lock to restrict access to the DB object.
	db.metalock.Lock()

	// Remove the transaction.
	for i, t := range db.txs {
		if t == tx {
			db.txs = append(db.txs[:i], db.txs[i+1:]...)
			break
		}
	}
	n := len(db.txs)

	// Unlock the meta pages.
	db.metalock.Unlock()

	// Merge statistics.
	db.statlock.Lock()
	db.stats.OpenTxN = n
	db.stats.TxStats.add(&tx.stats)
	db.statlock.Unlock()
}

// Update executes a function within the context of a read-write managed transaction.
// If no error is returned from the function then the transaction is committed.
// If an error is returned then the entire transaction is rolled back.
// Any error that is returned from the function or returned from the commit is
// returned from the Update() method.
//
// Attempting to manually commit or rollback within the function will cause a panic.
func (db *DB) Update(fn func(*Tx) error) error {
	t, err := db.Begin(true)
	if err != nil {
		return err
	}

	// Make sure the transaction rolls back in the event of a panic.
	defer func() {
		if t.db != nil {
			t.rollback()
		}
	}()

	// Mark as a managed tx so that the inner function cannot manually commit.
	t.managed = true

	// If an error is returned from the function then rollback and return error.
	err = fn(t)
	t.managed = false
	if err != nil {
		_ = t.Rollback()
		return err
	}

	return t.Commit()
}

// View executes a function within the context of a managed read-only transaction.
// Any error that is returned from the function is returned from the View() method.
//
// Attempting to manually rollback within the function will cause a panic.
func (db *DB) View(fn func(*Tx) error) error {
	t, err := db.Begin(false)
	if err != nil {
		return err
	}

	// Make sure the transaction rolls back in the event of a panic.
	defer func() {
		if t.db != nil {
			t.rollback()
		}
	}()

	// Mark as a managed tx so that the inner function cannot manually rollback.
	t.managed = true

	// If an error is returned from the function then pass it through.
	err = fn(t)
	t.managed = false
	if err != nil {
		_ = t.Rollback()
		return err
	}

	if err := t.Rollback(); err != nil {
		return err
	}

	return nil
}
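
Reviewer's note: a minimal sketch of the managed-transaction pattern these two methods implement (bucket and key names are illustrative only):

package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("tx.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Update: read-write; the transaction commits only if fn returns nil.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("config"))
		if err != nil {
			return err // rolls back
		}
		return b.Put([]byte("greeting"), []byte("hello"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// View: read-only; always rolled back, never committed.
	err = db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket([]byte("config")).Get([]byte("greeting"))
		fmt.Printf("greeting=%s\n", v)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}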
// Batch calls fn as part of a batch. It behaves similarly to Update,
// except:
//
// 1. concurrent Batch calls can be combined into a single Bolt
// transaction.
//
// 2. the function passed to Batch may be called multiple times,
// regardless of whether it returns an error or not.
//
// This means that Batch function side effects must be idempotent and
// take permanent effect only after a successful return is seen in
// the caller.
//
// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
// and DB.MaxBatchDelay, respectively.
//
// Batch is only useful when there are multiple goroutines calling it.
func (db *DB) Batch(fn func(*Tx) error) error {
	errCh := make(chan error, 1)

	db.batchMu.Lock()
	if db.batch == nil || len(db.batch.calls) >= db.MaxBatchSize {
		// There is no existing batch, or the existing batch is full; start a new one.
		db.batch = &batch{
			db: db,
		}
		db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
	}
	db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
	if len(db.batch.calls) >= db.MaxBatchSize {
		// wake up batch, it's ready to run
		go db.batch.trigger()
	}
	db.batchMu.Unlock()

	err := <-errCh
	if err == trySolo {
		err = db.Update(fn)
	}
	return err
}

type call struct {
	fn  func(*Tx) error
	err chan<- error
}

type batch struct {
	db    *DB
	timer *time.Timer
	start sync.Once
	calls []call
}

// trigger runs the batch if it hasn't already been run.
func (b *batch) trigger() {
	b.start.Do(b.run)
}

// run performs the transactions in the batch and communicates results
// back to DB.Batch.
func (b *batch) run() {
	b.db.batchMu.Lock()
	b.timer.Stop()
	// Make sure no new work is added to this batch, but don't break
	// other batches.
	if b.db.batch == b {
		b.db.batch = nil
	}
	b.db.batchMu.Unlock()

retry:
	for len(b.calls) > 0 {
		var failIdx = -1
		err := b.db.Update(func(tx *Tx) error {
			for i, c := range b.calls {
				if err := safelyCall(c.fn, tx); err != nil {
					failIdx = i
					return err
				}
			}
			return nil
		})

		if failIdx >= 0 {
			// Take the failing transaction out of the batch. It's
			// safe to shorten b.calls here because db.batch no longer
			// points to us, so no new calls can be appended.
			c := b.calls[failIdx]
			b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
			// Tell the submitter to re-run it solo and continue with the rest of the batch.
			c.err <- trySolo
			continue retry
		}

		// Pass success, or Bolt internal errors, to all callers.
		for _, c := range b.calls {
			if c.err != nil {
				c.err <- err
			}
		}
		break retry
	}
}

// trySolo is a special sentinel error value used for signaling that a
// transaction function should be re-run. It should never be seen by
// callers.
var trySolo = errors.New("batch function returned an error and should be re-run solo")

type panicked struct {
	reason interface{}
}

func (p panicked) Error() string {
	if err, ok := p.reason.(error); ok {
		return err.Error()
	}
	return fmt.Sprintf("panic: %v", p.reason)
}

func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
	defer func() {
		if p := recover(); p != nil {
			err = panicked{p}
		}
	}()
	return fn(tx)
}
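
Reviewer's note: a minimal sketch of the intended calling pattern, with many goroutines funneling small writes through Batch (the bucket name and key scheme are illustrative only; note that fn may be retried, so it must be idempotent):

package main

import (
	"fmt"
	"log"
	"sync"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("batch.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte("counters"))
		return err
	})
	if err != nil {
		log.Fatal(err)
	}

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			key := []byte(fmt.Sprintf("worker-%d", i))
			// Put with a fixed key/value is idempotent, so retries are safe.
			if err := db.Batch(func(tx *bolt.Tx) error {
				return tx.Bucket([]byte("counters")).Put(key, []byte("done"))
			}); err != nil {
				log.Println(err)
			}
		}(i)
	}
	wg.Wait()
}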
// Sync executes fdatasync() against the database file handle.
//
// This is not necessary under normal operation; however, if you use NoSync
// then it allows you to force the database file to sync against the disk.
func (db *DB) Sync() error { return fdatasync(db) }

// Stats retrieves ongoing performance stats for the database.
// This is only updated when a transaction closes.
func (db *DB) Stats() Stats {
	db.statlock.RLock()
	defer db.statlock.RUnlock()
	return db.stats
}

// Info is for internal access to the raw data bytes from the C cursor; use
// carefully, or not at all.
func (db *DB) Info() *Info {
	return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize}
}

// page retrieves a page reference from the mmap based on the current page size.
func (db *DB) page(id pgid) *page {
	pos := id * pgid(db.pageSize)
	return (*page)(unsafe.Pointer(&db.data[pos]))
}

// pageInBuffer retrieves a page reference from a given byte array based on the current page size.
func (db *DB) pageInBuffer(b []byte, id pgid) *page {
	return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)]))
}

// meta retrieves the current meta page reference.
func (db *DB) meta() *meta {
	if db.meta0.txid > db.meta1.txid {
		return db.meta0
	}
	return db.meta1
}

// allocate returns a contiguous block of memory starting at a given page.
func (db *DB) allocate(count int) (*page, error) {
	// Allocate a temporary buffer for the page.
	buf := make([]byte, count*db.pageSize)
	p := (*page)(unsafe.Pointer(&buf[0]))
	p.overflow = uint32(count - 1)

	// Use pages from the freelist if they are available.
	if p.id = db.freelist.allocate(count); p.id != 0 {
		return p, nil
	}

	// Resize mmap() if we're at the end.
	p.id = db.rwtx.meta.pgid
	var minsz = int((p.id+pgid(count))+1) * db.pageSize
	if minsz >= db.datasz {
		if err := db.mmap(minsz); err != nil {
			return nil, fmt.Errorf("mmap allocate error: %s", err)
		}
	}

	// Move the page id high water mark.
	db.rwtx.meta.pgid += pgid(count)

	return p, nil
}

// grow grows the size of the database to the given sz.
func (db *DB) grow(sz int) error {
	// Ignore if the new size is less than the available file size.
	if sz <= db.filesz {
		return nil
	}

	// If the data is smaller than the alloc size then only allocate what's needed.
	// Once it goes over the allocation size then allocate in chunks.
	if db.datasz < db.AllocSize {
		sz = db.datasz
	} else {
		sz += db.AllocSize
	}

	// Truncate and fsync to ensure file size metadata is flushed.
	// https://github.com/boltdb/bolt/issues/284
	if !db.NoGrowSync && !db.readOnly {
		if err := db.file.Truncate(int64(sz)); err != nil {
			return fmt.Errorf("file resize error: %s", err)
		}
		if err := db.file.Sync(); err != nil {
			return fmt.Errorf("file sync error: %s", err)
		}
	}

	db.filesz = sz
	return nil
}

// IsReadOnly returns whether the database was opened with Options.ReadOnly.
func (db *DB) IsReadOnly() bool {
	return db.readOnly
}

// Options represents the options that can be set when opening a database.
type Options struct {
	// Timeout is the amount of time to wait to obtain a file lock.
	// When set to zero it will wait indefinitely. This option is only
	// available on Darwin and Linux.
	Timeout time.Duration

	// NoGrowSync sets the DB.NoGrowSync flag before memory mapping the file.
	NoGrowSync bool

	// ReadOnly opens the database in read-only mode. Uses flock(..., LOCK_SH|LOCK_NB)
	// to grab a shared lock (UNIX).
	ReadOnly bool

	// MmapFlags sets the DB.MmapFlags flag before memory mapping the file.
	MmapFlags int

	// InitialMmapSize is the initial mmap size of the database
	// in bytes. Read transactions won't block the write transaction
	// if InitialMmapSize is large enough to hold the database mmap
	// size. (See DB.Begin for more information.)
	//
	// If <=0, the initial map size is 0.
	// If InitialMmapSize is smaller than the previous database size,
	// it has no effect.
	InitialMmapSize int
}

// DefaultOptions represent the options used if nil options are passed into Open().
// No timeout is used which will cause Bolt to wait indefinitely for a lock.
var DefaultOptions = &Options{
	Timeout:    0,
	NoGrowSync: false,
}

// Stats represents statistics about the database.
type Stats struct {
	// Freelist stats
	FreePageN     int // total number of free pages on the freelist
	PendingPageN  int // total number of pending pages on the freelist
	FreeAlloc     int // total bytes allocated in free pages
	FreelistInuse int // total bytes used by the freelist

	// Transaction stats
	TxN     int // total number of started read transactions
	OpenTxN int // number of currently open read transactions

	TxStats TxStats // global, ongoing stats.
}

// Sub calculates and returns the difference between two sets of database stats.
// This is useful when obtaining stats at two different points in time and
// you need the performance counters that occurred within that time span.
func (s *Stats) Sub(other *Stats) Stats {
	if other == nil {
		return *s
	}
	var diff Stats
	diff.FreePageN = s.FreePageN
	diff.PendingPageN = s.PendingPageN
	diff.FreeAlloc = s.FreeAlloc
	diff.FreelistInuse = s.FreelistInuse
	diff.TxN = other.TxN - s.TxN
	diff.TxStats = s.TxStats.Sub(&other.TxStats)
	return diff
}

func (s *Stats) add(other *Stats) {
	s.TxStats.add(&other.TxStats)
}

type Info struct {
	Data     uintptr
	PageSize int
}

type meta struct {
	magic    uint32
	version  uint32
	pageSize uint32
	flags    uint32
	root     bucket
	freelist pgid
	pgid     pgid
	txid     txid
	checksum uint64
}

// validate checks the marker bytes and version of the meta page to ensure it matches this binary.
func (m *meta) validate() error {
	if m.checksum != 0 && m.checksum != m.sum64() {
		return ErrChecksum
	} else if m.magic != magic {
		return ErrInvalid
	} else if m.version != version {
		return ErrVersionMismatch
	}
	return nil
}

// copy copies one meta object to another.
func (m *meta) copy(dest *meta) {
	*dest = *m
}

// write writes the meta onto a page.
func (m *meta) write(p *page) {
	if m.root.root >= m.pgid {
		panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
	} else if m.freelist >= m.pgid {
		panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
	}

	// Page id is either going to be 0 or 1 which we can determine by the transaction ID.
	p.id = pgid(m.txid % 2)
	p.flags |= metaPageFlag

	// Calculate the checksum.
	m.checksum = m.sum64()

	m.copy(p.meta())
}

// sum64 generates the checksum for the meta.
func (m *meta) sum64() uint64 {
	var h = fnv.New64a()
	_, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
	return h.Sum64()
}

// _assert will panic with a given formatted message if the given condition is false.
func _assert(condition bool, msg string, v ...interface{}) {
	if !condition {
		panic(fmt.Sprintf("assertion failed: "+msg, v...))
	}
}

func warn(v ...interface{})              { fmt.Fprintln(os.Stderr, v...) }
func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }

func printstack() {
	stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n")
	fmt.Fprintln(os.Stderr, stack)
}
@@ -0,0 +1,44 @@
/*
Package bolt implements a low-level key/value store in pure Go. It supports
fully serializable transactions, ACID semantics, and lock-free MVCC with
multiple readers and a single writer. Bolt can be used for projects that
want a simple data store without the need to add large dependencies such as
Postgres or MySQL.

Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
optimized for fast read access and does not require recovery in the event of a
system crash. Transactions which have not finished committing will simply be
rolled back in the event of a crash.

The design of Bolt is based on Howard Chu's LMDB database project.

Bolt currently works on Windows, Mac OS X, and Linux.


Basics

There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
a collection of buckets and is represented by a single file on disk. A bucket is
a collection of unique keys that are associated with values.

Transactions provide either read-only or read-write access to the database.
Read-only transactions can retrieve key/value pairs and can use Cursors to
iterate over the dataset sequentially. Read-write transactions can create and
delete buckets and can insert and remove keys. Only one read-write transaction
is allowed at a time.


Caveats

The database uses a read-only, memory-mapped data file to ensure that
applications cannot corrupt the database; however, this means that keys and
values returned from Bolt cannot be changed. Writing to a read-only byte slice
will cause Go to panic.

Keys and values retrieved from the database are only valid for the life of
the transaction. When used outside the transaction, these byte slices can
point to different data or can point to invalid memory which will cause a panic.
*/
package bolt
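
Reviewer's note: beyond the managed Update/View helpers, transactions can be driven manually via DB.Begin; a minimal sketch (file and bucket names are illustrative only):

package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("manual.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Begin a writable transaction manually.
	tx, err := db.Begin(true)
	if err != nil {
		log.Fatal(err)
	}
	// The deferred Rollback is harmless after a successful Commit;
	// it just returns ErrTxClosed, which is ignored here.
	defer tx.Rollback()

	b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
	if err != nil {
		log.Fatal(err)
	}
	if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
		log.Fatal(err)
	}

	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
}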
@@ -0,0 +1,70 @@
package bolt

import "errors"

// These errors can be returned when opening or calling methods on a DB.
var (
	// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
	// is opened or after it is closed.
	ErrDatabaseNotOpen = errors.New("database not open")

	// ErrDatabaseOpen is returned when opening a database that is
	// already open.
	ErrDatabaseOpen = errors.New("database already open")

	// ErrInvalid is returned when a data file is not a Bolt-formatted database.
	ErrInvalid = errors.New("invalid database")

	// ErrVersionMismatch is returned when the data file was created with a
	// different version of Bolt.
	ErrVersionMismatch = errors.New("version mismatch")

	// ErrChecksum is returned when either meta page checksum does not match.
	ErrChecksum = errors.New("checksum error")

	// ErrTimeout is returned when a database cannot obtain an exclusive lock
	// on the data file after the timeout passed to Open().
	ErrTimeout = errors.New("timeout")
)

// These errors can occur when beginning or committing a Tx.
var (
	// ErrTxNotWritable is returned when performing a write operation on a
	// read-only transaction.
	ErrTxNotWritable = errors.New("tx not writable")

	// ErrTxClosed is returned when committing or rolling back a transaction
	// that has already been committed or rolled back.
	ErrTxClosed = errors.New("tx closed")

	// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
	// read-only database.
	ErrDatabaseReadOnly = errors.New("database is in read-only mode")
)

// These errors can occur when putting or deleting a value or a bucket.
var (
	// ErrBucketNotFound is returned when trying to access a bucket that has
	// not been created yet.
	ErrBucketNotFound = errors.New("bucket not found")

	// ErrBucketExists is returned when creating a bucket that already exists.
	ErrBucketExists = errors.New("bucket already exists")

	// ErrBucketNameRequired is returned when creating a bucket with a blank name.
	ErrBucketNameRequired = errors.New("bucket name required")

	// ErrKeyRequired is returned when inserting a zero-length key.
	ErrKeyRequired = errors.New("key required")

	// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
	ErrKeyTooLarge = errors.New("key too large")

	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
	ErrValueTooLarge = errors.New("value too large")

	// ErrIncompatibleValue is returned when trying to create or delete a bucket
	// on an existing non-bucket key or when trying to create or delete a
	// non-bucket key on an existing bucket key.
	ErrIncompatibleValue = errors.New("incompatible value")
)
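
Reviewer's note: these are plain sentinel values, so callers compare them with ==; a minimal sketch (the bucket name is illustrative only):

package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("errs.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucket([]byte("jobs"))
		if err == bolt.ErrBucketExists {
			// Sentinel comparison: the bucket is already there, which is fine.
			return nil
		}
		return err
	})
	if err != nil {
		log.Fatal(err)
	}
}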
@@ -0,0 +1,242 @@
package bolt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// freelist represents a list of all pages that are available for allocation.
|
||||
// It also tracks pages that have been freed but are still in use by open transactions.
|
||||
type freelist struct {
|
||||
ids []pgid // all free and available free page ids.
|
||||
pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
|
||||
cache map[pgid]bool // fast lookup of all free and pending page ids.
|
||||
}
|
||||
|
||||
// newFreelist returns an empty, initialized freelist.
|
||||
func newFreelist() *freelist {
|
||||
return &freelist{
|
||||
pending: make(map[txid][]pgid),
|
||||
cache: make(map[pgid]bool),
|
||||
}
|
||||
}
|
||||
|
||||
// size returns the size of the page after serialization.
|
||||
func (f *freelist) size() int {
|
||||
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count())
|
||||
}
|
||||
|
||||
// count returns count of pages on the freelist
|
||||
func (f *freelist) count() int {
|
||||
return f.free_count() + f.pending_count()
|
||||
}
|
||||
|
||||
// free_count returns count of free pages
|
||||
func (f *freelist) free_count() int {
|
||||
return len(f.ids)
|
||||
}
|
||||
|
||||
// pending_count returns count of pending pages
|
||||
func (f *freelist) pending_count() int {
|
||||
var count int
|
||||
for _, list := range f.pending {
|
||||
count += len(list)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// all returns a list of all free ids and all pending ids in one sorted list.
|
||||
func (f *freelist) all() []pgid {
|
||||
m := make(pgids, 0)
|
||||
|
||||
for _, list := range f.pending {
|
||||
m = append(m, list...)
|
||||
}
|
||||
|
||||
sort.Sort(m)
|
||||
return pgids(f.ids).merge(m)
|
||||
}
|
||||
|
||||
// allocate returns the starting page id of a contiguous list of pages of a given size.
|
||||
// If a contiguous block cannot be found then 0 is returned.
|
||||
func (f *freelist) allocate(n int) pgid {
|
||||
if len(f.ids) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
var initial, previd pgid
|
||||
for i, id := range f.ids {
|
||||
if id <= 1 {
|
||||
panic(fmt.Sprintf("invalid page allocation: %d", id))
|
||||
}
|
||||
|
||||
// Reset initial page if this is not contiguous.
|
||||
if previd == 0 || id-previd != 1 {
|
||||
initial = id
|
||||
}
|
||||
|
||||
// If we found a contiguous block then remove it and return it.
|
||||
if (id-initial)+1 == pgid(n) {
|
||||
// If we're allocating off the beginning then take the fast path
|
||||
// and just adjust the existing slice. This will use extra memory
|
||||
// temporarily but the append() in free() will realloc the slice
|
||||
// as is necessary.
|
||||
if (i + 1) == n {
|
||||
f.ids = f.ids[i+1:]
|
||||
} else {
|
||||
copy(f.ids[i-n+1:], f.ids[i+1:])
|
||||
f.ids = f.ids[:len(f.ids)-n]
|
||||
}
|
||||
|
||||
// Remove from the free cache.
|
||||
for i := pgid(0); i < pgid(n); i++ {
|
||||
delete(f.cache, initial+i)
|
||||
}
|
||||
|
||||
return initial
|
||||
}
|
||||
|
||||
previd = id
|
||||
}
|
||||
return 0
|
||||
}

// free releases a page and its overflow for a given transaction id.
// If the page is already free then a panic will occur.
func (f *freelist) free(txid txid, p *page) {
	if p.id <= 1 {
		panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
	}

	// Free page and all its overflow pages.
	var ids = f.pending[txid]
	for id := p.id; id <= p.id+pgid(p.overflow); id++ {
		// Verify that page is not already free.
		if f.cache[id] {
			panic(fmt.Sprintf("page %d already freed", id))
		}

		// Add to the freelist and cache.
		ids = append(ids, id)
		f.cache[id] = true
	}
	f.pending[txid] = ids
}

// release moves all page ids for a transaction id (or older) to the freelist.
func (f *freelist) release(txid txid) {
	m := make(pgids, 0)
	for tid, ids := range f.pending {
		if tid <= txid {
			// Move transaction's pending pages to the available freelist.
			// Don't remove from the cache since the page is still free.
			m = append(m, ids...)
			delete(f.pending, tid)
		}
	}
	sort.Sort(m)
	f.ids = pgids(f.ids).merge(m)
}

// rollback removes the pages from a given pending tx.
func (f *freelist) rollback(txid txid) {
	// Remove page ids from cache.
	for _, id := range f.pending[txid] {
		delete(f.cache, id)
	}

	// Remove pages from pending list.
	delete(f.pending, txid)
}

// freed returns whether a given page is in the free list.
func (f *freelist) freed(pgid pgid) bool {
	return f.cache[pgid]
}

// read initializes the freelist from a freelist page.
func (f *freelist) read(p *page) {
	// If the page.count is at the max uint16 value (64k) then it's considered
	// an overflow and the size of the freelist is stored as the first element.
	idx, count := 0, int(p.count)
	if count == 0xFFFF {
		idx = 1
		count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
	}

	// Copy the list of page ids from the freelist.
	ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
	f.ids = make([]pgid, len(ids))
	copy(f.ids, ids)

	// Make sure they're sorted.
	sort.Sort(pgids(f.ids))

	// Rebuild the page cache.
	f.reindex()
}

// write writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
func (f *freelist) write(p *page) error {
	// Combine the old free pgids and pgids waiting on an open transaction.
	ids := f.all()

	// Update the header flag.
	p.flags |= freelistPageFlag

	// The page.count can only hold up to 64k elements so if we overflow that
	// number then we handle it by putting the size in the first element.
	if len(ids) < 0xFFFF {
		p.count = uint16(len(ids))
		copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
	} else {
		p.count = 0xFFFF
		((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids))
		copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids)
	}

	return nil
}
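
The 0xFFFF header convention shared by read and write above can be summarized in a tiny standalone sketch (encodeCount is an illustrative name, not part of the vendored source):

package main

import "fmt"

// encodeCount shows the header convention used above: a freelist page's
// 16-bit count field holds the element count directly, unless the list
// has 0xFFFF or more entries, in which case count is set to the 0xFFFF
// sentinel and the real length is stored as the first page element.
func encodeCount(n int) (count uint16, firstElemIsLen bool) {
	if n < 0xFFFF {
		return uint16(n), false
	}
	return 0xFFFF, true
}

func main() {
	fmt.Println(encodeCount(10))    // 10 false
	fmt.Println(encodeCount(70000)) // 65535 true
}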

// reload reads the freelist from a page and filters out pending items.
func (f *freelist) reload(p *page) {
	f.read(p)

	// Build a cache of only pending pages.
	pcache := make(map[pgid]bool)
	for _, pendingIDs := range f.pending {
		for _, pendingID := range pendingIDs {
			pcache[pendingID] = true
		}
	}

	// Check each page in the freelist and build a new available freelist
	// with any pages not in the pending lists.
	var a []pgid
	for _, id := range f.ids {
		if !pcache[id] {
			a = append(a, id)
		}
	}
	f.ids = a

	// Once the available list is rebuilt then rebuild the free cache so that
	// it includes the available and pending free pages.
	f.reindex()
}

// reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() {
	f.cache = make(map[pgid]bool)
	for _, id := range f.ids {
		f.cache[id] = true
	}
	for _, pendingIDs := range f.pending {
		for _, pendingID := range pendingIDs {
			f.cache[pendingID] = true
		}
	}
}

@@ -0,0 +1,636 @@
package bolt

import (
	"bytes"
	"fmt"
	"sort"
	"unsafe"
)

// node represents an in-memory, deserialized page.
type node struct {
	bucket     *Bucket
	isLeaf     bool
	unbalanced bool
	spilled    bool
	key        []byte
	pgid       pgid
	parent     *node
	children   nodes
	inodes     inodes
}

// root returns the top-level node this node is attached to.
func (n *node) root() *node {
	if n.parent == nil {
		return n
	}
	return n.parent.root()
}

// minKeys returns the minimum number of inodes this node should have.
func (n *node) minKeys() int {
	if n.isLeaf {
		return 1
	}
	return 2
}

// size returns the size of the node after serialization.
func (n *node) size() int {
	sz, elsz := pageHeaderSize, n.pageElementSize()
	for i := 0; i < len(n.inodes); i++ {
		item := &n.inodes[i]
		sz += elsz + len(item.key) + len(item.value)
	}
	return sz
}

// sizeLessThan returns true if the node is less than a given size.
// This is an optimization to avoid calculating a large node when we only need
// to know if it fits inside a certain page size.
func (n *node) sizeLessThan(v int) bool {
	sz, elsz := pageHeaderSize, n.pageElementSize()
	for i := 0; i < len(n.inodes); i++ {
		item := &n.inodes[i]
		sz += elsz + len(item.key) + len(item.value)
		if sz >= v {
			return false
		}
	}
	return true
}

// pageElementSize returns the size of each page element based on the type of node.
func (n *node) pageElementSize() int {
	if n.isLeaf {
		return leafPageElementSize
	}
	return branchPageElementSize
}

// childAt returns the child node at a given index.
func (n *node) childAt(index int) *node {
	if n.isLeaf {
		panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
	}
	return n.bucket.node(n.inodes[index].pgid, n)
}

// childIndex returns the index of a given child node.
func (n *node) childIndex(child *node) int {
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
	return index
}

// numChildren returns the number of children.
func (n *node) numChildren() int {
	return len(n.inodes)
}

// nextSibling returns the next node with the same parent.
func (n *node) nextSibling() *node {
	if n.parent == nil {
		return nil
	}
	index := n.parent.childIndex(n)
	if index >= n.parent.numChildren()-1 {
		return nil
	}
	return n.parent.childAt(index + 1)
}

// prevSibling returns the previous node with the same parent.
func (n *node) prevSibling() *node {
	if n.parent == nil {
		return nil
	}
	index := n.parent.childIndex(n)
	if index == 0 {
		return nil
	}
	return n.parent.childAt(index - 1)
}

// put inserts a key/value.
func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
	if pgid >= n.bucket.tx.meta.pgid {
		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
	} else if len(oldKey) <= 0 {
		panic("put: zero-length old key")
	} else if len(newKey) <= 0 {
		panic("put: zero-length new key")
	}

	// Find insertion index.
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })

	// Add capacity and shift nodes if we don't have an exact match and need to insert.
	exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
	if !exact {
		n.inodes = append(n.inodes, inode{})
		copy(n.inodes[index+1:], n.inodes[index:])
	}

	inode := &n.inodes[index]
	inode.flags = flags
	inode.key = newKey
	inode.value = value
	inode.pgid = pgid
	_assert(len(inode.key) > 0, "put: zero-length inode key")
}

// del removes a key from the node.
func (n *node) del(key []byte) {
	// Find index of key.
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })

	// Exit if the key isn't found.
	if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
		return
	}

	// Delete inode from the node.
	n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)

	// Mark the node as needing rebalancing.
	n.unbalanced = true
}

// read initializes the node from a page.
func (n *node) read(p *page) {
	n.pgid = p.id
	n.isLeaf = ((p.flags & leafPageFlag) != 0)
	n.inodes = make(inodes, int(p.count))

	for i := 0; i < int(p.count); i++ {
		inode := &n.inodes[i]
		if n.isLeaf {
			elem := p.leafPageElement(uint16(i))
			inode.flags = elem.flags
			inode.key = elem.key()
			inode.value = elem.value()
		} else {
			elem := p.branchPageElement(uint16(i))
			inode.pgid = elem.pgid
			inode.key = elem.key()
		}
		_assert(len(inode.key) > 0, "read: zero-length inode key")
	}

	// Save first key so we can find the node in the parent when we spill.
	if len(n.inodes) > 0 {
		n.key = n.inodes[0].key
		_assert(len(n.key) > 0, "read: zero-length node key")
	} else {
		n.key = nil
	}
}

// write writes the items onto one or more pages.
func (n *node) write(p *page) {
	// Initialize page.
	if n.isLeaf {
		p.flags |= leafPageFlag
	} else {
		p.flags |= branchPageFlag
	}

	if len(n.inodes) >= 0xFFFF {
		panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
	}
	p.count = uint16(len(n.inodes))

	// Loop over each item and write it to the page.
	b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
	for i, item := range n.inodes {
		_assert(len(item.key) > 0, "write: zero-length inode key")

		// Write the page element.
		if n.isLeaf {
			elem := p.leafPageElement(uint16(i))
			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
			elem.flags = item.flags
			elem.ksize = uint32(len(item.key))
			elem.vsize = uint32(len(item.value))
		} else {
			elem := p.branchPageElement(uint16(i))
			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
			elem.ksize = uint32(len(item.key))
			elem.pgid = item.pgid
			_assert(elem.pgid != p.id, "write: circular dependency occurred")
		}

		// If the length of key+value is larger than the max allocation size
		// then we need to reallocate the byte array pointer.
		//
		// See: https://github.com/boltdb/bolt/pull/335
		klen, vlen := len(item.key), len(item.value)
		if len(b) < klen+vlen {
			b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
		}

		// Write data for the element to the end of the page.
		copy(b[0:], item.key)
		b = b[klen:]
		copy(b[0:], item.value)
		b = b[vlen:]
	}

	// DEBUG ONLY: n.dump()
}

// split breaks up a node into multiple smaller nodes, if appropriate.
// This should only be called from the spill() function.
func (n *node) split(pageSize int) []*node {
	var nodes []*node

	node := n
	for {
		// Split node into two.
		a, b := node.splitTwo(pageSize)
		nodes = append(nodes, a)

		// If we can't split then exit the loop.
		if b == nil {
			break
		}

		// Set node to b so it gets split on the next iteration.
		node = b
	}

	return nodes
}

// splitTwo breaks up a node into two smaller nodes, if appropriate.
// This should only be called from the split() function.
func (n *node) splitTwo(pageSize int) (*node, *node) {
	// Ignore the split if the page doesn't have at least enough nodes for
	// two pages or if the nodes can fit in a single page.
	if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
		return n, nil
	}

	// Determine the threshold before starting a new node.
	var fillPercent = n.bucket.FillPercent
	if fillPercent < minFillPercent {
		fillPercent = minFillPercent
	} else if fillPercent > maxFillPercent {
		fillPercent = maxFillPercent
	}
	threshold := int(float64(pageSize) * fillPercent)

	// Determine split position and sizes of the two pages.
	splitIndex, _ := n.splitIndex(threshold)

	// Split node into two separate nodes.
	// If there's no parent then we'll need to create one.
	if n.parent == nil {
		n.parent = &node{bucket: n.bucket, children: []*node{n}}
	}

	// Create a new node and add it to the parent.
	next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
	n.parent.children = append(n.parent.children, next)

	// Split inodes across two nodes.
	next.inodes = n.inodes[splitIndex:]
	n.inodes = n.inodes[:splitIndex]

	// Update the statistics.
	n.bucket.tx.stats.Split++

	return n, next
}

// splitIndex finds the position where a page will fill a given threshold.
// It returns the index as well as the size of the first page.
// This is only called from split().
func (n *node) splitIndex(threshold int) (index, sz int) {
	sz = pageHeaderSize

	// Loop until we only have the minimum number of keys required for the second page.
	for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
		index = i
		inode := n.inodes[i]
		elsize := n.pageElementSize() + len(inode.key) + len(inode.value)

		// If we have at least the minimum number of keys and adding another
		// node would put us over the threshold then exit and return.
		if i >= minKeysPerPage && sz+elsize > threshold {
			break
		}

		// Add the element size to the total size.
		sz += elsize
	}

	return
}
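
To make the threshold arithmetic concrete, here is a minimal standalone restatement of the splitIndex loop over plain element sizes (all names and numbers are illustrative, not part of the vendored source):

package main

import "fmt"

// splitIndex mirrors the loop above: given per-element sizes, a page
// header size, a fill threshold, and a minimum key count per page,
// find where the first page should end and how large it is.
func splitIndex(sizes []int, header, threshold, minKeys int) (index, sz int) {
	sz = header
	for i := 0; i < len(sizes)-minKeys; i++ {
		index = i
		if i >= minKeys && sz+sizes[i] > threshold {
			break
		}
		sz += sizes[i]
	}
	return index, sz
}

func main() {
	// Five 100-byte elements, 16-byte header, threshold of 307 bytes.
	idx, sz := splitIndex([]int{100, 100, 100, 100, 100}, 16, 307, 2)
	fmt.Println(idx, sz) // 2 216 — the third element would cross the threshold
}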

// spill writes the nodes to dirty pages and splits nodes as it goes.
// Returns an error if dirty pages cannot be allocated.
func (n *node) spill() error {
	var tx = n.bucket.tx
	if n.spilled {
		return nil
	}

	// Spill child nodes first. Child nodes can materialize sibling nodes in
	// the case of split-merge so we cannot use a range loop. We have to check
	// the children size on every loop iteration.
	sort.Sort(n.children)
	for i := 0; i < len(n.children); i++ {
		if err := n.children[i].spill(); err != nil {
			return err
		}
	}

	// We no longer need the child list because it's only used for spill tracking.
	n.children = nil

	// Split nodes into appropriate sizes. The first node will always be n.
	var nodes = n.split(tx.db.pageSize)
	for _, node := range nodes {
		// Add node's page to the freelist if it's not new.
		if node.pgid > 0 {
			tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
			node.pgid = 0
		}

		// Allocate contiguous space for the node.
		p, err := tx.allocate((node.size() / tx.db.pageSize) + 1)
		if err != nil {
			return err
		}

		// Write the node.
		if p.id >= tx.meta.pgid {
			panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
		}
		node.pgid = p.id
		node.write(p)
		node.spilled = true

		// Insert into parent inodes.
		if node.parent != nil {
			var key = node.key
			if key == nil {
				key = node.inodes[0].key
			}

			node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
			node.key = node.inodes[0].key
			_assert(len(node.key) > 0, "spill: zero-length node key")
		}

		// Update the statistics.
		tx.stats.Spill++
	}

	// If the root node split and created a new root then we need to spill that
	// as well. We'll clear out the children to make sure it doesn't try to respill.
	if n.parent != nil && n.parent.pgid == 0 {
		n.children = nil
		return n.parent.spill()
	}

	return nil
}

// rebalance attempts to combine the node with sibling nodes if the node fill
// size is below a threshold or if there are not enough keys.
func (n *node) rebalance() {
	if !n.unbalanced {
		return
	}
	n.unbalanced = false

	// Update statistics.
	n.bucket.tx.stats.Rebalance++

	// Ignore if node is above threshold (25%) and has enough keys.
	var threshold = n.bucket.tx.db.pageSize / 4
	if n.size() > threshold && len(n.inodes) > n.minKeys() {
		return
	}

	// Root node has special handling.
	if n.parent == nil {
		// If root node is a branch and only has one node then collapse it.
		if !n.isLeaf && len(n.inodes) == 1 {
			// Move root's child up.
			child := n.bucket.node(n.inodes[0].pgid, n)
			n.isLeaf = child.isLeaf
			n.inodes = child.inodes[:]
			n.children = child.children

			// Reparent all child nodes being moved.
			for _, inode := range n.inodes {
				if child, ok := n.bucket.nodes[inode.pgid]; ok {
					child.parent = n
				}
			}

			// Remove old child.
			child.parent = nil
			delete(n.bucket.nodes, child.pgid)
			child.free()
		}

		return
	}

	// If node has no keys then just remove it.
	if n.numChildren() == 0 {
		n.parent.del(n.key)
		n.parent.removeChild(n)
		delete(n.bucket.nodes, n.pgid)
		n.free()
		n.parent.rebalance()
		return
	}

	_assert(n.parent.numChildren() > 1, "parent must have at least 2 children")

	// Destination node is right sibling if idx == 0, otherwise left sibling.
	var target *node
	var useNextSibling = (n.parent.childIndex(n) == 0)
	if useNextSibling {
		target = n.nextSibling()
	} else {
		target = n.prevSibling()
	}

	// If target node has extra nodes then just move one over.
	if target.numChildren() > target.minKeys() {
		if useNextSibling {
			// Reparent and move node.
			if child, ok := n.bucket.nodes[target.inodes[0].pgid]; ok {
				child.parent.removeChild(child)
				child.parent = n
				child.parent.children = append(child.parent.children, child)
			}
			n.inodes = append(n.inodes, target.inodes[0])
			target.inodes = target.inodes[1:]

			// Update target key on parent.
			target.parent.put(target.key, target.inodes[0].key, nil, target.pgid, 0)
			target.key = target.inodes[0].key
			_assert(len(target.key) > 0, "rebalance(1): zero-length node key")
		} else {
			// Reparent and move node.
			if child, ok := n.bucket.nodes[target.inodes[len(target.inodes)-1].pgid]; ok {
				child.parent.removeChild(child)
				child.parent = n
				child.parent.children = append(child.parent.children, child)
			}
			n.inodes = append(n.inodes, inode{})
			copy(n.inodes[1:], n.inodes)
			n.inodes[0] = target.inodes[len(target.inodes)-1]
			target.inodes = target.inodes[:len(target.inodes)-1]
		}

		// Update parent key for node.
		n.parent.put(n.key, n.inodes[0].key, nil, n.pgid, 0)
		n.key = n.inodes[0].key
		_assert(len(n.key) > 0, "rebalance(2): zero-length node key")

		return
	}

	// If both this node and the target node are too small then merge them.
	if useNextSibling {
		// Reparent all child nodes being moved.
		for _, inode := range target.inodes {
			if child, ok := n.bucket.nodes[inode.pgid]; ok {
				child.parent.removeChild(child)
				child.parent = n
				child.parent.children = append(child.parent.children, child)
			}
		}

		// Copy over inodes from target and remove target.
		n.inodes = append(n.inodes, target.inodes...)
		n.parent.del(target.key)
		n.parent.removeChild(target)
		delete(n.bucket.nodes, target.pgid)
		target.free()
	} else {
		// Reparent all child nodes being moved.
		for _, inode := range n.inodes {
			if child, ok := n.bucket.nodes[inode.pgid]; ok {
				child.parent.removeChild(child)
				child.parent = target
				child.parent.children = append(child.parent.children, child)
			}
		}

		// Copy over inodes to target and remove node.
		target.inodes = append(target.inodes, n.inodes...)
		n.parent.del(n.key)
		n.parent.removeChild(n)
		delete(n.bucket.nodes, n.pgid)
		n.free()
	}

	// Either this node or the target node was deleted from the parent so rebalance it.
	n.parent.rebalance()
}

// removeChild removes a node from the list of in-memory children.
// This does not affect the inodes.
func (n *node) removeChild(target *node) {
	for i, child := range n.children {
		if child == target {
			n.children = append(n.children[:i], n.children[i+1:]...)
			return
		}
	}
}

// dereference causes the node to copy all its inode key/value references to heap memory.
// This is required when the mmap is reallocated so inodes are not pointing to stale data.
func (n *node) dereference() {
	if n.key != nil {
		key := make([]byte, len(n.key))
		copy(key, n.key)
		n.key = key
		_assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
	}

	for i := range n.inodes {
		inode := &n.inodes[i]

		key := make([]byte, len(inode.key))
		copy(key, inode.key)
		inode.key = key
		_assert(len(inode.key) > 0, "dereference: zero-length inode key")

		value := make([]byte, len(inode.value))
		copy(value, inode.value)
		inode.value = value
	}

	// Recursively dereference children.
	for _, child := range n.children {
		child.dereference()
	}

	// Update statistics.
	n.bucket.tx.stats.NodeDeref++
}

// free adds the node's underlying page to the freelist.
func (n *node) free() {
	if n.pgid != 0 {
		n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
		n.pgid = 0
	}
}

// dump writes the contents of the node to STDERR for debugging purposes.
/*
func (n *node) dump() {
	// Write node header.
	var typ = "branch"
	if n.isLeaf {
		typ = "leaf"
	}
	warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))

	// Write out abbreviated version of each item.
	for _, item := range n.inodes {
		if n.isLeaf {
			if item.flags&bucketLeafFlag != 0 {
				bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
				warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
			} else {
				warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
			}
		} else {
			warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
		}
	}
	warn("")
}
*/

type nodes []*node

func (s nodes) Len() int           { return len(s) }
func (s nodes) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }

// inode represents an internal node inside of a node.
// It can be used to point to elements in a page or point
// to an element which hasn't been added to a page yet.
type inode struct {
	flags uint32
	pgid  pgid
	key   []byte
	value []byte
}

type inodes []inode

@@ -0,0 +1,172 @@
package bolt

import (
	"fmt"
	"os"
	"sort"
	"unsafe"
)

const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))

const minKeysPerPage = 2

const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))

const (
	branchPageFlag   = 0x01
	leafPageFlag     = 0x02
	metaPageFlag     = 0x04
	freelistPageFlag = 0x10
)

const (
	bucketLeafFlag = 0x01
)

type pgid uint64

type page struct {
	id       pgid
	flags    uint16
	count    uint16
	overflow uint32
	ptr      uintptr
}

// typ returns a human readable page type string used for debugging.
func (p *page) typ() string {
	if (p.flags & branchPageFlag) != 0 {
		return "branch"
	} else if (p.flags & leafPageFlag) != 0 {
		return "leaf"
	} else if (p.flags & metaPageFlag) != 0 {
		return "meta"
	} else if (p.flags & freelistPageFlag) != 0 {
		return "freelist"
	}
	return fmt.Sprintf("unknown<%02x>", p.flags)
}

// meta returns a pointer to the metadata section of the page.
func (p *page) meta() *meta {
	return (*meta)(unsafe.Pointer(&p.ptr))
}

// leafPageElement retrieves the leaf node by index.
func (p *page) leafPageElement(index uint16) *leafPageElement {
	n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
	return n
}

// leafPageElements retrieves a list of leaf nodes.
func (p *page) leafPageElements() []leafPageElement {
	return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
}

// branchPageElement retrieves the branch node by index.
func (p *page) branchPageElement(index uint16) *branchPageElement {
	return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
}

// branchPageElements retrieves a list of branch nodes.
func (p *page) branchPageElements() []branchPageElement {
	return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
}

// hexdump writes n bytes of the page to STDERR as hex output.
func (p *page) hexdump(n int) {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
	fmt.Fprintf(os.Stderr, "%x\n", buf)
}

type pages []*page

func (s pages) Len() int           { return len(s) }
func (s pages) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }

// branchPageElement represents a node on a branch page.
type branchPageElement struct {
	pos   uint32
	ksize uint32
	pgid  pgid
}

// key returns a byte slice of the node key.
func (n *branchPageElement) key() []byte {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
}

// leafPageElement represents a node on a leaf page.
type leafPageElement struct {
	flags uint32
	pos   uint32
	ksize uint32
	vsize uint32
}

// key returns a byte slice of the node key.
func (n *leafPageElement) key() []byte {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
}

// value returns a byte slice of the node value.
func (n *leafPageElement) value() []byte {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize]
}

// PageInfo represents human readable information about a page.
type PageInfo struct {
	ID            int
	Type          string
	Count         int
	OverflowCount int
}

type pgids []pgid

func (s pgids) Len() int           { return len(s) }
func (s pgids) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s pgids) Less(i, j int) bool { return s[i] < s[j] }

// merge returns the sorted union of a and b.
func (a pgids) merge(b pgids) pgids {
	// Return the opposite slice if one is nil.
	if len(a) == 0 {
		return b
	} else if len(b) == 0 {
		return a
	}

	// Create a list to hold all elements from both lists.
	merged := make(pgids, 0, len(a)+len(b))

	// Assign lead to the slice with a lower starting value, follow to the higher value.
	lead, follow := a, b
	if b[0] < a[0] {
		lead, follow = b, a
	}

	// Continue while there are elements in the lead.
	for len(lead) > 0 {
		// Merge largest prefix of lead that is ahead of follow[0].
		n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
		merged = append(merged, lead[:n]...)
		if n >= len(lead) {
			break
		}

		// Swap lead and follow.
		lead, follow = follow, lead[n:]
	}

	// Append what's left in follow.
	merged = append(merged, follow...)

	return merged
}
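
Since merge is the heart of freelist release, a standalone restatement over plain uint64 slices shows the leapfrogging of lead and follow (illustrative only, not part of the vendored source):

package main

import (
	"fmt"
	"sort"
)

// merge restates the sorted-union loop above for two sorted slices.
func merge(a, b []uint64) []uint64 {
	if len(a) == 0 {
		return b
	} else if len(b) == 0 {
		return a
	}
	merged := make([]uint64, 0, len(a)+len(b))
	lead, follow := a, b
	if b[0] < a[0] {
		lead, follow = b, a
	}
	for len(lead) > 0 {
		// Take the prefix of lead that sorts before follow's head.
		n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
		merged = append(merged, lead[:n]...)
		if n >= len(lead) {
			break
		}
		lead, follow = follow, lead[n:]
	}
	return append(merged, follow...)
}

func main() {
	fmt.Println(merge([]uint64{1, 4, 9}, []uint64{2, 3, 10})) // [1 2 3 4 9 10]
}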

@@ -0,0 +1,634 @@
package bolt

import (
	"fmt"
	"io"
	"os"
	"sort"
	"time"
	"unsafe"
)

// txid represents the internal transaction identifier.
type txid uint64

// Tx represents a read-only or read/write transaction on the database.
// Read-only transactions can be used for retrieving values for keys and creating cursors.
// Read/write transactions can create and remove buckets and create and remove keys.
//
// IMPORTANT: You must commit or rollback transactions when you are done with
// them. Pages cannot be reclaimed by the writer until no more transactions
// are using them. A long running read transaction can cause the database to
// grow quickly.
type Tx struct {
	writable       bool
	managed        bool
	db             *DB
	meta           *meta
	root           Bucket
	pages          map[pgid]*page
	stats          TxStats
	commitHandlers []func()

	// WriteFlag specifies the flag for write-related methods like WriteTo().
	// Tx opens the database file with the specified flag to copy the data.
	//
	// By default, the flag is unset, which works well for mostly in-memory
	// workloads. For databases that are much larger than available RAM,
	// set the flag to syscall.O_DIRECT to avoid thrashing the page cache.
	WriteFlag int
}
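
For context, the commit/rollback contract described above is what the exported DB helpers enforce. A minimal usage sketch of the public API (assuming the github.com/boltdb/bolt import path; the file name is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	// Open (or create) a database file.
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// db.Update runs fn inside a managed read/write Tx: it commits on a
	// nil return and rolls back on error, so pages are always reclaimed.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("foo"), []byte("bar"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// db.View runs fn inside a managed read-only Tx.
	_ = db.View(func(tx *bolt.Tx) error {
		fmt.Printf("foo=%s\n", tx.Bucket([]byte("widgets")).Get([]byte("foo")))
		return nil
	})
}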

// init initializes the transaction.
func (tx *Tx) init(db *DB) {
	tx.db = db
	tx.pages = nil

	// Copy the meta page since it can be changed by the writer.
	tx.meta = &meta{}
	db.meta().copy(tx.meta)

	// Copy over the root bucket.
	tx.root = newBucket(tx)
	tx.root.bucket = &bucket{}
	*tx.root.bucket = tx.meta.root

	// Increment the transaction id and add a page cache for writable transactions.
	if tx.writable {
		tx.pages = make(map[pgid]*page)
		tx.meta.txid += txid(1)
	}
}

// ID returns the transaction id.
func (tx *Tx) ID() int {
	return int(tx.meta.txid)
}

// DB returns a reference to the database that created the transaction.
func (tx *Tx) DB() *DB {
	return tx.db
}

// Size returns current database size in bytes as seen by this transaction.
func (tx *Tx) Size() int64 {
	return int64(tx.meta.pgid) * int64(tx.db.pageSize)
}

// Writable returns whether the transaction can perform write operations.
func (tx *Tx) Writable() bool {
	return tx.writable
}

// Cursor creates a cursor associated with the root bucket.
// All items in the cursor will return a nil value because all root bucket keys point to buckets.
// The cursor is only valid as long as the transaction is open.
// Do not use a cursor after the transaction is closed.
func (tx *Tx) Cursor() *Cursor {
	return tx.root.Cursor()
}

// Stats retrieves a copy of the current transaction statistics.
func (tx *Tx) Stats() TxStats {
	return tx.stats
}

// Bucket retrieves a bucket by name.
// Returns nil if the bucket does not exist.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) Bucket(name []byte) *Bucket {
	return tx.root.Bucket(name)
}

// CreateBucket creates a new bucket.
// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
	return tx.root.CreateBucket(name)
}

// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
// Returns an error if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
	return tx.root.CreateBucketIfNotExists(name)
}

// DeleteBucket deletes a bucket.
// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
func (tx *Tx) DeleteBucket(name []byte) error {
	return tx.root.DeleteBucket(name)
}

// ForEach executes a function for each bucket in the root.
// If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller.
func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
	return tx.root.ForEach(func(k, v []byte) error {
		if err := fn(k, tx.root.Bucket(k)); err != nil {
			return err
		}
		return nil
	})
}
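
A short sketch of ForEach in practice, assuming an open *bolt.DB obtained as in the earlier sketch (listBuckets is an illustrative name):

package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

// listBuckets prints the name of every top-level bucket.
func listBuckets(db *bolt.DB) error {
	return db.View(func(tx *bolt.Tx) error {
		return tx.ForEach(func(name []byte, b *bolt.Bucket) error {
			fmt.Printf("bucket: %s\n", name)
			return nil
		})
	})
}

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := listBuckets(db); err != nil {
		log.Fatal(err)
	}
}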

// OnCommit adds a handler function to be executed after the transaction successfully commits.
func (tx *Tx) OnCommit(fn func()) {
	tx.commitHandlers = append(tx.commitHandlers, fn)
}

// Commit writes all changes to disk and updates the meta page.
// Returns an error if a disk write error occurs, or if Commit is
// called on a read-only transaction.
func (tx *Tx) Commit() error {
	_assert(!tx.managed, "managed tx commit not allowed")
	if tx.db == nil {
		return ErrTxClosed
	} else if !tx.writable {
		return ErrTxNotWritable
	}

	// TODO(benbjohnson): Use vectorized I/O to write out dirty pages.

	// Rebalance nodes which have had deletions.
	var startTime = time.Now()
	tx.root.rebalance()
	if tx.stats.Rebalance > 0 {
		tx.stats.RebalanceTime += time.Since(startTime)
	}

	// Spill data onto dirty pages.
	startTime = time.Now()
	if err := tx.root.spill(); err != nil {
		tx.rollback()
		return err
	}
	tx.stats.SpillTime += time.Since(startTime)

	// Free the old root bucket.
	tx.meta.root.root = tx.root.root

	opgid := tx.meta.pgid

	// Free the freelist and allocate new pages for it. This will overestimate
	// the size of the freelist but not underestimate the size (which would be bad).
	tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
	p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
	if err != nil {
		tx.rollback()
		return err
	}
	if err := tx.db.freelist.write(p); err != nil {
		tx.rollback()
		return err
	}
	tx.meta.freelist = p.id

	// If the high water mark has moved up then attempt to grow the database.
	if tx.meta.pgid > opgid {
		if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
			tx.rollback()
			return err
		}
	}

	// Write dirty pages to disk.
	startTime = time.Now()
	if err := tx.write(); err != nil {
		tx.rollback()
		return err
	}

	// If strict mode is enabled then perform a consistency check.
	// Only the first consistency error is reported in the panic.
	if tx.db.StrictMode {
		if err, ok := <-tx.Check(); ok {
			panic("check fail: " + err.Error())
		}
	}

	// Write meta to disk.
	if err := tx.writeMeta(); err != nil {
		tx.rollback()
		return err
	}
	tx.stats.WriteTime += time.Since(startTime)

	// Finalize the transaction.
	tx.close()

	// Execute commit handlers now that the locks have been removed.
	for _, fn := range tx.commitHandlers {
		fn()
	}

	return nil
}

// Rollback closes the transaction and ignores all previous updates. Read-only
// transactions must be rolled back and not committed.
func (tx *Tx) Rollback() error {
	_assert(!tx.managed, "managed tx rollback not allowed")
	if tx.db == nil {
		return ErrTxClosed
	}
	tx.rollback()
	return nil
}

func (tx *Tx) rollback() {
	if tx.db == nil {
		return
	}
	if tx.writable {
		tx.db.freelist.rollback(tx.meta.txid)
		tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
	}
	tx.close()
}

func (tx *Tx) close() {
	if tx.db == nil {
		return
	}
	if tx.writable {
		// Grab freelist stats.
		var freelistFreeN = tx.db.freelist.free_count()
		var freelistPendingN = tx.db.freelist.pending_count()
		var freelistAlloc = tx.db.freelist.size()

		// Remove transaction ref & writer lock.
		tx.db.rwtx = nil
		tx.db.rwlock.Unlock()

		// Merge statistics.
		tx.db.statlock.Lock()
		tx.db.stats.FreePageN = freelistFreeN
		tx.db.stats.PendingPageN = freelistPendingN
		tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
		tx.db.stats.FreelistInuse = freelistAlloc
		tx.db.stats.TxStats.add(&tx.stats)
		tx.db.statlock.Unlock()
	} else {
		tx.db.removeTx(tx)
	}

	// Clear all references.
	tx.db = nil
	tx.meta = nil
	tx.root = Bucket{tx: tx}
	tx.pages = nil
}

// Copy writes the entire database to a writer.
// This function exists for backwards compatibility. Use WriteTo() instead.
func (tx *Tx) Copy(w io.Writer) error {
	_, err := tx.WriteTo(w)
	return err
}

// WriteTo writes the entire database to a writer.
// If err == nil then exactly tx.Size() bytes will be written into the writer.
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
	// Attempt to open reader with WriteFlag.
	f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
	if err != nil {
		return 0, err
	}
	defer func() { _ = f.Close() }()

	// Copy the meta pages.
	tx.db.metalock.Lock()
	n, err = io.CopyN(w, f, int64(tx.db.pageSize*2))
	tx.db.metalock.Unlock()
	if err != nil {
		return n, fmt.Errorf("meta copy: %s", err)
	}

	// Copy data pages.
	wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
	n += wn
	if err != nil {
		return n, err
	}

	return n, f.Close()
}

// CopyFile copies the entire database to file at the given path.
// A reader transaction is maintained during the copy so it is safe to continue
// using the database while a copy is in progress.
func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
	if err != nil {
		return err
	}

	err = tx.Copy(f)
	if err != nil {
		_ = f.Close()
		return err
	}
	return f.Close()
}
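
CopyFile is the building block for hot backups. A minimal sketch, assuming an open *bolt.DB as before (the destination path is a placeholder):

package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The read transaction pins a consistent snapshot, so writers can
	// keep committing while the copy streams to the destination file.
	err = db.View(func(tx *bolt.Tx) error {
		return tx.CopyFile("/tmp/backup.db", 0600)
	})
	if err != nil {
		log.Fatal(err)
	}
}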

// Check performs several consistency checks on the database for this transaction.
// An error is returned if any inconsistency is found.
//
// It can be safely run concurrently on a writable transaction. However, this
// incurs a high cost for large databases and databases with a lot of subbuckets
// because of caching. This overhead can be removed by running it on a read-only
// transaction; however, it is then not safe to execute other writer transactions
// at the same time.
func (tx *Tx) Check() <-chan error {
	ch := make(chan error)
	go tx.check(ch)
	return ch
}
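
A minimal sketch of consuming Check's channel: because check closes the channel on completion, ranging over it collects every inconsistency and terminates cleanly (assumes an open *bolt.DB as before):

package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// One error is sent per finding; the channel closes when done.
	_ = db.View(func(tx *bolt.Tx) error {
		for err := range tx.Check() {
			fmt.Println("consistency error:", err)
		}
		return nil
	})
}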

func (tx *Tx) check(ch chan error) {
	// Check if any pages are double freed.
	freed := make(map[pgid]bool)
	for _, id := range tx.db.freelist.all() {
		if freed[id] {
			ch <- fmt.Errorf("page %d: already freed", id)
		}
		freed[id] = true
	}

	// Track every reachable page.
	reachable := make(map[pgid]*page)
	reachable[0] = tx.page(0) // meta0
	reachable[1] = tx.page(1) // meta1
	for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
		reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
	}

	// Recursively check buckets.
	tx.checkBucket(&tx.root, reachable, freed, ch)

	// Ensure all pages below high water mark are either reachable or freed.
	for i := pgid(0); i < tx.meta.pgid; i++ {
		_, isReachable := reachable[i]
		if !isReachable && !freed[i] {
			ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
		}
	}

	// Close the channel to signal completion.
	close(ch)
}

func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
	// Ignore inline buckets.
	if b.root == 0 {
		return
	}

	// Check every page used by this bucket.
	b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
		if p.id > tx.meta.pgid {
			ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
		}

		// Ensure each page is only referenced once.
		for i := pgid(0); i <= pgid(p.overflow); i++ {
			var id = p.id + i
			if _, ok := reachable[id]; ok {
				ch <- fmt.Errorf("page %d: multiple references", int(id))
			}
			reachable[id] = p
		}

		// We should only encounter un-freed leaf and branch pages.
		if freed[p.id] {
			ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
		} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
			ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
		}
	})

	// Check each bucket within this bucket.
	_ = b.ForEach(func(k, v []byte) error {
		if child := b.Bucket(k); child != nil {
			tx.checkBucket(child, reachable, freed, ch)
		}
		return nil
	})
}

// allocate returns a contiguous block of memory starting at a given page.
func (tx *Tx) allocate(count int) (*page, error) {
	p, err := tx.db.allocate(count)
	if err != nil {
		return nil, err
	}

	// Save to our page cache.
	tx.pages[p.id] = p

	// Update statistics.
	tx.stats.PageCount++
	tx.stats.PageAlloc += count * tx.db.pageSize

	return p, nil
}

// write writes any dirty pages to disk.
func (tx *Tx) write() error {
	// Sort pages by id.
	pages := make(pages, 0, len(tx.pages))
	for _, p := range tx.pages {
		pages = append(pages, p)
	}
	sort.Sort(pages)

	// Write pages to disk in order.
	for _, p := range pages {
		size := (int(p.overflow) + 1) * tx.db.pageSize
		offset := int64(p.id) * int64(tx.db.pageSize)

		// Write out page in "max allocation" sized chunks.
		ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
		for {
			// Limit our write to our max allocation size.
			sz := size
			if sz > maxAllocSize-1 {
				sz = maxAllocSize - 1
			}

			// Write chunk to disk.
			buf := ptr[:sz]
			if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
				return err
			}

			// Update statistics.
			tx.stats.Write++

			// Exit inner for loop if we've written all the chunks.
			size -= sz
			if size == 0 {
				break
			}

			// Otherwise move offset forward and move pointer to next chunk.
			offset += int64(sz)
			ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
		}
	}

	// Ignore file sync if flag is set on DB.
	if !tx.db.NoSync || IgnoreNoSync {
		if err := fdatasync(tx.db); err != nil {
			return err
		}
	}

	// Clear out page cache.
	tx.pages = make(map[pgid]*page)

	return nil
}
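
The chunking loop in write above can be restated as a standalone helper (writeChunked is an illustrative name, not part of the vendored source; the example writes to a throwaway temp file):

package main

import (
	"fmt"
	"os"
)

// writeChunked mirrors the inner loop above: write buf at offset in
// fixed-size chunks so no single WriteAt exceeds maxChunk bytes.
func writeChunked(f *os.File, buf []byte, offset int64, maxChunk int) error {
	for len(buf) > 0 {
		sz := len(buf)
		if sz > maxChunk {
			sz = maxChunk
		}
		if _, err := f.WriteAt(buf[:sz], offset); err != nil {
			return err
		}
		offset += int64(sz)
		buf = buf[sz:]
	}
	return nil
}

func main() {
	f, err := os.CreateTemp("", "chunk")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	if err := writeChunked(f, []byte("hello, chunked world"), 0, 7); err != nil {
		panic(err)
	}
	fmt.Println("wrote in <=7-byte chunks to", f.Name())
}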

// writeMeta writes the meta to the disk.
func (tx *Tx) writeMeta() error {
	// Create a temporary buffer for the meta page.
	buf := make([]byte, tx.db.pageSize)
	p := tx.db.pageInBuffer(buf, 0)
	tx.meta.write(p)

	// Write the meta page to file.
	if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
		return err
	}
	if !tx.db.NoSync || IgnoreNoSync {
		if err := fdatasync(tx.db); err != nil {
			return err
		}
	}

	// Update statistics.
	tx.stats.Write++

	return nil
}

// page returns a reference to the page with a given id.
// If page has been written to then a temporary buffered page is returned.
func (tx *Tx) page(id pgid) *page {
	// Check the dirty pages first.
	if tx.pages != nil {
		if p, ok := tx.pages[id]; ok {
			return p
		}
	}

	// Otherwise return directly from the mmap.
	return tx.db.page(id)
}

// forEachPage iterates over every page within a given page and executes a function.
func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
	p := tx.page(pgid)

	// Execute function.
	fn(p, depth)

	// Recursively loop over children.
	if (p.flags & branchPageFlag) != 0 {
		for i := 0; i < int(p.count); i++ {
			elem := p.branchPageElement(uint16(i))
			tx.forEachPage(elem.pgid, depth+1, fn)
		}
	}
}

// Page returns page information for a given page number.
// This is only safe for concurrent use when used by a writable transaction.
func (tx *Tx) Page(id int) (*PageInfo, error) {
	if tx.db == nil {
		return nil, ErrTxClosed
	} else if pgid(id) >= tx.meta.pgid {
		return nil, nil
	}

	// Build the page info.
	p := tx.db.page(pgid(id))
	info := &PageInfo{
		ID:            id,
		Count:         int(p.count),
		OverflowCount: int(p.overflow),
	}

	// Determine the type (or if it's free).
	if tx.db.freelist.freed(pgid(id)) {
		info.Type = "free"
	} else {
		info.Type = p.typ()
	}

	return info, nil
}

// TxStats represents statistics about the actions performed by the transaction.
type TxStats struct {
	// Page statistics.
	PageCount int // number of page allocations
	PageAlloc int // total bytes allocated

	// Cursor statistics.
	CursorCount int // number of cursors created

	// Node statistics.
	NodeCount int // number of node allocations
	NodeDeref int // number of node dereferences

	// Rebalance statistics.
	Rebalance     int           // number of node rebalances
	RebalanceTime time.Duration // total time spent rebalancing

	// Split/Spill statistics.
	Split     int           // number of nodes split
	Spill     int           // number of nodes spilled
	SpillTime time.Duration // total time spent spilling

	// Write statistics.
	Write     int           // number of writes performed
	WriteTime time.Duration // total time spent writing to disk
}

func (s *TxStats) add(other *TxStats) {
	s.PageCount += other.PageCount
	s.PageAlloc += other.PageAlloc
	s.CursorCount += other.CursorCount
	s.NodeCount += other.NodeCount
	s.NodeDeref += other.NodeDeref
	s.Rebalance += other.Rebalance
	s.RebalanceTime += other.RebalanceTime
	s.Split += other.Split
	s.Spill += other.Spill
	s.SpillTime += other.SpillTime
	s.Write += other.Write
	s.WriteTime += other.WriteTime
}

// Sub calculates and returns the difference between two sets of transaction stats.
// This is useful when obtaining stats at two different points in time and
// you need the performance counters that occurred within that time span.
func (s *TxStats) Sub(other *TxStats) TxStats {
	var diff TxStats
	diff.PageCount = s.PageCount - other.PageCount
	diff.PageAlloc = s.PageAlloc - other.PageAlloc
	diff.CursorCount = s.CursorCount - other.CursorCount
	diff.NodeCount = s.NodeCount - other.NodeCount
	diff.NodeDeref = s.NodeDeref - other.NodeDeref
	diff.Rebalance = s.Rebalance - other.Rebalance
	diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
	diff.Split = s.Split - other.Split
	diff.Spill = s.Spill - other.Spill
	diff.SpillTime = s.SpillTime - other.SpillTime
	diff.Write = s.Write - other.Write
	diff.WriteTime = s.WriteTime - other.WriteTime
	return diff
}
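
A short sketch of using Sub to isolate the counters for a window of work (assumes DB.Stats exposes the cumulative TxStats, as in bolt's public API; the bucket name is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Snapshot the cumulative stats, do some work, then diff.
	before := db.Stats().TxStats
	_ = db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		return err
	})
	delta := db.Stats().TxStats.Sub(&before)
	fmt.Printf("pages allocated in window: %d\n", delta.PageCount)
}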

@@ -0,0 +1,23 @@
Copyright (c) 2014, Elazar Leibovich
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -0,0 +1,46 @@
# go-bindata-assetfs

Serve embedded files from [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) with `net/http`.

[GoDoc](http://godoc.org/github.com/elazarl/go-bindata-assetfs)

### Installation

Install with

    $ go get github.com/jteeuwen/go-bindata/...
    $ go get github.com/elazarl/go-bindata-assetfs/...

### Creating embedded data

Usage is identical to [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) usage;
instead of running `go-bindata`, run `go-bindata-assetfs`.

The tool will create a `bindata_assetfs.go` file, which contains the embedded data.

A typical use case is

    $ go-bindata-assetfs data/...

### Using assetFS in your code

The generated file provides an `assetFS()` function that returns an `http.FileSystem`
wrapping the embedded files. What you usually want to do is:

    http.Handle("/", http.FileServer(assetFS()))

This would run an HTTP server serving the embedded files.

### Without running the binary tool

You can always just run the `go-bindata` tool, and then use

    import "github.com/elazarl/go-bindata-assetfs"
    ...
    http.Handle("/",
        http.FileServer(
            &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo, Prefix: "data"}))

to serve files embedded from the `data` directory.
|
@ -0,0 +1,158 @@
|
|||
package assetfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultFileTimestamp = time.Now()
|
||||
)
|
||||
|
||||
// FakeFile implements os.FileInfo interface for a given path and size
|
||||
type FakeFile struct {
|
||||
// Path is the path of this file
|
||||
Path string
|
||||
// Dir marks of the path is a directory
|
||||
Dir bool
|
||||
// Len is the length of the fake file, zero if it is a directory
|
||||
Len int64
|
||||
// Timestamp is the ModTime of this file
|
||||
Timestamp time.Time
|
||||
}
|
||||
|
||||
func (f *FakeFile) Name() string {
|
||||
_, name := filepath.Split(f.Path)
|
||||
return name
|
||||
}
|
||||
|
||||
func (f *FakeFile) Mode() os.FileMode {
|
||||
mode := os.FileMode(0644)
|
||||
if f.Dir {
|
||||
return mode | os.ModeDir
|
||||
}
|
||||
return mode
|
||||
}
|
||||
|
||||
func (f *FakeFile) ModTime() time.Time {
|
||||
return f.Timestamp
|
||||
}
|
||||
|
||||
func (f *FakeFile) Size() int64 {
|
||||
return f.Len
|
||||
}
|
||||
|
||||
func (f *FakeFile) IsDir() bool {
|
||||
return f.Mode().IsDir()
|
||||
}
|
||||
|
||||
func (f *FakeFile) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
// AssetFile implements http.File interface for a no-directory file with content
|
||||
type AssetFile struct {
|
||||
*bytes.Reader
|
||||
io.Closer
|
||||
FakeFile
|
||||
}
|
||||
|
||||
func NewAssetFile(name string, content []byte, timestamp time.Time) *AssetFile {
|
||||
if timestamp.IsZero() {
|
||||
timestamp = defaultFileTimestamp
|
||||
}
|
||||
return &AssetFile{
|
||||
bytes.NewReader(content),
|
||||
ioutil.NopCloser(nil),
|
||||
FakeFile{name, false, int64(len(content)), timestamp}}
|
||||
}
|
||||
|
||||
func (f *AssetFile) Readdir(count int) ([]os.FileInfo, error) {
|
||||
return nil, errors.New("not a directory")
|
||||
}
|
||||
|
||||
func (f *AssetFile) Size() int64 {
|
||||
return f.FakeFile.Size()
|
||||
}
|
||||
|
||||
func (f *AssetFile) Stat() (os.FileInfo, error) {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// AssetDirectory implements http.File interface for a directory
|
||||
type AssetDirectory struct {
|
||||
AssetFile
|
||||
ChildrenRead int
|
||||
Children []os.FileInfo
|
||||
}
|
||||
|
||||
func NewAssetDirectory(name string, children []string, fs *AssetFS) *AssetDirectory {
|
||||
fileinfos := make([]os.FileInfo, 0, len(children))
|
||||
for _, child := range children {
|
||||
_, err := fs.AssetDir(filepath.Join(name, child))
|
||||
fileinfos = append(fileinfos, &FakeFile{child, err == nil, 0, time.Time{}})
|
||||
}
|
||||
return &AssetDirectory{
|
||||
AssetFile{
|
||||
bytes.NewReader(nil),
|
||||
ioutil.NopCloser(nil),
|
||||
FakeFile{name, true, 0, time.Time{}},
|
||||
},
|
||||
0,
|
||||
fileinfos}
|
||||
}
|
||||
|
||||
func (f *AssetDirectory) Readdir(count int) ([]os.FileInfo, error) {
|
||||
if count <= 0 {
|
||||
return f.Children, nil
|
||||
}
|
||||
if f.ChildrenRead+count > len(f.Children) {
|
||||
count = len(f.Children) - f.ChildrenRead
|
||||
}
|
||||
rv := f.Children[f.ChildrenRead : f.ChildrenRead+count]
|
||||
f.ChildrenRead += count
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
func (f *AssetDirectory) Stat() (os.FileInfo, error) {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// AssetFS implements http.FileSystem, allowing
|
||||
// embedded files to be served from net/http package.
|
||||
type AssetFS struct {
|
||||
// Asset should return content of file in path if exists
|
||||
Asset func(path string) ([]byte, error)
|
||||
// AssetDir should return list of files in the path
|
||||
AssetDir func(path string) ([]string, error)
|
||||
// AssetInfo should return the info of file in path if exists
|
||||
AssetInfo func(path string) (os.FileInfo, error)
|
||||
// Prefix would be prepended to http requests
|
||||
Prefix string
|
||||
}
|
||||
|
||||
func (fs *AssetFS) Open(name string) (http.File, error) {
|
||||
name = path.Join(fs.Prefix, name)
|
||||
if len(name) > 0 && name[0] == '/' {
|
||||
name = name[1:]
|
||||
}
|
||||
if b, err := fs.Asset(name); err == nil {
|
||||
timestamp := defaultFileTimestamp
|
||||
if info, err := fs.AssetInfo(name); err == nil {
|
||||
timestamp = info.ModTime()
|
||||
}
|
||||
return NewAssetFile(name, b, timestamp), nil
|
||||
}
|
||||
if children, err := fs.AssetDir(name); err == nil {
|
||||
return NewAssetDirectory(name, children, fs), nil
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
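// Note on Open's resolution order (summary comment, not in the original
// source): Open first tries fs.Asset(name) and, on success, serves the bytes
// as a file; it falls back to fs.AssetDir(name) and a directory listing only
// when the asset lookup fails, so a name registered as both resolves as a file.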
|
|
@ -0,0 +1,13 @@
|
|||
// assetfs allows packages to serve static content embedded
|
||||
// with the go-bindata tool through the standard net/http package.
|
||||
//
|
||||
// See https://github.com/jteeuwen/go-bindata for more information
|
||||
// about embedding binary data with go-bindata.
|
||||
//
|
||||
// Usage example, after running
|
||||
// $ go-bindata data/...
|
||||
// use:
|
||||
// http.Handle("/",
|
||||
// http.FileServer(
|
||||
// &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "data"}))
|
||||
package assetfs
|
97 vendor/github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs/main.go (generated, vendored, new file)
|
@ -0,0 +1,97 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const bindatafile = "bindata.go"
|
||||
|
||||
func isDebug(args []string) bool {
|
||||
flagset := flag.NewFlagSet("", flag.ContinueOnError)
|
||||
debug := flagset.Bool("debug", false, "")
|
||||
debugArgs := make([]string, 0)
|
||||
for _, arg := range args {
|
||||
if strings.HasPrefix(arg, "-debug") {
|
||||
debugArgs = append(debugArgs, arg)
|
||||
}
|
||||
}
|
||||
flagset.Parse(debugArgs)
|
||||
if debug == nil {
|
||||
return false
|
||||
}
|
||||
return *debug
|
||||
}
|
||||
|
||||
func main() {
|
||||
if _, err := exec.LookPath("go-bindata"); err != nil {
|
||||
fmt.Println("Cannot find go-bindata executable in path")
|
||||
fmt.Println("Maybe you need: go get github.com/elazarl/go-bindata-assetfs/...")
|
||||
os.Exit(1)
|
||||
}
|
||||
cmd := exec.Command("go-bindata", os.Args[1:]...)
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
in, err := os.Open(bindatafile)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "Cannot read", bindatafile, err)
|
||||
return
|
||||
}
|
||||
out, err := os.Create("bindata_assetfs.go")
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "Cannot write 'bindata_assetfs.go'", err)
|
||||
return
|
||||
}
|
||||
debug := isDebug(os.Args[1:])
|
||||
r := bufio.NewReader(in)
|
||||
done := false
|
||||
for line, isPrefix, err := r.ReadLine(); err == nil; line, isPrefix, err = r.ReadLine() {
|
||||
if !isPrefix {
|
||||
line = append(line, '\n')
|
||||
}
|
||||
if _, err := out.Write(line); err != nil {
|
||||
fmt.Fprintln(os.Stderr, "Cannot write to 'bindata_assetfs.go'", err)
|
||||
return
|
||||
}
|
||||
if !done && !isPrefix && bytes.HasPrefix(line, []byte("import (")) {
|
||||
if debug {
|
||||
fmt.Fprintln(out, "\t\"net/http\"")
|
||||
} else {
|
||||
fmt.Fprintln(out, "\t\"github.com/elazarl/go-bindata-assetfs\"")
|
||||
}
|
||||
done = true
|
||||
}
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintln(out, `
|
||||
func assetFS() http.FileSystem {
|
||||
for k := range _bintree.Children {
|
||||
return http.Dir(k)
|
||||
}
|
||||
panic("unreachable")
|
||||
}`)
|
||||
} else {
|
||||
fmt.Fprintln(out, `
|
||||
func assetFS() *assetfs.AssetFS {
|
||||
for k := range _bintree.Children {
|
||||
return &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo, Prefix: k}
|
||||
}
|
||||
panic("unreachable")
|
||||
}`)
|
||||
}
|
||||
// Close files BEFORE remove calls (don't use defer).
|
||||
in.Close()
|
||||
out.Close()
|
||||
if err := os.Remove(bindatafile); err != nil {
|
||||
fmt.Fprintln(os.Stderr, "Cannot remove", bindatafile, err)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,2 @@
|
|||
# temporary symlink for testing
|
||||
testing/data/symlink
|
|
@ -0,0 +1,22 @@
|
|||
language: go
|
||||
sudo: required
|
||||
go:
|
||||
- 1.3.3
|
||||
- 1.4.2
|
||||
- 1.5.3
|
||||
- 1.6beta2
|
||||
- tip
|
||||
env:
|
||||
- GOARCH=amd64 DOCKER_VERSION=1.7.1
|
||||
- GOARCH=386 DOCKER_VERSION=1.7.1
|
||||
- GOARCH=amd64 DOCKER_VERSION=1.8.3
|
||||
- GOARCH=386 DOCKER_VERSION=1.8.3
|
||||
- GOARCH=amd64 DOCKER_VERSION=1.9.1
|
||||
- GOARCH=386 DOCKER_VERSION=1.9.1
|
||||
install:
|
||||
- make prepare_docker
|
||||
script:
|
||||
- make test
|
||||
- DOCKER_HOST=tcp://127.0.0.1:2375 make integration
|
||||
services:
|
||||
- docker
|
|
@ -0,0 +1,124 @@
|
|||
# This is the official list of go-dockerclient authors for copyright purposes.
|
||||
|
||||
Abhishek Chanda <abhishek.becs@gmail.com>
|
||||
Adam Bell-Hanssen <adamb@aller.no>
|
||||
Adrien Kohlbecker <adrien.kohlbecker@gmail.com>
|
||||
Aldrin Leal <aldrin@leal.eng.br>
|
||||
Andreas Jaekle <andreas@jaekle.net>
|
||||
Andrews Medina <andrewsmedina@gmail.com>
|
||||
Andrey Sibiryov <kobolog@uber.com>
|
||||
Andy Goldstein <andy.goldstein@redhat.com>
|
||||
Antonio Murdaca <runcom@redhat.com>
|
||||
Artem Sidorenko <artem@2realities.com>
|
||||
Ben Marini <ben@remind101.com>
|
||||
Ben McCann <benmccann.com>
|
||||
Ben Parees <bparees@redhat.com>
|
||||
Benno van den Berg <bennovandenberg@gmail.com>
|
||||
Brendan Fosberry <brendan@codeship.com>
|
||||
Brian Lalor <blalor@bravo5.org>
|
||||
Brian P. Hamachek <brian@brianhama.com>
|
||||
Brian Palmer <brianp@instructure.com>
|
||||
Bryan Boreham <bjboreham@gmail.com>
|
||||
Burke Libbey <burke@libbey.me>
|
||||
Carlos Diaz-Padron <cpadron@mozilla.com>
|
||||
Cesar Wong <cewong@redhat.com>
|
||||
Cezar Sa Espinola <cezar.sa@corp.globo.com>
|
||||
Cheah Chu Yeow <chuyeow@gmail.com>
|
||||
cheneydeng <cheneydeng@qq.com>
|
||||
Chris Bednarski <banzaimonkey@gmail.com>
|
||||
CMGS <ilskdw@gmail.com>
|
||||
Colin Hebert <hebert.colin@gmail.com>
|
||||
Craig Jellick <craig@rancher.com>
|
||||
Dan Williams <dcbw@redhat.com>
|
||||
Daniel, Dao Quang Minh <dqminh89@gmail.com>
|
||||
Daniel Garcia <daniel@danielgarcia.info>
|
||||
Daniel Hiltgen <daniel.hiltgen@docker.com>
|
||||
Darren Shepherd <darren@rancher.com>
|
||||
Dave Choi <dave.choi@daumkakao.com>
|
||||
David Huie <dahuie@gmail.com>
|
||||
Dawn Chen <dawnchen@google.com>
|
||||
Dinesh Subhraveti <dinesh@gemini-systems.net>
|
||||
Drew Wells <drew.wells00@gmail.com>
|
||||
Ed <edrocksit@gmail.com>
|
||||
Elias G. Schneevoigt <eliasgs@gmail.com>
|
||||
Erez Horev <erez.horev@elastifile.com>
|
||||
Eric Anderson <anderson@copperegg.com>
|
||||
Ewout Prangsma <ewout@prangsma.net>
|
||||
Fabio Rehm <fgrehm@gmail.com>
|
||||
Fatih Arslan <ftharsln@gmail.com>
|
||||
Flavia Missi <flaviamissi@gmail.com>
|
||||
Francisco Souza <f@souza.cc>
|
||||
Grégoire Delattre <gregoire.delattre@gmail.com>
|
||||
Guillermo Álvarez Fernández <guillermo@cientifico.net>
|
||||
He Simei <hesimei@zju.edu.cn>
|
||||
Ivan Mikushin <i.mikushin@gmail.com>
|
||||
James Bardin <jbardin@litl.com>
|
||||
James Nugent <james@jen20.com>
|
||||
Jari Kolehmainen <jari.kolehmainen@digia.com>
|
||||
Jason Wilder <jwilder@litl.com>
|
||||
Jawher Moussa <jawher.moussa@gmail.com>
|
||||
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
|
||||
Jeff Mitchell <jeffrey.mitchell@gmail.com>
|
||||
Jeffrey Hulten <jhulten@gmail.com>
|
||||
Jen Andre <jandre@gmail.com>
|
||||
Jérôme Laurens <jeromelaurens@gmail.com>
|
||||
Johan Euphrosine <proppy@google.com>
|
||||
John Hughes <hughesj@visa.com>
|
||||
Kamil Domanski <kamil@domanski.co>
|
||||
Karan Misra <kidoman@gmail.com>
|
||||
Ken Herner <chosenken@gmail.com>
|
||||
Kim, Hirokuni <hirokuni.kim@kvh.co.jp>
|
||||
Kyle Allan <kallan357@gmail.com>
|
||||
Liron Levin <levinlir@gmail.com>
|
||||
Lior Yankovich <lior@twistlock.com>
|
||||
Liu Peng <vslene@gmail.com>
|
||||
Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
|
||||
Lucas Clemente <lucas@clemente.io>
|
||||
Lucas Weiblen <lucasweiblen@gmail.com>
|
||||
Lyon Hill <lyondhill@gmail.com>
|
||||
Mantas Matelis <mmatelis@coursera.org>
|
||||
Martin Sweeney <martin@sweeney.io>
|
||||
Máximo Cuadros Ortiz <mcuadros@gmail.com>
|
||||
Michael Schmatz <michaelschmatz@gmail.com>
|
||||
Michal Fojtik <mfojtik@redhat.com>
|
||||
Mike Dillon <mike.dillon@synctree.com>
|
||||
Mrunal Patel <mrunalp@gmail.com>
|
||||
Nguyen Sy Thanh Son <sonnst@sigma-solutions.eu>
|
||||
Nick Ethier <ncethier@gmail.com>
|
||||
Omeid Matten <public@omeid.me>
|
||||
Orivej Desh <orivej@gmx.fr>
|
||||
Paul Bellamy <paul.a.bellamy@gmail.com>
|
||||
Paul Morie <pmorie@gmail.com>
|
||||
Paul Weil <pweil@redhat.com>
|
||||
Peter Edge <peter.edge@gmail.com>
|
||||
Peter Jihoon Kim <raingrove@gmail.com>
|
||||
Phil Lu <lu@stackengine.com>
|
||||
Philippe Lafoucrière <philippe.lafoucriere@tech-angels.com>
|
||||
Rafe Colton <rafael.colton@gmail.com>
|
||||
Rob Miller <rob@kalistra.com>
|
||||
Robert Williamson <williamson.robert@gmail.com>
|
||||
Salvador Gironès <salvadorgirones@gmail.com>
|
||||
Sam Rijs <srijs@airpost.net>
|
||||
Sami Wagiaalla <swagiaal@redhat.com>
|
||||
Samuel Karp <skarp@amazon.com>
|
||||
Silas Sewell <silas@sewell.org>
|
||||
Simon Eskildsen <sirup@sirupsen.com>
|
||||
Simon Menke <simon.menke@gmail.com>
|
||||
Skolos <skolos@gopherlab.com>
|
||||
Soulou <leo@unbekandt.eu>
|
||||
Sridhar Ratnakumar <sridharr@activestate.com>
|
||||
Summer Mousa <smousa@zenoss.com>
|
||||
Sunjin Lee <styner32@gmail.com>
|
||||
Tarsis Azevedo <tarsis@corp.globo.com>
|
||||
Tim Schindler <tim@catalyst-zero.com>
|
||||
Timothy St. Clair <tstclair@redhat.com>
|
||||
Tobi Knaup <tobi@mesosphere.io>
|
||||
Tom Wilkie <tom.wilkie@gmail.com>
|
||||
Tonic <tonicbupt@gmail.com>
|
||||
ttyh061 <ttyh061@gmail.com>
|
||||
Victor Marmol <vmarmol@google.com>
|
||||
Vincenzo Prignano <vincenzo.prignano@gmail.com>
|
||||
Wiliam Souza <wiliamsouza83@gmail.com>
|
||||
Ye Yin <eyniy@qq.com>
|
||||
Yu, Zou <zouyu7@huawei.com>
|
||||
Yuriy Bogdanov <chinsay@gmail.com>
|
|
@ -0,0 +1,6 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
You can find the Docker license at the following link:
|
||||
https://raw.githubusercontent.com/docker/docker/master/LICENSE
|
|
@ -0,0 +1,22 @@
|
|||
Copyright (c) 2015, go-dockerclient authors
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,60 @@
|
|||
.PHONY: \
|
||||
all \
|
||||
vendor \
|
||||
lint \
|
||||
vet \
|
||||
fmt \
|
||||
fmtcheck \
|
||||
pretest \
|
||||
test \
|
||||
integration \
|
||||
cov \
|
||||
clean
|
||||
|
||||
SRCS = $(shell git ls-files '*.go' | grep -v '^external/')
|
||||
PKGS = ./. ./testing
|
||||
|
||||
all: test
|
||||
|
||||
vendor:
|
||||
@ go get -v github.com/mjibson/party
|
||||
party -d external -c -u
|
||||
|
||||
lint:
|
||||
@ go get -v github.com/golang/lint/golint
|
||||
$(foreach file,$(SRCS),golint $(file) || exit;)
|
||||
|
||||
vet:
|
||||
@-go get -v golang.org/x/tools/cmd/vet
|
||||
$(foreach pkg,$(PKGS),go vet $(pkg);)
|
||||
|
||||
fmt:
|
||||
gofmt -w $(SRCS)
|
||||
|
||||
fmtcheck:
|
||||
$(foreach file,$(SRCS),gofmt -d $(file);)
|
||||
|
||||
prepare_docker:
|
||||
sudo stop docker
|
||||
sudo rm -rf /var/lib/docker
|
||||
sudo rm -f `which docker`
|
||||
sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
|
||||
echo "deb https://apt.dockerproject.org/repo ubuntu-trusty main" | sudo tee /etc/apt/sources.list.d/docker.list
|
||||
sudo apt-get update
|
||||
sudo apt-get install docker-engine=$(DOCKER_VERSION)-0~$(shell lsb_release -cs) -y --force-yes
|
||||
|
||||
pretest: lint vet fmtcheck
|
||||
|
||||
test: pretest
|
||||
$(foreach pkg,$(PKGS),go test $(pkg) || exit;)
|
||||
|
||||
integration:
|
||||
go test -tags docker_integration -run TestIntegration -v
|
||||
|
||||
cov:
|
||||
@ go get -v github.com/axw/gocov/gocov
|
||||
@ go get golang.org/x/tools/cmd/cover
|
||||
gocov test | gocov report
|
||||
|
||||
clean:
|
||||
$(foreach pkg,$(PKGS),go clean $(pkg) || exit;)
|
|
@ -0,0 +1,105 @@
|
|||
# go-dockerclient
|
||||
|
||||
[![Travis](https://img.shields.io/travis/fsouza/go-dockerclient.svg?style=flat-square)](https://travis-ci.org/fsouza/go-dockerclient)
|
||||
[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient)
|
||||
|
||||
This package presents a client for the Docker remote API. It also provides
|
||||
support for the extensions in the [Swarm API](https://docs.docker.com/swarm/api/swarm-api/).
|
||||
|
||||
This package also provides support for docker's network API, which is a simple
|
||||
passthrough to the libnetwork remote API. Note that docker's network API is
|
||||
only available in docker 1.8 and above, and only enabled in docker if
|
||||
DOCKER_EXPERIMENTAL is defined during the docker build process.
|
||||
|
||||
For more details, check the [remote API documentation](http://docs.docker.com/engine/reference/api/docker_remote_api/).
|
||||
|
||||
## Vendoring
|
||||
|
||||
If you are having issues with Go 1.5 and have `GO15VENDOREXPERIMENT` set with an application that has go-dockerclient vendored,
|
||||
please update your vendoring of go-dockerclient :) We recently moved the `vendor` directory to `external` so that go-dockerclient
|
||||
is compatible with this configuration. See [338](https://github.com/fsouza/go-dockerclient/issues/338) and [339](https://github.com/fsouza/go-dockerclient/pull/339)
|
||||
for details.
|
||||
|
||||
## Example
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/fsouza/go-dockerclient"
|
||||
)
|
||||
|
||||
func main() {
|
||||
endpoint := "unix:///var/run/docker.sock"
|
||||
client, _ := docker.NewClient(endpoint)
|
||||
imgs, _ := client.ListImages(docker.ListImagesOptions{All: false})
|
||||
for _, img := range imgs {
|
||||
fmt.Println("ID: ", img.ID)
|
||||
fmt.Println("RepoTags: ", img.RepoTags)
|
||||
fmt.Println("Created: ", img.Created)
|
||||
fmt.Println("Size: ", img.Size)
|
||||
fmt.Println("VirtualSize: ", img.VirtualSize)
|
||||
fmt.Println("ParentId: ", img.ParentID)
|
||||
}
|
||||
}
|
||||
```
|
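In real code you will usually want to check the errors these calls return instead of discarding them. A minimal variation of the example above with error handling (same API as the example; the log messages are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	endpoint := "unix:///var/run/docker.sock"
	client, err := docker.NewClient(endpoint)
	if err != nil {
		log.Fatalf("could not create Docker client: %v", err)
	}
	imgs, err := client.ListImages(docker.ListImagesOptions{All: false})
	if err != nil {
		log.Fatalf("could not list images: %v", err)
	}
	for _, img := range imgs {
		fmt.Println("ID: ", img.ID)
	}
}
```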
||||
|
||||
## Using with TLS
|
||||
|
||||
In order to instantiate a client for a TLS-enabled daemon, use NewTLSClient, passing the endpoint and the paths to the key and certificates as parameters.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/fsouza/go-dockerclient"
|
||||
)
|
||||
|
||||
func main() {
|
||||
endpoint := "tcp://[ip]:[port]"
|
||||
path := os.Getenv("DOCKER_CERT_PATH")
|
||||
ca := fmt.Sprintf("%s/ca.pem", path)
|
||||
cert := fmt.Sprintf("%s/cert.pem", path)
|
||||
key := fmt.Sprintf("%s/key.pem", path)
|
||||
client, _ := docker.NewTLSClient(endpoint, cert, key, ca)
|
||||
// use client
|
||||
}
|
||||
```
|
||||
|
||||
If using [docker-machine](https://docs.docker.com/machine/), or another application that exports environment variables
|
||||
`DOCKER_HOST`, `DOCKER_TLS_VERIFY`, and `DOCKER_CERT_PATH`, you can use NewClientFromEnv.
|
||||
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/fsouza/go-dockerclient"
|
||||
)
|
||||
|
||||
func main() {
|
||||
client, _ := docker.NewClientFromEnv()
|
||||
// use client
|
||||
}
|
||||
```
|
||||
|
||||
See the documentation for more details.
|
||||
|
||||
## Developing
|
||||
|
||||
All development commands can be seen in the [Makefile](Makefile).
|
||||
|
||||
Committed code must pass:
|
||||
|
||||
* [golint](https://github.com/golang/lint)
|
||||
* [go vet](https://godoc.org/golang.org/x/tools/cmd/vet)
|
||||
* [gofmt](https://golang.org/cmd/gofmt)
|
||||
* [go test](https://golang.org/cmd/go/#hdr-Test_packages)
|
||||
|
||||
Running `make test` will check all of these. If your editor does not automatically call gofmt, `make fmt` will format all go files in this repository.
|
|
@ -0,0 +1,136 @@
|
|||
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ErrCannotParseDockercfg is the error returned by NewAuthConfigurations when the dockercfg cannot be parsed.
|
||||
var ErrCannotParseDockercfg = errors.New("Failed to read authentication from dockercfg")
|
||||
|
||||
// AuthConfiguration represents authentication options to use in the PushImage
|
||||
// method. It represents the authentication in the Docker index server.
|
||||
type AuthConfiguration struct {
|
||||
Username string `json:"username,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
Email string `json:"email,omitempty"`
|
||||
ServerAddress string `json:"serveraddress,omitempty"`
|
||||
}
|
||||
|
||||
// AuthConfigurations represents authentication options to use for the
|
||||
// PushImage method, accommodating the new X-Registry-Config header.
|
||||
type AuthConfigurations struct {
|
||||
Configs map[string]AuthConfiguration `json:"configs"`
|
||||
}
|
||||
|
||||
// AuthConfigurations119 is used to serialize a set of AuthConfigurations
|
||||
// for Docker API >= 1.19.
|
||||
type AuthConfigurations119 map[string]AuthConfiguration
|
||||
|
||||
// dockerConfig represents a registry authentication configuration from the
|
||||
// .dockercfg file.
|
||||
type dockerConfig struct {
|
||||
Auth string `json:"auth"`
|
||||
Email string `json:"email"`
|
||||
}
|
||||
|
||||
// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from the
|
||||
// ~/.dockercfg file.
|
||||
func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) {
|
||||
var r io.Reader
|
||||
var err error
|
||||
p := path.Join(os.Getenv("HOME"), ".docker", "config.json")
|
||||
r, err = os.Open(p)
|
||||
if err != nil {
|
||||
p := path.Join(os.Getenv("HOME"), ".dockercfg")
|
||||
r, err = os.Open(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return NewAuthConfigurations(r)
|
||||
}
|
||||
|
||||
// NewAuthConfigurations returns AuthConfigurations from a JSON encoded string in the
|
||||
// same format as the .dockercfg file.
|
||||
func NewAuthConfigurations(r io.Reader) (*AuthConfigurations, error) {
|
||||
var auth *AuthConfigurations
|
||||
confs, err := parseDockerConfig(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
auth, err = authConfigs(confs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return auth, nil
|
||||
}
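// Illustrative usage (comment added for clarity, not in the original source;
// the base64 payload below encodes the hypothetical credentials "user:pass"):
//
//	r := strings.NewReader(`{"https://index.docker.io/v1/":{"auth":"dXNlcjpwYXNz","email":"user@example.com"}}`)
//	auths, err := NewAuthConfigurations(r)
//	// on success, auths.Configs["https://index.docker.io/v1/"].Username == "user"
//	// and .Password == "pass"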
|
||||
|
||||
func parseDockerConfig(r io.Reader) (map[string]dockerConfig, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(r)
|
||||
byteData := buf.Bytes()
|
||||
|
||||
var confsWrapper map[string]map[string]dockerConfig
|
||||
if err := json.Unmarshal(byteData, &confsWrapper); err == nil {
|
||||
if confs, ok := confsWrapper["auths"]; ok {
|
||||
return confs, nil
|
||||
}
|
||||
}
|
||||
|
||||
var confs map[string]dockerConfig
|
||||
if err := json.Unmarshal(byteData, &confs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return confs, nil
|
||||
}
|
||||
|
||||
// authConfigs converts a dockerConfigs map to a AuthConfigurations object.
|
||||
func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) {
|
||||
c := &AuthConfigurations{
|
||||
Configs: make(map[string]AuthConfiguration),
|
||||
}
|
||||
for reg, conf := range confs {
|
||||
data, err := base64.StdEncoding.DecodeString(conf.Auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
userpass := strings.SplitN(string(data), ":", 2)
|
||||
if len(userpass) != 2 {
|
||||
return nil, ErrCannotParseDockercfg
|
||||
}
|
||||
c.Configs[reg] = AuthConfiguration{
|
||||
Email: conf.Email,
|
||||
Username: userpass[0],
|
||||
Password: userpass[1],
|
||||
ServerAddress: reg,
|
||||
}
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// AuthCheck validates the given credentials. It returns nil if successful.
|
||||
//
|
||||
// See https://goo.gl/m2SleN for more details.
|
||||
func (c *Client) AuthCheck(conf *AuthConfiguration) error {
|
||||
if conf == nil {
|
||||
return fmt.Errorf("conf is nil")
|
||||
}
|
||||
resp, err := c.do("POST", "/auth", doOptions{data: conf})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp.Body.Close()
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,43 @@
|
|||
// Copyright 2014 go-dockerclient authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package docker
|
||||
|
||||
import "fmt"
|
||||
|
||||
// ChangeType is a type for constants indicating the type of change
|
||||
// in a container
|
||||
type ChangeType int
|
||||
|
||||
const (
|
||||
// ChangeModify is the ChangeType for container modifications
|
||||
ChangeModify ChangeType = iota
|
||||
|
||||
// ChangeAdd is the ChangeType for additions to a container
|
||||
ChangeAdd
|
||||
|
||||
// ChangeDelete is the ChangeType for deletions from a container
|
||||
ChangeDelete
|
||||
)
|
||||
|
||||
// Change represents a change in a container.
|
||||
//
|
||||
// See https://goo.gl/9GsTIF for more details.
|
||||
type Change struct {
|
||||
Path string
|
||||
Kind ChangeType
|
||||
}
|
||||
|
||||
func (change *Change) String() string {
|
||||
var kind string
|
||||
switch change.Kind {
|
||||
case ChangeModify:
|
||||
kind = "C"
|
||||
case ChangeAdd:
|
||||
kind = "A"
|
||||
case ChangeDelete:
|
||||
kind = "D"
|
||||
}
|
||||
return fmt.Sprintf("%s %s", kind, change.Path)
|
||||
}
|
|
@ -0,0 +1,928 @@
|
|||
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package docker provides a client for the Docker remote API.
|
||||
//
|
||||
// See https://goo.gl/G3plxW for more details on the remote API.
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp"
|
||||
)
|
||||
|
||||
const userAgent = "go-dockerclient"
|
||||
|
||||
var (
|
||||
// ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL.
|
||||
ErrInvalidEndpoint = errors.New("invalid endpoint")
|
||||
|
||||
// ErrConnectionRefused is returned when the client cannot connect to the given endpoint.
|
||||
ErrConnectionRefused = errors.New("cannot connect to Docker endpoint")
|
||||
|
||||
apiVersion112, _ = NewAPIVersion("1.12")
|
||||
|
||||
apiVersion119, _ = NewAPIVersion("1.19")
|
||||
)
|
||||
|
||||
// APIVersion is an internal representation of a version of the Remote API.
|
||||
type APIVersion []int
|
||||
|
||||
// NewAPIVersion returns an instance of APIVersion for the given string.
|
||||
//
|
||||
// The given string must be in the form <major>.<minor>.<patch>, where <major>,
|
||||
// <minor> and <patch> are integer numbers.
|
||||
func NewAPIVersion(input string) (APIVersion, error) {
|
||||
if !strings.Contains(input, ".") {
|
||||
return nil, fmt.Errorf("Unable to parse version %q", input)
|
||||
}
|
||||
raw := strings.Split(input, "-")
|
||||
arr := strings.Split(raw[0], ".")
|
||||
ret := make(APIVersion, len(arr))
|
||||
var err error
|
||||
for i, val := range arr {
|
||||
ret[i], err = strconv.Atoi(val)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, val)
|
||||
}
|
||||
}
|
||||
return ret, nil
|
||||
}
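// Illustrative comparison (comment added for clarity, not in the original
// source): parsing versions into integer slices makes comparisons numeric
// rather than lexical:
//
//	v19, _ := NewAPIVersion("1.9")
//	v112, _ := NewAPIVersion("1.12")
//	v19.LessThan(v112) // true, even though "1.9" > "1.12" as strings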
|
||||
|
||||
func (version APIVersion) String() string {
|
||||
var str string
|
||||
for i, val := range version {
|
||||
str += strconv.Itoa(val)
|
||||
if i < len(version)-1 {
|
||||
str += "."
|
||||
}
|
||||
}
|
||||
return str
|
||||
}
|
||||
|
||||
// LessThan is a function for comparing APIVersion structs
|
||||
func (version APIVersion) LessThan(other APIVersion) bool {
|
||||
return version.compare(other) < 0
|
||||
}
|
||||
|
||||
// LessThanOrEqualTo is a function for comparing APIVersion structs
|
||||
func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool {
|
||||
return version.compare(other) <= 0
|
||||
}
|
||||
|
||||
// GreaterThan is a function for comparing APIVersion structs
|
||||
func (version APIVersion) GreaterThan(other APIVersion) bool {
|
||||
return version.compare(other) > 0
|
||||
}
|
||||
|
||||
// GreaterThanOrEqualTo is a function for comparing APIVersion structs
|
||||
func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool {
|
||||
return version.compare(other) >= 0
|
||||
}
|
||||
|
||||
func (version APIVersion) compare(other APIVersion) int {
|
||||
for i, v := range version {
|
||||
if i <= len(other)-1 {
|
||||
otherVersion := other[i]
|
||||
|
||||
if v < otherVersion {
|
||||
return -1
|
||||
} else if v > otherVersion {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(version) > len(other) {
|
||||
return 1
|
||||
}
|
||||
if len(version) < len(other) {
|
||||
return -1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Client is the basic type of this package. It provides methods for
|
||||
// interaction with the API.
|
||||
type Client struct {
|
||||
SkipServerVersionCheck bool
|
||||
HTTPClient *http.Client
|
||||
TLSConfig *tls.Config
|
||||
Dialer *net.Dialer
|
||||
|
||||
endpoint string
|
||||
endpointURL *url.URL
|
||||
eventMonitor *eventMonitoringState
|
||||
requestedAPIVersion APIVersion
|
||||
serverAPIVersion APIVersion
|
||||
expectedAPIVersion APIVersion
|
||||
unixHTTPClient *http.Client
|
||||
}
|
||||
|
||||
// NewClient returns a Client instance ready for communication with the given
|
||||
// server endpoint. It will use the latest remote API version available in the
|
||||
// server.
|
||||
func NewClient(endpoint string) (*Client, error) {
|
||||
client, err := NewVersionedClient(endpoint, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.SkipServerVersionCheck = true
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// NewTLSClient returns a Client instance ready for TLS communications with the given
|
||||
// server endpoint, key and certificates. It will use the latest remote API version
|
||||
// available in the server.
|
||||
func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) {
|
||||
client, err := NewVersionedTLSClient(endpoint, cert, key, ca, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.SkipServerVersionCheck = true
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the given
|
||||
// server endpoint, key and certificates (passed inline to the function as opposed to being
|
||||
// read from a local file). It will use the latest remote API version available in the server.
|
||||
func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) {
|
||||
client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.SkipServerVersionCheck = true
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// NewVersionedClient returns a Client instance ready for communication with
|
||||
// the given server endpoint, using a specific remote API version.
|
||||
func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) {
|
||||
u, err := parseEndpoint(endpoint, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var requestedAPIVersion APIVersion
|
||||
if strings.Contains(apiVersionString, ".") {
|
||||
requestedAPIVersion, err = NewAPIVersion(apiVersionString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &Client{
|
||||
HTTPClient: cleanhttp.DefaultClient(),
|
||||
Dialer: &net.Dialer{},
|
||||
endpoint: endpoint,
|
||||
endpointURL: u,
|
||||
eventMonitor: new(eventMonitoringState),
|
||||
requestedAPIVersion: requestedAPIVersion,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewVersionnedTLSClient has been DEPRECATED, please use NewVersionedTLSClient.
|
||||
func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
|
||||
return NewVersionedTLSClient(endpoint, cert, key, ca, apiVersionString)
|
||||
}
|
||||
|
||||
// NewVersionedTLSClient returns a Client instance ready for TLS communications with the given
|
||||
// server endpoint, key and certificates, using a specific remote API version.
|
||||
func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
|
||||
certPEMBlock, err := ioutil.ReadFile(cert)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
keyPEMBlock, err := ioutil.ReadFile(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
caPEMCert, err := ioutil.ReadFile(ca)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString)
|
||||
}
|
||||
|
||||
// NewClientFromEnv returns a Client instance ready for communication created from
|
||||
// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH.
|
||||
//
|
||||
// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
|
||||
// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
|
||||
func NewClientFromEnv() (*Client, error) {
|
||||
client, err := NewVersionedClientFromEnv("")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.SkipServerVersionCheck = true
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// NewVersionedClientFromEnv returns a Client instance ready for TLS communications created from
|
||||
// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH,
|
||||
// and using a specific remote API version.
|
||||
//
|
||||
// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
|
||||
// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
|
||||
func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) {
|
||||
dockerEnv, err := getDockerEnv()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dockerHost := dockerEnv.dockerHost
|
||||
if dockerEnv.dockerTLSVerify {
|
||||
parts := strings.SplitN(dockerEnv.dockerHost, "://", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost)
|
||||
}
|
||||
cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem")
|
||||
key := filepath.Join(dockerEnv.dockerCertPath, "key.pem")
|
||||
ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem")
|
||||
return NewVersionedTLSClient(dockerEnv.dockerHost, cert, key, ca, apiVersionString)
|
||||
}
|
||||
return NewVersionedClient(dockerEnv.dockerHost, apiVersionString)
|
||||
}
|
||||
|
||||
// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the given
|
||||
// server endpoint, key and certificates (passed inline to the function as opposed to being
|
||||
// read from a local file), using a specific remote API version.
|
||||
func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) {
|
||||
u, err := parseEndpoint(endpoint, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var requestedAPIVersion APIVersion
|
||||
if strings.Contains(apiVersionString, ".") {
|
||||
requestedAPIVersion, err = NewAPIVersion(apiVersionString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if certPEMBlock == nil || keyPEMBlock == nil {
|
||||
return nil, errors.New("Both cert and key are required")
|
||||
}
|
||||
tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tlsConfig := &tls.Config{Certificates: []tls.Certificate{tlsCert}}
|
||||
if caPEMCert == nil {
|
||||
tlsConfig.InsecureSkipVerify = true
|
||||
} else {
|
||||
caPool := x509.NewCertPool()
|
||||
if !caPool.AppendCertsFromPEM(caPEMCert) {
|
||||
return nil, errors.New("Could not add RootCA pem")
|
||||
}
|
||||
tlsConfig.RootCAs = caPool
|
||||
}
|
||||
tr := cleanhttp.DefaultTransport()
|
||||
tr.TLSClientConfig = tlsConfig
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Client{
|
||||
HTTPClient: &http.Client{Transport: tr},
|
||||
TLSConfig: tlsConfig,
|
||||
Dialer: &net.Dialer{},
|
||||
endpoint: endpoint,
|
||||
endpointURL: u,
|
||||
eventMonitor: new(eventMonitoringState),
|
||||
requestedAPIVersion: requestedAPIVersion,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *Client) checkAPIVersion() error {
|
||||
serverAPIVersionString, err := c.getServerAPIVersionString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c.requestedAPIVersion == nil {
|
||||
c.expectedAPIVersion = c.serverAPIVersion
|
||||
} else {
|
||||
c.expectedAPIVersion = c.requestedAPIVersion
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Endpoint returns the current endpoint. It's useful for getting the endpoint
|
||||
// when using functions that get this data from the environment (like
|
||||
// NewClientFromEnv).
|
||||
func (c *Client) Endpoint() string {
|
||||
return c.endpoint
|
||||
}
|
||||
|
||||
// Ping pings the docker server
|
||||
//
|
||||
// See https://goo.gl/kQCfJj for more details.
|
||||
func (c *Client) Ping() error {
|
||||
path := "/_ping"
|
||||
resp, err := c.do("GET", path, doOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return newError(resp)
|
||||
}
|
||||
resp.Body.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) getServerAPIVersionString() (version string, err error) {
|
||||
resp, err := c.do("GET", "/version", doOptions{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", resp.StatusCode)
|
||||
}
|
||||
var versionResponse map[string]interface{}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&versionResponse); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if version, ok := (versionResponse["ApiVersion"]).(string); ok {
|
||||
return version, nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
type doOptions struct {
|
||||
data interface{}
|
||||
forceJSON bool
|
||||
headers map[string]string
|
||||
}
|
||||
|
||||
func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, error) {
|
||||
var params io.Reader
|
||||
if doOptions.data != nil || doOptions.forceJSON {
|
||||
buf, err := json.Marshal(doOptions.data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
params = bytes.NewBuffer(buf)
|
||||
}
|
||||
if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
|
||||
err := c.checkAPIVersion()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
httpClient := c.HTTPClient
|
||||
protocol := c.endpointURL.Scheme
|
||||
var u string
|
||||
if protocol == "unix" {
|
||||
httpClient = c.unixClient()
|
||||
u = c.getFakeUnixURL(path)
|
||||
} else {
|
||||
u = c.getURL(path)
|
||||
}
|
||||
req, err := http.NewRequest(method, u, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("User-Agent", userAgent)
|
||||
if doOptions.data != nil {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
} else if method == "POST" {
|
||||
req.Header.Set("Content-Type", "plain/text")
|
||||
}
|
||||
|
||||
for k, v := range doOptions.headers {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "connection refused") {
|
||||
return nil, ErrConnectionRefused
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
|
||||
return nil, newError(resp)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
type streamOptions struct {
|
||||
setRawTerminal bool
|
||||
rawJSONStream bool
|
||||
useJSONDecoder bool
|
||||
headers map[string]string
|
||||
in io.Reader
|
||||
stdout io.Writer
|
||||
stderr io.Writer
|
||||
// timeout is the initial connection timeout
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
func (c *Client) stream(method, path string, streamOptions streamOptions) error {
|
||||
if (method == "POST" || method == "PUT") && streamOptions.in == nil {
|
||||
streamOptions.in = bytes.NewReader(nil)
|
||||
}
|
||||
if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
|
||||
err := c.checkAPIVersion()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
req, err := http.NewRequest(method, c.getURL(path), streamOptions.in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("User-Agent", userAgent)
|
||||
if method == "POST" {
|
||||
req.Header.Set("Content-Type", "plain/text")
|
||||
}
|
||||
for key, val := range streamOptions.headers {
|
||||
req.Header.Set(key, val)
|
||||
}
|
||||
var resp *http.Response
|
||||
protocol := c.endpointURL.Scheme
|
||||
address := c.endpointURL.Path
|
||||
if streamOptions.stdout == nil {
|
||||
streamOptions.stdout = ioutil.Discard
|
||||
}
|
||||
if streamOptions.stderr == nil {
|
||||
streamOptions.stderr = ioutil.Discard
|
||||
}
|
||||
if protocol == "unix" {
|
||||
dial, err := c.Dialer.Dial(protocol, address)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dial.Close()
|
||||
breader := bufio.NewReader(dial)
|
||||
err = req.Write(dial)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// ReadResponse may hang if server does not reply
|
||||
if streamOptions.timeout > 0 {
|
||||
dial.SetDeadline(time.Now().Add(streamOptions.timeout))
|
||||
}
|
||||
|
||||
if resp, err = http.ReadResponse(breader, req); err != nil {
|
||||
// Cancel timeout for future I/O operations
|
||||
if streamOptions.timeout > 0 {
|
||||
dial.SetDeadline(time.Time{})
|
||||
}
|
||||
if strings.Contains(err.Error(), "connection refused") {
|
||||
return ErrConnectionRefused
|
||||
}
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if resp, err = c.HTTPClient.Do(req); err != nil {
|
||||
if strings.Contains(err.Error(), "connection refused") {
|
||||
return ErrConnectionRefused
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
|
||||
return newError(resp)
|
||||
}
|
||||
if streamOptions.useJSONDecoder || resp.Header.Get("Content-Type") == "application/json" {
|
||||
// if we want to get raw json stream, just copy it back to output
|
||||
// without decoding it
|
||||
if streamOptions.rawJSONStream {
|
||||
_, err = io.Copy(streamOptions.stdout, resp.Body)
|
||||
return err
|
||||
}
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
for {
|
||||
var m jsonMessage
|
||||
if err := dec.Decode(&m); err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
if m.Stream != "" {
|
||||
fmt.Fprint(streamOptions.stdout, m.Stream)
|
||||
} else if m.Progress != "" {
|
||||
fmt.Fprintf(streamOptions.stdout, "%s %s\r", m.Status, m.Progress)
|
||||
} else if m.Error != "" {
|
||||
return errors.New(m.Error)
|
||||
}
|
||||
if m.Status != "" {
|
||||
fmt.Fprintln(streamOptions.stdout, m.Status)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if streamOptions.setRawTerminal {
|
||||
_, err = io.Copy(streamOptions.stdout, resp.Body)
|
||||
} else {
|
||||
_, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type hijackOptions struct {
|
||||
success chan struct{}
|
||||
setRawTerminal bool
|
||||
in io.Reader
|
||||
stdout io.Writer
|
||||
stderr io.Writer
|
||||
data interface{}
|
||||
}
|
||||
|
||||
// CloseWaiter is an interface for closing an underlying resource and then
// waiting for it to finish processing.
type CloseWaiter interface {
|
||||
io.Closer
|
||||
Wait() error
|
||||
}
|
||||
|
||||
type waiterFunc func() error
|
||||
|
||||
func (w waiterFunc) Wait() error { return w() }
|
||||
|
||||
type closerFunc func() error
|
||||
|
||||
func (c closerFunc) Close() error { return c() }
|
||||
|
||||
func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (CloseWaiter, error) {
|
||||
if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
|
||||
err := c.checkAPIVersion()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var params io.Reader
|
||||
if hijackOptions.data != nil {
|
||||
buf, err := json.Marshal(hijackOptions.data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
params = bytes.NewBuffer(buf)
|
||||
}
|
||||
req, err := http.NewRequest(method, c.getURL(path), params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "plain/text")
|
||||
req.Header.Set("Connection", "Upgrade")
|
||||
req.Header.Set("Upgrade", "tcp")
|
||||
protocol := c.endpointURL.Scheme
|
||||
address := c.endpointURL.Path
|
||||
if protocol != "unix" {
|
||||
protocol = "tcp"
|
||||
address = c.endpointURL.Host
|
||||
}
|
||||
var dial net.Conn
|
||||
if c.TLSConfig != nil && protocol != "unix" {
|
||||
dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
dial, err = c.Dialer.Dial(protocol, address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
errs := make(chan error)
|
||||
quit := make(chan struct{})
|
||||
go func() {
|
||||
clientconn := httputil.NewClientConn(dial, nil)
|
||||
defer clientconn.Close()
|
||||
clientconn.Do(req)
|
||||
if hijackOptions.success != nil {
|
||||
hijackOptions.success <- struct{}{}
|
||||
<-hijackOptions.success
|
||||
}
|
||||
rwc, br := clientconn.Hijack()
|
||||
defer rwc.Close()
|
||||
|
||||
errChanOut := make(chan error, 1)
|
||||
errChanIn := make(chan error, 1)
|
||||
if hijackOptions.stdout == nil && hijackOptions.stderr == nil {
|
||||
close(errChanOut)
|
||||
} else {
|
||||
// Only copy if hijackOptions.stdout and/or hijackOptions.stderr is actually set.
|
||||
// Otherwise, if the only stream you care about is stdin, your attach session
|
||||
// will "hang" until the container terminates, even though you're not reading
|
||||
// stdout/stderr
|
||||
if hijackOptions.stdout == nil {
|
||||
hijackOptions.stdout = ioutil.Discard
|
||||
}
|
||||
if hijackOptions.stderr == nil {
|
||||
hijackOptions.stderr = ioutil.Discard
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
if hijackOptions.in != nil {
|
||||
if closer, ok := hijackOptions.in.(io.Closer); ok {
|
||||
closer.Close()
|
||||
}
|
||||
errChanIn <- nil
|
||||
}
|
||||
}()
|
||||
|
||||
var err error
|
||||
if hijackOptions.setRawTerminal {
|
||||
_, err = io.Copy(hijackOptions.stdout, br)
|
||||
} else {
|
||||
_, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br)
|
||||
}
|
||||
errChanOut <- err
|
||||
}()
|
||||
}
|
||||
|
||||
go func() {
|
||||
var err error
|
||||
if hijackOptions.in != nil {
|
||||
_, err = io.Copy(rwc, hijackOptions.in)
|
||||
}
|
||||
errChanIn <- err
|
||||
rwc.(interface {
|
||||
CloseWrite() error
|
||||
}).CloseWrite()
|
||||
}()
|
||||
|
||||
var errIn error
|
||||
select {
|
||||
case errIn = <-errChanIn:
|
||||
case <-quit:
|
||||
return
|
||||
}
|
||||
|
||||
var errOut error
|
||||
select {
|
||||
case errOut = <-errChanOut:
|
||||
case <-quit:
|
||||
return
|
||||
}
|
||||
|
||||
if errIn != nil {
|
||||
errs <- errIn
|
||||
} else {
|
||||
errs <- errOut
|
||||
}
|
||||
}()
|
||||
|
||||
return struct {
|
||||
closerFunc
|
||||
waiterFunc
|
||||
}{
|
||||
closerFunc(func() error { close(quit); return nil }),
|
||||
waiterFunc(func() error { return <-errs }),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *Client) getURL(path string) string {
|
||||
urlStr := strings.TrimRight(c.endpointURL.String(), "/")
|
||||
if c.endpointURL.Scheme == "unix" {
|
||||
urlStr = ""
|
||||
}
|
||||
if c.requestedAPIVersion != nil {
|
||||
return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
|
||||
}
|
||||
return fmt.Sprintf("%s%s", urlStr, path)
|
||||
}
|
||||
|
||||
// getFakeUnixURL returns the URL needed to make an HTTP request over a UNIX
|
||||
// domain socket to the given path.
|
||||
func (c *Client) getFakeUnixURL(path string) string {
|
||||
u := *c.endpointURL // Copy.
|
||||
|
||||
// Override URL so that net/http will not complain.
|
||||
u.Scheme = "http"
|
||||
u.Host = "unix.sock" // Doesn't matter what this is - it's not used.
|
||||
u.Path = ""
|
||||
urlStr := strings.TrimRight(u.String(), "/")
|
||||
if c.requestedAPIVersion != nil {
|
||||
return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
|
||||
}
|
||||
return fmt.Sprintf("%s%s", urlStr, path)
|
||||
}
|
||||
|
||||
func (c *Client) unixClient() *http.Client {
|
||||
if c.unixHTTPClient != nil {
|
||||
return c.unixHTTPClient
|
||||
}
|
||||
socketPath := c.endpointURL.Path
|
||||
tr := &http.Transport{
|
||||
Dial: func(network, addr string) (net.Conn, error) {
|
||||
return c.Dialer.Dial("unix", socketPath)
|
||||
},
|
||||
}
|
||||
cleanhttp.SetTransportFinalizer(tr)
|
||||
c.unixHTTPClient = &http.Client{Transport: tr}
|
||||
return c.unixHTTPClient
|
||||
}
|
||||
|
||||
type jsonMessage struct {
|
||||
Status string `json:"status,omitempty"`
|
||||
Progress string `json:"progress,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
Stream string `json:"stream,omitempty"`
|
||||
}
|
||||
|
||||
func queryString(opts interface{}) string {
|
||||
if opts == nil {
|
||||
return ""
|
||||
}
|
||||
value := reflect.ValueOf(opts)
|
||||
if value.Kind() == reflect.Ptr {
|
||||
value = value.Elem()
|
||||
}
|
||||
if value.Kind() != reflect.Struct {
|
||||
return ""
|
||||
}
|
||||
items := url.Values(map[string][]string{})
|
||||
for i := 0; i < value.NumField(); i++ {
|
||||
field := value.Type().Field(i)
|
||||
if field.PkgPath != "" {
|
||||
continue
|
||||
}
|
||||
key := field.Tag.Get("qs")
|
||||
if key == "" {
|
||||
key = strings.ToLower(field.Name)
|
||||
} else if key == "-" {
|
||||
continue
|
||||
}
|
||||
addQueryStringValue(items, key, value.Field(i))
|
||||
}
|
||||
return items.Encode()
|
||||
}
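// Illustrative example (hypothetical options struct, not from this package):
//
//	type listOpts struct {
//		All   bool   `qs:"all"`
//		Limit int    `qs:"limit"`
//		Name  string `qs:"-"`
//	}
//
//	queryString(listOpts{All: true, Limit: 5}) // "all=1&limit=5"
//
// Fields tagged "-" are skipped, untagged fields use the lowercased field
// name, and zero values are omitted.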
|
||||
|
||||
func addQueryStringValue(items url.Values, key string, v reflect.Value) {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
if v.Bool() {
|
||||
items.Add(key, "1")
|
||||
}
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
if v.Int() > 0 {
|
||||
items.Add(key, strconv.FormatInt(v.Int(), 10))
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
if v.Float() > 0 {
|
||||
items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64))
|
||||
}
|
||||
case reflect.String:
|
||||
if v.String() != "" {
|
||||
items.Add(key, v.String())
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if !v.IsNil() {
|
||||
if b, err := json.Marshal(v.Interface()); err == nil {
|
||||
items.Add(key, string(b))
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
if len(v.MapKeys()) > 0 {
|
||||
if b, err := json.Marshal(v.Interface()); err == nil {
|
||||
items.Add(key, string(b))
|
||||
}
|
||||
}
|
||||
case reflect.Array, reflect.Slice:
|
||||
vLen := v.Len()
|
||||
if vLen > 0 {
|
||||
for i := 0; i < vLen; i++ {
|
||||
addQueryStringValue(items, key, v.Index(i))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Error represents a failure in an API call: it carries the HTTP status code and the response message.
|
||||
type Error struct {
|
||||
Status int
|
||||
Message string
|
||||
}
|
||||
|
||||
func newError(resp *http.Response) *Error {
|
||||
defer resp.Body.Close()
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return &Error{Status: resp.StatusCode, Message: fmt.Sprintf("cannot read body, err: %v", err)}
|
||||
}
|
||||
return &Error{Status: resp.StatusCode, Message: string(data)}
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("API error (%d): %s", e.Status, e.Message)
|
||||
}
|
||||
|
||||
func parseEndpoint(endpoint string, tls bool) (*url.URL, error) {
|
||||
if endpoint != "" && !strings.Contains(endpoint, "://") {
|
||||
endpoint = "tcp://" + endpoint
|
||||
}
|
||||
u, err := url.Parse(endpoint)
|
||||
if err != nil {
|
||||
return nil, ErrInvalidEndpoint
|
||||
}
|
||||
if tls {
|
||||
u.Scheme = "https"
|
||||
}
|
||||
switch u.Scheme {
|
||||
case "unix":
|
||||
return u, nil
|
||||
case "http", "https", "tcp":
|
||||
_, port, err := net.SplitHostPort(u.Host)
|
||||
if err != nil {
|
||||
if e, ok := err.(*net.AddrError); ok {
|
||||
if e.Err == "missing port in address" {
|
||||
return u, nil
|
||||
}
|
||||
}
|
||||
return nil, ErrInvalidEndpoint
|
||||
}
|
||||
number, err := strconv.ParseInt(port, 10, 64)
|
||||
if err == nil && number > 0 && number < 65536 {
|
||||
if u.Scheme == "tcp" {
|
||||
if tls {
|
||||
u.Scheme = "https"
|
||||
} else {
|
||||
u.Scheme = "http"
|
||||
}
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
return nil, ErrInvalidEndpoint
|
||||
default:
|
||||
return nil, ErrInvalidEndpoint
|
||||
}
|
||||
}
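// Illustrative behavior (summary comment, not in the original source):
//
//	parseEndpoint("localhost:2375", false)              // -> http://localhost:2375 (tcp:// assumed, then mapped to http)
//	parseEndpoint("tcp://localhost:2376", true)         // -> https://localhost:2376
//	parseEndpoint("unix:///var/run/docker.sock", false) // returned unchanged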
|
||||
|
||||
type dockerEnv struct {
|
||||
dockerHost string
|
||||
dockerTLSVerify bool
|
||||
dockerCertPath string
|
||||
}
|
||||
|
||||
func getDockerEnv() (*dockerEnv, error) {
|
||||
dockerHost := os.Getenv("DOCKER_HOST")
|
||||
var err error
|
||||
if dockerHost == "" {
|
||||
dockerHost, err = DefaultDockerHost()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != ""
|
||||
var dockerCertPath string
|
||||
if dockerTLSVerify {
|
||||
dockerCertPath = os.Getenv("DOCKER_CERT_PATH")
|
||||
if dockerCertPath == "" {
|
||||
home := homedir.Get()
|
||||
if home == "" {
|
||||
return nil, errors.New("environment variable HOME must be set if DOCKER_CERT_PATH is not set")
|
||||
}
|
||||
dockerCertPath = filepath.Join(home, ".docker")
|
||||
dockerCertPath, err = filepath.Abs(dockerCertPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return &dockerEnv{
|
||||
dockerHost: dockerHost,
|
||||
dockerTLSVerify: dockerTLSVerify,
|
||||
dockerCertPath: dockerCertPath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DefaultDockerHost returns the default docker socket for the current OS
|
||||
func DefaultDockerHost() (string, error) {
|
||||
var defaultHost string
|
||||
if runtime.GOOS == "windows" {
|
||||
// If we do not have a host, default to TCP socket on Windows
|
||||
defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort)
|
||||
} else {
|
||||
// If we do not have a host, default to unix socket
|
||||
defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
|
||||
}
|
||||
return opts.ValidateHost(defaultHost)
|
||||
}
|
File diff suppressed because it is too large
|
@ -0,0 +1,168 @@
|
|||
// Copyright 2014 Docker authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the DOCKER-LICENSE file.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Env represents a list of key-value pairs, each in the form KEY=VALUE.
|
||||
type Env []string
|
||||
|
||||
// Get returns the string value of the given key.
|
||||
func (env *Env) Get(key string) (value string) {
|
||||
return env.Map()[key]
|
||||
}
|
||||
|
||||
// Exists checks whether the given key is defined in the internal Env
|
||||
// representation.
|
||||
func (env *Env) Exists(key string) bool {
|
||||
_, exists := env.Map()[key]
|
||||
return exists
|
||||
}
|
||||
|
||||
// GetBool returns a boolean representation of the given key. The key is false
|
||||
// whenever its value is 0, no, false, none or an empty string. Any other value
|
||||
// will be interpreted as true.
|
||||
func (env *Env) GetBool(key string) (value bool) {
|
||||
s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
|
||||
if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// SetBool sets a boolean value for the given key.
|
||||
func (env *Env) SetBool(key string, value bool) {
|
||||
if value {
|
||||
env.Set(key, "1")
|
||||
} else {
|
||||
env.Set(key, "0")
|
||||
}
|
||||
}
|
||||
|
||||
// GetInt returns the value of the provided key, converted to int.
|
||||
//
|
||||
// If the value cannot be represented as an integer, it returns -1.
|
||||
func (env *Env) GetInt(key string) int {
|
||||
return int(env.GetInt64(key))
|
||||
}
|
||||
|
||||
// SetInt sets an integer value for the given key.
|
||||
func (env *Env) SetInt(key string, value int) {
|
||||
env.Set(key, strconv.Itoa(value))
|
||||
}
|
||||
|
||||
// GetInt64 returns the value of the provided key, converted to int64.
|
||||
//
|
||||
// If the value cannot be represented as an integer, it returns -1.
|
||||
func (env *Env) GetInt64(key string) int64 {
|
||||
s := strings.Trim(env.Get(key), " \t")
|
||||
val, err := strconv.ParseInt(s, 10, 64)
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// SetInt64 sets a 64-bit integer value for the given key.
|
||||
func (env *Env) SetInt64(key string, value int64) {
|
||||
env.Set(key, strconv.FormatInt(value, 10))
|
||||
}
|
||||
|
||||
// GetJSON unmarshals the value of the provided key in the provided iface.
|
||||
//
|
||||
// iface is a value that can be provided to the json.Unmarshal function.
|
||||
func (env *Env) GetJSON(key string, iface interface{}) error {
|
||||
sval := env.Get(key)
|
||||
if sval == "" {
|
||||
return nil
|
||||
}
|
||||
return json.Unmarshal([]byte(sval), iface)
|
||||
}
|
||||
|
||||
// SetJSON marshals the given value to JSON format and stores it using the
|
||||
// provided key.
|
||||
func (env *Env) SetJSON(key string, value interface{}) error {
|
||||
sval, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
env.Set(key, string(sval))
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetList returns a list of strings matching the provided key. It handles the
|
||||
// list as a JSON representation of a list of strings.
|
||||
//
|
||||
// If the given key matches a single string, it will return a list
|
||||
// containing only the value that matches the key.
|
||||
func (env *Env) GetList(key string) []string {
|
||||
sval := env.Get(key)
|
||||
if sval == "" {
|
||||
return nil
|
||||
}
|
||||
var l []string
|
||||
if err := json.Unmarshal([]byte(sval), &l); err != nil {
|
||||
l = append(l, sval)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// SetList stores the given list in the provided key, after serializing it to
|
||||
// JSON format.
|
||||
func (env *Env) SetList(key string, value []string) error {
|
||||
return env.SetJSON(key, value)
|
||||
}
|
||||
|
||||
// Set defines the value of a key to the given string.
|
||||
func (env *Env) Set(key, value string) {
|
||||
*env = append(*env, key+"="+value)
|
||||
}
|
||||
|
||||
// Decode decodes `src` as a json dictionary, and adds each decoded key-value
|
||||
// pair to the environment.
|
||||
//
|
||||
// If `src` cannot be decoded as a json dictionary, an error is returned.
|
||||
func (env *Env) Decode(src io.Reader) error {
|
||||
m := make(map[string]interface{})
|
||||
if err := json.NewDecoder(src).Decode(&m); err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range m {
|
||||
env.SetAuto(k, v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetAuto will try to define the Set* method to call based on the given value.
|
||||
func (env *Env) SetAuto(key string, value interface{}) {
|
||||
if fval, ok := value.(float64); ok {
|
||||
env.SetInt64(key, int64(fval))
|
||||
} else if sval, ok := value.(string); ok {
|
||||
env.Set(key, sval)
|
||||
} else if val, err := json.Marshal(value); err == nil {
|
||||
env.Set(key, string(val))
|
||||
} else {
|
||||
env.Set(key, fmt.Sprintf("%v", value))
|
||||
}
|
||||
}
|
||||
|
||||
// Map returns the map representation of the env.
|
||||
func (env *Env) Map() map[string]string {
|
||||
if len(*env) == 0 {
|
||||
return nil
|
||||
}
|
||||
m := make(map[string]string)
|
||||
for _, kv := range *env {
|
||||
parts := strings.SplitN(kv, "=", 2)
|
||||
m[parts[0]] = parts[1]
|
||||
}
|
||||
return m
|
||||
}
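Not part of the diff, but for orientation: a minimal sketch of how this vendored Env type is typically driven. It assumes the library is imported under its upstream path github.com/fsouza/go-dockerclient (inside Consul it lives under vendor/), and all key names below are made up for illustration.

```go
package main

import (
	"fmt"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	// Env is just a []string of KEY=VALUE entries, so it can be built literally.
	env := docker.Env{"PATH=/usr/bin"}

	env.Set("DEBUG", "1")              // appends "DEBUG=1"
	fmt.Println(env.Get("DEBUG"))      // "1" (looked up via Map())
	fmt.Println(env.GetBool("DEBUG"))  // true: "1" is not in the falsy set
	fmt.Println(env.GetInt("MISSING")) // -1: absent/unparseable values map to -1
	fmt.Println(env.Exists("PATH"))    // true
}
```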
@@ -0,0 +1,304 @@
// Copyright 2015 go-dockerclient authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package docker

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math"
	"net"
	"net/http"
	"net/http/httputil"
	"sync"
	"sync/atomic"
	"time"
)

// APIEvents represents an event returned by the API.
type APIEvents struct {
	Status string `json:"Status,omitempty" yaml:"Status,omitempty"`
	ID     string `json:"ID,omitempty" yaml:"ID,omitempty"`
	From   string `json:"From,omitempty" yaml:"From,omitempty"`
	Time   int64  `json:"Time,omitempty" yaml:"Time,omitempty"`
}

type eventMonitoringState struct {
	sync.RWMutex
	sync.WaitGroup
	enabled   bool
	lastSeen  *int64
	C         chan *APIEvents
	errC      chan error
	listeners []chan<- *APIEvents
}

const (
	maxMonitorConnRetries = 5
	retryInitialWaitTime  = 10.
)

var (
	// ErrNoListeners is the error returned when no listeners are available
	// to receive an event.
	ErrNoListeners = errors.New("no listeners present to receive event")

	// ErrListenerAlreadyExists is the error returned when the listener already
	// exists.
	ErrListenerAlreadyExists = errors.New("listener already exists for docker events")

	// EOFEvent is sent when the event listener receives an EOF error.
	EOFEvent = &APIEvents{
		Status: "EOF",
	}
)

// AddEventListener adds a new listener to container events in the Docker API.
//
// The parameter is a channel through which events will be sent.
func (c *Client) AddEventListener(listener chan<- *APIEvents) error {
	var err error
	if !c.eventMonitor.isEnabled() {
		err = c.eventMonitor.enableEventMonitoring(c)
		if err != nil {
			return err
		}
	}
	err = c.eventMonitor.addListener(listener)
	if err != nil {
		return err
	}
	return nil
}

// RemoveEventListener removes a listener from the monitor.
func (c *Client) RemoveEventListener(listener chan *APIEvents) error {
	err := c.eventMonitor.removeListener(listener)
	if err != nil {
		return err
	}
	if len(c.eventMonitor.listeners) == 0 {
		c.eventMonitor.disableEventMonitoring()
	}
	return nil
}

func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error {
	eventState.Lock()
	defer eventState.Unlock()
	if listenerExists(listener, &eventState.listeners) {
		return ErrListenerAlreadyExists
	}
	eventState.Add(1)
	eventState.listeners = append(eventState.listeners, listener)
	return nil
}

func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error {
	eventState.Lock()
	defer eventState.Unlock()
	if listenerExists(listener, &eventState.listeners) {
		var newListeners []chan<- *APIEvents
		for _, l := range eventState.listeners {
			if l != listener {
				newListeners = append(newListeners, l)
			}
		}
		eventState.listeners = newListeners
		eventState.Add(-1)
	}
	return nil
}

func (eventState *eventMonitoringState) closeListeners() {
	for _, l := range eventState.listeners {
		close(l)
		eventState.Add(-1)
	}
	eventState.listeners = nil
}

func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool {
	for _, b := range *list {
		if b == a {
			return true
		}
	}
	return false
}

func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error {
	eventState.Lock()
	defer eventState.Unlock()
	if !eventState.enabled {
		eventState.enabled = true
		var lastSeenDefault = int64(0)
		eventState.lastSeen = &lastSeenDefault
		eventState.C = make(chan *APIEvents, 100)
		eventState.errC = make(chan error, 1)
		go eventState.monitorEvents(c)
	}
	return nil
}

func (eventState *eventMonitoringState) disableEventMonitoring() error {
	eventState.Lock()
	defer eventState.Unlock()

	eventState.closeListeners()

	eventState.Wait()

	if eventState.enabled {
		eventState.enabled = false
		close(eventState.C)
		close(eventState.errC)
	}
	return nil
}

func (eventState *eventMonitoringState) monitorEvents(c *Client) {
	var err error
	for eventState.noListeners() {
		time.Sleep(10 * time.Millisecond)
	}
	if err = eventState.connectWithRetry(c); err != nil {
		// terminate if connect failed
		eventState.disableEventMonitoring()
		return
	}
	for eventState.isEnabled() {
		timeout := time.After(100 * time.Millisecond)
		select {
		case ev, ok := <-eventState.C:
			if !ok {
				return
			}
			if ev == EOFEvent {
				eventState.disableEventMonitoring()
				return
			}
			eventState.updateLastSeen(ev)
			go eventState.sendEvent(ev)
		case err = <-eventState.errC:
			if err == ErrNoListeners {
				eventState.disableEventMonitoring()
				return
			} else if err != nil {
				defer func() { go eventState.monitorEvents(c) }()
				return
			}
		case <-timeout:
			continue
		}
	}
}

func (eventState *eventMonitoringState) connectWithRetry(c *Client) error {
	var retries int
	var err error
	for err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC); err != nil && retries < maxMonitorConnRetries; retries++ {
		waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries)))
		time.Sleep(time.Duration(waitTime) * time.Millisecond)
		err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC)
	}
	return err
}

func (eventState *eventMonitoringState) noListeners() bool {
	eventState.RLock()
	defer eventState.RUnlock()
	return len(eventState.listeners) == 0
}

func (eventState *eventMonitoringState) isEnabled() bool {
	eventState.RLock()
	defer eventState.RUnlock()
	return eventState.enabled
}

func (eventState *eventMonitoringState) sendEvent(event *APIEvents) {
	eventState.RLock()
	defer eventState.RUnlock()
	eventState.Add(1)
	defer eventState.Done()
	if eventState.enabled {
		if len(eventState.listeners) == 0 {
			eventState.errC <- ErrNoListeners
			return
		}

		for _, listener := range eventState.listeners {
			listener <- event
		}
	}
}

func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) {
	eventState.Lock()
	defer eventState.Unlock()
	if atomic.LoadInt64(eventState.lastSeen) < e.Time {
		atomic.StoreInt64(eventState.lastSeen, e.Time)
	}
}

// eventHijack opens a streaming connection to the /events endpoint and feeds
// decoded events into eventChan until the stream ends or monitoring is disabled.
func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error {
	uri := "/events"
	if startTime != 0 {
		uri += fmt.Sprintf("?since=%d", startTime)
	}
	protocol := c.endpointURL.Scheme
	address := c.endpointURL.Path
	if protocol != "unix" {
		protocol = "tcp"
		address = c.endpointURL.Host
	}
	var dial net.Conn
	var err error
	if c.TLSConfig == nil {
		dial, err = c.Dialer.Dial(protocol, address)
	} else {
		dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig)
	}
	if err != nil {
		return err
	}
	conn := httputil.NewClientConn(dial, nil)
	req, err := http.NewRequest("GET", uri, nil)
	if err != nil {
		return err
	}
	res, err := conn.Do(req)
	if err != nil {
		return err
	}
	go func(res *http.Response, conn *httputil.ClientConn) {
		defer conn.Close()
		defer res.Body.Close()
		decoder := json.NewDecoder(res.Body)
		for {
			var event APIEvents
			if err = decoder.Decode(&event); err != nil {
				if err == io.EOF || err == io.ErrUnexpectedEOF {
					if c.eventMonitor.isEnabled() {
						// Signal that we're exiting.
						eventChan <- EOFEvent
					}
					break
				}
				errChan <- err
			}
			if event.Time == 0 {
				continue
			}
			if !c.eventMonitor.isEnabled() {
				return
			}
			eventChan <- &event
		}
	}(res, conn)
	return nil
}
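Again outside the diff itself: a hedged sketch of the listener API above in use. The daemon endpoint and the channel buffer size are assumptions; NewClient and the types come from the same vendored package.

```go
package main

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	// Assumes a local Docker daemon on the default unix socket.
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}

	events := make(chan *docker.APIEvents, 10)
	// Adding the same channel twice would return ErrListenerAlreadyExists.
	if err := client.AddEventListener(events); err != nil {
		log.Fatal(err)
	}
	defer client.RemoveEventListener(events)

	for ev := range events {
		// The monitor injects EOFEvent (Status "EOF") when the stream drops.
		log.Printf("status=%s id=%s from=%s", ev.Status, ev.ID, ev.From)
	}
}
```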
@@ -0,0 +1,202 @@
// Copyright 2015 go-dockerclient authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package docker

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strconv"
)

// Exec is the type representing a `docker exec` instance; it carries the
// instance ID.
type Exec struct {
	ID string `json:"Id,omitempty" yaml:"Id,omitempty"`
}

// CreateExecOptions specify parameters to the CreateExec function.
//
// See https://goo.gl/1KSIb7 for more details
type CreateExecOptions struct {
	AttachStdin  bool     `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
	AttachStdout bool     `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
	AttachStderr bool     `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
	Tty          bool     `json:"Tty,omitempty" yaml:"Tty,omitempty"`
	Cmd          []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty"`
	Container    string   `json:"Container,omitempty" yaml:"Container,omitempty"`
	User         string   `json:"User,omitempty" yaml:"User,omitempty"`
}

// CreateExec sets up an exec instance in a running container `id`, returning the exec
// instance, or an error in case of failure.
//
// See https://goo.gl/1KSIb7 for more details
func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) {
	path := fmt.Sprintf("/containers/%s/exec", opts.Container)
	resp, err := c.do("POST", path, doOptions{data: opts})
	if err != nil {
		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
			return nil, &NoSuchContainer{ID: opts.Container}
		}
		return nil, err
	}
	defer resp.Body.Close()
	var exec Exec
	if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil {
		return nil, err
	}

	return &exec, nil
}

// StartExecOptions specify parameters to the StartExec function.
//
// See https://goo.gl/iQCnto for more details
type StartExecOptions struct {
	Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty"`

	Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`

	InputStream  io.Reader `qs:"-"`
	OutputStream io.Writer `qs:"-"`
	ErrorStream  io.Writer `qs:"-"`

	// Use raw terminal? Usually true when the container contains a TTY.
	RawTerminal bool `qs:"-"`

	// If set, after a successful connect, a sentinel will be sent and then the
	// client will block on receive before continuing.
	//
	// It must be an unbuffered channel. Using a buffered channel can lead
	// to unexpected behavior.
	Success chan struct{} `json:"-"`
}

// StartExec starts a previously set up exec instance id. If opts.Detach is
// true, it returns after starting the exec command. Otherwise, it sets up an
// interactive session with the exec command.
//
// See https://goo.gl/iQCnto for more details
func (c *Client) StartExec(id string, opts StartExecOptions) error {
	cw, err := c.StartExecNonBlocking(id, opts)
	if err != nil {
		return err
	}
	if cw != nil {
		return cw.Wait()
	}
	return nil
}

// StartExecNonBlocking starts a previously set up exec instance id. If opts.Detach is
// true, it returns after starting the exec command. Otherwise, it sets up an
// interactive session with the exec command.
//
// See https://goo.gl/iQCnto for more details
func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWaiter, error) {
	if id == "" {
		return nil, &NoSuchExec{ID: id}
	}

	path := fmt.Sprintf("/exec/%s/start", id)

	if opts.Detach {
		resp, err := c.do("POST", path, doOptions{data: opts})
		if err != nil {
			if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
				return nil, &NoSuchExec{ID: id}
			}
			return nil, err
		}
		defer resp.Body.Close()
		return nil, nil
	}

	return c.hijack("POST", path, hijackOptions{
		success:        opts.Success,
		setRawTerminal: opts.RawTerminal,
		in:             opts.InputStream,
		stdout:         opts.OutputStream,
		stderr:         opts.ErrorStream,
		data:           opts,
	})
}

// ResizeExecTTY resizes the tty session used by the exec command id. This API
// is valid only if Tty was specified as part of creating and starting the exec
// command.
//
// See https://goo.gl/e1JpsA for more details
func (c *Client) ResizeExecTTY(id string, height, width int) error {
	params := make(url.Values)
	params.Set("h", strconv.Itoa(height))
	params.Set("w", strconv.Itoa(width))

	path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode())
	resp, err := c.do("POST", path, doOptions{})
	if err != nil {
		return err
	}
	resp.Body.Close()
	return nil
}

// ExecProcessConfig is a type describing the command associated with an Exec
// instance. It's used in the ExecInspect type.
type ExecProcessConfig struct {
	Privileged bool     `json:"privileged,omitempty" yaml:"privileged,omitempty"`
	User       string   `json:"user,omitempty" yaml:"user,omitempty"`
	Tty        bool     `json:"tty,omitempty" yaml:"tty,omitempty"`
	EntryPoint string   `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty"`
	Arguments  []string `json:"arguments,omitempty" yaml:"arguments,omitempty"`
}

// ExecInspect is a type with details about an exec instance, including its
// exit code if the command has finished running. It's returned by an API
// call to /exec/(id)/json
//
// See https://goo.gl/gPtX9R for more details
type ExecInspect struct {
	ID            string            `json:"ID,omitempty" yaml:"ID,omitempty"`
	Running       bool              `json:"Running,omitempty" yaml:"Running,omitempty"`
	ExitCode      int               `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"`
	OpenStdin     bool              `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
	OpenStderr    bool              `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty"`
	OpenStdout    bool              `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty"`
	ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty"`
	Container     Container         `json:"Container,omitempty" yaml:"Container,omitempty"`
}

// InspectExec returns low-level information about the exec command id.
//
// See https://goo.gl/gPtX9R for more details
func (c *Client) InspectExec(id string) (*ExecInspect, error) {
	path := fmt.Sprintf("/exec/%s/json", id)
	resp, err := c.do("GET", path, doOptions{})
	if err != nil {
		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
			return nil, &NoSuchExec{ID: id}
		}
		return nil, err
	}
	defer resp.Body.Close()
	var exec ExecInspect
	if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil {
		return nil, err
	}
	return &exec, nil
}

// NoSuchExec is the error returned when a given exec instance does not exist.
type NoSuchExec struct {
	ID string
}

func (err *NoSuchExec) Error() string {
	return "No such exec instance: " + err.ID
}
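One more illustration that is not part of the change itself: a sketch wiring CreateExec, StartExec, and InspectExec together, per the code above. The container name, command, and socket path are placeholders.

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}

	// Set up the exec instance; an unknown ID yields *NoSuchContainer.
	exec, err := client.CreateExec(docker.CreateExecOptions{
		Container:    "my-container", // hypothetical container ID
		Cmd:          []string{"uname", "-a"},
		AttachStdout: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Run it to completion, capturing stdout.
	var stdout bytes.Buffer
	if err := client.StartExec(exec.ID, docker.StartExecOptions{
		OutputStream: &stdout,
	}); err != nil {
		log.Fatal(err)
	}

	// Inspect for the exit code once the command has finished.
	inspect, err := client.InspectExec(exec.ID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("exit=%d output=%q\n", inspect.ExitCode, stdout.String())
}
```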
55 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md generated vendored Normal file
@@ -0,0 +1,55 @@
# 0.9.0 (Unreleased)

* logrus/text_formatter: don't emit empty msg
* logrus/hooks/airbrake: move out of main repository
* logrus/hooks/sentry: move out of main repository
* logrus/hooks/papertrail: move out of main repository
* logrus/hooks/bugsnag: move out of main repository

# 0.8.7

* logrus/core: fix possible race (#216)
* logrus/doc: small typo fixes and doc improvements

# 0.8.6

* hooks/raven: allow passing an initialized client

# 0.8.5

* logrus/core: revert #208

# 0.8.4

* formatter/text: fix data race (#218)

# 0.8.3

* logrus/core: fix entry log level (#208)
* logrus/core: improve performance of text formatter by 40%
* logrus/core: expose `LevelHooks` type
* logrus/core: add support for DragonflyBSD and NetBSD
* formatter/text: print structs more verbosely

# 0.8.2

* logrus: fix more Fatal family functions

# 0.8.1

* logrus: fix not exiting on `Fatalf` and `Fatalln`

# 0.8.0

* logrus: defaults to stderr instead of stdout
* hooks/sentry: add special field for `*http.Request`
* formatter/text: ignore Windows for colors

# 0.7.3

* formatter/\*: allow configuration of timestamp layout

# 0.7.2

* formatter/text: Add configuration option for time format (#158)
21 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE generated vendored Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Simon Eskildsen

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
365 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md generated vendored Normal file
@@ -0,0 +1,365 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]

Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
many large deployments. The core API is unlikely to change much but please
version control your Logrus to make sure you aren't fetching latest `master` on
every build.**

Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):

![Colored](http://i.imgur.com/PY7qMwd.png)

With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
or Splunk:

```json
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}

{"level":"warning","msg":"The group's number increased tremendously!",
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}

{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}

{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}

{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```

With the default `log.Formatter = new(log.TextFormatter)` when a TTY is not
attached, the output is compatible with the
[logfmt](http://godoc.org/github.com/kr/logfmt) format:

```text
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
exit status 1
```

#### Example

The simplest way to use Logrus is simply the package-level exported logger:

```go
package main

import (
  log "github.com/Sirupsen/logrus"
)

func main() {
  log.WithFields(log.Fields{
    "animal": "walrus",
  }).Info("A walrus appears")
}
```

Note that it's completely API-compatible with the stdlib logger, so you can
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
and you'll now have the flexibility of Logrus. You can customize it all you
want:

```go
package main

import (
  "os"
  log "github.com/Sirupsen/logrus"
)

func init() {
  // Log as JSON instead of the default ASCII formatter.
  log.SetFormatter(&log.JSONFormatter{})

  // Output to stderr instead of stdout, could also be a file.
  log.SetOutput(os.Stderr)

  // Only log the warning severity or above.
  log.SetLevel(log.WarnLevel)
}

func main() {
  log.WithFields(log.Fields{
    "animal": "walrus",
    "size":   10,
  }).Info("A group of walrus emerges from the ocean")

  log.WithFields(log.Fields{
    "omg":    true,
    "number": 122,
  }).Warn("The group's number increased tremendously!")

  log.WithFields(log.Fields{
    "omg":    true,
    "number": 100,
  }).Fatal("The ice breaks!")

  // A common pattern is to re-use fields between logging statements by re-using
  // the logrus.Entry returned from WithFields()
  contextLogger := log.WithFields(log.Fields{
    "common": "this is a common field",
    "other":  "I also should be logged always",
  })

  contextLogger.Info("I'll be logged with common and other field")
  contextLogger.Info("Me too")
}
```

For more advanced usage such as logging to multiple locations from the same
application, you can also create an instance of the `logrus` Logger:

```go
package main

import (
  "os"

  "github.com/Sirupsen/logrus"
)

// Create a new instance of the logger. You can have any number of instances.
var log = logrus.New()

func main() {
  // The API for setting attributes is a little different than the package level
  // exported logger. See Godoc.
  log.Out = os.Stderr

  log.WithFields(logrus.Fields{
    "animal": "walrus",
    "size":   10,
  }).Info("A group of walrus emerges from the ocean")
}
```

#### Fields

Logrus encourages careful, structured logging through logging fields instead of
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:

```go
log.WithFields(log.Fields{
  "event": event,
  "topic": topic,
  "key":   key,
}).Fatal("Failed to send event")
```

We've found this API forces you to think about logging in a way that produces
much more useful logging messages. We've been in countless situations where just
a single added field to a log statement that was already there would've saved us
hours. The `WithFields` call is optional.

In general, with Logrus using any of the `printf`-family functions should be
seen as a hint you should add a field; however, you can still use the
`printf`-family functions with Logrus.

#### Hooks

You can add hooks for logging levels. For example, to send errors to an exception
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD, or log to
multiple places simultaneously, e.g. syslog.

Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
`init`:

```go
import (
  log "github.com/Sirupsen/logrus"
  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
  logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
  "log/syslog"
)

func init() {

  // Use the Airbrake hook to report errors that have Error severity or above to
  // an exception tracker. You can create custom hooks, see the Hooks section.
  log.AddHook(airbrake.NewHook(123, "xyz", "production"))

  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
  if err != nil {
    log.Error("Unable to connect to local syslog daemon")
  } else {
    log.AddHook(hook)
  }
}
```

Note: the syslog hook also supports connecting to a local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).

| Hook | Description |
| ----- | ----------- |
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |

#### Level logging

Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.

```go
log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.")
log.Error("Something failed but I'm not quitting.")
// Calls os.Exit(1) after logging
log.Fatal("Bye.")
// Calls panic() after logging
log.Panic("I'm bailing.")
```

You can set the logging level on a `Logger`; it will then only log entries with
that severity or anything above it:

```go
// Will log anything that is info or above (warn, error, fatal, panic). Default.
log.SetLevel(log.InfoLevel)
```

It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.

#### Entries

Besides the fields added with `WithField` or `WithFields`, some fields are
automatically added to all logging events:

1. `time`. The timestamp when the entry was created.
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
   the `WithFields` call. E.g. `Failed to send event.`
3. `level`. The logging level. E.g. `info`.

#### Environments

Logrus has no notion of environment.

If you wish for hooks and formatters to only be used in specific environments,
you should handle that yourself. For example, if your application has a global
variable `Environment`, which is a string representation of the environment, you
could do:

```go
import (
  log "github.com/Sirupsen/logrus"
)

func init() {
  // do something here to set environment depending on an environment variable
  // or command-line flag
  if Environment == "production" {
    log.SetFormatter(&log.JSONFormatter{})
  } else {
    // The TextFormatter is default, you don't actually have to do this.
    log.SetFormatter(&log.TextFormatter{})
  }
}
```

This configuration is how `logrus` was intended to be used, but JSON in
production is mostly only useful if you do log aggregation with tools like
Splunk or Logstash.

#### Formatters

The built-in logging formatters are:

* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
  without colors.
  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
    field to `true`. To force no colored output even if there is a TTY, set the
    `DisableColors` field to `true`.
* `logrus.JSONFormatter`. Logs fields as JSON.
* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events.

```go
logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"})
```

Third party logging formatters:

* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.

You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
`Fields` type (`map[string]interface{}`) with all your fields as well as the
default ones (see the Entries section above):

```go
type MyJSONFormatter struct {
}

log.SetFormatter(new(MyJSONFormatter))

func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
  // Note this doesn't include Time, Level and Message which are available on
  // the Entry. Consult `godoc` on information about those fields or read the
  // source of the official loggers.
  serialized, err := json.Marshal(entry.Data)
  if err != nil {
    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
  }
  return append(serialized, '\n'), nil
}
```

#### Logger as an `io.Writer`

Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.

```go
w := logger.Writer()
defer w.Close()

srv := http.Server{
  // create a stdlib log.Logger that writes to
  // logrus.Logger.
  ErrorLog: log.New(w, "", 0),
}
```

Each line written to that writer will be printed the usual way, using formatters
and hooks. The level for those entries is `info`.

#### Rotation

Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.

#### Tools

| Tool | Description |
| ---- | ----------- |
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers: you can configure a logger's level, hooks, and formatter from a config file, generating loggers with different configs per environment.|

[godoc]: https://godoc.org/github.com/Sirupsen/logrus
26 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/doc.go generated vendored Normal file
@@ -0,0 +1,26 @@
/*
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.


The simplest way to use Logrus is simply the package-level exported logger:

  package main

  import (
    log "github.com/Sirupsen/logrus"
  )

  func main() {
    log.WithFields(log.Fields{
      "animal": "walrus",
      "number": 1,
      "size":   10,
    }).Info("A walrus appears")
  }

Output:
  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10

For a full guide visit https://github.com/Sirupsen/logrus
*/
package logrus
Some files were not shown because too many files have changed in this diff